text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 예측도 출력 데이터가 숫자인가 카테고리 값인가에 따라 회귀 분석(regression analysis)과 분류(classification)로 구분된다.
Step2: 분류
Step3: 클러스터링(Clustering)
Step4: 모사(Approximation)
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_digits
digits = load_digits()
plt.imshow(digits.images[0], interpolation='nearest');
plt.grid(False)
digits.images[0]
from sklearn.datasets import fetch_20newsgroups
news = fetch_20newsgroups()
print(news.data[0])
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(stop_words="english").fit(news.data[:100])
data = vec.transform(news.data[:100])
data
plt.imshow(data.toarray()[:, :200], interpolation='nearest');
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df["MEDV"] = boston.target
df.tail()
sns.pairplot(df[["MEDV", "RM", "AGE", "DIS"]]);
from sklearn.linear_model import LinearRegression
predicted = LinearRegression().fit(boston.data, boston.target).predict(boston.data)
plt.scatter(boston.target, predicted, c='r', s=20);
plt.xlabel("Target");
plt.ylabel("Predicted");
from sklearn.datasets import load_iris
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
sy = pd.Series(iris.target, dtype='category')
sy = sy.cat.rename_categories(iris.target_names)
df['species'] = sy
df.tail()
sns.pairplot(df, hue='species');
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
model = SVC(kernel='linear', C=1.0, random_state=0)
model.fit(X_train_std, y_train)
XX_min = X_train_std[:, 0].min() - 1; XX_max = X_train_std[:, 0].max() + 1;
YY_min = X_train_std[:, 1].min() - 1; YY_max = X_train_std[:, 1].max() + 1;
XX, YY = np.meshgrid(np.linspace(XX_min, XX_max, 1000), np.linspace(YY_min, YY_max, 1000))
ZZ = model.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)
cmap = mpl.colors.ListedColormap(sns.color_palette("Set2"))
plt.contourf(XX, YY, ZZ, cmap=cmap)
plt.scatter(X_train_std[y_train == 0, 0], X_train_std[y_train == 0, 1], c=cmap.colors[0], s=100)
plt.scatter(X_train_std[y_train == 1, 0], X_train_std[y_train == 1, 1], c=cmap.colors[2], s=100)
plt.scatter(X_train_std[y_train == 2, 0], X_train_std[y_train == 2, 1], c=cmap.colors[1], s=100)
plt.xlim(XX_min, XX_max)
plt.ylim(YY_min, YY_max)
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
X, labels_true = make_blobs(n_samples=750, centers=[[1, 1], [-1, -1], [1, -1]], cluster_std=0.4, random_state=0)
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
unique_labels = set(db.labels_)
f = plt.figure()
f.add_subplot(1,2,1)
plt.plot(X[:, 0], X[:, 1], 'o', markerfacecolor='k', markeredgecolor='k', markersize=10)
plt.title('Raw Data')
f.add_subplot(1,2,2)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1: col = 'k'
class_member_mask = (db.labels_ == k)
xy = X[class_member_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=10);
plt.title('Estimated number of clusters: %d' % n_clusters_);
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
n_colors = 64
china = load_sample_image("china.jpg")
china = np.array(china, dtype=np.float64) / 255
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
labels = kmeans.predict(image_array)
def recreate_image(codebook, labels, w, h):
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
print("{0:,} bytes -> {1:,} bytes : {2:5.2f}%".format(image_array.nbytes, labels.nbytes, float(labels.nbytes) / image_array.nbytes * 100.0))
f = plt.figure()
ax1 = f.add_subplot(1,2,1)
plt.axis('off')
plt.title('Original image (96,615 colors)')
ax1.imshow(china);
ax2 = f.add_subplot(1,2,2)
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
ax2.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Create Pipelines
Step3: Create Parameter Space
Step4: Conduct Parameter Optmization With Pipeline
Step5: Use Cross Validation To Evaluate Model
|
<ASSISTANT_TASK:>
Python Code:
# Import required packages
import numpy as np
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.preprocessing import StandardScaler
# Load the breast cancer data
dataset = datasets.load_breast_cancer()
# Create X from the dataset's features
X = dataset.data
# Create y from the dataset's output
y = dataset.target
# Create an scaler object
sc = StandardScaler()
# Create a pca object
pca = decomposition.PCA()
# Create a logistic regression object with an L2 penalty
logistic = linear_model.LogisticRegression()
# Create a pipeline of three steps. First, standardize the data.
# Second, tranform the data with PCA.
# Third, train a logistic regression on the data.
pipe = Pipeline(steps=[('sc', sc),
('pca', pca),
('logistic', logistic)])
# Create a list of a sequence of integers from 1 to 30 (the number of features in X + 1)
n_components = list(range(1,X.shape[1]+1,1))
# Create a list of values of the regularization parameter
C = np.logspace(-4, 4, 50)
# Create a list of options for the regularization penalty
penalty = ['l1', 'l2']
# Create a dictionary of all the parameter options
# Note has you can access the parameters of steps of a pipeline by using '__’
parameters = dict(pca__n_components=n_components,
logistic__C=C,
logistic__penalty=penalty)
# Create a grid search object
clf = GridSearchCV(pipe, parameters)
# Fit the grid search
clf.fit(X, y)
# View The Best Parameters
print('Best Penalty:', clf.best_estimator_.get_params()['logistic__penalty'])
print('Best C:', clf.best_estimator_.get_params()['logistic__C'])
print('Best Number Of Components:', clf.best_estimator_.get_params()['pca__n_components'])
# Fit the grid search using 3-Fold cross validation
cross_val_score(clf, X, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can put in some standard date formats. Pandas' will convert them accordingly.
Step2: We can also create relative time information
Step3: We can also do calculations with thos objects.
Step4: Time series
Step5: With this, we calculate with time in a similar way as above.
Step6: DateTimeProperties object
Step7: Let's take a look the some of the properties.
Step8: We can e.g. call the method day_name() on a date time series to get the name of the day for a date.
Step9: Timestamp Series
Step10: Note, if we import a dataset like this, the time data will be of a simple object data type.
Step11: So we have to convert that data first into a time-based data type with pandas' to_datetime() function.
Step12: Next, we want to see at whick hour of the day most changes were done. We can use the same strategies to get more detailed information like in the previous examples.
Step13: Let's simply count the number of changes per hour.
Step14: And create a little bar chart.
Step15: At the first glance, this looks pretty fine. But there is a problem
Step16: Now we are able to apply the resample function on it with the information that we want to group our data hourly. We also have to decided what we want to do with the
Step17: Display progressions
Step18: Grouping time and data
Step19: For this scenario, we also need some developers.
Step20: Let's add some artificial ones to the changes and also mark each change with a separate column.
Step21: OK, we want to group the changes per week per developer to find out the most active developer of the week (if this makes sense is up to you to find out ;-).
Step22: This give as a Dataframe which lists the number of changes per week for each developers. We sort this list to get a kind of "most active developer per week list"
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.Timestamp("today")
new_years_dinner = pd.Timestamp("2020-01-01 19:00")
new_years_dinner
time_needed_to_sober_up = pd.Timedelta("1 day")
time_needed_to_sober_up
completely_sober = new_years_dinner + time_needed_to_sober_up
completely_sober
dates = pd.DataFrame(
pd.date_range("2020-03-01", periods=5, freq="m"),
columns=["day"]
)
dates
dates["day_after_tomorrow"] = dates['day'] + pd.Timedelta("2 days")
dates
dt_properties = dates['day'].dt
dt_properties
# this code is just for demonstration purposes and not needed in an analysis
[x for x in dir(dt_properties) if not x.startswith("_")]
dt_properties.day_name()
change_log = pd.read_csv("datasets/change_history.csv")
change_log.head()
change_log.info()
change_log['timestamp'] = pd.to_datetime(change_log['timestamp'])
change_log.info()
change_log['hour'] = change_log['timestamp'].dt.hour
change_log.head()
changes_per_hour = change_log['hour'].value_counts(sort=False)
changes_per_hour.head()
changes_per_hour.plot.bar();
change_log = pd.read_csv("datasets/change_history.csv", parse_dates=[0], index_col=0)
change_log.head()
change_log['changes'] = 1
change_log.head()
hourly_changes = change_log.resample("h").count()
hourly_changes.head()
hourly_changes['hour'] = hourly_changes.index.hour
hourly_changes.head()
changes_per_hour = hourly_changes.groupby("hour").sum()
changes_per_hour.head()
changes_per_hour.plot.bar();
hourly_changes.head()
accumulated_changes = hourly_changes[['changes']].cumsum()
accumulated_changes.head()
accumulated_changes.plot();
change_log = pd.read_csv("datasets/change_history.csv", parse_dates=[0])
change_log.head()
devs = pd.Series(["Alice", "Bob", "John", "Steve", "Yvonne"])
devs
change_log['dev'] = devs.sample(len(change_log), replace=True).values
change_log['changes'] = 1
change_log.head()
weekly_changes_per_dev = \
change_log.groupby([
pd.Grouper(key='timestamp', freq='w'),
'dev']) \
.sum()
weekly_changes_per_dev.head()
weekly_changes_per_dev.sort_values(
by=['timestamp', 'changes'],
ascending=[True, False])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hasta ahora, hemos trabajado con vectores, aplicándolos a la resolución de la ecuación de convección en 1-D. Vamos a ver ahora cómo resolver la ecuación de difusión y cómo aplicar matrices y operaciones de álgebra lineal para resolver un esquema implicito.
Step2: Calcula ahora la matriz traspuesta y la matriz inversa de la matriz D
Step3: Ejercicio 2
Step4: Hasta ahora, no nos hemos preocupado de las condiciones de contorno. Pero como veis, estas tienen gran importancia en el resultado final. Las condiciones de contorno corresponden a los extremos de nuestra matriz D. Trata de implementar condiciones de periodicidad en este problema.
Step5: Pista
Step6: Reto
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt #Esta es otra forma de importar el submódulo pyplot!
#Igual de válida que la que hemos visto en clase
%matplotlib inline
# Introduce aquí tu código
# Introduce aquí tu código
# Introduce aquí tu código
# Introduce aquí tu código
# Introduce aquí tu código
# Introduce tu código aquí
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Processing
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(data_id)
data = np.random.randn(100)
analysis_dict = dict(a=np.mean, b=np.max, c=np.min)
result = analysis_dict[analysis_type](data)
print('Result of analysis "%s" on dataset %d is %.3f.' % (analysis_type, data_id, result))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Copyright 2017 Google LLC.
Step2: 2-Bar Drums Model
Step3: Generate Samples
Step4: Generate Interpolations
Step5: 2-Bar Melody Model
Step6: Generate Samples
Step7: Generate Interpolations
Step8: 16-bar Melody Models
Step9: Generate Samples
Step10: Generate Means
Step11: 16-bar "Trio" Models (lead, bass, drums)
Step12: Generate Samples
Step13: Generate Means
|
<ASSISTANT_TASK:>
Python Code:
#@title Setup Environment
#@test {"output": "ignore"}
import glob
BASE_DIR = "gs://download.magenta.tensorflow.org/models/music_vae/colab2"
print('Installing dependencies...')
!apt-get update -qq && apt-get install -qq libfluidsynth1 fluid-soundfont-gm build-essential libasound2-dev libjack-dev
!pip install -q pyfluidsynth
!pip install -qU magenta
# Hack to allow python to pick up the newly-installed fluidsynth lib.
# This is only needed for the hosted Colab environment.
import ctypes.util
orig_ctypes_util_find_library = ctypes.util.find_library
def proxy_find_library(lib):
if lib == 'fluidsynth':
return 'libfluidsynth.so.1'
else:
return orig_ctypes_util_find_library(lib)
ctypes.util.find_library = proxy_find_library
print('Importing libraries and defining some helper functions...')
from google.colab import files
import magenta.music as mm
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
import numpy as np
import os
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Necessary until pyfluidsynth is updated (>1.2.5).
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def play(note_sequence):
mm.play_sequence(note_sequence, synth=mm.fluidsynth)
def interpolate(model, start_seq, end_seq, num_steps, max_length=32,
assert_same_length=True, temperature=0.5,
individual_duration=4.0):
Interpolates between a start and end sequence.
note_sequences = model.interpolate(
start_seq, end_seq,num_steps=num_steps, length=max_length,
temperature=temperature,
assert_same_length=assert_same_length)
print('Start Seq Reconstruction')
play(note_sequences[0])
print('End Seq Reconstruction')
play(note_sequences[-1])
print('Mean Sequence')
play(note_sequences[num_steps // 2])
print('Start -> End Interpolation')
interp_seq = mm.sequences_lib.concatenate_sequences(
note_sequences, [individual_duration] * len(note_sequences))
play(interp_seq)
mm.plot_sequence(interp_seq)
return interp_seq if num_steps > 3 else note_sequences[num_steps // 2]
def download(note_sequence, filename):
mm.sequence_proto_to_midi_file(note_sequence, filename)
files.download(filename)
print('Done')
#@title Load Pretrained Models
drums_models = {}
# One-hot encoded.
drums_config = configs.CONFIG_MAP['cat-drums_2bar_small']
drums_models['drums_2bar_oh_lokl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/drums_2bar_small.lokl.ckpt')
drums_models['drums_2bar_oh_hikl'] = TrainedModel(drums_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/drums_2bar_small.hikl.ckpt')
# Multi-label NADE.
drums_nade_reduced_config = configs.CONFIG_MAP['nade-drums_2bar_reduced']
drums_models['drums_2bar_nade_reduced'] = TrainedModel(drums_nade_reduced_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/drums_2bar_nade.reduced.ckpt')
drums_nade_full_config = configs.CONFIG_MAP['nade-drums_2bar_full']
drums_models['drums_2bar_nade_full'] = TrainedModel(drums_nade_full_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/drums_2bar_nade.full.ckpt')
#@title Generate 4 samples from the prior of one of the models listed above.
drums_sample_model = "drums_2bar_oh_lokl" #@param ["drums_2bar_oh_lokl", "drums_2bar_oh_hikl", "drums_2bar_nade_reduced", "drums_2bar_nade_full"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
drums_samples = drums_models[drums_sample_model].sample(n=4, length=32, temperature=temperature)
for ns in drums_samples:
play(ns)
#@title Optionally download generated MIDI samples.
for i, ns in enumerate(drums_samples):
download(ns, '%s_sample_%d.mid' % (drums_sample_model, i))
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_drums_midi_data = [
tf.io.gfile.GFile(fn, mode='rb').read()
for fn in sorted(tf.io.gfile.glob(BASE_DIR + '/midi/drums_2bar*.mid'))]
#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_drums_midi_data = files.upload().values() or input_drums_midi_data
#@title Extract drums from MIDI files. This will extract all unique 2-bar drum beats using a sliding window with a stride of 1 bar.
drums_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_drums_midi_data]
extracted_beats = []
for ns in drums_input_seqs:
extracted_beats.extend(drums_nade_full_config.data_converter.from_tensors(
drums_nade_full_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_beats):
print("Beat", i)
play(ns)
#@title Interpolate between 2 beats, selected from those in the previous cell.
drums_interp_model = "drums_2bar_oh_hikl" #@param ["drums_2bar_oh_lokl", "drums_2bar_oh_hikl", "drums_2bar_nade_reduced", "drums_2bar_nade_full"]
start_beat = 0 #@param {type:"integer"}
end_beat = 1 #@param {type:"integer"}
start_beat = extracted_beats[start_beat]
end_beat = extracted_beats[end_beat]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
num_steps = 13 #@param {type:"integer"}
drums_interp = interpolate(drums_models[drums_interp_model], start_beat, end_beat, num_steps=num_steps, temperature=temperature)
#@title Optionally download interpolation MIDI file.
download(drums_interp, '%s_interp.mid' % drums_interp_model)
#@title Load the pre-trained model.
mel_2bar_config = configs.CONFIG_MAP['cat-mel_2bar_big']
mel_2bar = TrainedModel(mel_2bar_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/mel_2bar_big.ckpt')
#@title Generate 4 samples from the prior.
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_2_samples = mel_2bar.sample(n=4, length=32, temperature=temperature)
for ns in mel_2_samples:
play(ns)
#@title Optionally download samples.
for i, ns in enumerate(mel_2_samples):
download(ns, 'mel_2bar_sample_%d.mid' % i)
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_mel_midi_data = [
tf.io.gfile.GFile(fn, 'rb').read()
for fn in sorted(tf.io.gfile.glob(BASE_DIR + '/midi/mel_2bar*.mid'))]
#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_mel_midi_data = files.upload().values() or input_mel_midi_data
#@title Extract melodies from MIDI files. This will extract all unique 2-bar melodies using a sliding window with a stride of 1 bar.
mel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_midi_data]
extracted_mels = []
for ns in mel_input_seqs:
extracted_mels.extend(
mel_2bar_config.data_converter.from_tensors(
mel_2bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_mels):
print("Melody", i)
play(ns)
#@title Interpolate between 2 melodies, selected from those in the previous cell.
start_melody = 0 #@param {type:"integer"}
end_melody = 1 #@param {type:"integer"}
start_mel = extracted_mels[start_melody]
end_mel = extracted_mels[end_melody]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
num_steps = 13 #@param {type:"integer"}
mel_2bar_interp = interpolate(mel_2bar, start_mel, end_mel, num_steps=num_steps, temperature=temperature)
#@title Optionally download interpolation MIDI file.
download(mel_2bar_interp, 'mel_2bar_interp.mid')
#@title Load the pre-trained models.
mel_16bar_models = {}
hierdec_mel_16bar_config = configs.CONFIG_MAP['hierdec-mel_16bar']
mel_16bar_models['hierdec_mel_16bar'] = TrainedModel(hierdec_mel_16bar_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/mel_16bar_hierdec.ckpt')
flat_mel_16bar_config = configs.CONFIG_MAP['flat-mel_16bar']
mel_16bar_models['baseline_flat_mel_16bar'] = TrainedModel(flat_mel_16bar_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/mel_16bar_flat.ckpt')
#@title Generate 4 samples from the selected model prior.
mel_sample_model = "hierdec_mel_16bar" #@param ["hierdec_mel_16bar", "baseline_flat_mel_16bar"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_16_samples = mel_16bar_models[mel_sample_model].sample(n=4, length=256, temperature=temperature)
for ns in mel_16_samples:
play(ns)
#@title Optionally download MIDI samples.
for i, ns in enumerate(mel_16_samples):
download(ns, '%s_sample_%d.mid' % (mel_sample_model, i))
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_mel_16_midi_data = [
tf.io.gfile.GFile(fn, 'rb').read()
for fn in sorted(tf.io.gfile.glob(BASE_DIR + '/midi/mel_16bar*.mid'))]
#@title Option 2: upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_mel_16_midi_data = files.upload().values() or input_mel_16_midi_data
#@title Extract melodies from MIDI files. This will extract all unique 16-bar melodies using a sliding window with a stride of 1 bar.
mel_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_mel_16_midi_data]
extracted_16_mels = []
for ns in mel_input_seqs:
extracted_16_mels.extend(
hierdec_mel_16bar_config.data_converter.from_tensors(
hierdec_mel_16bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_16_mels):
print("Melody", i)
play(ns)
#@title Compute the reconstructions and mean of the two melodies, selected from the previous cell.
mel_interp_model = "hierdec_mel_16bar" #@param ["hierdec_mel_16bar", "baseline_flat_mel_16bar"]
start_melody = 0 #@param {type:"integer"}
end_melody = 1 #@param {type:"integer"}
start_mel = extracted_16_mels[start_melody]
end_mel = extracted_16_mels[end_melody]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
mel_16bar_mean = interpolate(mel_16bar_models[mel_interp_model], start_mel, end_mel, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)
#@title Optionally download mean MIDI file.
download(mel_16bar_mean, '%s_mean.mid' % mel_interp_model)
#@title Load the pre-trained models.
trio_models = {}
hierdec_trio_16bar_config = configs.CONFIG_MAP['hierdec-trio_16bar']
trio_models['hierdec_trio_16bar'] = TrainedModel(hierdec_trio_16bar_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/trio_16bar_hierdec.ckpt')
flat_trio_16bar_config = configs.CONFIG_MAP['flat-trio_16bar']
trio_models['baseline_flat_trio_16bar'] = TrainedModel(flat_trio_16bar_config, batch_size=4, checkpoint_dir_or_path=BASE_DIR + '/checkpoints/trio_16bar_flat.ckpt')
#@title Generate 4 samples from the selected model prior.
trio_sample_model = "hierdec_trio_16bar" #@param ["hierdec_trio_16bar", "baseline_flat_trio_16bar"]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
trio_16_samples = trio_models[trio_sample_model].sample(n=4, length=256, temperature=temperature)
for ns in trio_16_samples:
play(ns)
#@title Optionally download MIDI samples.
for i, ns in enumerate(trio_16_samples):
download(ns, '%s_sample_%d.mid' % (trio_sample_model, i))
#@title Option 1: Use example MIDI files for interpolation endpoints.
input_trio_midi_data = [
tf.io.gfile.GFile(fn, 'rb').read()
for fn in sorted(tf.io.gfile.glob(BASE_DIR + '/midi/trio_16bar*.mid'))]
#@title Option 2: Upload your own MIDI files to use for interpolation endpoints instead of those provided.
input_trio_midi_data = files.upload().values() or input_trio_midi_data
#@title Extract trios from MIDI files. This will extract all unique 16-bar trios using a sliding window with a stride of 1 bar.
trio_input_seqs = [mm.midi_to_sequence_proto(m) for m in input_trio_midi_data]
extracted_trios = []
for ns in trio_input_seqs:
extracted_trios.extend(
hierdec_trio_16bar_config.data_converter.from_tensors(
hierdec_trio_16bar_config.data_converter.to_tensors(ns)[1]))
for i, ns in enumerate(extracted_trios):
print("Trio", i)
play(ns)
#@title Compute the reconstructions and mean of the two trios, selected from the previous cell.
trio_interp_model = "hierdec_trio_16bar" #@param ["hierdec_trio_16bar", "baseline_flat_trio_16bar"]
start_trio = 0 #@param {type:"integer"}
end_trio = 1 #@param {type:"integer"}
start_trio = extracted_trios[start_trio]
end_trio = extracted_trios[end_trio]
temperature = 0.5 #@param {type:"slider", min:0.1, max:1.5, step:0.1}
trio_16bar_mean = interpolate(trio_models[trio_interp_model], start_trio, end_trio, num_steps=3, max_length=256, individual_duration=32, temperature=temperature)
#@title Optionally download mean MIDI file.
download(trio_16bar_mean, '%s_mean.mid' % trio_interp_model)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can do math - any output on the last line of the cell will print to the screen
Step2: You can print anything by passing it to the print function.
Step3: Save results to a variable. Variables work like a math equation. The variable name is on the left and whatever it is equal to is on the right.
Step4: We cab see the type of the output by passing it to the type function.
Step5: There are many types in python, but the main ones for values are
Step6: Exercise
Step7: Loops
Step8: We can also loop through two lists by using the zip command. More about zip here
Step9: Exercise
Step10: Functions
Step11: Exercise
Step12: Dictionaries
Step13: We can add to dictionaries by using the square brackets.
Step14: We can get values out of a dictionary (acessing) by using the square brackets again.
Step15: We are not limited to strings. Dictionaries can have many types as their keys, and have many types as their values.
Step16: Excercise
Step17: We can add arrays together - this will add each element of each array to the corresponding element in the other array. This is type of operation is called an 'element-wise' operation and can save you from having to write loops.
Step18: Numpy arrays can be multi-dimensional. Lets focus on 2D arrays which are used in illustris. Note
Step19: We can use indexing to get a certain row or column
Step20: We can compute statistics for the whole array or along different dimension.
Step21: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# This line is a comment -- it does nothing
# you can add comments using the '#' symbol
1+1
3*5 # this will not print
14 % 3 # modulo (remainder) operator - this will print
print(3*5)
print(2**4) # powers use the double star symbol
output = 1+1
type(output)
type(1.+1.2)
1.0+1.2
# we can compare numbers using comparison operators - these return a value of either True or False (boolean type)
1 > 2 # is one greater than two?
# and we can use 'strings' - text
poem = 'Spiral galaxy; Plane splashed across the night sky; Gives us perspective'
#We can collect together a bunch of numbers or strings into a list
favorite_primes = [1, 3, 5, 7, 11]
type(favorite_primes)
#and access them using square brackets
favorite_primes[0] # <-- '[0]' will select the first number
# [-1] will select the last number
favorite_primes[-1]
# we can also select a range of numbers
favorite_primes[::2] # <-- select every other element
favorite_primes[:2] # select the first two elements
favorite_primes[2:] # select from the 3rd element to the last element
# we can do things multiple times in a loop:
for prime in my_favorite_primes: # loop through and get each element of the list
print(prime, prime**2) # print each element and the square of that element
# for loops are one way to loop - while loops are another
# careful! while loops can sometimes loop forever - check that they have a stopping criteria
i = 0 # start at some value
while i < 10: # will loop until this condition evaluates to True
print(i)
i = i + 1
# lets first make a second list
favorite_largenumbers = [10, 300, 5e+5, 7000, 2**32] # note here that python ints can be very large with no problem
for large_number, prime in zip(favorite_largenumbers, favorite_primes):
print(large_number, prime)
# make a new list that has only four numbers
least_favorite_numbers = [-1, 0, 1, 2]
for bad_number, prime in zip(<MODIFY THIS PART>):
print(bad_number, prime)
def square(number):
# this function will take a number and return its square
return number**2
print(square(3))
# to make the function more general we will include a keyword argument - this argument has a default value and can be changed by the user
def raise_to_power(number, power=2):
return number**power
print(raise_to_power(3)) # with default arguments this will square it
print(raise_to_power(3, power=3)) # with a new argument this will return cubic
print(raise_to_power(<MODIFY THIS SOMEHOW>))
definitions = {} # here we are using the squiggly brackets to make an empty dictionary
# add an entry for cosmology
definitions['cosmology'] = 'the branch of astronomy that deals with the general structure and evolution of the universe.'
# and for universe
definitions['universe'] = 'the totality of known or supposed objects and phenomena throughout space; the cosmos; macrocosm.'
definitions['cosmology']
# here we are using the curly braces to make a dictionary of constants. The 'e' syntax is shorthand for 'x10^', so 1e-1 is 0.1
constants_cgs = {'G': 6.67259e-8, 'h': 6.6260756e-27, 'k': 1.380658e-16}
import numpy as np # now we have access to a range of new functions that work with numerical data
# we can make an 'array' -- this is similar to a list but has some advantages
array_of_primes = np.array([1, 3, 5, 7, 11])
# you can do math on the entire array
array_of_primes + 1
# CAREFUL: this only works with numpy arrays. This will not work with lists!! Pay attention to the type that you are working with.
# Illustris data uses numpy arrays mostly, but it is always good to check.
print(type(array_of_primes), type(favorite_primes))
# We can see some info on the size and shape of the array:
print(array_of_primes.shape, array_of_primes.ndim)
# and generate arrays with values
array_of_evens = np.arange(2, 12, 2) # array starting at 2, ending at 12 (exclusive) in steps of 2
array_of_primes + array_of_evens
# we can also compare element-wise:
array_of_evens > array_of_primes
# we can use these arrays of boolean values to select values of interest from an array
array_of_evens[array_of_evens > array_of_primes] # select only the even numbers that are greater than corresponding prime numbers
#Or we can use a 'where' function to get the corresponding indices.
np.where(array_of_evens > array_of_primes)
indices = np.where(array_of_evens > array_of_primes)
array_of_evens[indices]
velocities = np.random.rand(15).reshape(5, 3) # make an array of 15 random values and reshape them into a 2D array with four rows and three columns
#lets examine the results! This will be different for each person
velocities
velocities[:, 0] # get all values (the ':' character) from the first column
velocities[1, :] # get all values from the second row
# print the mean value, the max value, and the min value of the array
velocities.mean(), velocities.min(), velocities.max()
# print the mean in each of the columns - should be a 1D array with three values
velocities.mean(axis=0)
import matplotlib.pyplot as plt # lets import some plotting tools and give it a helpful name
# this next fancy 'magic line' lets us plot right here in the notebook
%matplotlib inline
#making a simple plot is easy - just tell the 'plot' function what you x and y values are - by default it makes a line
x = np.arange(10)
y = x**2
plt.plot(x, y)
# or we can make them points by setting some options
plt.plot(x, y, marker='.', linestyle='none') # turning the line to none and the marker to a period
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This colab shows how to use CARLS to train a model with regularization more efficiently.
Step4: Prepare dataset and build the package with bazel
Step5: Bazel build and run the example.
Step7: Define the model
Step9: Add configuration for knowledge bank that stores dynamic embeddings
Step11: Background thread to load checkpoint and update embeddings
Step12: Build a model for training with graph regularization
Step13: Train
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!apt install curl gnupg
!curl https://bazel.build/bazel-release.pub.gpg | apt-key add -
!echo "deb [arch=amd64] https://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
!apt update && apt install bazel-3.1.0
!pip install neural-structured-learning
!pip install protobuf==3.9.2
# Clear dir and download neural structured learning codebase
!rm -rf nsl > /dev/null
!git clone https://github.com/tensorflow/neural-structured-learning.git nsl
!cd /content/nsl && bash neural_structured_learning/examples/preprocess/cora/prep_data.sh
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
from absl import app
from absl import flags
from absl import logging
import attr
import tensorflow as tf
from google.protobuf import text_format
NBR_FEATURE_PREFIX = 'NL_nbr_'
NBR_WEIGHT_SUFFIX = '_weight'
@attr.s
class HParams(object):
### dataset parameters
num_classes = attr.ib(default=7)
max_seq_length = attr.ib(default=1433)
### NGM parameters
graph_regularization_multiplier = attr.ib(default=0.1)
num_neighbors = attr.ib(default=1)
### model architecture
num_fc_units = attr.ib(default=[50, 50])
### training parameters
train_epochs = attr.ib(default=10)
batch_size = attr.ib(default=128)
dropout_rate = attr.ib(default=0.5)
### eval parameters
eval_steps = attr.ib(default=None) # Every test instance is evaluated.
hparams = HParams()
NBR_FEATURE_PREFIX = 'NL_nbr_'
NBR_WEIGHT_SUFFIX = '_weight'
def make_dataset(file_path, training, include_nbr_features, hparams):
Returns a `tf.data.Dataset` instance based on data in `file_path`.
def parse_example(example_proto):
Extracts relevant fields from the `example_proto`.
Args:
example_proto: An instance of `tf.train.Example`.
Returns:
A pair whose first value is a dictionary containing relevant features
and whose second value contains the ground truth labels.
# The 'words' feature is a multi-hot, bag-of-words representation of the
# original raw text. A default value is required for examples that don't
# have the feature.
feature_spec = {
'id':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'words':
tf.io.FixedLenFeature([hparams.max_seq_length],
tf.int64,
default_value=tf.constant(
0,
dtype=tf.int64,
shape=[hparams.max_seq_length])),
'label':
tf.io.FixedLenFeature((), tf.int64, default_value=-1),
}
if include_nbr_features:
for i in range(hparams.num_neighbors):
nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')
nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i,
NBR_WEIGHT_SUFFIX)
nbr_id_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'id')
feature_spec[nbr_feature_key] = tf.io.FixedLenFeature(
[hparams.max_seq_length],
tf.int64,
default_value=tf.constant(
0, dtype=tf.int64, shape=[hparams.max_seq_length]))
feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(
[1], tf.float32, default_value=tf.constant([0.0]))
feature_spec[nbr_id_key] = tf.io.FixedLenFeature(
(), tf.string, default_value='')
features = tf.io.parse_single_example(example_proto, feature_spec)
labels = features.pop('label')
return features, labels
# If the dataset is sharded, the following code may be required:
# filenames = tf.data.Dataset.list_files(file_path, shuffle=True)
# dataset = filenames.interleave(load_dataset, cycle_length=1)
dataset = tf.data.TFRecordDataset([file_path])
if training:
dataset = dataset.shuffle(10000)
dataset = dataset.map(parse_example)
dataset = dataset.batch(hparams.batch_size)
return dataset
train_dataset = make_dataset('/tmp/cora/train_merged_examples.tfr', True, True, hparams)
test_dataset = make_dataset('/tmp/cora/test_examples.tfr', False, False, hparams)
print(train_dataset)
!cd /content/nsl && bazel-3.1.0 build research/carls/examples/graph_regularization:graph_keras_mlp_cora
%cd /content/nsl/bazel-bin/research/carls/examples/graph_regularization/graph_keras_mlp_cora.runfiles/org_tensorflow_neural_structured_learning
import neural_structured_learning as nsl
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
from research.carls import dynamic_embedding_neighbor_cache as de_nb_cache
from research.carls import graph_regularization
from research.carls import kbs_server_helper_pybind as kbs_server_helper
def build_model(hparams):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.InputLayer(
input_shape=(hparams.max_seq_length,), name='words'))
# Input is already one-hot encoded in the integer format. We cast it to
# floating point format here.
model.add(
tf.keras.layers.Lambda(lambda x: tf.keras.backend.cast(x, tf.float32)))
for num_units in hparams.num_fc_units:
model.add(tf.keras.layers.Dense(num_units, activation='relu'))
model.add(tf.keras.layers.Dropout(hparams.dropout_rate))
model.add(tf.keras.layers.Dense(hparams.num_classes, activation='softmax'))
return model
def train_and_evaluate(model, path, train_dataset, test_dataset, hparams):
Compiles, trains, and evaluates a `Keras` model.
Args:
model: An instance of `tf.Keras.Model`.
path: The path of a checkpoint that is used to update embeddings.
train_dataset: An instance of `tf.data.Dataset` representing training data.
test_dataset: An instance of `tf.data.Dataset` representing test data.
hparams: An instance of `Hparams`.
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy'])
callbacks = []
if path:
callbacks = [tf.keras.callbacks.ModelCheckpoint(
'/tmp/carls/output/weights-{epoch:02d}',
save_weights_only=True,
save_freq='epoch'
)]
model.fit(
train_dataset,
epochs=hparams.train_epochs,
verbose=1,
callbacks=callbacks)
eval_results = dict(
zip(model.metrics_names,
model.evaluate(test_dataset, steps=hparams.eval_steps)))
base_model = build_model(hparams)
train_and_evaluate(base_model, None, train_dataset, test_dataset, hparams)
de_config = text_format.Parse(
embedding_dimension: %d
knowledge_bank_config {
initializer {
random_uniform_initializer {
low: -0.5
high: 0.5
}
use_deterministic_seed: true
}
extension {
[type.googleapis.com/carls.InProtoKnowledgeBankConfig] {}
}
}
% hparams.num_classes, de_config_pb2.DynamicEmbeddingConfig())
options = kbs_server_helper.KnowledgeBankServiceOptions(True, -1, 10)
kbs_server = kbs_server_helper.KbsServerHelper(options)
kbs_address = 'localhost:%d' % kbs_server.port()
client = de_nb_cache.DynamicEmbeddingNeighborCache(
'id', de_config, kbs_address, timeout_ms=10 * 1000)
print(kbs_address)
def update_embeddings(
stop_event, model_path, model, neighbor_cache_client, dataset):
Updates embeddings in knowledge bank server.
This runs in a background thread, loads the latest unused checkpoint,
then runs inference on all available data, updates corresponding embeddings
in the knowledge bank server.
Args:
stop_event: `threading.Event` object used to stop updating the embeddings.
model_path: path to the saved model.
model: `GraphRegularizationWithCaching` object for inference.
neighbor_cache_client: `NeighborCacheClient` object to update embeddings.
dataset: dataset for inference to update.
logging.info('Start embedding updates')
last_used_ckpt_path = None
def update_embedding_fn(features, label):
if not stop_event.wait(0.01):
neighbor_cache_client.update(
features['id'], model.base_model(features))
return features, label
# Keep updating until stop_event set to True.
while not stop_event.wait(0.1):
# Keep waiting to load the latest checkpoint until it exists.
while not stop_event.wait(0.1):
try:
latest_ckpt_path = tf.train.latest_checkpoint(model_path)
if last_used_ckpt_path != latest_ckpt_path:
# No new checkpoint since the last update.
last_used_ckpt_path = latest_ckpt_path
model.load_weights(latest_ckpt_path)
break
except tf.errors.NotFoundError:
pass
# Run inference on the dataset and update embeddings.
dataset.map(update_embedding_fn)
logging.info('Finished embedding updates')
graph_reg_config = nsl.configs.make_graph_reg_config(
max_neighbors=hparams.num_neighbors,
multiplier=hparams.graph_regularization_multiplier,
distance_type=nsl.configs.DistanceType.L2,
sum_over_axis=-1)
graph_reg_model = graph_regularization.GraphRegularizationWithCaching(
base_model, graph_reg_config, client)
stop_event = threading.Event()
base_inference_model = build_model(hparams)
graph_inference_model = graph_regularization.GraphRegularizationWithCaching(
base_inference_model, graph_reg_config, client)
update_thread = threading.Thread(
target=update_embeddings,
args=(stop_event, '/tmp/carls/output',
graph_inference_model, client, train_dataset))
update_thread.daemon = True
update_thread.start()
train_and_evaluate(graph_reg_model, '/tmp/carls/output',
train_dataset, test_dataset, hparams)
stop_event.set()
kbs_server.Terminate()
update_thread.join()
!rm -rf /tmp/carls
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Function Decorators
Step2: !!! Order Matters !!!
Step3: Decorators with arguments
Step4: Class Decorators
Step5: staticmethod()
Step6: When we call a static method we don’t get any additional arguments.
Step7: classmethod
|
<ASSISTANT_TASK:>
Python Code:
def bread(test_funct):
def hyderabad():
print("</''''''\>")
test_funct()
print("<\______/>")
return hyderabad
def ingredients(test_funct):
def chennai():
print("#tomatoes#")
test_funct()
print("~salad~")
return chennai
def cheese(food="--Say Cheese--"):
print(food)
ch = bread(test_funct=cheese)
ch()
inn = bread(ingredients(cheese))
inn()
@bread
@ingredients
def sandwich(food="--Say Cheese--"):
print(food)
sandwich()
@ingredients
@bread
def sandwich(food="--Say Cheese--"):
print(food)
sandwich()
@bread
@ingredients
def hotdog(food="Jam"):
print(food)
hotdog()
def diet_sandwitch(inner_func):
def inner():
print("salad")
return inner
@ingredients
@diet_sandwitch
def sandwich(food="--Say Cheese--"):
print(food)
sandwich()
def Names(test_funct):
def inner():
print("{Hello}")
print("\tA-Priya")
print("\tManish Gupta")
print("\tNeha", end="\n\t")
test_funct()
print("(/Hello}")
return inner
@Names
def print_AShanti():
print("A-Shanti")
print_AShanti()
class A(object):
def method(*argv):
return argv
a = A()
a.method
a.method('an arg')
class A(object):
@staticmethod
def method(*argv):
return argv
a = A()
a.method
a.method('an arg')
class A(object):
@classmethod
def method(*argv):
return argv
a = A()
a.method
a.method('an arg')
def test(strg):
print("Name: ", strg)
def hello(func, name):
print("Ja")
func(name)
hello(test, "Mayank")
class B(object):
@classmethod
def method(*argv):
return argv
a = B()
a.method()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will now create the multi-group library using data directly from Appendix A of the C5G7 benchmark documentation. All of the data below will be created at 294K, consistent with the benchmark.
Step2: We will now add the scattering matrix data.
Step3: Now that the UO2 data has been created, we can move on to the remaining materials using the same process.
Step4: Generate 2-D C5G7 Problem Input Files
Step5: Now we can go ahead and produce a materials.xml file for use by OpenMC
Step6: Our next step will be to create the geometry information needed for our assembly and to write that to the geometry.xml file.
Step7: The next step is to take our universes (representing the different pin types) and lay them out in a lattice to represent the assembly types
Step8: Let's now create the core layout in a 3x3 lattice where each lattice position is one of the assemblies we just defined.
Step9: Before we commit to the geometry, we should view it using the Python API's plotting capability
Step10: OK, it looks pretty good, let's go ahead and write the file
Step11: We can now create the tally file information. The tallies will be set up to give us the pin powers in this notebook. We will do this with a mesh filter, with one mesh cell per pin.
Step12: With the geometry and materials finished, we now just need to define simulation parameters for the settings.xml file. Note the use of the energy_mode attribute of our settings_file object. This is used to tell OpenMC that we intend to run in multi-group mode instead of the default continuous-energy mode. If we didn't specify this but our cross sections file was not a continuous-energy data set, then OpenMC would complain.
Step13: Let's go ahead and execute the simulation! You'll notice that the output for multi-group mode is exactly the same as for continuous-energy. The differences are all under the hood.
Step14: Results Visualization
|
<ASSISTANT_TASK:>
Python Code:
import os
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import openmc
%matplotlib inline
# Create a 7-group structure with arbitrary boundaries (the specific boundaries are unimportant)
groups = openmc.mgxs.EnergyGroups(np.logspace(-5, 7, 8))
uo2_xsdata = openmc.XSdata('uo2', groups)
uo2_xsdata.order = 0
# When setting the data let the object know you are setting the data for a temperature of 294K.
uo2_xsdata.set_total([1.77949E-1, 3.29805E-1, 4.80388E-1, 5.54367E-1,
3.11801E-1, 3.95168E-1, 5.64406E-1], temperature=294.)
uo2_xsdata.set_absorption([8.0248E-03, 3.7174E-3, 2.6769E-2, 9.6236E-2,
3.0020E-02, 1.1126E-1, 2.8278E-1], temperature=294.)
uo2_xsdata.set_fission([7.21206E-3, 8.19301E-4, 6.45320E-3, 1.85648E-2,
1.78084E-2, 8.30348E-2, 2.16004E-1], temperature=294.)
uo2_xsdata.set_nu_fission([2.005998E-2, 2.027303E-3, 1.570599E-2, 4.518301E-2,
4.334208E-2, 2.020901E-1, 5.257105E-1], temperature=294.)
uo2_xsdata.set_chi([5.87910E-1, 4.11760E-1, 3.39060E-4, 1.17610E-7,
0.00000E-0, 0.00000E-0, 0.00000E-0], temperature=294.)
# The scattering matrix is ordered with incoming groups as rows and outgoing groups as columns
# (i.e., below the diagonal is up-scattering).
scatter_matrix = \
[[[1.27537E-1, 4.23780E-2, 9.43740E-6, 5.51630E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 3.24456E-1, 1.63140E-3, 3.14270E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 4.50940E-1, 2.67920E-3, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 4.52565E-1, 5.56640E-3, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 1.25250E-4, 2.71401E-1, 1.02550E-2, 1.00210E-8],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 1.29680E-3, 2.65802E-1, 1.68090E-2],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 8.54580E-3, 2.73080E-1]]]
scatter_matrix = np.array(scatter_matrix)
scatter_matrix = np.rollaxis(scatter_matrix, 0, 3)
uo2_xsdata.set_scatter_matrix(scatter_matrix, temperature=294.)
# Initialize the library
mg_cross_sections_file = openmc.MGXSLibrary(groups)
# Add the UO2 data to it
mg_cross_sections_file.add_xsdata(uo2_xsdata)
# And write to disk
mg_cross_sections_file.export_to_hdf5('mgxs.h5')
# For every cross section data set in the library, assign an openmc.Macroscopic object to a material
materials = {}
for xs in ['uo2', 'mox43', 'mox7', 'mox87', 'fiss_chamber', 'guide_tube', 'water']:
materials[xs] = openmc.Material(name=xs)
materials[xs].set_density('macro', 1.)
materials[xs].add_macroscopic(xs)
# Instantiate a Materials collection, register all Materials, and export to XML
materials_file = openmc.Materials(materials.values())
# Set the location of the cross sections file to our pre-written set
materials_file.cross_sections = 'c5g7.h5'
materials_file.export_to_xml()
# Create the surface used for each pin
pin_surf = openmc.ZCylinder(x0=0, y0=0, R=0.54, name='pin_surf')
# Create the cells which will be used to represent each pin type.
cells = {}
universes = {}
for material in materials.values():
# Create the cell for the material inside the cladding
cells[material.name] = openmc.Cell(name=material.name)
# Assign the half-spaces to the cell
cells[material.name].region = -pin_surf
# Register the material with this cell
cells[material.name].fill = material
# Repeat the above for the material outside the cladding (i.e., the moderator)
cell_name = material.name + '_moderator'
cells[cell_name] = openmc.Cell(name=cell_name)
cells[cell_name].region = +pin_surf
cells[cell_name].fill = materials['water']
# Finally add the two cells we just made to a Universe object
universes[material.name] = openmc.Universe(name=material.name)
universes[material.name].add_cells([cells[material.name], cells[cell_name]])
lattices = {}
# Instantiate the UO2 Lattice
lattices['UO2 Assembly'] = openmc.RectLattice(name='UO2 Assembly')
lattices['UO2 Assembly'].dimension = [17, 17]
lattices['UO2 Assembly'].lower_left = [-10.71, -10.71]
lattices['UO2 Assembly'].pitch = [1.26, 1.26]
u = universes['uo2']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['UO2 Assembly'].universes = \
[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]
# Create a containing cell and universe
cells['UO2 Assembly'] = openmc.Cell(name='UO2 Assembly')
cells['UO2 Assembly'].fill = lattices['UO2 Assembly']
universes['UO2 Assembly'] = openmc.Universe(name='UO2 Assembly')
universes['UO2 Assembly'].add_cell(cells['UO2 Assembly'])
# Instantiate the MOX Lattice
lattices['MOX Assembly'] = openmc.RectLattice(name='MOX Assembly')
lattices['MOX Assembly'].dimension = [17, 17]
lattices['MOX Assembly'].lower_left = [-10.71, -10.71]
lattices['MOX Assembly'].pitch = [1.26, 1.26]
m = universes['mox43']
n = universes['mox7']
o = universes['mox87']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['MOX Assembly'].universes = \
[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
[m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
[m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
[m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
[m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, g, o, o, g, o, o, f, o, o, g, o, o, g, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
[m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
[m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
[m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
[m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]
# Create a containing cell and universe
cells['MOX Assembly'] = openmc.Cell(name='MOX Assembly')
cells['MOX Assembly'].fill = lattices['MOX Assembly']
universes['MOX Assembly'] = openmc.Universe(name='MOX Assembly')
universes['MOX Assembly'].add_cell(cells['MOX Assembly'])
# Instantiate the reflector Lattice
lattices['Reflector Assembly'] = openmc.RectLattice(name='Reflector Assembly')
lattices['Reflector Assembly'].dimension = [1,1]
lattices['Reflector Assembly'].lower_left = [-10.71, -10.71]
lattices['Reflector Assembly'].pitch = [21.42, 21.42]
lattices['Reflector Assembly'].universes = [[universes['water']]]
# Create a containing cell and universe
cells['Reflector Assembly'] = openmc.Cell(name='Reflector Assembly')
cells['Reflector Assembly'].fill = lattices['Reflector Assembly']
universes['Reflector Assembly'] = openmc.Universe(name='Reflector Assembly')
universes['Reflector Assembly'].add_cell(cells['Reflector Assembly'])
lattices['Core'] = openmc.RectLattice(name='3x3 core lattice')
lattices['Core'].dimension= [3, 3]
lattices['Core'].lower_left = [-32.13, -32.13]
lattices['Core'].pitch = [21.42, 21.42]
r = universes['Reflector Assembly']
u = universes['UO2 Assembly']
m = universes['MOX Assembly']
lattices['Core'].universes = [[u, m, r],
[m, u, r],
[r, r, r]]
# Create boundary planes to surround the geometry
min_x = openmc.XPlane(x0=-32.13, boundary_type='reflective')
max_x = openmc.XPlane(x0=+32.13, boundary_type='vacuum')
min_y = openmc.YPlane(y0=-32.13, boundary_type='vacuum')
max_y = openmc.YPlane(y0=+32.13, boundary_type='reflective')
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = lattices['Core']
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y
# Create root Universe
root_universe = openmc.Universe(name='root universe', universe_id=0)
root_universe.add_cell(root_cell)
root_universe.plot(origin=(0., 0., 0.), width=(3 * 21.42, 3 * 21.42), pixels=(500, 500),
color_by='material')
# Create Geometry and set root Universe
geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
geometry.export_to_xml()
tallies_file = openmc.Tallies()
# Instantiate a tally Mesh
mesh = openmc.RegularMesh()
mesh.dimension = [17 * 2, 17 * 2]
mesh.lower_left = [-32.13, -10.71]
mesh.upper_right = [+10.71, +32.13]
# Instantiate tally Filter
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate the Tally
tally = openmc.Tally(name='mesh tally')
tally.filters = [mesh_filter]
tally.scores = ['fission']
# Add tally to collection
tallies_file.append(tally)
# Export all tallies to a "tallies.xml" file
tallies_file.export_to_xml()
# OpenMC simulation parameters
batches = 150
inactive = 50
particles = 5000
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Tell OpenMC this is a multi-group problem
settings_file.energy_mode = 'multi-group'
# Set the verbosity to 6 so we dont see output for every batch
settings_file.verbosity = 6
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-32.13, -10.71, -1e50, 10.71, 32.13, 1e50]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.Source(space=uniform_dist)
# Tell OpenMC we want to run in eigenvalue mode
settings_file.run_mode = 'eigenvalue'
# Export to "settings.xml"
settings_file.export_to_xml()
# Run OpenMC
openmc.run()
# Load the last statepoint file and keff value
sp = openmc.StatePoint('statepoint.' + str(batches) + '.h5')
# Get the OpenMC pin power tally data
mesh_tally = sp.get_tally(name='mesh tally')
fission_rates = mesh_tally.get_values(scores=['fission'])
# Reshape array to 2D for plotting
fission_rates.shape = mesh.dimension
# Normalize to the average pin power
fission_rates /= np.mean(fission_rates[fission_rates > 0.])
# Force zeros to be NaNs so their values are not included when matplotlib calculates
# the color scale
fission_rates[fission_rates == 0.] = np.nan
# Plot the pin powers and the fluxes
plt.figure()
plt.imshow(fission_rates, interpolation='none', cmap='jet', origin='lower')
plt.colorbar()
plt.title('Pin Powers')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As can be seen, the keys of the dictionary are the names of the voltage rails while the values are Rail objects which contain three sensors for the voltage, current and power. Due to limitations of the regulators used on the ZCU104 the resolution of most of the power rails are only to 1/8 W.
Step2: We can now use the recorder to monitor the applied sensor. For this example we'll sample the power every half second while sleeping and performing a dummy loop
Step3: The DataRecorder exposes the sensor data as a pandas dataframe
Step4: or by plotting the results using matplotlib
Step5: We can get more information by using the mark function which will increment the invocation number without having to stop and start the recorder
|
<ASSISTANT_TASK:>
Python Code:
import pynq
rails = pynq.get_rails()
rails
recorder = pynq.DataRecorder(rails['12V'].power)
import time
with recorder.record(0.5):
time.sleep(10)
for _ in range(10000000):
pass
time.sleep(10)
recorder.frame
%matplotlib inline
recorder.frame['12V_power'].plot()
recorder.reset()
with recorder.record(0.5):
time.sleep(10)
recorder.mark()
for _ in range(10000000):
pass
recorder.mark()
time.sleep(10)
recorder.frame.plot(subplots=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-1', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: List of data files
Step4: Data load
Step5: Load the leakage coefficient from disk
Step6: Load the direct excitation coefficient ($d_{exAA}$) from disk
Step7: Load the gamma-factor ($\gamma$) from disk
Step8: Update d with the correction coefficients
Step9: Laser alternation selection
Step10: We need to define some parameters
Step11: We should check if everithing is OK with an alternation histogram
Step12: If the plot looks good we can apply the parameters with
Step13: Measurements infos
Step14: Or check the measurements duration
Step15: Compute background
Step16: Burst search and selection
Step17: Donor Leakage fit
Step18: Burst sizes
Step19: Fret fit
Step20: Weighted mean of $E$ of each burst
Step21: Gaussian fit (no weights)
Step22: Gaussian fit (using burst size as weights)
Step23: Stoichiometry fit
Step24: The Maximum likelihood fit for a Gaussian population is the mean
Step25: Computing the weighted mean and weighted standard deviation we get
Step26: Save data to file
Step27: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step28: This is just a trick to format the different variables
|
<ASSISTANT_TASK:>
Python Code:
ph_sel_name = "None"
data_id = "7d"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
data_id
d = loader.photon_hdf5(filename=files_dict[data_id])
leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv'
leakage = np.loadtxt(leakage_coeff_fname)
print('Leakage coefficient:', leakage)
dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'
dir_ex_aa = np.loadtxt(dir_ex_coeff_fname)
print('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)
gamma_fname = 'results/usALEX - gamma factor - all-ph.csv'
gamma = np.loadtxt(gamma_fname)
print('Gamma-factor:', gamma)
d.leakage = leakage
d.dir_ex = dir_ex_aa
d.gamma = gamma
d.ph_times_t[0][:3], d.ph_times_t[0][-3:]#, d.det_t
print('First and last timestamps: {:10,} {:10,}'.format(d.ph_times_t[0][0], d.ph_times_t[0][-1]))
print('Total number of timestamps: {:10,}'.format(d.ph_times_t[0].size))
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
print('D+A photons in D-excitation period: {:10,}'.format(d.D_ex[0].sum()))
print('D+A photons in A-excitation period: {:10,}'.format(d.A_ex[0].sum()))
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
d.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel('all'))
print(d.ph_sel)
dplot(d, hist_fret);
# if data_id in ['7d', '27d']:
# ds = d.select_bursts(select_bursts.size, th1=20)
# else:
# ds = d.select_bursts(select_bursts.size, th1=30)
ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)
n_bursts_all = ds.num_bursts[0]
def select_and_plot_ES(fret_sel, do_sel):
ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)
ds_do = ds.select_bursts(select_bursts.ES, **do_sel)
bpl.plot_ES_selection(ax, **fret_sel)
bpl.plot_ES_selection(ax, **do_sel)
return ds_fret, ds_do
ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)
if data_id == '7d':
fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)
do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '12d':
fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '17d':
fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '22d':
fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '27d':
fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
n_bursts_do = ds_do.num_bursts[0]
n_bursts_fret = ds_fret.num_bursts[0]
n_bursts_do, n_bursts_fret
d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)
print('D-only fraction:', d_only_frac)
dplot(ds_fret, hist2d_alex, scatter_alpha=0.1);
dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);
bandwidth = 0.03
E_range_do = (-0.1, 0.15)
E_ax = np.r_[-0.2:0.401:0.0002]
E_pr_do_kde = bext.fit_bursts_kde_peak(ds_do, bandwidth=bandwidth, weights='size',
x_range=E_range_do, x_ax=E_ax, save_fitter=True)
mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, bins=np.r_[E_ax.min(): E_ax.max(): bandwidth])
plt.xlim(-0.3, 0.5)
print("%s: E_peak = %.2f%%" % (ds.ph_sel, E_pr_do_kde*100))
nt_th1 = 50
dplot(ds_fret, hist_size, which='all', add_naa=False)
xlim(-0, 250)
plt.axvline(nt_th1)
Th_nt = np.arange(35, 120)
nt_th = np.zeros(Th_nt.size)
for i, th in enumerate(Th_nt):
ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)
nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th
plt.figure()
plot(Th_nt, nt_th)
plt.axvline(nt_th1)
nt_mean = nt_th[np.where(Th_nt == nt_th1)][0]
nt_mean
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')
E_fitter = ds_fret.E_fitter
E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))
E_fitter.fit_res[0].params.pretty_print()
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(E_fitter, ax=ax[0])
mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))
display(E_fitter.params*100)
ds_fret.fit_E_m(weights='size')
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')
E_kde_w = E_fitter.kde_max_pos[0]
E_gauss_w = E_fitter.params.loc[0, 'center']
E_gauss_w_sig = E_fitter.params.loc[0, 'sigma']
E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))
E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr
E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)
S_fitter = ds_fret.S_fitter
S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(S_fitter, ax=ax[0])
mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))
display(S_fitter.params*100)
S_kde = S_fitter.kde_max_pos[0]
S_gauss = S_fitter.params.loc[0, 'center']
S_gauss_sig = S_fitter.params.loc[0, 'sigma']
S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))
S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr
S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
S = ds_fret.S[0]
S_ml_fit = (S.mean(), S.std())
S_ml_fit
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)
S_mean = np.dot(weights, S)/weights.sum()
S_std_dev = np.sqrt(
np.dot(weights, (S - S_mean)**2)/weights.sum())
S_wmean_fit = [S_mean, S_std_dev]
S_wmean_fit
sample = data_id
variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '
'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '
'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '
'E_pr_do_kde nt_mean\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-E-corrected-all-ph.csv', 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizing the MNIST data
Step3: Building a Feed-Forward Neural Network
Step5: Now let's use this to construct a linear softmax classifier as before, which we will expand into a near state-of-the-art feed-forward model for MNIST. We first create an abstract BaseSoftmaxClassifier base class that houses common functionality between the models. Each specific model will then provide a build_model method that represents the logic of that specific model.
Step6: If we wanted to reimplement the linear softmax classifier from before, we just need to override build_model() to perform one projection from the input to the output logits, like this
Step8: In order to build a deeper model, let's add several layers with multiple transformations and rectied linear units (relus)
Step9: We can now create a linear model, i.e. a 784-10 architecture (note that there is only one possible linear model going from 784 inputs to 10 outputs), as follows
Step10: Compute the gradients
Step11: BACKPROP
Step13: Ppphhhhhheeeeewwwww, ok, come up for a breather. We know. This takes a while to wrap your head around. For now, just try to get the high-level picture! Depending on your background, this may well be the toughest part of the week.
Step16: Training deep neural networks (10min)
Step17: Wrapping everything together and verifying that it works (10min)
Step18: Instead of just training and checking that the loss goes down, it is usually a good idea to try to overfit a small subset of your training data. We will do this below by training a 1 hidden layer network on a subset of the MNIST training data, by setting the train_only_on_fraction training hyperparameter to 0.05 (i.e. 5%). We turn off early stopping for this. The following diagram illustrates the difference between under-fitting and over-fitting. Note that the diagram is idealised and it's not always this clear in practice!
Step19: Above we plot the training loss vs the validation loss and the training accuracy vs the validation accuracy on only 5% (train_only_on_fraction=5e-3) of the training data (so that it doesn't take too long, and also so that our model can overfit easier). We see that the loss is coming down and the accuracies are going up, as expected! By training on a small subset of the training data, we established that
Step20: ~91% is quite a bad score on MNIST! Now let's build a deeper model, to improve on that score.
Step21: 97.7% is a much more decent score! The 1 hidden layer model gives a much better score than the linear model, so let's see if we can do better by adding another layer!
Step22: You should get around 97.4%. Shouldn't deeper do better?! Why is it that the 2-hidden layer model
Step23: We will play around with the strength of the L2 regularization. Let's use SGD+Momentum, and run for a fixed budget of 20 epochs
Step24: Notice the huuuuge variance in the test accuracies! This is just a toy run, but hopefully it illustrates how important choosing the right architectures and training hyperparameters is to getting good results in deep learning.
|
<ASSISTANT_TASK:>
Python Code:
# Import TensorFlow and some other libraries we'll be using.
import datetime
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download the MNIST dataset onto the local machine.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
from matplotlib import pyplot as plt
plt.ioff()
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Helper plotting routine.
def display_images(gens, title=""):
fig, axs = plt.subplots(1, 10, figsize=(25, 3))
fig.suptitle(title, fontsize=14, fontweight='bold')
for i in xrange(10):
reshaped_img = (gens[i].reshape(28, 28) * 255).astype(np.uint8)
axs.flat[i].imshow(reshaped_img)
#axs.flat[i].axis('off')
return fig, axs
batch_xs, batch_ys = mnist.train.next_batch(10)
list_of_images = np.split(batch_xs, 10)
_ = display_images(list_of_images, "Some Examples from the Training Set.")
plt.show()
def _dense_linear_layer(inputs, layer_name, input_size, output_size):
Builds a layer that takes a batch of inputs of size `input_size` and returns
a batch of outputs of size `output_size`.
Args:
inputs: A `Tensor` of shape [batch_size, input_size].
layer_name: A string representing the name of the layer.
input_size: The size of the inputs
output_size: The size of the outputs
Returns:
out, weights: tuple of layer outputs and weights.
# Name scopes allow us to logically group together related variables.
# Setting reuse=False avoids accidental reuse of variables between different runs.
with tf.variable_scope(layer_name, reuse=False):
# Create the weights for the layer
layer_weights = tf.get_variable("weights",
shape=[input_size, output_size],
dtype=tf.float32,
initializer=tf.random_normal_initializer())
# Create the biases for the layer
layer_bias = tf.get_variable("biases",
shape=[output_size],
dtype=tf.float32,
initializer=tf.random_normal_initializer())
## IMPLEMENT-ME: ...
outputs = tf.matmul(inputs, layer_weights) + layer_bias
return (outputs, layer_weights)
class BaseSoftmaxClassifier(object):
def __init__(self, input_size, output_size, l2_lambda):
# Define the input placeholders. The "None" dimension means that the
# placeholder can take any number of images as the batch size.
self.x = tf.placeholder(tf.float32, [None, input_size])
self.y = tf.placeholder(tf.float32, [None, output_size])
self.input_size = input_size
self.output_size = output_size
self.l2_lambda = l2_lambda
self._all_weights = [] # Used to compute L2 regularization in compute_loss().
# You should override these in your build_model() function.
self.logits = None
self.predictions = None
self.loss = None
self.build_model()
def get_logits(self):
return self.logits
def build_model(self):
# OVERRIDE THIS FOR YOUR PARTICULAR MODEL.
raise NotImplementedError("Subclasses should implement this function!")
def compute_loss(self):
All models share the same softmax cross-entropy loss.
assert self.logits is not None # Ensure that logits has been created!
# IMPLEMENT-ME: ...
data_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y))
# IMPLEMENT-ME: ...
reg_loss = 0.
for w in self._all_weights:
reg_loss += tf.nn.l2_loss(w)
return data_loss + self.l2_lambda * reg_loss
def accuracy(self):
# Calculate accuracy.
assert self.predictions is not None # Ensure that pred has been created!
correct_prediction = tf.equal(tf.argmax(self.predictions, 1), tf.argmax(self.y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
return accuracy
class LinearSoftmaxClassifier(BaseSoftmaxClassifier):
def __init__(self, intput_size, output_size, l2_lambda):
super(LinearSoftmaxClassifier, self).__init__(input_size, output_size, l2_lambda)
def build_model(self):
# The model takes x as input and produces output_size outputs.
self.logits, weights = _dense_linear_layer(
self.x, "linear_layer", self.input_size, self.output_size)
self._all_weights.append(weights)
# IMPLEMENT-ME: ...
self.predictions = tf.nn.softmax(self.logits)
self.loss = self.compute_loss()
class DNNClassifier(BaseSoftmaxClassifier):
DNN = Deep Neural Network - now we're doing Deep Learning! :)
def __init__(self,
input_size=784, # There are 28x28 = 784 pixels in MNIST images
hidden_sizes=[], # List of hidden layer dimensions, empty for linear model.
output_size=10, # There are 10 possible digit classes
act_fn=tf.nn.relu, # The activation function to use in the hidden layers
l2_lambda=0.): # The strength of regularisation, off by default.
self.hidden_sizes = hidden_sizes
self.act_fn = act_fn
super(DNNClassifier, self).__init__(input_size, output_size, l2_lambda)
def build_model(self):
prev_layer = self.x
prev_size = self.input_size
for layer_num, size in enumerate(self.hidden_sizes):
layer_name = "layer_" + str(layer_num)
## IMPLEMENT-ME: ...
layer, weights = _dense_linear_layer(prev_layer, layer_name, prev_size, size)
self._all_weights.append(weights)
## IMPLEMENT-ME: ...
layer = self.act_fn(layer)
prev_layer, prev_size = layer, size
# The final layer is our predictions and goes from prev_size inputs to
# output_size outputs. The outputs are "logits", un-normalised scores.
self.logits, out_weights = _dense_linear_layer(prev_layer, "output", prev_size, self.output_size)
self._all_weights.append(out_weights)
self.predictions = tf.nn.softmax(self.logits)
self.loss = self.compute_loss()
tf.set_random_seed(1234)
np.random.seed(1234)
# Generate a batch of 100 "images" of 784 pixels consisting of Gaussian noise.
x_rnd = np.random.randn(100, 784)
print "Sample of random data:\n", x_rnd[:5,:] # Print the first 5 "images"
print "Shape: ", x_rnd.shape
# Generate some random one-hot labels.
y_rnd = np.eye(10)[np.random.choice(10, 100)]
print "Sample of random labels:\n", y_rnd[:5,:]
print "Shape: ", y_rnd.shape
# Model without regularization.
tf.reset_default_graph()
tf_linear_model = DNNClassifier(l2_lambda=0.0)
x, y = tf_linear_model.x, tf_linear_model.y
with tf.Session() as sess:
# Initialize variables.
init = tf.global_variables_initializer()
sess.run(init)
avg_cross_entropy = -tf.log(tf.reduce_mean(tf_linear_model.predictions))
loss_no_reg = tf_linear_model.loss
manual_avg_xent, loss_no_reg = sess.run([avg_cross_entropy, loss_no_reg],
feed_dict={x : x_rnd, y: y_rnd})
# Sanity check: Loss should be about log(10) = 2.3026
print '\nSanity check manual avg cross entropy: ', manual_avg_xent
print 'Model loss (no reg): ', loss_no_reg
# Model with regularization.
tf.reset_default_graph()
tf_linear_model = DNNClassifier(l2_lambda=1.0)
x, y = tf_linear_model.x, tf_linear_model.y
with tf.Session() as sess:
# Initialize variables.
init = tf.global_variables_initializer()
sess.run(init)
loss_w_reg = tf_linear_model.loss.eval(feed_dict={x : x_rnd, y: y_rnd})
# Sanity check: Loss should go up when you add regularization
print 'Sanity check loss (with regularization, should be higher): ', loss_w_reg
# PSEUDOCODE:
def fprop(x, weights, biases, per_layer_nonlinearities):
# Initialise the input. We pretend inputs are the first pre- and post-activations.
z = a = x
cache = [(z, a)] # We'll save z's and a's for the backprop phase.
for W, b, act_fn in zip(weights, biases, per_layer_nonlinearities):
z = np.dot(W, a) + b # "pre-activations" / logits
a = act_fn(z) # "outputs" / (post-)activations of the current layer
# NOTE: We save both pre-activations and (post-)activations for the backwards phase!
cache.append((z, a)
return cache # Per-layer pre- and post-activations.
# PSEUDOCODE
def backprop(target, fprop_cache, weights):
# Pop/remove the model prediction (last activation `a` we computed above)
# off the cache we created during the fprop phase.
(_, pred) = fprop_cache.pop()
# Intialise delta^{L} (at the output layer) as dE/dz (cross-entropy).
delta_above = (target - pred)
grads = []
# Unroll backwards from the output:
for (z_below, a_below), W_between in reversed(zip(fprop_cache, weights)):
# Compute dE/dW:
Wgrad = np.dot(delta_above, a_below.T) # Outer product
# Compute dE/db:
bgrad = delta_above
# Save these:
grads.append((Wgrad, bgrad))
# Update for the *next* iteration/layer. Note the elem-wise multiplication.
# Note the use of z_below, the preactivations in the layer below!
# delta^i = delta^{(i+1)}.(W^i)^T .* sigma'(z_i):
delta_above = np.dot(delta_above, W_between.T) * dsigmoid(z_below)
grads.reverse()
return grads
# PSEUDOCODE:
biases, weights = [b1, b2], [W1, W2]
x, y = ..., ...
non_linearities = [relu, softmax]
fprop_cache = fprop(x, weights, biases, non_linearities)
grads = backprop(y, fprop_cache, weights)
class MNISTFraction(object):
A helper class to extract only a fixed fraction of MNIST data.
def __init__(self, mnist, fraction):
self.mnist = mnist
self.num_images = int(mnist.num_examples * fraction)
self.image_data, self.label_data = mnist.images[:self.num_images], mnist.labels[:self.num_images]
self.start = 0
def next_batch(self, batch_size):
start = self.start
end = min(start + batch_size, self.num_images)
self.start = 0 if end == self.num_images else end
return self.image_data[start:end], self.label_data[start:end]
def train_tf_model(tf_model,
session, # The active session.
num_epochs, # Max epochs/iterations to train for.
batch_size=50, # Number of examples per batch.
keep_prob=1.0, # (1. - dropout) probability, none by default.
train_only_on_fraction=1., # Fraction of training data to use.
optimizer_fn=None, # TODO(sgouws): more correct to call this optimizer_obj
report_every=1, # Report training results every nr of epochs.
eval_every=1, # Evaluate on validation data every nr of epochs.
stop_early=True, # Use early stopping or not.
verbose=True):
# Get the (symbolic) model input, output, loss and accuracy.
x, y = tf_model.x, tf_model.y
loss = tf_model.loss
accuracy = tf_model.accuracy()
# Compute the gradient of the loss with respect to the model parameters
# and create an op that will perform one parameter update using the specific
# optimizer's update rule in the direction of the gradients.
if optimizer_fn is None:
optimizer_fn = tf.train.AdamOptimizer()
optimizer_step = optimizer_fn.minimize(loss)
# Get the op which, when executed, will initialize the variables.
init = tf.global_variables_initializer()
# Actually initialize the variables (run the op).
session.run(init)
# Save the training loss and accuracies on training and validation data.
train_costs = []
train_accs = []
val_costs = []
val_accs = []
if train_only_on_fraction < 1:
mnist_train_data = MNISTFraction(mnist.train, train_only_on_fraction)
else:
mnist_train_data = mnist.train
prev_c_eval = 1000000
# Main training cycle.
for epoch in range(num_epochs):
avg_cost = 0.
avg_acc = 0.
total_batch = int(train_only_on_fraction * mnist.train.num_examples / batch_size)
# Loop over all batches.
for i in range(total_batch):
batch_x, batch_y = mnist_train_data.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value),
# and compute the accuracy of the model.
feed_dict = {x: batch_x, y: batch_y}
if keep_prob < 1.:
feed_dict["keep_prob:0"] = keep_prob
_, c, a = session.run(
[optimizer_step, loss, accuracy], feed_dict=feed_dict)
# Compute average loss/accuracy
avg_cost += c / total_batch
avg_acc += a / total_batch
train_costs.append((epoch, avg_cost))
train_accs.append((epoch, avg_acc))
# Display logs per epoch step
if epoch % report_every == 0 and verbose:
print "Epoch:", '%04d' % (epoch+1), "Training cost=", \
"{:.9f}".format(avg_cost)
if epoch % eval_every == 0:
val_x, val_y = mnist.validation.images, mnist.validation.labels
feed_dict = {x : val_x, y : val_y}
if keep_prob < 1.:
feed_dict['keep_prob:0'] = 1.0
c_eval, a_eval = session.run([loss, accuracy], feed_dict=feed_dict)
if verbose:
print "Epoch:", '%04d' % (epoch+1), "Validation acc=", \
"{:.9f}".format(a_eval)
if c_eval >= prev_c_eval and stop_early:
print "Validation loss stopped improving, stopping training early after %d epochs!" % (epoch + 1)
break
prev_c_eval = c_eval
val_costs.append((epoch, c_eval))
val_accs.append((epoch, a_eval))
print "Optimization Finished!"
return train_costs, train_accs, val_costs, val_accs
# Helper functions to plot training progress.
def my_plot(list_of_tuples):
Take a list of (epoch, value) and split these into lists of
epoch-only and value-only. Pass these to plot to make sure we
line up the values at the correct time-steps.
plt.plot(*zip(*list_of_tuples))
def plot_multi(values_lst, labels_lst, y_label, x_label='epoch'):
# Plot multiple curves.
assert len(values_lst) == len(labels_lst)
plt.subplot(2, 1, 2)
for v in values_lst:
my_plot(v)
plt.legend(labels_lst, loc='upper left')
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
##### BUILD MODEL #####
tf.reset_default_graph() # Clear the graph.
model = DNNClassifier() # Choose model hyperparameters.
with tf.Session() as sess:
##### TRAIN MODEL #####
train_losses, train_accs, val_losses, val_accs = train_tf_model(
model,
session=sess,
num_epochs=10,
train_only_on_fraction=1e-1,
optimizer_fn=tf.train.GradientDescentOptimizer(learning_rate=1e-3),
report_every=1,
eval_every=2,
stop_early=False)
##### EVALUATE MODEL ON TEST DATA #####
# Get the op which calculates model accuracy.
accuracy_op = model.accuracy() # Get the symbolic accuracy operation
# Connect the MNIST test images and labels to the model input/output
# placeholders, and compute the accuracy given the trained parameters.
accuracy = accuracy_op.eval(feed_dict = {model.x: mnist.test.images,
model.y: mnist.test.labels})
print "Accuracy on test set:", accuracy
# NOTE THE CODE TEMPLATE BELOW WRAPS ALL OF THE ABOVE CODE, AND EXPOSES ONLY THE
# DIFFERENT HYPERPARAMETER CHOICES. MAKE SURE YOU UNDERSTAND EXACTLY HOW
# THE ABOVE CODE WORKS FIRST. TO SAVE SOME SPACE, WE WILL COPY AND MODIFY THE
# CODE BELOW TO BUILD AND TRAIN DIFFERENT MODELS IN THE REST OF THIS PRACTICAL.
# YOU CAN USE WHICHEVER VERSION YOU PREFER FOR YOUR OWN EXPERIMENTS.
# Helper to wrap building, training, evaluating and plotting model accuracy.
def build_train_eval_and_plot(build_params, train_params, verbose=True):
tf.reset_default_graph()
m = DNNClassifier(**build_params)
with tf.Session() as sess:
# Train model on the MNIST dataset.
train_losses, train_accs, val_losses, val_accs = train_tf_model(
m,
sess,
verbose=verbose,
**train_params)
# Now evaluate it on the test set:
accuracy_op = m.accuracy() # Get the symbolic accuracy operation
# Calculate the accuracy using the test images and labels.
accuracy = accuracy_op.eval({m.x: mnist.test.images,
m.y: mnist.test.labels})
if verbose:
print "Accuracy on test set:", accuracy
# Plot losses and accuracies.
plot_multi([train_losses, val_losses], ['train', 'val'], 'loss', 'epoch')
plot_multi([train_accs, val_accs], ['train', 'val'], 'accuracy', 'epoch')
ret = {'train_losses': train_losses, 'train_accs' : train_accs,
'val_losses' : val_losses, 'val_accs' : val_accs,
'test_acc' : accuracy}
return m, ret
#################################CODE TEMPLATE##################################
# Specify the model hyperparameters (NOTE: All the defaults can be omitted):
model_params = {
#'input_size' : 784, # There are 28x28 = 784 pixels in MNIST images
'hidden_sizes' : [512], # List of hidden layer dimensions, empty for linear model.
#'output_size' : 10, # There are 10 possible digit classes
#'act_fn' : tf.nn.relu, # The activation function to use in the hidden layers
'l2_lambda' : 0. # Strength of L2 regularization.
}
# Specify the training hyperparameters:
training_params = {'num_epochs' : 100, # Max epochs/iterations to train for.
#'batch_size' : 100, # Number of examples per batch, 100 default.
#'keep_prob' : 1.0, # (1. - dropout) probability, none by default.
'train_only_on_fraction' : 5e-2, # Fraction of training data to use, 1. for everything.
'optimizer_fn' : None, # Optimizer, None for Adam.
'report_every' : 1, # Report training results every nr of epochs.
'eval_every' : 2, # Evaluate on validation data every nr of epochs.
'stop_early' : False, # Use early stopping or not.
}
# Build, train, evaluate and plot the results!
trained_model, training_results = build_train_eval_and_plot(
model_params,
training_params,
verbose=True # Modify as desired.
)
###############################END CODE TEMPLATE################################
%%time
# Train the linear model on the full dataset.
################################################################################
# Specify the model hyperparameters.
model_params = {'l2_lambda' : 0.}
# Specify the training hyperparameters:
training_params = {'num_epochs' : 50, # Max epochs/iterations to train for.
'optimizer_fn' : None, # Now we're using Adam.
'report_every' : 1, # Report training results every nr of epochs.
'eval_every' : 1, # Evaluate on validation data every nr of epochs.
'stop_early' : True
}
# Build, train, evaluate and plot the results!
trained_model, training_results = build_train_eval_and_plot(
model_params,
training_params,
verbose=True # Modify as desired.
)
################################################################################
%%time
# Specify the model hyperparameters (NOTE: All the defaults can be omitted):
model_params = {
'hidden_sizes' : [512], # List of hidden layer dimensions, empty for linear model.
'l2_lambda' : 1e-3 # Strength of L2 regularization.
}
# Specify the training hyperparameters:
training_params = {
'num_epochs' : 50, # Max epochs/iterations to train for.
'report_every' : 1, # Report training results every nr of epochs.
'eval_every' : 1, # Evaluate on validation data every nr of epochs.
'stop_early' : True # Use early stopping or not.
}
# Build, train, evaluate and plot the results!
trained_model, training_results = build_train_eval_and_plot(
model_params,
training_params,
verbose=True # Modify as desired.
)
%%time
# Specify the model hyperparameters (NOTE: All the defaults can be omitted):
model_params = {
'hidden_sizes' : [512, 512], # List of hidden layer dimensions, empty for linear model.
'l2_lambda' : 1e-3 # Strength of L2 regularization.
}
# Specify the training hyperparameters:
training_params = {
'num_epochs' : 200, # Max epochs/iterations to train for.
'report_every' : 1, # Report training results every nr of epochs.
'eval_every' : 1, # Evaluate on validation data every nr of epochs.
'stop_early' : True, # Use early stopping or not.
}
# Build, train, evaluate and plot the results!
trained_model, training_results = build_train_eval_and_plot(
model_params,
training_params,
verbose=True # Modify as desired.
)
def sample_log_scale(v_min=1e-6, v_max=1.):
'''Sample uniformly on a log-scale from 10**v_min to 10**v_max.'''
return np.exp(np.random.uniform(np.log(v_min), np.log(v_max)))
def sample_model_architecture_and_hyperparams(max_num_layers=2,
lr_min=1e-6,
lr_max=1.,
mom_min=0.5,
mom_max=1.,
l2_min=1e-4,
l2_max=1.):
'''Generate a random model architecture & hyperparameters.'''
# Sample the architecture.
num_layers = np.random.choice(range(1, max_num_layers+1))
hidden_sizes = []
layer_ranges=[128, 256]
for l in range(num_layers):
hidden_sizes.append(np.random.choice(layer_ranges))
# Sample the training parameters.
l2_lambda = sample_log_scale(l2_min, l2_max)
lr = sample_log_scale(lr_min, lr_max)
mom_coeff = sample_log_scale(mom_min, mom_max)
# Build base model definitions:
model_params = {
'hidden_sizes' : hidden_sizes,
'l2_lambda' : l2_lambda}
# Specify the training hyperparameters:
training_params = {
'num_epochs' : 20,
'optimizer_fn' : tf.train.MomentumOptimizer(
learning_rate=lr,
momentum=mom_coeff),
'report_every' : 1,
'eval_every' : 1,
'stop_early' : True}
return model_params, training_params
# TEST THIS: Run this cell a few times and look at the different outputs.
# Each of these will be a different model trained with different hyperparameters.
m, t = sample_model_architecture_and_hyperparams()
print m
print t
results = []
# Perform a random search over hyper-parameter space this many times.
NUM_EXPERIMENTS = 10
for i in range(NUM_EXPERIMENTS):
# Sample the model and hyperparams we are using.
model_params, training_params = sample_model_architecture_and_hyperparams()
print "RUN: %d out of %d:" % (i, NUM_EXPERIMENTS)
print "Sampled Architecture: \n", model_params
print "Hyper-parameters:\n", training_params
# Build, train, evaluate
model, performance = build_train_eval_and_plot(
model_params, training_params, verbose=False)
# Save results
results.append((performance['test_acc'], model_params, training_params))
# Display (best?) results/variance/etc:
results.sort(key=lambda x : x[0], reverse=True)
for r in results:
print r # Tuples of (test_accuracy, model_hyperparameters, training_hyperparameters)
%%time
# Specify the model hyperparameters (NOTE: All the defaults can be omitted):
model_params = {
'hidden_sizes' : [500, 300], # List of hidden layer dimensions, empty for linear model.
'l2_lambda' : 1e-3 # Strength of L2 regularization.
}
# Specify the training hyperparameters:
training_params = {'num_epochs' : 100, # Max epochs/iterations to train for.
'optimizer_fn' : tf.train.MomentumOptimizer(learning_rate=2e-3, momentum=0.98),
'report_every' : 1, # Report training results every nr of epochs.
'eval_every' : 1, # Evaluate on validation data every nr of epochs.
'stop_early' : True, # Use early stopping or not.
}
# Build, train, evaluate and plot the results!
trained_model, training_results = build_train_eval_and_plot(
model_params,
training_params,
verbose=True # Modify as desired.
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Try GAIA with 2MASS
Step2: Try GAIA and WISE
Step3: Try GAIA + WISE with ProperMotion limit
Step4: Try GAIA-WISE-2MASS directly
|
<ASSISTANT_TASK:>
Python Code:
from astroquery.gaia import Gaia
tables = Gaia.load_tables(only_names=True)
for table in (tables):
print (table.get_qualified_name())
#obj = ["3C 454.3", 343.49062, 16.14821, 1.0]
obj = ["PKS J0006-0623", 1.55789, -6.39315, 1]
#obj = ["M87", 187.705930, 12.391123, 1.0]
#### name, ra, dec, radius of cone (in deg)
obj_name = obj[0]
obj_ra = obj[1]
obj_dec = obj[2]
cone_radius = obj[3]
obj_coord = coordinates.SkyCoord(ra=obj_ra, dec=obj_dec, unit=(u.deg, u.deg), frame="icrs")
# cmd = "SELECT * \
# FROM gaiadr2.gaia_source \
# WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec), \
# CIRCLE('ICRS'," + str(obj_ra) + "," + str(obj_dec) + "," + str(cone_radius) + "))=1;"
cmd = "SELECT * FROM gaiadr2.gaia_source AS g, \
gaiadr2.tmass_best_neighbour AS tbest, \
gaiadr1.tmass_original_valid AS tmass \
WHERE g.source_id = tbest.source_id AND tbest.tmass_oid = tmass.tmass_oid AND CONTAINS(POINT('ICRS',g.ra,g.dec),\
CIRCLE('ICRS'," + str(obj_ra) + "," + str(obj_dec) + "," + str(cone_radius) + "))=1;"
print(cmd)
job = Gaia.launch_job_async(cmd, dump_to_file=True)
print (job)
# GAIA
r = job.get_results()
print(len(r['source_id']))
print(r['phot_g_mean_mag', 'phot_bp_mean_mag', 'phot_rp_mean_mag', 'j_m', 'h_m', 'ks_m', 'tmass_oid'])
cmd = "SELECT * FROM gaiadr2.gaia_source AS g, \
gaiadr2.allwise_best_neighbour AS wbest, \
gaiadr1.allwise_original_valid AS allwise \
WHERE g.source_id = wbest.source_id AND wbest.allwise_oid = allwise.allwise_oid AND CONTAINS(POINT('ICRS',g.ra,g.dec),\
CIRCLE('ICRS'," + str(obj_ra) + "," + str(obj_dec) + "," + str(cone_radius) + "))=1;"
print(cmd)
job = Gaia.launch_job_async(cmd, dump_to_file=True)
print(job)
r = job.get_results()
print(len(r['source_id']))
print(r['w1mpro', 'w2mpro', 'w3mpro', 'w4mpro'])
cmd = "SELECT * FROM gaiadr2.gaia_source AS g, \
gaiadr2.allwise_best_neighbour AS wbest, \
gaiadr1.allwise_original_valid AS allwise \
WHERE g.source_id = wbest.source_id AND wbest.allwise_oid = allwise.allwise_oid AND CONTAINS(POINT('ICRS',g.ra,g.dec),\
CIRCLE('ICRS'," + str(obj_ra) + "," + str(obj_dec) + "," + str(cone_radius) + "))=1 \
AND pmra IS NOT NULL AND abs(pmra)<10 \
AND pmdec IS NOT NULL AND abs(pmdec)<10;"
print(cmd)
job = Gaia.launch_job_async(cmd, dump_to_file=True)
print(job)
r = job.get_results()
print(len(r['source_id']))
print(r['pmra', 'pmdec', 'w1mpro'])
cmd = "SELECT * FROM gaiadr2.gaia_source AS g, \
gaiadr2.allwise_best_neighbour AS wbest, \
gaiadr1.allwise_original_valid AS allwise, \
gaiadr1.tmass_original_valid AS tmass \
WHERE g.source_id = wbest.source_id AND wbest.allwise_oid = allwise.allwise_oid AND CONTAINS(POINT('ICRS',g.ra,g.dec),\
CIRCLE('ICRS'," + str(obj_ra) + "," + str(obj_dec) + "," + str(cone_radius) + "))=1\
AND allwise.tmass_key IS NOT NULL \
AND allwise.tmass_key = tmass.tmass_oid;"
print(cmd)
job = Gaia.launch_job_async(cmd, dump_to_file=True)
print(job)
r = job.get_results()
print(len(r['source_id']))
print(r.colnames)
r['ra', 'dec', 'ra_2', 'dec_2', 'ra_3', 'dec_3', 'phot_g_mean_mag', 'j_m', 'w1mpro', 'tmass_key', 'tmass_oid']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate data
Step2: Transport source samples onto target samples
Step3: Fig 1
Step4: Fig 2
Step5: Fig 3
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import matplotlib.pylab as pl
import ot
n_samples_source = 150
n_samples_target = 150
Xs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source)
Xt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target)
# unsupervised domain adaptation
ot_sinkhorn_un = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_un.fit(Xs=Xs, Xt=Xt)
transp_Xs_sinkhorn_un = ot_sinkhorn_un.transform(Xs=Xs)
# semi-supervised domain adaptation
ot_sinkhorn_semi = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn_semi.fit(Xs=Xs, Xt=Xt, ys=ys, yt=yt)
transp_Xs_sinkhorn_semi = ot_sinkhorn_semi.transform(Xs=Xs)
# semi supervised DA uses available labaled target samples to modify the cost
# matrix involved in the OT problem. The cost of transporting a source sample
# of class A onto a target sample of class B != A is set to infinite, or a
# very large value
# note that in the present case we consider that all the target samples are
# labeled. For daily applications, some target sample might not have labels,
# in this case the element of yt corresponding to these samples should be
# filled with -1.
# Warning: we recall that -1 cannot be used as a class label
pl.figure(1, figsize=(10, 10))
pl.subplot(2, 2, 1)
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Source samples')
pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.xticks([])
pl.yticks([])
pl.legend(loc=0)
pl.title('Target samples')
pl.subplot(2, 2, 3)
pl.imshow(ot_sinkhorn_un.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - unsupervised DA')
pl.subplot(2, 2, 4)
pl.imshow(ot_sinkhorn_semi.cost_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Cost matrix - semisupervised DA')
pl.tight_layout()
# the optimal coupling in the semi-supervised DA case will exhibit " shape
# similar" to the cost matrix, (block diagonal matrix)
pl.figure(2, figsize=(8, 4))
pl.subplot(1, 2, 1)
pl.imshow(ot_sinkhorn_un.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nUnsupervised DA')
pl.subplot(1, 2, 2)
pl.imshow(ot_sinkhorn_semi.coupling_, interpolation='nearest')
pl.xticks([])
pl.yticks([])
pl.title('Optimal coupling\nSemi-supervised DA')
pl.tight_layout()
# display transported samples
pl.figure(4, figsize=(8, 4))
pl.subplot(1, 2, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_un[:, 0], transp_Xs_sinkhorn_un[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nEmdTransport')
pl.legend(loc=0)
pl.xticks([])
pl.yticks([])
pl.subplot(1, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=0.5)
pl.scatter(transp_Xs_sinkhorn_semi[:, 0], transp_Xs_sinkhorn_semi[:, 1], c=ys,
marker='+', label='Transp samples', s=30)
pl.title('Transported samples\nSinkhornTransport')
pl.xticks([])
pl.yticks([])
pl.tight_layout()
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find Your Object Storage Credentials
Step2: Now use the SwiftClient connection to programmatically
|
<ASSISTANT_TASK:>
Python Code:
#!pip install --user --upgrade python-keystoneclient
#!pip install --user --upgrade python-swiftclient
credentials = {
'auth_uri':'',
'global_account_auth_uri':'',
'username':'xx',
'password':"xx",
'auth_url':'https://identity.open.softlayer.com',
'project':'xx',
'project_id':'xx',
'region':'dallas',
'user_id':'xx',
'domain_id':'xx',
'domain_name':'xx',
'tenantId':'xx'
}
import swiftclient.client as swiftclient
conn = swiftclient.Connection(
key=credentials['password'],
authurl=credentials['auth_url']+"/v3",
auth_version='3',
os_options={
"project_id": credentials['project_id'],
"user_id": credentials['user_id'],
"region_name": credentials['region']})
examplefile = 'my_team_name_data_folder/zipfiles/classification_1_narrowband.zip'
etag = conn.put_object('some_container', 'classification_1_narrowband.zip', open(examplefile).read())
classification_results_file = 'my_team_name_data_folder/results/my_final_testset_classes.csv'
etag = conn.put_object('some_container', 'my_final_testset_classes.csv', open(examplefile).read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: target data for training
|
<ASSISTANT_TASK:>
Python Code:
# load the training data
data = pd.read_csv(os.path.abspath('__file__' + "/../../../data/TrainSet.txt"),sep='\t')
data.drop(['Intensity','Odor','Replicate','Dilution'],axis=1, inplace=1)
data.columns = ['#oID', 'individual'] + list(data.columns)[2:]
data.head()
# load leaderboard data and reshape them to match the training data
LB_data_high = pd.read_csv(os.path.abspath('__file__' + "/../../../data/LBs1.txt"),sep='\t')
LB_data_high = LB_data_high.pivot_table(index=['#oID','individual'],columns='descriptor',values='value')
LB_data_high.reset_index(level=[0,1],inplace=1)
LB_data_high.rename(columns={' CHEMICAL':'CHEMICAL'}, inplace=True)
LB_data_high = LB_data_high[data.columns]
LB_data_high.head()
# load leaderboard low intensity data and reshape them to match the training data
LB_data_low = pd.read_csv(os.path.abspath('__file__' + "/../../../data/leaderboard_set_Low_Intensity.txt"),sep='\t')
LB_data_low = LB_data_low.pivot_table(index=['#oID','individual'],columns='descriptor',values='value')
LB_data_low.reset_index(level=[0,1],inplace=1)
LB_data_low.rename(columns={' CHEMICAL':'CHEMICAL'}, inplace=True)
LB_data_low = LB_data_low[data.columns]
LB_data_low.head()
# put them all together
selection_data = pd.concat((data,LB_data_high,LB_data_low),ignore_index=True)
# replace descriptor data with np.nan if intensity is zero
for descriptor in [u'VALENCE/PLEASANTNESS', u'BAKERY', u'SWEET', u'FRUIT', u'FISH',
u'GARLIC', u'SPICES', u'COLD', u'SOUR', u'BURNT', u'ACID', u'WARM',
u'MUSKY', u'SWEATY', u'AMMONIA/URINOUS', u'DECAYED', u'WOOD',
u'GRASS', u'FLOWER', u'CHEMICAL']:
selection_data[descriptor][(selection_data['INTENSITY/STRENGTH'] == 0)] = np.nan
selection_data = selection_data.groupby('#oID').mean()
selection_data.reset_index(inplace=1)
selection_data.drop('individual',1,inplace=1)
selection_data.head()
selection_data = pd.concat((selection_data,selection_data)).sort('#oID')
selection_data.index=range(len(selection_data))
# load test CIDs to remove them from russdata
with open(os.path.abspath('__file__' + "/../../../data/CID_testset.txt")) as f:
content = f.readlines()
test_CIDs = list(content)
test_CIDs = [int(x) for x in test_CIDs]
intensity = russdata[~russdata.CID.isin(test_CIDs)]
intensity.index = range(len(intensity))
print((intensity.CID != selection_data['#oID']).sum())
intensity = intensity['INTENSITY/STRENGTH']
selection_data['INTENSITY/STRENGTH'] = intensity
selection_data.head()
selection_data.shape
selection_data.to_csv('targets_for_feature_selection.csv')
selection_data.head()
# load the train data
data = pd.read_csv(os.path.abspath('__file__' + "/../../../data/TrainSet.txt"),sep='\t')
data.drop(['Odor','Replicate'],axis=1, inplace=1)
data.columns = [u'#oID','Intensity','Dilution', u'individual', u'INTENSITY/STRENGTH', u'VALENCE/PLEASANTNESS', u'BAKERY', u'SWEET', u'FRUIT', u'FISH', u'GARLIC', u'SPICES', u'COLD', u'SOUR', u'BURNT', u'ACID', u'WARM', u'MUSKY', u'SWEATY', u'AMMONIA/URINOUS', u'DECAYED', u'WOOD', u'GRASS', u'FLOWER', u'CHEMICAL']
data.head()
#load LB data
LB_data_high = pd.read_csv(os.path.abspath('__file__' + "/../../../data/LBs1.txt"),sep='\t')
LB_data_high = LB_data_high.pivot_table(index=['#oID','individual'],columns='descriptor',values='value')
LB_data_high.reset_index(level=[0,1],inplace=1)
LB_data_high.rename(columns={' CHEMICAL':'CHEMICAL'}, inplace=True)
LB_data_high['Dilution'] = '1/1,000 '
LB_data_high['Intensity'] = 'high '
LB_data_high = LB_data_high[data.columns]
LB_data_high.head()
# put them together
data = pd.concat((data,LB_data_high),ignore_index=True)
# replace descriptor data with np.nan if intensity is zero
for descriptor in [u'VALENCE/PLEASANTNESS', u'BAKERY', u'SWEET', u'FRUIT', u'FISH',
u'GARLIC', u'SPICES', u'COLD', u'SOUR', u'BURNT', u'ACID', u'WARM',
u'MUSKY', u'SWEATY', u'AMMONIA/URINOUS', u'DECAYED', u'WOOD',
u'GRASS', u'FLOWER', u'CHEMICAL']:
data[descriptor][(data['INTENSITY/STRENGTH'] == 0)] = np.nan
# average the duplicates
data = data.groupby(['individual','#oID','Dilution','Intensity']).mean()
data.reset_index(level=[2,3], inplace=True)
# filter out data with high intensity
data = data[data.Intensity == 'high ']
# drop Dilution and Intensity columns
data.drop(['Dilution','Intensity'],inplace=1,axis=1)
data.reset_index(level=[0,1], inplace=True)
data = data.groupby('#oID').mean()
data.shape
data.head()
#duplacate the matrix, add neglog10 (Dilution) and Intensity data from russdata
data.reset_index(inplace=1)
data = pd.concat((data,data)).sort('#oID')
data.index=range(len(data))
# remove test targets from russdata intensity (Nans)
intensity = russdata[~russdata.CID.isin(test_CIDs)]
intensity.index = range(len(intensity))
print((intensity.CID != data['#oID']).sum()) # check if data orders are the same - should get 0 if they are the same
intensity = intensity['INTENSITY/STRENGTH']
data['INTENSITY/STRENGTH'] = intensity
data.head()
data.shape
#save it
data.to_csv('target.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Google Places API Configuration
Step2: Write a function to calculate the distance from each listing to top trip advisor attractions
Step3: 2. ta_detail(ta_list, city)
Step4: 3. latlong_tuple(ta_df)
Step5: 4. clean_csv(data_in, geo_tuples)
Step6: Reading in the data
Step7: The cell below reads in the original csv file, removes some unwanted listings, and adds a new column that has the average distance from the top 10 Trip Advisor approved(!!) tourist attractions.
Step8: We then make a copy dataframe listing to play around with.
Step9: Visualizing the Data
Step10: Then I used the groupby function for neighbourhoods to see a price comparison between different New York neighbourhoods
Step11: The most expensive neighbourhood
Step12: The Second Most Expensive
Step13: Room Type
Step14: Plotting the Entire Room listings without the top 20 most expensive ones show that there are 2 concentrated correlated areas between average distance and price. The bimodal distribution in average distance might be the concentration of Airbnb listings in Manhattan and Brooklyn
Step15: Plotting a violin diagram of the prices of all entire homes in different neighbourhood groups show us that Manhattan has more distrubted price range of apartments, albeit on the higher end, while Queens and Bronx have higher concentration of listings at a specific point at a lower price range.
Step16: It is likely that some of the listings listed above are specifically for events and photography, rather than for traveler's accomodation. Also it seems like some of the hosts who didn't want to remove their listing from Airbnb but wasn't available to host rather listed the price as 9,900 USD.
Step17: The 2 plots above try to find if there would be any relationship between the number of reviews per month (trust and approval) as well as the average distance from the top attractions. Reviews per month plot does not seem to display any positive correlation between price and user approval, which makes sense as there are many other factors that determine an apartment rental price than user approval.
Step18: The scatterplot above shows how big of a discrepancy apartment prices in Manhattan is. The top 25% of the apartments in Manhattan range in price from 400 USD to more than 700 USD, while those in Bronx span range of just 200 to 300.
|
<ASSISTANT_TASK:>
Python Code:
import sys
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
import seaborn as sns
import statistics
import csv
from scipy import stats
from bs4 import BeautifulSoup as bs
import urllib.request
from googleplaces import GooglePlaces, types, lang
from geopy.distance import great_circle
import geocoder
%matplotlib inline
print('Python version: ', sys.version)
print('Pandas version: ', pd.__version__)
print('Today: ', dt.date.today())
apikey = 'AIzaSyAiZn9omCnuF2q89cArLpVfxmlGV7nnjFA'
gplaces = GooglePlaces(apikey)
def tripadvisor_attractions(url, how_many):
page = urllib.request.urlopen(url)
#using beautiful soup to select targeted div
soup = bs(page.read(), "lxml")
filtered = soup.find("div", {"id": "FILTERED_LIST"})
top_list = filtered.find_all("div", class_="property_title")
sites = []
#save the text within hyperlink into an empty list
for site in top_list:
site = (site.a).text
site = str(site)
if not any(char.isdigit() for char in site):
sites.append(site)
#splices the list by how many places user wants to include
sites = sites[:how_many]
return sites
#ta short for tourist attraction
def ta_detail(ta_list, city):
ta_df = pd.DataFrame( {'Tourist Attraction' : '',
'place_id' : '',
'longitude' : '',
'latitude' : '' },
index = range(len(ta_list)))
for i in range(len(ta_list)):
query_result = gplaces.nearby_search(
location = city,
keyword = ta_list[i],
radius=20000)
#get only the top first query
query = query_result.places[0]
ta_df.loc[i, 'Tourist Attraction'] = query.name
ta_df.loc[i, 'longitude'] = query.geo_location['lng']
ta_df.loc[i, 'latitude'] = query.geo_location['lat']
ta_df.loc[i, 'place_id'] = query.place_id
return ta_df
def latlong_tuple(ta_df):
tuple_list = []
for j, ta in ta_df.iterrows():
ta_geo = (float(ta['latitude']), float(ta['longitude']))
tuple_list.append(ta_geo)
return tuple_list
def clean_csv(data_in, geo_tuples):
#automatically generates a cleaned csv file with the same name with _out.csv extension
index = data_in.find('.csv')
data_out = data_in[:index] + '_out' + data_in[index:]
#some error checking when opening
try:
s = open(data_in, 'r')
except:
print('File not found or cannot be opened')
else:
t = open(data_out, 'w')
print('\n Output from an iterable object created from the csv file')
reader = csv.reader(s)
writer = csv.writer(t, delimiter=',')
#counter for number or rows removed during filtering
removed = 0
added = 0
header = True
for row in reader:
if header:
header = False
for i in range(len(row)):
#saving indices for specific columns
if row[i] == 'latitude':
lat = i
elif row[i] == 'longitude':
lng = i
row.append('avg_dist')
writer.writerow(row)
#only add the row if the number of reviews is more than 1
elif(int(row[-1]) > 7):
#creaing a geo tuple for easy calculation later on
tlat = row[lat]
tlng = row[lng]
ttuple = (tlat, tlng)
dist_calc = []
#calculate the distance from each listing and to every top tourist attractions we saved
#if the distance is for some reason greater than 100, don't add it as it would skew the result.
for i in geo_tuples:
dist_from_spot = round(great_circle(i, ttuple).kilometers, 2)
if (dist_from_spot < 100):
dist_calc.append(dist_from_spot)
else:
print(ta['Tourist Attraction'] + " is too far.")
#calculates the average distance between the listing and all of the toursist attractions
avg_dist = round(statistics.mean(dist_calc), 3)
row.append(avg_dist)
writer.writerow(row)
added += 1
else:
removed += 1
s.close()
t.close()
print('Function Finished')
print(added, 'listings saved')
print(removed, 'listings removed')
url = "https://www.tripadvisor.com/Attractions-g60763-Activities-New_York_City_New_York.html"
top_10 = tripadvisor_attractions(url, 10)
print(top_10)
ta_df = ta_detail(top_10, 'New York, NY')
geo_tuples = latlong_tuple(ta_df)
ta_df
clean_csv("data/listings.csv", geo_tuples)
df = pd.read_csv('data/listings_out.csv')
print('Dimensions:', df.shape)
df.head()
listing = df.copy()
listing.head()
area = listing.groupby('neighbourhood_group')
nbhood_price = area['price'].agg([np.sum, np.mean, np.std])
nbhood_dist = area['avg_dist'].agg([np.sum, np.mean, np.std])
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True)
fig.suptitle('NY Neighbourhoods: Price vs Average Distance to Top Spots', fontsize=10, fontweight='bold')
nbhood_price['mean'].plot(kind='bar', ax=ax[0], color='mediumslateblue')
nbhood_dist['mean'].plot(kind='bar', ax=ax[1], color = 'orchid')
ax[0].set_ylabel('Price', fontsize=10)
ax[1].set_ylabel('Average Distance', fontsize=10)
area2 = listing.groupby('neighbourhood')
nb_price = area2['price'].agg([np.sum, np.mean, np.std]).sort_values(['mean'])
nb_dist = area2['avg_dist'].agg([np.sum, np.mean, np.std])
fig, ax = plt.subplots(figsize=(4, 35))
fig.suptitle('Most Expensive Neighbourhoods on Airbnb', fontsize=10, fontweight='bold')
nb_price['mean'].plot(kind='barh', ax=ax, color='salmon')
breezy = listing.loc[listing['neighbourhood'] == 'Breezy Point']
breezy
beach = listing.loc[listing['neighbourhood'] == 'Manhattan Beach']
beach
area = listing.groupby('room_type')
room_price = area['price'].agg([np.sum, np.mean, np.std])
room_dist = area['avg_dist'].agg([np.sum, np.mean, np.std])
room_price['mean'].plot(title="Average Price by Room Type")
apt = listing.loc[listing['room_type'] == 'Entire home/apt']
apt = apt.sort_values('price', ascending=False)
apt.drop(apt.head(20).index, inplace=True)
apt.head()
sns.jointplot(x='avg_dist', y="price", data=apt, kind='kde')
f, ax = plt.subplots(figsize=(11, 6))
sns.violinplot(x="neighbourhood_group", y="price", data=apt, palette="Set3")
fancy = listing.sort_values('price', ascending=False).iloc[:50]
fancy.head(10)
fancy.describe()
reviewed = listing.loc[listing['number_of_reviews'] > 1]
reviewed.describe()
reviewed = reviewed[((reviewed['price'] - reviewed['price'].mean()) / reviewed['price'].std()).abs() < 3]
reviewed.describe()
fig, axs = plt.subplots(1, 2, sharey=True)
fig.suptitle('Do Reviews and Price Matter?', fontsize=20, fontweight='bold')
reviewed.plot(kind='scatter', x='reviews_per_month', y='price', ax=axs[0], figsize=(16, 8))
reviewed.plot(kind='scatter', x='avg_dist', y='price', ax=axs[1])
f, ax = plt.subplots(figsize=(11, 5))
sns.boxplot(x="neighbourhood_group", y="price", hue="room_type", data=reviewed, palette="PRGn")
reviewed2 = reviewed[((reviewed['price'] - reviewed['price'].mean()) / reviewed['price'].std()).abs() < 2]
sns.jointplot(x='avg_dist', y="price", data=reviewed2, kind='kde')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Asset prices have time-varying volatility (variance of day over day returns). In some periods, returns are highly variable, while in others very stable. Stochastic volatility models model this with a latent volatility variable, modeled as a stochastic process. The following model is similar to the one described in the No-U-Turn Sampler paper, Hoffman (2011) p21.
Step2: Specifying the model in pymc3 mirrors its statistical specification.
Step3: Fit Model
Step4: We do a short initial run to get near the right area, then start again using a new Hessian at the new starting point to get faster sampling due to better scaling. We do a short run since this is an interactive example.
Step5: Looking at the returns over time and overlaying the estimated standard deviation we can see how the model tracks the volatility over time.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pymc3 as pm
from pymc3.distributions.timeseries import GaussianRandomWalk
from scipy.sparse import csc_matrix
from scipy import optimize
%pylab inline
n = 400
returns = np.genfromtxt("data/SP500.csv")[-n:]
returns[:5]
plt.plot(returns)
model = pm.Model()
with model:
sigma = pm.Exponential('sigma', 1./.02, testval=.1)
nu = pm.Exponential('nu', 1./10)
s = GaussianRandomWalk('s', sigma**-2, shape=n)
r = pm.T('r', nu, lam=pm.exp(-2*s), observed=returns)
with model:
start = pm.find_MAP(vars=[s], fmin=optimize.fmin_l_bfgs_b)
with model:
step = pm.NUTS(vars=[s, nu,sigma],scaling=start, gamma=.25)
start2 = pm.sample(100, step, start=start)[-1]
# Start next run at the last sampled position.
step = pm.NUTS(vars=[s, nu,sigma],scaling=start2, gamma=.55)
trace = pm.sample(2000, step, start=start2)
figsize(12,6)
pm.traceplot(trace, model.vars[:-1]);
figsize(12,6)
title(str(s))
plot(trace[s][::10].T,'b', alpha=.03);
xlabel('time')
ylabel('log volatility')
plot(returns)
plot(np.exp(trace[s][::10].T), 'r', alpha=.03);
sd = np.exp(trace[s].T)
plot(-np.exp(trace[s][::10].T), 'r', alpha=.03);
xlabel('time')
ylabel('returns')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: crc diffs from observed omnipod single bit diff messages
Step2: Look at single bit diffs with unshift_xor()
|
<ASSISTANT_TASK:>
Python Code:
poly = 0x104c1
crc = crcmod.mkCrcFun(poly, initCrc=0, xorOut=0x0, rev=False)
# Now consider two CRC values obtained from two 1-bit messages,
# where the 1 bits are in adjacent positions. The resulting CRCs
# will differ by just one shift-xor cycle. To be precise, if
# C1 corresponds to the message with a 1 in position i, and
# C2 corresponds to the message with a 1 in position i+1, then
# C1 is derived from applying one shift-xor cycle to C2.
# (If this seems backwards, it's because the further the 1
# bit is from the end of the message, the more shift-xor cycles
# get applied to the CRC.)
# The unshift_xor() function tries to reverse a shift-xor cycle
def unshift_xor(a,b):
return ((b << 1) ^ a) & 0xffff
def view_diff(msg, bit_to_flip):
m1 = msg.decode('hex')
m2 = flip_bit(m1,bit_to_flip)
diff = xor_strings(m1,m2)
print "m1 %s, crc = %s" % (bin_str(m1), bin_short(crc(m1)))
print "m2 %s, crc = %s" % (bin_str(m2), bin_short(crc(m2)))
print "diff %s, crc = %s" % (bin_str(diff), bin_short(crc(diff)))
crc_diff = crc(m1) ^ crc(m2)
print " xored crcs = %s" % bin_short(crc_diff)
return crc_diff
msg = "deadbeef"
crc_diff4 = view_diff(msg, 4)
print "=" * 80
crc_diff5 = view_diff(msg, 5)
print "=" * 80
crc_diff6 = view_diff(msg, 6)
print "unshift_xor at 4 = %s" % bin_short(unshift_xor(crc_diff4, crc_diff5) & 0xffff)
print "unshift_xor at 5 = %s" % bin_short(unshift_xor(crc_diff5, crc_diff6) & 0xffff)
print "original poly = %s" % bin_short(poly & 0xffff)
# The entry at crc_diff_dict[10][30] means we observed two 10 bytes messages that differed
# only by a single bit, and the value at that entry is the xor of their crcs.
crc_diff_dict = {
# Messages of length 10
10: {
30: 0b1000001010011011,
31: 0b1000000101010001,
34: 0b0000001110010010,
35: 0b1000001100101000,
36: 0b0000001100001110,
37: 0b1000000100001011,
},
# Messages of length 3
3: {
30: 0b1000000100011111,
31: 0b0000001000111100,
34: 0b1000001011011100,
35: 0b0000000111010111,
36: 0b1000001101011001,
37: 0b1000000000011000,
38: 0b1000001010001110,
116: 0b0000000000101010,
117: 0b0000000011011010,
}
}
def crc_for(l,n):
crc = crc_diff_dict[l][n]
#crc = single_bit_crcs10[n]
# collapse 5 bit 'hole'
#crc = (crc & 0b1111111111) + ((crc >> 5) & 0b10000000000)
# Drop separate high bit
#crc = crc & 0b1111111111
# Just look at lowest 8 bits
#crc = crc & 0b11111111
return crc
def diff_at(l,n):
return unshift_xor(crc_for(l,n), crc_for(l,n+1))
def show_diff_at(l,n):
print "crc(%d) = %s" % (n, bin_short(crc_for(l,n)))
print "crc(%d) = %s" % (n+1, bin_short(crc_for(l,n+1)))
d = diff_at(l,n)
print "unshift_xor at %d = %s" % (n, bin_short(d))
print "=" * 80
show_diff_at(3,30)
show_diff_at(3,34)
show_diff_at(3,35)
show_diff_at(3,36)
show_diff_at(3,37)
show_diff_at(3,116)
print "*" * 80
show_diff_at(10,30)
show_diff_at(10,34)
show_diff_at(10,35)
show_diff_at(10,36)
#df = pd.DataFrame(map(lambda x: [x, diff_at(x)], [30, 34, 35, 36]), columns=['idx', 'crc'])
#df.plot.scatter(x='idx', y='crc', marker='.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can also define your own problems (how?).
Step2: 3.5.2 Depth-First Search
Step3: 3.6.1 A* Search
Step4: 3.7.2 A* Search with Multiple Path Pruning
Step5: 3.8.1 Branch-and-bound Search
|
<ASSISTANT_TASK:>
Python Code:
# Run this to import pre-defined problems
from aipython.searchProblem import search_simple1, search_simple2, search_cyclic_delivery, search_acyclic_delivery, search_tree, search_extended_tree, search_cyclic, search_vancouver_neighbour, search_misleading_heuristic, search_multiple_path_pruning, search_module_4_graph, search_module_5_graph, search_bicycle_courier_acyclic, search_bicycle_courier_cyclic
# Run this to import utilities that support self-defined problems
from aipython.searchProblem import Arc, Search_problem_from_explicit_graph
from aipython.searchGeneric import Searcher
s = Searcher(problem=search_simple2)
# Visualization options
# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options
s.sleep_time = 0.2 # The time, in seconds, between each step in auto solving
s.line_width = 2.0 # The thickness of edges
s.text_size = 13 # The fontsize of the text
s.detail_level = 2 # 0=no text, 1=truncated text, 2=full text
s.show_edge_costs = True
s.show_node_heuristics = False
# Controls the layout engine used. Either "force" for force layout, or "tree".
s.layout_method = "force"
# s.layout_method = "tree"
# Display the widget
display(s)
s.search()
from aipython.searchGeneric import AStarSearcher
s_astar = AStarSearcher(problem=search_simple1)
# Visualization options
# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options
s_astar.sleep_time = 0.2 # The time, in seconds, between each step in auto solving
s_astar.line_width = 2.0 # The thickness of edges
s_astar.text_size = 13 # The fontsize of the text
s_astar.detail_level = 2 # 0=no text, 1=truncated text, 2=full text
s_astar.show_edge_costs = True
s_astar.show_node_heuristics = True
# Display the widget
display(s_astar)
s_astar.search()
from aipython.searchMPP import SearcherMPP
s_mpp = SearcherMPP(problem=search_simple1)
# Visualization options
# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options
s_mpp.sleep_time = 0.2 # The time, in seconds, between each step in auto solving
s_mpp.line_width = 2.0 # The thickness of edges
s_mpp.text_size = 13 # The fontsize of the text
s_mpp.detail_level = 1 # 0=no text, 1=truncated text, 2=full text
s_mpp.show_edge_costs = True
s_mpp.show_node_heuristics = True
# Display the widget
display(s_mpp)
s_mpp.search()
from aipython.searchBranchAndBound import DF_branch_and_bound
s_dfbb = DF_branch_and_bound(problem=search_simple1)
# Visualization options
# For more explanation please visit: https://aispace2.github.io/AISpace2/tutorial.html#tutorial-common-visualization-options
s_dfbb.sleep_time = 0.2 # The time, in seconds, between each step in auto solving
s_dfbb.line_width = 2.0 # The thickness of edges
s_dfbb.text_size = 13 # The fontsize of the text
s_dfbb.detail_level = 2 # 0=no text, 1=truncated text, 2=full text
s_dfbb.show_edge_costs = True
s_dfbb.show_node_heuristics = True
# Display the widget
display(s_dfbb)
s_dfbb.search()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image classification with TensorFlow Lite Model Maker
Step2: Import the required packages.
Step3: Simple End-to-End Example
Step4: You could replace image_path with your own image folders. As for uploading data to colab, you could find the upload button in the left sidebar shown in the image below with the red rectangle. Just have a try to upload a zip file and unzip it. The root file path is the current path.
Step5: Step 2. Customize the TensorFlow model.
Step6: Step 3. Evaluate the model.
Step7: Step 4. Export to TensorFlow Lite model.
Step8: After this simple 4 steps, we could further use TensorFlow Lite model file in on-device applications like in image classification reference app.
Step 1
Step9: Use ImageClassifierDataLoader class to load data.
Step10: Split it to training data (80%), validation data (10%, optional) and testing data (10%).
Step11: Show 25 image examples with labels.
Step12: Step 2
Step13: Have a look at the detailed model structure.
Step14: Step 3
Step15: We could plot the predicted results in 100 test images. Predicted labels with red color are the wrong predicted results while others are correct.
Step16: If the accuracy doesn't meet the app requirement, one could refer to Advanced Usage to explore alternatives such as changing to a larger model, adjusting re-training parameters etc.
Step 4
Step17: See example applications and guides of image classification for more details about how to integrate the TensorFlow Lite model into mobile apps.
Step18: You can also evalute the tflite model with the evaluate_tflite method.
Step19: Advanced Usage
Step20: Then we export TensorFlow Lite model with such configuration.
Step21: In Colab, you can download the model named model_quant.tflite from the left sidebar, same as the uploading part mentioned above.
Step22: Evaluate the newly retrained MobileNetV2 model to see the accuracy and loss in testing data.
Step23: Change to the model in TensorFlow Hub
Step24: Then, by setting parameter model_spec to inception_v3_spec in create method, we could retrain the Inception V3 model.
Step25: Evaluate the newly retrained model with 10 training epochs.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tflite-model-maker
import numpy as np
import tensorflow as tf
assert tf.__version__.startswith('2')
from tflite_model_maker import configs
from tflite_model_maker import ExportFormat
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker import model_spec
import matplotlib.pyplot as plt
image_path = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data = ImageClassifierDataLoader.from_folder(image_path)
train_data, test_data = data.split(0.9)
model = image_classifier.create(train_data)
loss, accuracy = model.evaluate(test_data)
model.export(export_dir='.')
image_path = tf.keras.utils.get_file(
'flower_photos',
'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
untar=True)
data = ImageClassifierDataLoader.from_folder(image_path)
train_data, rest_data = data.split(0.8)
validation_data, test_data = rest_data.split(0.5)
plt.figure(figsize=(10,10))
for i, (image, label) in enumerate(data.dataset.take(25)):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image.numpy(), cmap=plt.cm.gray)
plt.xlabel(data.index_to_label[label.numpy()])
plt.show()
model = image_classifier.create(train_data, validation_data=validation_data)
model.summary()
loss, accuracy = model.evaluate(test_data)
# A helper function that returns 'red'/'black' depending on if its two input
# parameter matches or not.
def get_label_color(val1, val2):
if val1 == val2:
return 'black'
else:
return 'red'
# Then plot 100 test images and their predicted labels.
# If a prediction result is different from the label provided label in "test"
# dataset, we will highlight it in red color.
plt.figure(figsize=(20, 20))
predicts = model.predict_top_k(test_data)
for i, (image, label) in enumerate(test_data.dataset.take(100)):
ax = plt.subplot(10, 10, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image.numpy(), cmap=plt.cm.gray)
predict_label = predicts[i][0][0]
color = get_label_color(predict_label,
test_data.index_to_label[label.numpy()])
ax.xaxis.label.set_color(color)
plt.xlabel('Predicted: %s' % predict_label)
plt.show()
model.export(export_dir='.')
model.export(export_dir='.', export_format=ExportFormat.LABEL)
model.evaluate_tflite('model.tflite', test_data)
config = configs.QuantizationConfig.create_full_integer_quantization(representative_data=test_data, is_integer_only=True)
model.export(export_dir='.', tflite_filename='model_quant.tflite', quantization_config=config)
model = image_classifier.create(train_data, model_spec=model_spec.mobilenet_v2_spec, validation_data=validation_data)
loss, accuracy = model.evaluate(test_data)
inception_v3_spec = model_spec.ImageModelSpec(
uri='https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1')
inception_v3_spec.input_image_shape = [299, 299]
model = image_classifier.create(train_data, validation_data=validation_data, epochs=10)
loss, accuracy = model.evaluate(test_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine Decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element Space (Taylor-Hood P2-P1)
Step4: 4.3. Allocate an object of the NavierStokes class
Step5: 4.4. Prepare reduction with a POD-Galerkin method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
<ASSISTANT_TASK:>
Python Code:
from ufl import transpose
from dolfin import *
from rbnics import *
@ExactParametrizedFunctions()
@PullBackFormsToReferenceDomain()
@ShapeParametrization(
("x[0]", "x[1]"), # subdomain 1
("x[0]", "mu[1] / 2. * x[1] + (2. - mu[1])"), # subdomain 2
)
class NavierStokes(NavierStokesProblem):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call the standard initialization
NavierStokesProblem.__init__(self, V, **kwargs)
# ... and also store FEniCS data structures for assembly
assert "subdomains" in kwargs
assert "boundaries" in kwargs
self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
dup = TrialFunction(V)
(self.du, self.dp) = split(dup)
(self.u, _) = split(self._solution)
vq = TestFunction(V)
(self.v, self.q) = split(vq)
self.dx = Measure("dx")(subdomain_data=self.subdomains)
self.ds = Measure("ds")(subdomain_data=self.boundaries)
# ... as well as forcing terms and inlet velocity
self.inlet = Expression(("1. / 2.25 * (x[1] - 2) * (5 - x[1])", "0."), degree=2)
self.f = Constant((0.0, 0.0))
self.g = Constant(0.0)
# Customize nonlinear solver parameters
self._nonlinear_solver_parameters.update({
"linear_solver": "mumps",
"maximum_iterations": 20,
"report": True
})
# Return custom problem name
def name(self):
return "NavierStokesExact2"
# Return theta multiplicative terms of the affine expansion of the problem.
@compute_theta_for_derivatives
@compute_theta_for_supremizers
def compute_theta(self, term):
mu = self.mu
if term == "a":
theta_a0 = 1.
return (theta_a0,)
elif term in ("b", "bt"):
theta_b0 = 1.
return (theta_b0,)
elif term == "c":
theta_c0 = 1.
return (theta_c0,)
elif term == "f":
theta_f0 = 1.
return (theta_f0,)
elif term == "g":
theta_g0 = 1.
return (theta_g0,)
elif term == "dirichlet_bc_u":
theta_bc00 = mu[0]
return (theta_bc00,)
else:
raise ValueError("Invalid term for compute_theta().")
# Return forms resulting from the discretization of the affine expansion of the problem operators.
@assemble_operator_for_derivatives
@assemble_operator_for_supremizers
def assemble_operator(self, term):
dx = self.dx
if term == "a":
u = self.du
v = self.v
a0 = inner(grad(u) + transpose(grad(u)), grad(v)) * dx
return (a0,)
elif term == "b":
u = self.du
q = self.q
b0 = - q * div(u) * dx
return (b0,)
elif term == "bt":
p = self.dp
v = self.v
bt0 = - p * div(v) * dx
return (bt0,)
elif term == "c":
u = self.u
v = self.v
c0 = inner(grad(u) * u, v) * dx
return (c0,)
elif term == "f":
v = self.v
f0 = inner(self.f, v) * dx
return (f0,)
elif term == "g":
q = self.q
g0 = self.g * q * dx
return (g0,)
elif term == "dirichlet_bc_u":
bc0 = [DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1),
DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 2)]
return (bc0,)
elif term == "inner_product_u":
u = self.du
v = self.v
x0 = inner(grad(u), grad(v)) * dx
return (x0,)
elif term == "inner_product_p":
p = self.dp
q = self.q
x0 = inner(p, q) * dx
return (x0,)
else:
raise ValueError("Invalid term for assemble_operator().")
# Customize the resulting reduced problem
@CustomizeReducedProblemFor(NavierStokesProblem)
def CustomizeReducedNavierStokes(ReducedNavierStokes_Base):
class ReducedNavierStokes(ReducedNavierStokes_Base):
def __init__(self, truth_problem, **kwargs):
ReducedNavierStokes_Base.__init__(self, truth_problem, **kwargs)
self._nonlinear_solver_parameters.update({
"report": True,
"line_search": "wolfe"
})
return ReducedNavierStokes
mesh = Mesh("data/backward_facing_step.xml")
subdomains = MeshFunction("size_t", mesh, "data/backward_facing_step_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/backward_facing_step_facet_region.xml")
element_u = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_p = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_u, element_p)
V = FunctionSpace(mesh, element, components=[["u", "s"], "p"])
problem = NavierStokes(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(1.0, 70.0), (1.5, 2.5)]
problem.set_mu_range(mu_range)
reduction_method = PODGalerkin(problem)
reduction_method.set_Nmax(20)
lifting_mu = (1.0, 2.0)
problem.set_mu(lifting_mu)
reduction_method.initialize_training_set(100, sampling=EquispacedDistribution())
reduced_problem = reduction_method.offline()
online_mu = (70.0, 1.5)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem, component="u")
plot(reduced_solution, reduced_problem=reduced_problem, component="p")
reduction_method.initialize_testing_set(36, sampling=EquispacedDistribution())
reduction_method.error_analysis()
reduction_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Vinet EOS
Step2: Set pressure range and number of data points.
Step3: Calculate volume of MgO at different pressures.
Step4: 3. What if you use those numbers for a wrong equation
Step5: You will systematically underestimate pressure by using $K_0$ and $K_0'$ obtained from Vinet equation in BM3 equation.
Step6: Generate parameters. Note that k0p is uncertainty formatted and therefore, you need to take nominal value only by .n.
Step7: Fix parameters.
Step8: Run fitting
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pytheos as eos
import uncertainties as uct
from uncertainties import unumpy as unp
v0 = {'Pt': 3.9231**3, 'Au': 4.07860**3, 'MgO': 74.698}
k0 = {'Pt': 277.3, 'Au': 167.0, 'MgO': 160.3}
k0p = {'Pt': uct.ufloat(5.226, 0.033), 'Au': uct.ufloat(5.813, 0.022), 'MgO': uct.ufloat(4.109, 0.022)}
p_max = 150.
n_pts = 100
p = np.linspace(0.,p_max, n_pts)
standard = 'MgO'
v = eos.vinet_v(p, v0[standard], k0[standard], k0p[standard])
plt.plot(p,unp.nominal_values(v))
plt.xlabel('Pressure (GPa)')
plt.ylabel('Unit-cell volume ($\mathrm{\AA}^3$)');
v_bm3 = eos.bm3_v(p, v0[standard], k0[standard], k0p[standard])
plt.plot(p,unp.nominal_values(v - v_bm3))
plt.xlabel('Pressure (GPa)')
plt.ylabel('$\Delta$ P (GPa)');
model_bm3 = eos.BM3Model()
params = model_bm3.make_params(v0=v0[standard], k0=k0[standard], k0p=k0p[standard].n)
params['v0'].vary = False
params['k0'].vary = False
fitresult_bm3 = model_bm3.fit(p, params, v=unp.nominal_values(v))
print(fitresult_bm3.fit_report())
eos.plot.static_fit_result(fitresult_bm3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. xarray
Step2: Printing the dataset content gives you an overview of the dimension and variable names, their sizes, and the global file attributes.
Step3: This is very similar to the ncdump output, and corresponds to the output from xarray.
Step4: Ah, that's better. Here we can see the time displayed in a readable way, because xarray use the datetime64 module under the hood. Also the variable and coordinate attributes are shown.
Step5: 3.3 Select variable and coordinate variables
Step6: 2. PyNIO
Step7: The variables have different data types
Step8: 3.3 Dimensions, shape and size
Step9: 2. PyNIO
Step10: 3.4 Variable attributes
Step11: 2. PyNIO
Step12: Let's see how we can get the content of an attribute.
Step13: 2. PyNIO
Step14: 3.5 Time
Step15: 2. PyNIO
Step16: The returned time value is the value stored in the netCDF file and it has to be converted to a date string.
Step17: 3.6 Read a GRIB file
Step18: 2. PyNIO
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
fname = './data/tsurf.nc'
import xarray as xr
ds = xr.open_dataset(fname)
print(ds)
import Nio
f = Nio.open_file(fname,"r")
print(f)
coords = ds.coords
variables = ds.variables
print('--> coords: \n\n', coords)
print('--> variables: \n\n', variables)
coords_nio = f.dimensions.keys()
variables_nio = f.variables.keys()
print(coords_nio)
print(variables_nio)
#print f.variables['varName']
coord_nio = f.dimensions.keys()
varNames = f.variables.keys()
for i in varNames:
print(f.variables[i])
print(f.variables[i][:])
tsurf = ds.tsurf
lat = tsurf.lat
lon = tsurf.lon
print('Variable tsurf: \n', tsurf.data)
print('\nCoordinate variable lat: \n', lat.data)
print('\nCoordinate variable lon: \n', lon.data)
tsurf_nio = f.variables['tsurf'][:,:,:]
lat_nio = f.variables['lat'][:]
lon_nio = f.variables['lon'][:]
print('Variable tsurf_nio: \n', tsurf_nio)
print('\nCoordinate variable lat_nio: \n', lat_nio)
print('\nCoordinate variable lon_nio: \n', lon_nio)
print(type(tsurf))
print(type(tsurf_nio))
dimensions = ds.dims
shape = tsurf.shape
size = tsurf.size
rank = len(shape)
print('dimensions: ', dimensions)
print('shape: ', shape)
print('size: ', size)
print('rank: ', rank)
dimensions_nio = f.dimensions
shape_nio = tsurf_nio.shape
size_nio = tsurf_nio.size
rank_nio = len(shape_nio) # or rank_nio = f.variables["tsurf"].rank
print('dimensions: ', dimensions_nio)
print('shape: ', shape_nio)
print('size: ', size_nio)
print('rank_nio: ', rank_nio)
attributes = list(tsurf.attrs)
print('attributes: ', attributes)
attributes_nio = list(f.variables['tsurf'].attributes.keys())
print('attributes_nio: ', attributes_nio)
long_name = tsurf.long_name
units = tsurf.units
print('long_name: ', long_name)
print('units: ', units)
long_name_nio = f.variables["tsurf"].attributes['long_name']
units_nio = f.variables["tsurf"].attributes['units']
print('long_name_nio: ', long_name_nio)
print('units_nio: ', units_nio)
time = ds.time.data
print('timestep 0: ', time[0])
time_nio = f.variables['time'][:]
print('timestep 0: ', time_nio[0])
import netCDF4
time_nio_units = f.variables["time"].attributes['units']
time_nio_calendar = f.variables["time"].attributes['calendar']
date_nio = netCDF4.num2date(time_nio[0], units=time_nio_units, calendar=time_nio_calendar)
print('timestep 0: ', date_nio)
import cfgrib
ds2 = xr.open_dataset('./data/MET9_IR108_cosmode_0909210000.grb2', engine='cfgrib')
variables2 = ds2.variables
print('--> variables2: \n\n', variables2)
f2 = Nio.open_file('./data/MET9_IR108_cosmode_0909210000.grb2',"r")
variables_nio2 = f2.variables.keys()
for i in variables_nio2:
print(f2.variables[i])
print(f2.variables[i][:])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, let's consider
Step2: Now let's build a Numpy array regularly sampling this function at a density of 5 samples per mm
Step3: We can visualize this array (and thus the function f) either using a Raster, which uses the array's own integer-based coordinate system (which we will call "array" coordinates), or an Image, which uses a continuous coordinate system, or as a HeatMap labelling each value explicitly
Step4: Both the Raster and Image Element types accept the same input data and show the same arrangement of colors, but a visualization of the Raster type reveals the underlying raw array indexing, while the Image type has been labelled with the coordinate system from which we know the data has been sampled. All Image operations work with this continuous coordinate system instead, while the corresponding operations on a Raster use raw array indexing.
Step5: You can see that the Raster and the underlying .data elements both use Numpy's raw integer indexing, while the Image uses floating-point values that are then mapped onto the appropriate array element.
Step6: The image now has higher resolution, but still visualizes the same underlying continuous function, now evaluated at 100 grid positions instead of 25
Step7: The array-based indexes used by Raster and the Numpy array in .data still return the second item in the first row of the array, but this array element now corresponds to location (-0.35,0.4) in the continuous function, and so the value is different. These indexes thus do not refer to the same location in continuous space as they did for the other array density, because raw Numpy-based indexing is not independent of density or resolution.
Step8: This slice has selected those array elements whose centers are contained within the specified continuous space. To do this, the continuous coordinates are first converted by HoloViews into the floating-point range (5.125,2.250) (2.125,5.250) of array coordinates, and all those elements whose centers are in that range are selected
Step9: Hopefully these examples make it clear that if you are using data that is sampled from some underlying continuous system, you should use the continuous coordinates offered by HoloViews objects like Image so that your programs can be independent of the resolution or sampling density of that data, and so that your axes and indexes can be expressed naturally, using the actual units of the underlying continuous space. The data will still be stored in the same Numpy array, but now you can treat it consistently like the approximation to continuous values that it is.
Step10: Similarly, if we ask for the value of a given y location in continuous space, we will get a Curve with the array row closest to that y value in the Image 2D array returned as an array of $x$ values and the corresponding z value from the image
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import holoviews as hv
hv.notebook_extension()
np.set_printoptions(precision=2, linewidth=80)
%opts HeatMap (cmap="hot")
def f(x,y):
return x+y/3.1
region=(-0.5,-0.5,0.5,0.5)
def coords(bounds,samples):
l,b,r,t=bounds
hc=0.5/samples
return np.meshgrid(np.linspace(l+hc,r-hc,samples),
np.linspace(b+hc,t-hc,samples))
f5=f(*coords(region,5))
f5
r5 = hv.Raster(f5, label="R5")
i5 = hv.Image( f5, label="I5", bounds=region)
h5 = hv.HeatMap([(x, y, f5[4-y,x]) for x in range(0,5) for y in range(0,5)], label="H5")
r5+i5+h5
"r5[0,1]=%0.2f r5.data[0,1]=%0.2f i5[-0.2,0.4]=%0.2f i5[-0.24,0.37]=%0.2f i5.data[0,1]=%0.2f" % \
(r5[1,0], r5.data[0,1], i5[-0.2,0.4], i5[-0.24,0.37], i5.data[0,1])
f10=f(*coords(region,10))
f10
r10 = hv.Raster(f10, label="R10")
i10 = hv.Image(f10, label="I10", bounds=region)
r10+i10
"r10[0,1]=%0.2f r10.data[0,1]=%0.2f i10[-0.2,0.4]=%0.2f i10[-0.24,0.37]=%0.2f i10.data[0,1]=%0.2f" % \
(r10[1,0], r10.data[0,1], i10[-0.2,0.4], i10[-0.24,0.37], i10.data[0,1])
sl10=i10[-0.275:0.025,-0.0125:0.2885]
sl10.data
sl10
r5[0:3,1:3] + r5[0:3,1:2]
e10=i10.sample(x=-0.275, y=0.2885)
e10
r10=i10.sample(y=0.2885)
r10
r10.data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import data
Step2: Train MLP using random grid search
Step3: Select best model
Step5: Best known H2O MLP for MNIST
Step7: Create Submission
|
<ASSISTANT_TASK:>
Python Code:
# imports
import h2o
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
from h2o.grid.grid_search import H2OGridSearch
# display matplotlib graphics in notebook
%matplotlib inline
# start and connect to h2o server
h2o.init()
# load clean data
path = '../data/'
# define input variable measurement levels
# strings automatically parsed as enums (nominal)
# numbers automatically parsed as numeric
col_types = {'label': 'enum'}
train = h2o.import_file(path=path + 'train.csv', col_types=col_types) # multi-threaded import
test = h2o.import_file(path=path + 'test.csv')
train.describe()
# split into 40% training, 30% validation, and 30% test
train, valid = train.split_frame([0.8])
# assign target and inputs
y = 'label'
X = [name for name in train.columns if name != y]
print(y)
print(X)
# set target to factor - for multinomial classification
train[y] = train[y].asfactor()
valid[y] = valid[y].asfactor()
# NN with random hyperparameter search
# train many different NN models with random hyperparameters
# and select best model based on validation error
# define random grid search parameters
hyper_parameters = {'hidden': [[500, 500], [250, 250, 250, 250], [1000, 500], [500, 1000], [1000, 500, 250], [1000, 1000]],
'l1':[s/1e4 for s in range(0, 1000, 100)],
'l2':[s/1e5 for s in range(0, 1000, 100)],
'input_dropout_ratio':[s/1e2 for s in range(0, 20, 2)]}
# define search strategy
search_criteria = {'strategy':'RandomDiscrete',
'max_models':100,
'max_runtime_secs':60000}
# initialize grid search
gsearch = H2OGridSearch(H2ODeepLearningEstimator,
hyper_params=hyper_parameters,
search_criteria=search_criteria)
# execute training w/ grid search
gsearch.train(x=X,
y=y,
training_frame=train,
validation_frame=valid,
activation='RectifierWithDropout',
epochs=8000,
stopping_rounds=20,
sparse=True, # handles data w/ many zeros more efficiently
ignore_const_cols=True,
adaptive_rate=True)
# view detailed results at http://host:ip/flow/index.html
# show grid search results
gsearch.show()
# select best model
mnist_model = gsearch.get_grid()[0]
# print model information
mnist_model
# hit-ratio = ((TP + TN)/(TP + TN + FP + FN)), for two-classes
best_model = H2ODeepLearningEstimator(
activation = 'RectifierWithDropout',
hidden = [1024,1024,2048],
epochs = 8000,
l1 = 1e-5,
input_dropout_ratio = 0.2,
train_samples_per_iteration = -1,
classification_stop = -1)
# create time stamp
import re
import time
time_stamp = re.sub('[: ]', '_', time.asctime())
# score unlabeled test data
sub = mnist_model.predict(test)
# save file for submission
sub = sub['predict']
import numpy as np # create ID column
sub = h2o.H2OFrame(np.arange(1, 28001)).cbind(sub)
sub.columns = ['ImageId', 'Label']
print(sub.head())
sub_fname = '../data/submission_' + str(time_stamp) + '.csv'
h2o.download_csv(sub, sub_fname)
# shutdown h2o - this will erase all your unsaved frames and models in H2O
h2o.cluster().shutdown(prompt=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 히스토그램을 그리면 다음과 같다.
Step2: 이 히스토그램에서 -0.143394 부터 0.437156 사이의 값이 전체의 약 24%를 차지하고 있음을 알 수 있다. 그럼 만약 -0.01 부터 0.01 사이의 구간에 대한 정보를 얻고 싶다면? 더 세부적인 구간에 대해 정보를 구하고 싶다면 히스토그램의 구간을 더 작게 나누어야 한다.
Step3: 정확한 묘사를 위해 구간의 수를 증가시키면 몇 가지 문제가 발생한다.
Step4: 예를 들어 어떤 데이터의 분포를 묘사하기 위해 데이터의 히스토그램을 그리거나 기술 통계 수치를 제시할 필요없이 다음과 같이 말하는 것 만으로 데이터의 분포에 대한 정보를 완벽하게 전달할 수 있다.
Step5: 이 데이터는 사실 진짜로 컴퓨터 주사위를 던져 만든 숫자이다. 이 숫자를 만드는 코드는 아래와 같다.
|
<ASSISTANT_TASK:>
Python Code:
sp.random.seed(0)
x = sp.random.normal(size=1000)
ns, bins, ps = plt.hist(x, bins=10)
plt.show()
pd.DataFrame([bins, ns/1000])
ns, bins, ps = plt.hist(x, bins=100)
plt.show()
pd.DataFrame([bins, ns/1000])
x = np.linspace(-3, 3, 100)
y = sp.stats.norm.pdf(x)
plt.plot(x, y)
plt.show()
x = np.array([5, 6, 1, 4, 4, 4, 2, 4, 6, 3, 5, 1, 1, 5, 3, 2, 1, 2, 6, 2, 6, 1, 2,
5, 4, 1, 4, 6, 1, 3, 4, 1, 2, 4, 6, 4, 4, 1, 2, 2, 2, 1, 3, 5, 4, 4,
3, 5, 3, 1, 1, 5, 6, 6, 1, 5, 2, 5, 2, 3, 3, 1, 2, 2, 2, 2, 4, 4, 3,
4, 1, 4, 6, 5, 2, 3, 5, 4, 5, 5, 5, 4, 5, 5, 5, 1, 5, 4, 3, 6, 6, 6])
np.random.seed(0)
np.random.randint(1, 7, 92)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables
Step2: Lists
Step3: Appending new items to a list
Step4: Loops and iteration
Step5: If / else conditionals
Step6: Functions
Step7: Dictionaries
Step8: You can iterate through dictionaries too
Step10: Strings
Step11: Strings have many built in methods
Step12: Strings are also a kind of list, and substrings can be accessed with string[start,end]
Step13: Functions
Step14: We can expand on this a bit, adding some features
Step15: Example
Step16: But there is an error! R always appears as "RR" -- so we will replace "RR" with "R"
Step17: Wordcloud library
Step18: Freqency counts
Step19: A better way to do this is
Step20: Outputting to files
Step21: Then expand this to writing a file object
|
<ASSISTANT_TASK:>
Python Code:
2 + 3
2 / 3
2 * 3
2 ** 3
num = 2 ** 3
print(num)
num
type(num)
name = "The Hacker Within"
type(name)
name + 8
name + str(8)
num_list = [0,1,2,3,4,5,6,7,8]
print(num_list)
type(num_list)
num_list[3]
num_list[3] = 10
print(num_list)
num_list.append(3)
print(num_list)
for num in num_list:
print(num)
for num in num_list:
print(num, num * num)
num_list.append("LOL")
print(num_list)
for num in num_list:
if type(num) is int or type(num) is float:
print(num, num * num)
else:
print("ERROR!", num, "is not an int")
def process_list(input_list):
for num in input_list:
if type(num) is int or type(num) is float:
print(num, num * num)
else:
print("ERROR!", num, "is not an int")
process_list(num_list)
process_list([1,3,4,14,1,9])
yearly_value = {2001: 10, 2002: 14, 2003: 18, 2004: 20}
print(yearly_value)
yearly_value = {}
yearly_value[2001] = 10
yearly_value[2002] = 14
yearly_value[2003] = 18
yearly_value[2004] = 20
print(yearly_value)
yearly_value.pop(2001)
yearly_value
yearly_value[2001] = 10213
for key, value in yearly_value.items():
print(key, value)
for key, value in yearly_value.items():
print(key, value * 1.05)
name = "the hacker within"
name_long =
~*~*~*~*~*~*~*~*~*~*~
THE HACKER WITHIN
~*~*~*~*~*~*~*~*~*~*~
print(name)
print(name_long)
print(name.upper())
print(name.split())
print(name.upper().split())
print(name[4:10])
print(name[4:])
print(name[:4])
count = 0
for character in name:
print(count, character)
count = count + 1
print(name.find('hacker'))
print(name[name.find('hacker'):])
def square_num(num):
return num * num
print(square_num(10))
print(square_num(9.1))
print(square_num(square_num(10)))
def yearly_adjustment(yearly_dict, adjustment):
for key, value in yearly_dict.items():
print(key, value * adjustment)
yearly_adjustment(yearly_value, 1.05)
def yearly_adjustment(yearly_dict, adjustment, print_values = False):
adjusted_dict = {}
for key, value in yearly_value.items():
if print_values is True:
print(key, value * adjustment)
adjusted_dict[key] = value * adjustment
return adjusted_dict
adjusted_yearly = yearly_adjustment(yearly_value, 1.05)
adjusted_yearly = yearly_adjustment(yearly_value, 1.05, print_values = True)
adjusted_yearly
!curl -o thw.txt http://stuartgeiger.com/thw.txt
# and that's how it works, that's how you get to curl
with open('thw.txt') as f:
text = f.read()
text
words = text.split()
lines = text.split("\n")
lines[0:5]
text.replace("RR", "R")
text = text.replace("RR", "R")
words = text.split()
lines = text.split("\n")
lines[0:5]
!pip install wordcloud
from wordcloud import WordCloud
wordcloud = WordCloud()
wordcloud.generate(text)
wordcloud.to_image()
wordcloud = WordCloud(width=800, height=300, prefer_horizontal=1, stopwords=None)
wordcloud.generate(text)
wordcloud.to_image()
freq_dict = {}
for word in words:
if word in freq_dict:
freq_dict[word] = freq_dict[word] + 1
else:
freq_dict[word] = 1
print(freq_dict)
freq_dict = {}
for word in words:
freq_dict[word] = freq_dict.get(word, 0) + 1
print(freq_dict)
for word, freq in sorted(freq_dict.items()):
line = word + "\t" + str(freq)
print(line)
with open("freq_dict_thw.csv", 'w') as f:
for word, freq in sorted(freq_dict.items()):
line = word + ", " + str(freq) + "\n"
f.write(line)
!head -10 freq_dict_thw.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 市区町村別の件数を集計
Step2: 成約時点別×市区町村別の件数を集計
Step3: 成約時点別×地域ブロック別の件数を集計
Step4: Histogram
Step5: 価格(自然対数)
Step6: 建築後年数
Step7: Plot
Step8: Main Analysis
Step9: 青がOLSの誤差、緑がOLSと深層学習を組み合わせた誤差。
|
<ASSISTANT_TASK:>
Python Code:
s_data = data[['P', 'S', 'L', 'R', 'A', 'RW', 'TS', 'TT']]
s_data.describe()
print(data['CITY_NAME'].value_counts())
print(data.pivot_table(index=['TDQ'], columns=['CITY_NAME']))
print(data.pivot_table(index=['TDQ'], columns=['BLOCK']))
data['P'].hist()
(np.log(data['P'])).hist()
data['A'].hist()
plt.figure(figsize=(20,8))
plt.subplot(4, 2, 1)
data['P'].hist()
plt.title(u"成約価格")
plt.subplot(4, 2, 2)
data['S'].hist()
plt.title("専有面積")
plt.subplot(4, 2, 3)
data['L'].hist()
plt.title("土地面積")
plt.subplot(4, 2, 4)
data['R'].hist()
plt.title("部屋数")
plt.subplot(4, 2, 5)
data['A'].hist()
plt.title("建築後年数")
plt.subplot(4, 2, 6)
data['RW'].hist()
plt.title("前面道路幅員")
plt.subplot(4, 2, 7)
data['TS'].hist()
plt.title("最寄駅までの距離")
plt.subplot(4, 2, 8)
data['TT'].hist()
plt.title(u"東京駅までの時間")
plt.figure(figsize=(20,8))
data['TDQ'].value_counts().plot(kind='bar')
plt.figure(figsize=(20,8))
data['CITY_NAME'].value_counts().plot(kind='bar') #市区町村別の件数
vars = ['P', 'S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR', 'X', 'Y']
eq = fml_build(vars)
y, X = dmatrices(eq, data=data, return_type='dataframe')
CITY_NAME = pd.get_dummies(data['CITY_NAME'])
TDQ = pd.get_dummies(data['TDQ'])
X = pd.concat((X, CITY_NAME, TDQ), axis=1)
datas = pd.concat((y, X), axis=1)
datas = datas[datas['12世田谷区'] == 1][0:5000]
datas.head()
vars = ['S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR']
#vars += vars + list(TDQ.columns)
class CAR(Chain):
def __init__(self, unit1, unit2, unit3, col_num):
self.unit1 = unit1
self.unit2 = unit2
self.unit3 = unit3
super(CAR, self).__init__(
l1 = L.Linear(col_num, unit1),
l2 = L.Linear(self.unit1, self.unit1),
l3 = L.Linear(self.unit1, self.unit2),
l4 = L.Linear(self.unit2, self.unit3),
l5 = L.Linear(self.unit3, self.unit3),
l6 = L.Linear(self.unit3, 1),
)
def __call__(self, x, y):
fv = self.fwd(x, y)
print(fv.data.shape)
print(y.data.shape)
loss = F.mean_squared_error(fv, y)
return loss
def fwd(self, x, y):
h1 = F.sigmoid(self.l1(x))
h2 = F.sigmoid(self.l2(h1))
h3 = F.sigmoid(self.l3(h2))
h4 = F.sigmoid(self.l4(h3))
h5 = F.sigmoid(self.l5(h4))
h6 = self.l6(h5)
return h6
class OLS_DLmodel(object):
def __init__(self, data, vars, bs=200, n=1000):
self.vars = vars
eq = fml_build(vars)
y, X = dmatrices(eq, data=datas, return_type='dataframe')
self.y_in = y[:-n]
self.X_in = X[:-n]
self.y_ex = y[-n:]
self.X_ex = X[-n:]
self.ido_in = data['X'][:-n]
self.keido_in = data['Y'][:-n]
self.ido_ex = data['X'][-n:]
self.keido_ex = data['Y'][-n:]
self.logy_in = np.log(self.y_in)
self.logy_ex = np.log(self.y_ex)
self.bs = bs
def OLS(self):
X_in = self.X_in
model = sm.OLS(self.logy_in, X_in, intercept=False)
self.reg = model.fit()
print(self.reg.summary())
def directDL(self, ite=100, bs=200, add=False):
logy_in = np.array(self.logy_in, dtype='float32')
X_in = np.array(self.X_in, dtype='float32')
y = Variable(logy_in)
x = Variable(X_in)
num, col_num = X_in.shape
if add is False:
self.model1 = CAR(15, 15, 5, col_num)
optimizer = optimizers.SGD()
optimizer.setup(self.model1)
for j in range(ite):
sffindx = np.random.permutation(num)
for i in range(0, num, bs):
x = Variable(X_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
y = Variable(logy_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
self.model1.zerograds()
loss = self.model1(x, y)
loss.backward()
optimizer.update()
if j % 1000 == 0:
loss_val = loss.data
print('epoch:', j)
print('train mean loss={}'.format(loss_val))
print(' - - - - - - - - - ')
y_ex = np.array(self.y_ex, dtype='float32').reshape(len(self.y_ex))
X_ex = np.array(self.X_ex, dtype='float32')
X_ex = Variable(X_ex)
logy_pred = self.model1.fwd(X_ex, X_ex).data
y_pred = np.exp(logy_pred)
error = y_ex - y_pred.reshape(len(y_pred),)
plt.hist(error[:])
def DL(self, ite=100, bs=200, add=False):
y_in = np.array(self.y_in, dtype='float32').reshape(len(self.y_in))
resid = y_in - np.exp(self.reg.predict())
resid = np.array(resid, dtype='float32').reshape(len(resid),1)
X_in = np.array(self.X_in, dtype='float32')
y = Variable(resid)
x = Variable(X_in)
num, col_num = X_in.shape
if add is False:
self.model1 = CAR(10, 10, 3, col_num)
optimizer = optimizers.Adam()
optimizer.setup(self.model1)
for j in range(ite):
sffindx = np.random.permutation(num)
for i in range(0, num, bs):
x = Variable(X_in[sffindx[i:(i+bs) if (i+bs) < num else num]])
y = Variable(resid[sffindx[i:(i+bs) if (i+bs) < num else num]])
self.model1.zerograds()
loss = self.model1(x, y)
loss.backward()
optimizer.update()
if j % 1000 == 0:
loss_val = loss.data
print('epoch:', j)
print('train mean loss={}'.format(loss_val))
print(' - - - - - - - - - ')
resid_pred = self.model1.fwd(X_in, X_in).data
y_in = np.array(self.y_in, dtype='float32').reshape(len(self.y_in))
logy_pred = np.matrix(self.X_in)*np.matrix(self.reg.params).T
self.pred = np.exp(logy_pred) + resid_pred
self.resid2 = np.array(y_in - self.pred.reshape(len(self.pred),))[0]
def predict(self):
y_ex = np.array(self.y_ex, dtype='float32').reshape(len(self.y_ex))
X_ex = np.array(self.X_ex, dtype='float32')
X_ex = Variable(X_ex)
resid_pred = self.model1.fwd(X_ex, X_ex).data
print(resid_pred[:10])
self.logy_pred = np.matrix(self.X_ex)*np.matrix(self.reg.params).T
self.error1 = np.array(y_ex - np.exp(self.logy_pred.reshape(len(self.logy_pred),)))[0]
self.pred = np.exp(self.logy_pred) + resid_pred
self.error2 = np.array(y_ex - self.pred.reshape(len(self.pred),))[0]
def compare(self):
plt.hist(self.error1)
plt.hist(self.error2)
def sar(self):
weight = ((np.array([self.ido_in]).T - np.array([self.ido_in]))**2 +
(np.array([self.keido_in]).T - np.array([self.keido_in]))**2)
for i in range(len(weight)):
N = sum(weight[i])
weight[i] = weight[i]/N
Ws = np.matrix(weight)*np.matrix(self.resid2).T
model2 = sm.OLS(np.matrix(self.resid2).T, Ws)
reg = model2.fit()
rho = reg.params
self.sigmas = []
for i in range(len(self.ido_ex)):
x = np.append(np.array(self.ido_in), np.array(self.ido_ex)[i])
y = np.append(np.array(self.keido_in), np.array(self.keido_ex)[i])
weight = ((np.array([x]).T - np.array([x]))**2 +
(np.array([y]).T - np.array([y]))**2)
N = np.array([list(np.matrix(weight)*np.matrix(np.ones(len(weight), 1)))[0] for k in range(len(weight))])
weight[j] = weight/N
res = np.append(model.resid2, 0)
Ws = np.matrix(weight)*np.matrix(res).T
sigma = rho*Ws.T
sigma = np.array(sigma)[0][-1]
self.sigmas.append(sigma)
vars = ['P', 'S', 'L', 'R', 'RW', 'A', 'TS', 'TT', 'WOOD', 'SOUTH', 'CMD', 'IDD', 'FAR', 'X', 'Y']
#vars += vars + list(TDQ.columns)
model = OLS_DLmodel(datas, vars)
model.OLS()
model.DL(ite=10, bs=200)
model.predict()
model.compare()
model.DL(30000, bs=200, add=True)
serializers.save_npz('model1(1)', model.model1)
model.DL(10000, bs=200, add=True)
serializers.save_npz('model1(2)', model.model1)
model.predict()
model.compare()
fig = plt.figure()
ax = fig.add_subplot(111)
errors = [model.error1, model.error2]
bp = ax.boxplot(errors)
plt.grid()
plt.ylim([-5000,5000])
plt.title('分布の箱ひげ図')
plt.show()
print(np.mean(model.error1))
print(np.mean(model.error2))
print(np.var(model.error1))
print(np.var(model.error2))
model.sar()
error2 = (model.error2[:741] - np.array(model.sigmas))
fig = plt.figure()
ax = fig.add_subplot(111)
errors = [model.error1, error2]
bp = ax.boxplot(errors)
plt.grid()
plt.ylim([-5000,5000])
plt.title('分布の箱ひげ図')
plt.show()
print(np.mean(model.error1))
print(np.mean(error2))
print(np.mean(np.abs(model.error1)))
print(np.mean(np.abs(error2)))
print(max(np.abs(model.error1)))
print(max(np.abs(error2)))
print(np.var(model.error1))
print(np.var(error2))
X = model.X_ex['X'].values
Y = model.X_ex['Y'].values
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig=plt.figure()
ax=Axes3D(fig)
ax.scatter3D(X[:741], Y[:741], error2)
plt.show()
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig=plt.figure()
ax=Axes3D(fig)
ax.scatter3D(X, Y, model.error2)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as mtri
#============
# First plot
#============
# Plot the surface. The triangles in parameter space determine which x, y, z
# points are connected by an edge.
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_trisurf(X, Y, e)
ax.set_zlim(-1, 1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports, logging, and data
Step2: Creating a new CollaborativeFiltering object with data
Step3: Parameters of the collaborative filtering algorithm
Step4: It has the same meaning as in the baseline recommendation
Step5: Indeed, collaborative filtering cannot necessarily provide recommendations for all customers. Specifically, it fails to do so if the customer in question only bought articles that no other customer has bought. For these cases, we need a fallback solution, which is provided by the algorithm specified through the baseline attribute. As you can see, that algorithm is currently a Baseline instance. We could, of course, also provide the baseline algorithm manually.
Step6: More about that later. There is one more paramter to be explored first.
Step7: In short, collaborative filtering (as it is implemented in bestPy) works by recommending articles that are most similar to the articles the target customer has already bought. What exactly similar means, however, is not set in stone and quite a few similarity measures are available.
Step8: And that's it for the parameters of the collaborative filtering algorithm.
Step9: And, voilà, your recommendation. Again, a higher number means that the article with the same index as that number is more highly recommended for the target customer.
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('../..')
from bestPy import write_log_to
from bestPy.datastructures import Transactions
from bestPy.algorithms import Baseline, CollaborativeFiltering # Additionally import CollaborativeFiltering
logfile = 'logfile.txt'
write_log_to(logfile, 20)
file = 'examples_data.csv'
data = Transactions.from_csv(file)
recommendation = CollaborativeFiltering().operating_on(data)
recommendation.has_data
recommendation.binarize
recommendation.baseline
recommendation.baseline = Baseline()
recommendation.baseline
recommendation.similarity
from bestPy.algorithms.similarities import dice, jaccard, sokalsneath, russellrao, cosine, cosine_binary
recommendation.similarity = dice
recommendation.similarity
customer = data.user.index_of['5']
recommendation.for_one(customer)
customer = data.user.index_of['4']
recommendation.for_one(customer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.3 search
Step2: search 函数将返回字符串开始模式首次出现的位置
Step3: 2.4 匹配多个字符串
Step4: 2.5 匹配任意单个字符(.)
Step5: 2.6 创建字符集合([ ])
Step6: 2.7 分组
Step7: 2.7.2 分组表示
Step8: 2.8 字符串开头或者单词边界
Step9: 2.8.2 单词边界
Step10: 2.9 find 模块
Step11: 2.10 sub()和subn()函数
Step12: 2.11 split分割
Step13: 3 搜索和匹配的比较,“贪婪”匹配
Step14: 由于通配符“.”默认贪心的,所以'.+'将会匹配尽可能多的字符,所以
|
<ASSISTANT_TASK:>
Python Code:
import re
m = re.match('foo', 'foo')
if m is not None: m.group()
m
m = re.match('foo', 'bar')
if m is not None: m.group()
re.match('foo', 'foo on the table').group()
# raise attributeError
re.match('bar', 'foo on the table').group()
m = re.match('foo','seafood')
if m is not None: m.group()
re.search('foo', 'seafood').group()
bt = 'bat|bet|bit'
re.match(bt,'bat').group()
re.match(bt, 'blt').group()
re.match(bt, 'He bit me!').group()
re.search(bt, 'He bit me!').group()
anyend='.end'
re.match(anyend, 'bend').group()
re.match(anyend, 'end').group()
re.search(anyend, '\nend').group()
pattern = '[cr][23][dp][o2]'
re.match(pattern, 'c3po').group()
re.match(pattern, 'c3do').group()
re.match('r2d2|c3po', 'c2do').group()
re.match('r2d2|c3po', 'r2d2').group()
patt = '\w+@(\w+\.)?\w+\.com'
re.match(patt, 'nobady@xxx.com').group()
re.match(patt, 'nobody@www.xxx.com').group()
# 匹配多个子域名
patt = '\w+@(\w+\.)*\w+\.com'
re.match(patt, 'nobody@www.xxx.yyy.zzz.com').group()
patt = '(\w\w\w)-(\d\d\d)'
m = re.match(patt, 'abc-123')
m.group()
m.group(1)
m.group(2)
m.groups()
m = re.match('ab', 'ab')
m.group()
m.groups()
m = re.match('(ab)','ab')
m.groups()
m.group(1)
m = re.match('(a(b))', 'ab')
m.group()
m.group(1)
m.group(2)
m.groups()
re.match('^The', 'The end.').group()
# raise attributeError
re.match('^The', 'end. The').group()
re.search(r'\bthe', 'bite the dog').group()
re.search(r'\bthe', 'bitethe dog').group()
re.search(r'\Bthe', 'bitthe dog').group()
re.findall('car', 'car')
re.findall('car', 'scary')
re.findall('car', 'carry, the barcardi to the car')
(re.sub('X', 'Mr. Smith', 'attn: X\n\nDear X, \n'))
print re.subn('X', 'Mr. Smith', 'attn: X\n\nDear X, \n')
re.sub('[ae]', 'X', 'abcdedf')
re.split(':','str1:str2:str3')
from os import popen
from re import split
f = popen('who', 'r')
for eachLine in f.readlines():
print split('\s\s+|\t', eachLine.strip())
f.close()
string = 'Thu Feb 15 17:46:04 2007::gaufung@cumt.edu.cn::1171590364-6-8'
patt = '.+\d+-\d+-\d+'
re.match(patt, string).group()
patt = '.+(\d+-\d+-\d+)'
re.match(patt, string).group(1)
patt = '.+?(\d+-\d+-\d+)'
re.match(patt, string).group(1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in train & val data
Step2: Extract X and Y matrices
Step4: Convert to SystemML Matrices
Step6: Trigger Caching (Optional)
Step8: Save Matrices (Optional)
Step10: Softmax Classifier
Step12: Train
Step14: Eval
Step16: LeNet-like ConvNet
Step18: Hyperparameter Search
Step20: Train
Step22: Eval
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
import matplotlib.pyplot as plt
import numpy as np
from pyspark.sql.functions import col, max
import systemml # pip3 install systemml
from systemml import MLContext, dml
plt.rcParams['figure.figsize'] = (10, 6)
ml = MLContext(sc)
# Settings
size=256
grayscale = False
c = 1 if grayscale else 3
p = 0.01
folder = "data"
if p < 1:
tr_filename = os.path.join(folder, "train_{}_sample_{}{}.parquet".format(p, size, "_grayscale" if grayscale else ""))
val_filename = os.path.join(folder, "val_{}_sample_{}{}.parquet".format(p, size, "_grayscale" if grayscale else ""))
else:
tr_filename = os.path.join(folder, "train_{}{}.parquet".format(size, "_grayscale" if grayscale else ""))
val_filename = os.path.join(folder, "val_{}{}.parquet".format(size, "_grayscale" if grayscale else ""))
train_df = spark.read.load(tr_filename)
val_df = spark.read.load(val_filename)
train_df, val_df
tc = train_df.count()
vc = val_df.count()
tc, vc, tc + vc
train_df.select(max(col("__INDEX"))).show()
train_df.groupBy("tumor_score").count().show()
val_df.groupBy("tumor_score").count().show()
# Note: Must use the row index column, or X may not
# necessarily correspond correctly to Y
X_df = train_df.select("__INDEX", "sample")
X_val_df = val_df.select("__INDEX", "sample")
y_df = train_df.select("__INDEX", "tumor_score")
y_val_df = val_df.select("__INDEX", "tumor_score")
X_df, X_val_df, y_df, y_val_df
script =
# Scale images to [-1,1]
X = X / 255
X_val = X_val / 255
X = X * 2 - 1
X_val = X_val * 2 - 1
# One-hot encode the labels
num_tumor_classes = 3
n = nrow(y)
n_val = nrow(y_val)
Y = table(seq(1, n), y, n, num_tumor_classes)
Y_val = table(seq(1, n_val), y_val, n_val, num_tumor_classes)
outputs = ("X", "X_val", "Y", "Y_val")
script = dml(script).input(X=X_df, X_val=X_val_df, y=y_df, y_val=y_val_df).output(*outputs)
X, X_val, Y, Y_val = ml.execute(script).get(*outputs)
X, X_val, Y, Y_val
# script =
# # Trigger conversions and caching
# # Note: This may take a while, but will enable faster iteration later
# print(sum(X))
# print(sum(Y))
# print(sum(X_val))
# print(sum(Y_val))
#
# script = dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val)
# ml.execute(script)
# script =
# write(X, "data/X_"+p+"_sample_binary", format="binary")
# write(Y, "data/Y_"+p+"_sample_binary", format="binary")
# write(X_val, "data/X_val_"+p+"_sample_binary", format="binary")
# write(Y_val, "data/Y_val_"+p+"_sample_binary", format="binary")
#
# script = dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val, p=p)
# ml.execute(script)
script =
source("softmax_clf.dml") as clf
# Hyperparameters & Settings
lr = 1e-2 # learning rate
mu = 0.9 # momentum
decay = 0.999 # learning rate decay constant
batch_size = 32
epochs = 500
log_interval = 1
n = 200 # sample size for overfitting sanity check
# Train
[W, b] = clf::train(X[1:n,], Y[1:n,], X[1:n,], Y[1:n,], lr, mu, decay, batch_size, epochs, log_interval)
outputs = ("W", "b")
script = dml(script).input(X=X, Y=Y, X_val=X_val, Y_val=Y_val).output(*outputs)
W, b = ml.execute(script).get(*outputs)
W, b
script =
source("softmax_clf.dml") as clf
# Hyperparameters & Settings
lr = 5e-7 # learning rate
mu = 0.5 # momentum
decay = 0.999 # learning rate decay constant
batch_size = 32
epochs = 1
log_interval = 10
# Train
[W, b] = clf::train(X, Y, X_val, Y_val, lr, mu, decay, batch_size, epochs, log_interval)
outputs = ("W", "b")
script = dml(script).input(X=X, Y=Y, X_val=X_val, Y_val=Y_val).output(*outputs)
W, b = ml.execute(script).get(*outputs)
W, b
script =
source("softmax_clf.dml") as clf
# Eval
probs = clf::predict(X, W, b)
[loss, accuracy] = clf::eval(probs, Y)
probs_val = clf::predict(X_val, W, b)
[loss_val, accuracy_val] = clf::eval(probs_val, Y_val)
outputs = ("loss", "accuracy", "loss_val", "accuracy_val")
script = dml(script).input(X=X, Y=Y, X_val=X_val, Y_val=Y_val, W=W, b=b).output(*outputs)
loss, acc, loss_val, acc_val = ml.execute(script).get(*outputs)
loss, acc, loss_val, acc_val
script =
source("convnet.dml") as clf
# Hyperparameters & Settings
lr = 1e-2 # learning rate
mu = 0.9 # momentum
decay = 0.999 # learning rate decay constant
lambda = 0 #5e-04
batch_size = 32
epochs = 300
log_interval = 1
dir = "models/lenet-cnn/sanity/"
n = 200 # sample size for overfitting sanity check
# Train
[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] = clf::train(X[1:n,], Y[1:n,], X[1:n,], Y[1:n,], C, Hin, Win, lr, mu, decay, lambda, batch_size, epochs, log_interval, dir)
outputs = ("Wc1", "bc1", "Wc2", "bc2", "Wc3", "bc3", "Wa1", "ba1", "Wa2", "ba2")
script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val,
C=c, Hin=size, Win=size)
.output(*outputs))
Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = ml.execute(script).get(*outputs)
Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2
script =
source("convnet.dml") as clf
dir = "models/lenet-cnn/hyperparam-search/"
# TODO: Fix `parfor` so that it can be efficiently used for hyperparameter tuning
j = 1
while(j < 2) {
#parfor(j in 1:10000, par=6) {
# Hyperparameter Sampling & Settings
lr = 10 ^ as.scalar(rand(rows=1, cols=1, min=-7, max=-1)) # learning rate
mu = as.scalar(rand(rows=1, cols=1, min=0.5, max=0.9)) # momentum
decay = as.scalar(rand(rows=1, cols=1, min=0.9, max=1)) # learning rate decay constant
lambda = 10 ^ as.scalar(rand(rows=1, cols=1, min=-7, max=-1)) # regularization constant
batch_size = 32
epochs = 1
log_interval = 10
trial_dir = dir + "j/"
# Train
[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] = clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay, lambda, batch_size, epochs, log_interval, trial_dir)
# Eval
#probs = clf::predict(X, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
#[loss, accuracy] = clf::eval(probs, Y)
probs_val = clf::predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
[loss_val, accuracy_val] = clf::eval(probs_val, Y_val)
# Save hyperparams
str = "lr: " + lr + ", mu: " + mu + ", decay: " + decay + ", lambda: " + lambda + ", batch_size: " + batch_size
name = dir + accuracy_val + "," + j #+","+accuracy+","+j
write(str, name)
j = j + 1
}
script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val, C=c, Hin=size, Win=size))
ml.execute(script)
script =
source("convnet.dml") as clf
# Hyperparameters & Settings
lr = 0.00205 # learning rate
mu = 0.632 # momentum
decay = 0.99 # learning rate decay constant
lambda = 0.00385
batch_size = 32
epochs = 1
log_interval = 10
dir = "models/lenet-cnn/train/"
# Train
[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] =
clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay,
lambda, batch_size, epochs, log_interval, dir)
outputs = ("Wc1", "bc1", "Wc2", "bc2", "Wc3", "bc3",
"Wa1", "ba1", "Wa2", "ba2")
script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val,
C=c, Hin=size, Win=size)
.output(*outputs))
outs = ml.execute(script).get(*outputs)
Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs
script =
source("convnet.dml") as clf
# Eval
probs = clf::predict(X, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
[loss, accuracy] = clf::eval(probs, Y)
probs_val = clf::predict(X_val, C, Hin, Win, Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2)
[loss_val, accuracy_val] = clf::eval(probs_val, Y_val)
outputs = ("loss", "accuracy", "loss_val", "accuracy_val")
script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val,
C=c, Hin=size, Win=size,
Wc1=Wc1, bc1=bc1,
Wc2=Wc2, bc2=bc2,
Wc3=Wc3, bc3=bc3,
Wa1=Wa1, ba1=ba1,
Wa2=Wa2, ba2=ba2)
.output(*outputs))
loss, acc, loss_val, acc_val = ml.execute(script).get(*outputs)
loss, acc, loss_val, acc_val
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the command in the cell below to install gcsfs package.
Step2: Prepare lab dataset
Step3: Next, create the BigQuery dataset and upload the Covertype csv data into a table.
Step4: Configure environment settings
Step5: HINT
Step6: Explore the Covertype dataset
Step7: Create training and validation splits
Step8: Use the bq extract command to export the BigQuery training table to GCS at $TRAINING_FILE_PATH.
Step9: Create a validation split
Step10: Develop a training application
Step11: Convert all numeric features to float64
Step12: Run the pipeline locally.
Step13: Calculate the trained model's accuracy.
Step14: Prepare the hyperparameter tuning application.
Step15: Write the tuning script.
Step16: Package the script into a docker image.
Step17: Build the docker image.
Step18: Submit an AI Platform hyperparameter tuning job
Step19: Start the hyperparameter tuning job.
Step20: Monitor the job.
Step21: NOTE
Step22: The returned run results are sorted by a value of the optimization metric. The best run is the first item on the returned list.
Step23: Retrain the model with the best hyperparameters
Step24: NOTE
Step25: Deploy the model to AI Platform Prediction
Step26: Create a model version
Step27: Serve predictions
Step28: Invoke the model
|
<ASSISTANT_TASK:>
Python Code:
import json
import os
import numpy as np
import pandas as pd
import pickle
import uuid
import time
import tempfile
from googleapiclient import discovery
from googleapiclient import errors
from google.cloud import bigquery
from jinja2 import Template
from kfp.components import func_to_container_op
from typing import NamedTuple
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
%pip install gcsfs==0.8
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
DATASET_ID='covertype_dataset'
DATASET_LOCATION='US'
TABLE_ID='covertype'
DATA_SOURCE='gs://workshop-datasets/covertype/small/dataset.csv'
SCHEMA='Elevation:INTEGER,Aspect:INTEGER,Slope:INTEGER,Horizontal_Distance_To_Hydrology:INTEGER,Vertical_Distance_To_Hydrology:INTEGER,Horizontal_Distance_To_Roadways:INTEGER,Hillshade_9am:INTEGER,Hillshade_Noon:INTEGER,Hillshade_3pm:INTEGER,Horizontal_Distance_To_Fire_Points:INTEGER,Wilderness_Area:STRING,Soil_Type:STRING,Cover_Type:INTEGER'
!bq --location=$DATASET_LOCATION --project_id=$PROJECT_ID mk --dataset $DATASET_ID
!bq --project_id=$PROJECT_ID --dataset_id=$DATASET_ID load \
--source_format=CSV \
--skip_leading_rows=1 \
--replace \
$TABLE_ID \
$DATA_SOURCE \
$SCHEMA
!gsutil ls
REGION = 'us-central1'
ARTIFACT_STORE = 'gs://qwiklabs-gcp-xx-xxxxxxx-kubeflowpipelines-default' # TO DO: REPLACE WITH YOUR ARTIFACT_STORE NAME
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
DATA_ROOT='{}/data'.format(ARTIFACT_STORE)
JOB_DIR_ROOT='{}/jobs'.format(ARTIFACT_STORE)
TRAINING_FILE_PATH='{}/{}/{}'.format(DATA_ROOT, 'training', 'dataset.csv')
VALIDATION_FILE_PATH='{}/{}/{}'.format(DATA_ROOT, 'validation', 'dataset.csv')
%%bigquery
SELECT *
FROM `covertype_dataset.covertype`
!bq query \
-n 0 \
--destination_table covertype_dataset.training \
--replace \
--use_legacy_sql=false \
'SELECT * \
FROM `covertype_dataset.covertype` AS cover \
WHERE \
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (1, 2, 3, 4)'
!bq extract \
--destination_format CSV \
covertype_dataset.training \
$TRAINING_FILE_PATH
!bq query \
-n 0 \
--destination_table covertype_dataset.validation \
--replace \
--use_legacy_sql=false \
'SELECT * \
FROM `covertype_dataset.covertype` AS cover \
WHERE \
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (8)'
!bq extract \
--destination_format CSV \
covertype_dataset.validation \
$VALIDATION_FILE_PATH
df_train = pd.read_csv(TRAINING_FILE_PATH)
df_validation = pd.read_csv(VALIDATION_FILE_PATH)
print(df_train.shape)
print(df_validation.shape)
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
('num', StandardScaler(), numeric_feature_indexes),
('cat', OneHotEncoder(), categorical_feature_indexes)
])
pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log', tol=1e-3))
])
num_features_type_map = {feature: 'float64' for feature in df_train.columns[numeric_feature_indexes]}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
X_train = df_train.drop('Cover_Type', axis=1)
y_train = df_train['Cover_Type']
X_validation = df_validation.drop('Cover_Type', axis=1)
y_validation = df_validation['Cover_Type']
pipeline.set_params(classifier__alpha=0.001, classifier__max_iter=200)
pipeline.fit(X_train, y_train)
accuracy = pipeline.score(X_validation, y_validation)
print(accuracy)
TRAINING_APP_FOLDER = 'training_app'
os.makedirs(TRAINING_APP_FOLDER, exist_ok=True)
%%writefile {TRAINING_APP_FOLDER}/train.py
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import fire
import pickle
import numpy as np
import pandas as pd
import hypertune
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def train_evaluate(job_dir, training_dataset_path, validation_dataset_path, alpha, max_iter, hptune):
df_train = pd.read_csv(training_dataset_path)
df_validation = pd.read_csv(validation_dataset_path)
if not hptune:
df_train = pd.concat([df_train, df_validation])
numeric_feature_indexes = slice(0, 10)
categorical_feature_indexes = slice(10, 12)
preprocessor = ColumnTransformer(
transformers=[
('num', StandardScaler(), numeric_feature_indexes),
('cat', OneHotEncoder(), categorical_feature_indexes)
])
pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', SGDClassifier(loss='log',tol=1e-3))
])
num_features_type_map = {feature: 'float64' for feature in df_train.columns[numeric_feature_indexes]}
df_train = df_train.astype(num_features_type_map)
df_validation = df_validation.astype(num_features_type_map)
print('Starting training: alpha={}, max_iter={}'.format(alpha, max_iter))
X_train = df_train.drop('Cover_Type', axis=1)
y_train = df_train['Cover_Type']
pipeline.set_params(classifier__alpha=alpha, classifier__max_iter=max_iter)
pipeline.fit(X_train, y_train)
if hptune:
X_validation = df_validation.drop('Cover_Type', axis=1)
y_validation = df_validation['Cover_Type']
accuracy = pipeline.score(X_validation, y_validation)
print('Model accuracy: {}'.format(accuracy))
# Log it with hypertune
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=accuracy
)
# Save the model
if not hptune:
model_filename = 'model.pkl'
with open(model_filename, 'wb') as model_file:
pickle.dump(pipeline, model_file)
gcs_model_path = "{}/{}".format(job_dir, model_filename)
subprocess.check_call(['gsutil', 'cp', model_filename, gcs_model_path], stderr=sys.stdout)
print("Saved model in: {}".format(gcs_model_path))
if __name__ == "__main__":
fire.Fire(train_evaluate)
%%writefile {TRAINING_APP_FOLDER}/Dockerfile
FROM gcr.io/deeplearning-platform-release/base-cpu
RUN pip install -U fire cloudml-hypertune scikit-learn==0.20.4 pandas==0.24.2
WORKDIR /app
COPY train.py .
ENTRYPOINT ["python", "train.py"]
IMAGE_NAME='trainer_image'
IMAGE_TAG='latest'
IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, IMAGE_TAG)
!gcloud builds submit --tag $IMAGE_URI $TRAINING_APP_FOLDER
%%writefile {TRAINING_APP_FOLDER}/hptuning_config.yaml
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
trainingInput:
hyperparameters:
goal: MAXIMIZE
maxTrials: 4
maxParallelTrials: 4
hyperparameterMetricTag: accuracy
enableTrialEarlyStopping: TRUE
params:
- parameterName: max_iter
type: DISCRETE
discreteValues: [
200,
500
]
- parameterName: alpha
type: DOUBLE
minValue: 0.00001
maxValue: 0.001
scaleType: UNIT_LINEAR_SCALE
JOB_NAME = "JOB_{}".format(time.strftime("%Y%m%d_%H%M%S"))
JOB_DIR = "{}/{}".format(JOB_DIR_ROOT, JOB_NAME)
SCALE_TIER = "BASIC"
!gcloud ai-platform jobs submit training $JOB_NAME \
--region=$REGION \
--job-dir=$JOB_DIR \
--master-image-uri=$IMAGE_URI \
--scale-tier=$SCALE_TIER \
--config $TRAINING_APP_FOLDER/hptuning_config.yaml \
-- \
--training_dataset_path=$TRAINING_FILE_PATH \
--validation_dataset_path=$VALIDATION_FILE_PATH \
--hptune
!gcloud ai-platform jobs describe $JOB_NAME
!gcloud ai-platform jobs stream-logs $JOB_NAME
ml = discovery.build('ml', 'v1')
job_id = 'projects/{}/jobs/{}'.format(PROJECT_ID, JOB_NAME)
request = ml.projects().jobs().get(name=job_id)
try:
response = request.execute()
except errors.HttpError as err:
print(err)
except:
print("Unexpected error")
response
response['trainingOutput']['trials'][0]
alpha = response['trainingOutput']['trials'][0]['hyperparameters']['alpha']
max_iter = response['trainingOutput']['trials'][0]['hyperparameters']['max_iter']
JOB_NAME = "JOB_{}".format(time.strftime("%Y%m%d_%H%M%S"))
JOB_DIR = "{}/{}".format(JOB_DIR_ROOT, JOB_NAME)
SCALE_TIER = "BASIC"
!gcloud ai-platform jobs submit training $JOB_NAME \
--region=$REGION \
--job-dir=$JOB_DIR \
--master-image-uri=$IMAGE_URI \
--scale-tier=$SCALE_TIER \
-- \
--training_dataset_path=$TRAINING_FILE_PATH \
--validation_dataset_path=$VALIDATION_FILE_PATH \
--alpha=$alpha \
--max_iter=$max_iter \
--nohptune
!gcloud ai-platform jobs stream-logs $JOB_NAME
!gsutil ls $JOB_DIR
model_name = 'forest_cover_classifier'
labels = "task=classifier,domain=forestry"
!gcloud ai-platform models create $model_name \
--regions=$REGION \
--labels=$labels
model_version = 'v01'
!gcloud ai-platform versions create {model_version} \
--model={model_name} \
--origin=$JOB_DIR \
--runtime-version=1.15 \
--framework=scikit-learn \
--python-version=3.7\
--region global
input_file = 'serving_instances.json'
with open(input_file, 'w') as f:
for index, row in X_validation.head().iterrows():
f.write(json.dumps(list(row.values)))
f.write('\n')
!cat $input_file
!gcloud ai-platform predict \
--model $model_name \
--version $model_version \
--json-instances $input_file\
--region global
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Case 1
Step2: Case 2
Step3: 2. Vectorization
Step4: 3. Slicing and reshape
Step5: Array shape manipulation
Step6: Exercise
Step7: 2. Create a structured array representing a position (x,y) and a color (r,g,b)
Step8: 3. Consider a large vector Z, compute Z to the power of 3 using 2 different methods
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array(range(10000000))
b = np.array(range(9999999,-1,-1))
%%time
a = a + b
a = np.array(range(10000000))
b = np.array(range(9999999,-1,-1))
%%time
a +=b
#Apply function to a complete array instead of writing loop to iterate over all elements of the array.
#This is called vectorization. The opposite of vectorization (for loops) is known as the scalar implementation
def f(x):
return x*np.exp(4)
print(f(a))
x = np.array(range(100))
x[1:-1] # picks out all elements except the first and the last, but contrary to lists, a[1:-1] is not a copy of the data in a.
x[0:-1:2] # picks out every two elements up to, but not including, the last element, while
x[::4] # picks out every four elements in the whole array.
a = np.linspace(-1, 1, 6)
print (a)
a.shape
a.size
# rows, columns
a.shape = (2, 3)
a = a.reshape(2, 3) # alternative
a.shape
print (a)
# len(a) always returns the length of the first dimension of an array. -> no. of rows
Z = np.ones((10,10))
Z[1:-1,1:-1] = 0
print(Z)
Z = np.zeros(10, [ ('position', [ ('x', float, 1),
('y', float, 1)]),
('color', [ ('r', float, 1),
('g', float, 1),
('b', float, 1)])])
print(Z)
x = np.random.rand(5e7)
%timeit np.power(x,3)
%timeit x*x*x
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With our three materials, we will set up two universes that represent pin-cells
Step2: Now let's create a hexagonal lattice using the HexLattice class
Step3: We need to set the center of the lattice, the pitch, an outer universe (which is applied to all lattice elements outside of those that are defined), and a list of universes. Let's start with the easy ones first. Note that for a 2D lattice, we only need to specify a single number for the pitch.
Step4: Now we need to set the universes property on our lattice. It needs to be set to a list of lists of Universes, where each list of Universes corresponds to a ring of the lattice. The rings are ordered from outermost to innermost, and within each ring the indexing starts at the "top". To help visualize the proper indices, we can use the show_indices() helper method.
Step5: Let's set up a lattice where the first element in each ring is the big pin universe and all other elements are regular pin universes.
Step6: We can now assign the rings (and the universes they contain) to our lattice.
Step7: Now let's put our lattice inside a circular cell that will serve as the top-level cell for our geometry.
Step8: Now let's create a plot to see what our geometry looks like.
Step9: At this point, if we wanted to simulate the model, we would need to create an instance of openmc.Settings, export it to XML, and run.
Step10: When we change the orientation to 'x', you can see that the first universe in each ring starts to the right along the x-axis. As before, the universes are defined in a clockwise fashion around each ring. To see the proper indices for a hexagonal lattice in this orientation, we can again call show_indices but pass an extra orientation argument
Step11: Hexagonal prisms
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import openmc
fuel = openmc.Material(name='fuel')
fuel.add_nuclide('U235', 1.0)
fuel.set_density('g/cm3', 10.0)
fuel2 = openmc.Material(name='fuel2')
fuel2.add_nuclide('U238', 1.0)
fuel2.set_density('g/cm3', 10.0)
water = openmc.Material(name='water')
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 1.0)
materials = openmc.Materials((fuel, fuel2, water))
materials.export_to_xml()
r_pin = openmc.ZCylinder(r=0.25)
fuel_cell = openmc.Cell(fill=fuel, region=-r_pin)
water_cell = openmc.Cell(fill=water, region=+r_pin)
pin_universe = openmc.Universe(cells=(fuel_cell, water_cell))
r_big_pin = openmc.ZCylinder(r=0.5)
fuel2_cell = openmc.Cell(fill=fuel2, region=-r_big_pin)
water2_cell = openmc.Cell(fill=water, region=+r_big_pin)
big_pin_universe = openmc.Universe(cells=(fuel2_cell, water2_cell))
all_water_cell = openmc.Cell(fill=water)
outer_universe = openmc.Universe(cells=(all_water_cell,))
lattice = openmc.HexLattice()
lattice.center = (0., 0.)
lattice.pitch = (1.25,)
lattice.outer = outer_universe
print(lattice.show_indices(num_rings=4))
outer_ring = [big_pin_universe] + [pin_universe]*17 # Adds up to 18
ring_1 = [big_pin_universe] + [pin_universe]*11 # Adds up to 12
ring_2 = [big_pin_universe] + [pin_universe]*5 # Adds up to 6
inner_ring = [big_pin_universe]
lattice.universes = [outer_ring,
ring_1,
ring_2,
inner_ring]
print(lattice)
outer_surface = openmc.ZCylinder(r=5.0, boundary_type='vacuum')
main_cell = openmc.Cell(fill=lattice, region=-outer_surface)
geometry = openmc.Geometry([main_cell])
geometry.export_to_xml()
plot = openmc.Plot.from_geometry(geometry)
plot.color_by = 'material'
plot.colors = colors = {
water: 'blue',
fuel: 'olive',
fuel2: 'yellow'
}
plot.to_ipython_image()
# Change the orientation of the lattice and re-export the geometry
lattice.orientation = 'x'
geometry.export_to_xml()
# Run OpenMC in plotting mode
plot.to_ipython_image()
print(lattice.show_indices(4, orientation='x'))
main_cell.region = openmc.model.hexagonal_prism(
edge_length=4*lattice.pitch[0],
orientation='x',
boundary_type='vacuum'
)
geometry.export_to_xml()
# Run OpenMC in plotting mode
plot.color_by = 'cell'
plot.to_ipython_image()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bar from multiple values
Step2: Simplified input using same order
Step3: Operations on Composite Glyphs
Step4: Stacked
Step5: Producing Combined Data Source
Step6: Standalone Use of Composite Glyphs
Step7: Build Bars and Add Them Manually to Chart
Step8: Stack the Bars, then Show Chart
|
<ASSISTANT_TASK:>
Python Code:
bar = BarGlyph(label='a', values=[1])
bar.data
bar = BarGlyph(label='a', values=[1, 2, 3, 4])
bar.data
bar = BarGlyph('a', 1)
bar.data
bar1 = BarGlyph('foo', 1)
bar2 = BarGlyph('foo', 2)
print('No stacking')
print('bar1 y: %s, bar2 y: %s' % (bar1.data['y'], bar2.data['y']) )
from bokeh.charts.operations import stack
bar1, bar2 = stack(bar1, bar2)
print('With Stacking')
print('bar1 y: %s, bar2 y: %s' % (bar1.data['y'], bar2.data['y']) )
from bokeh.charts.utils import comp_glyphs_to_df
# utility that uses pandas.concat to concatenate each CompositeGlyph.df
comp_glyphs_to_df(bar1, bar2)
from bokeh.charts.chart import Chart
from bokeh.models.ranges import DataRange1d, FactorRange
from bokeh.io import curdoc, curstate
def add_chart_to_doc(chart):
"Handle adding chart to doc."
curdoc()._current_plot = chart
if curstate().autoadd:
curdoc().add_root(chart)
# two bars overlap on the same label/index
bar1 = BarGlyph(label='foo', values=[1])
bar2 = BarGlyph('foo', 2)
# only the third bar doesn't overlap
bar3 = BarGlyph('bar', 3)
# composite glyphs can have multiple renderers, so we get them all
renderers = []
for bar in [bar1, bar2, bar3]:
renderers += bar.renderers
# create a chart and directly add the renderers
c = Chart(renderers=renderers)
# add ranges/scales (typically handled by builder)
c.add_ranges('x', FactorRange(factors=['foo', 'bar']))
c.add_ranges('y', DataRange1d(start=0, end=4))
c.add_scales('x', 'auto')
c.add_scales('y', 'auto')
# build the chart (typically called by create_and_build)
c.start_plot()
# add chart to doc (typically handled by create_and_build)
add_chart_to_doc(c)
show(c)
stack(bar1, bar2, bar3)
show(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In order to use wget inside a Python program, you have to install it with pip
Step2: Then, you have to import the wget package
Step3: Now, let's develop a python function to be able to give the parameters of download. The code below is not complete and probably won't work under general circumstances, but the intention of it is simply to demonstrate the very first steps. The arguments of the function will be the following
Step4: Now, check out the content of you folder in colab.
Step5: Yesterday as an object can be defined using the timedelta, as follows
Step6: Finally, you can download the data for the yesterday object.
Step7: Unzip files in python
|
<ASSISTANT_TASK:>
Python Code:
!wget ftp://igs.bkg.bund.de/EUREF/BRDC/2022/033/BRDC00WRD_R_20220330000_01D_MN.rnx.gz
pip install wget
import wget
from datetime import date
def download_rinex_navigation(ftp, rpath, my_date):
doy = my_date.strftime('%j')
year = my_date.strftime('%Y')
url = ftp + rpath + year + '/' + doy + '/' + 'BRDC00WRD_R_' + year + doy + '0000_01D_MN.rnx.gz'
try:
name = wget.download(url)
except URLError:
return None
return name
my_date = date(2022, 2, 2)
print(my_date)
download_rinex_navigation('ftp://igs.bkg.bund.de/', 'EUREF/BRDC/', my_date)
from datetime import date, timedelta
yesterday = date.today() - timedelta(days=1)
print(yesterday)
download_rinex_navigation('ftp://igs.bkg.bund.de/', 'EUREF/BRDC/', yesterday)
import gzip
import shutil
zipped = download_rinex_navigation('ftp://igs.bkg.bund.de/', 'EUREF/BRDC/', yesterday)
unzipped = zipped[:-3]
with gzip.open(zipped, 'rb') as f_in:
with open(unzipped, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the standard operators
Step2: Problem 12.1
Step3: Problem 12.2
Step4: Problem 12.3 (use n=2 as a test-case)
Step5: Problem 12.5 and 12.6
Step6: Problem 12.7
Step7: Alternatively, we can find the indeterminacy bound for ΔX and ΔP (the unitless operators)
Step8: Which is also satisfied by the calculated value (1.41 > 0.25)
|
<ASSISTANT_TASK:>
Python Code:
from numpy import sqrt
from qutip import *
N = 10 # pick a size for our state-space
a = destroy(N)
n = a.dag()*a
a*a.dag() - a.dag()*a
n*a.dag() - a.dag()*n
n*a.dag() - a.dag()*n == a.dag()
psi = basis(N,2)
psi
a.dag()*psi
a.dag()*basis(N,2) == sqrt(3)*basis(N,3)
a
a.dag()
X = 1/2 * (a + a.dag())
P = 1/2j * (a - a.dag())
psi = 1/sqrt(2)*(basis(N,1)+basis(N,2))
ex = psi.dag()*X*psi
exq = psi.dag()*X*X*psi
ep = psi.dag()*P*psi
epq = psi.dag()*P*P*psi
deltaX = sqrt(exq[0][0][0] - ex[0][0][0]**2)
deltaP = sqrt(epq[0][0][0] - ep[0][0][0]**2)
deltaX * deltaP * 2 # compare to uncertainty relation (ΔxΔp >= 1/2)
# the factor of two is to convert from the unitless version of the operator
1/2*(psi.dag()*commutator(X,P)*psi).norm()
psi = 1/sqrt(2)*(basis(N,2)+basis(N,4))
ex = psi.dag()*X*psi
exq = psi.dag()*X*X*psi
ep = psi.dag()*P*psi
epq = psi.dag()*P*P*psi
deltaX = sqrt(exq[0][0][0] - ex[0][0][0]**2)
deltaP = sqrt(epq[0][0][0] - ep[0][0][0]**2)
deltaX * deltaP * 2 # to compare to book solution which uses the full x and p operators with units
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How many steps are there?
Step2: Helper functions for various calculations
Step3: The time derivative of $\theta_2$
Step4: The time derivative of $p_1$
Step5: The time derivative of $p_2$
Step6: Helper function to calculate a constant $C_1$
Step7: Helper function to calculate constant $C_2$
Step8: Calculate the derivatives for each $\theta_1,\theta_2,p_1,p_2$
Step9: Use a naive euler integration schemed to make a single step in the pendulum's motion
Step10: Now we need to do the actual calculations
Step11: Plotting the pendulum
Step12: Plot the paths to a png image pendulum.png
Step13: Now we can plot!
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
g = 9.8 # m/s
l1 = 1.2 # m
l2 = .7 # m
theta1_0 = np.pi/5 # other initial angle
theta2_0 = np.pi # initial angle
m1 = .10 # kg
m2 = .05 # kg
dt = 1.e-3 # time steps
max_t = 5.0 # max time
nsteps = long(max_t/dt) # number of steps
print "nsteps:", nsteps
# Import numpy for numerical calculations, vectors, etc.
from numpy import array, zeros, cos, sin, pi
from numpy.linalg import norm as norm
from scipy.integrate import ode
def dtheta1(theta1, theta2, p1, p2):
num = l2*p1 - l1*p2*cos(theta1 - theta2)
den = l1*l1*l2*(m1 + m2*sin(theta1 - theta2)**2)
return num/den
def dtheta2(theta1, theta2, p1, p2):
num = l1*(m1+m2)*p2 - l2*m2*p1*cos(theta1-theta2)
den = l1*l2*l2*m2*(m1+ m2*sin(theta1-theta2)**2)
return num/den
def dp1(theta1, theta2, p1, p2, c1, c2):
return -(m1+m2)*g*l1*sin(theta1) - c1 + c2
def dp2(theta1, theta2, p1, p2, c1, c2):
return -m2*g*l2*sin(theta2) + c1 - c2
def C1(theta1, theta2, p1, p2):
num = p1*p2*sin(theta1 - theta2)
den = l1*l2*(m1 + m2*sin(theta1 - theta2)**2)
return num/den
def C2(theta1, theta2, p1, p2):
num = l2*l2*m2*p1*p2 + l1*(m1 + m2)*p2**2 - l1*l2*m2*p1*p2*cos(theta1-theta2)
den = 2*l1*l1*l2*l2*(m1 + m2*sin(theta1-theta2)**2)**2*sin(2*(theta1-theta2))
return num/den
def deriv(t, y):
theta1, theta2, p1, p2 = y[0], y[1], y[2], y[3]
_c1 = C1(theta1, theta2, p1, p2)
_c2 = C2(theta1, theta2, p1, p2)
_dtheta1 = dtheta1(theta1, theta2, p1, p2)
_dtheta2 = dtheta2(theta1, theta2, p1, p2)
_dp1 = dp1(theta1, theta2, p1, p2, _c1, _c2)
_dp2 = dp2(theta1, theta2, p1, p2, _c1, _c2)
return array([_dtheta1, _dtheta2, _dp1, _dp2])
def euler(theta1, theta2, p1, p2):
_y = deriv(0, [theta1, theta2, p1, p2])
_dtheta1, _dtheta2, _dp1, _dp2 = _y[0], _y[1], _y[2], _y[3]
theta1 += _dtheta1*dt
theta2 += _dtheta2*dt
p1 += _dp1*dt
p2 += _dp2*dt
return theta1, theta2, p1, p2
def calculate_paths(method = "euler"):
theta1 = theta1_0
theta2 = theta2_0
p1, p2 = 0.0, 0
paths = []
if method == "euler":
print "Running EULER method"
for i in range(nsteps):
if (i % 500==0): print "Step = %d" % i
theta1, theta2, p1, p2 = euler(theta1, theta2, p1, p2)
r1 = array([l1*sin(theta1), -l1*cos(theta1)])
r2 = r1 + array([l2*sin(theta2), -l2*cos(theta2)])
paths.append([r1, r2])
elif method == "scipy":
print "Running SCIPY method"
yint = [theta1, theta2, p1, p2]
# r = ode(deriv).set_integrator('zvode', method='bdf')
r = ode(deriv).set_integrator('vode', method='bdf')
r.set_initial_value(yint, 0)
paths = []
while r.successful() and r.t < max_t:
r.integrate(r.t+dt)
theta1, theta2 = r.y[0], r.y[1]
r1 = array([l1*sin(theta1), -l1*cos(theta1)])
r2 = r1 + array([l2*sin(theta2), -l2*cos(theta2)])
paths.append([r1, r2])
return array(paths)
paths = calculate_paths()
%pylab inline --no-import-all
import matplotlib
import matplotlib.pyplot as pyplot
from matplotlib import animation
from matplotlib.collections import LineCollection
from matplotlib.lines import Line2D
import numpy as np
def plot_paths(paths, IMAGE_PATH = "pendulum.png", TITLE = "Double Pendulum Evolution"):
# set up a list of points for each node we draw the path of
points1 = np.array([paths[:, 0, 0], paths[:, 0, 1]]).transpose().reshape(-1,1,2)
points2 = np.array([paths[:, 1, 0], paths[:, 1, 1]]).transpose().reshape(-1,1,2)
# set up a list of segments for plot coloring
segs1 = np.concatenate([points1[:-1],points1[1:]],axis=1)
segs2 = np.concatenate([points2[:-1],points2[1:]],axis=1)
# make the collection of segments
lc1 = LineCollection(segs1, cmap=pyplot.get_cmap('Blues'), linewidth=3, alpha=0.7)
lc2 = LineCollection(segs2, cmap=pyplot.get_cmap('Greens'), linewidth=3, alpha=0.7)
# fill up the line collections with the time data
t = np.linspace(0,1,paths.shape[0])
lc1.set_array(t)
lc2.set_array(t)
# fake line objects to add to legend for reference
lc1_line = Line2D([0, 1], [0, 1], color='b')
lc2_line = Line2D([0, 1], [0, 1], color='g')
# settings for plotting
YAXIS = "Y"
XAXIS = "X"
# Plot the trajectories
print "Plotting."
# create a plot
plt = pyplot.figure(figsize=(15, 10), dpi=80, facecolor='w')
ax = pyplot.axes()
# set the title and axis labels
ax.set_xlabel(XAXIS)
ax.set_ylabel(YAXIS)
ax.set_title(TITLE)
ax.add_collection(lc1)
ax.add_collection(lc2)
# Manually adding artists doesn't rescale the plot, so we need to autoscale
ax.autoscale()
#ax.plot(paths[:, 0, 0], paths[:, 0, 1], "b-", alpha=0.7, linewidth=3, label="$m_1$")
#ax.plot(paths[:, 1, 0], paths[:, 1, 1], "g-", alpha=0.7, linewidth=3, label="$m_2$")
# # Objects: draw a dot on the last trajectory point
#ax.plot(paths[-1, 0, 0], paths[-1, 0, 1], "b-")
#ax.plot(paths[-1, 1, 0], paths[-1, 1, 1], "g-")
# pyplot.axis('equal')
ax.set_aspect('equal', adjustable='box')
ax.legend([lc1_line, lc2_line], ['$m_1$', '$m_2$'], bbox_to_anchor=(1., 1.), loc="best",
ncol=1, fancybox=True, shadow=True)
# Save our plot
print "Saving plot to %s." % IMAGE_PATH
plt.savefig(IMAGE_PATH, bbox_inches='tight')
plot_paths(paths)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I know this is repetitive, but let's break down what we have.
Step2: There it is
Step3: and we want to generate a list of sentences
Step4: Start with the loop header--you see it on the far right
Step5: As we know, this will create a list[-like thing] with the numbers 0 through 9, inclusive, and assign it to the variable x.
Step6: To get a list, we've been casting the generator to a list
Step7: and we get a vanilla Python list.
Step8: Also--where have we seen parentheses before? TUPLES! You can think of a generator as a sort of tuple. After all, like a tuple, a generator is immutable (cannot be changed once created). Be careful with this, though
Step9: zip() does pretty much the same thing, but on steroids
Step10: I want to loop through these three lists simultaneously, so I can print out the person's first name, last name, and their favorite language on the same line. Since I know they're the same length, I could just do a range(len(fname)), but this is arguably more elegant
Step11: enumerate()
Step12: This is great if all I want to do is loop through the lists simultaneously. But what if the ordering of the elements matters? For example, I want to prefix each sentence with the line number. How can I track what index I'm on in a loop if I don't use range()?
|
<ASSISTANT_TASK:>
Python Code:
squares = []
for element in range(10):
squares.append(element ** 2)
print(squares)
squares = [element ** 2 for element in range(10)]
print(squares)
word_counts = {
'the': 10,
'race': 2,
'is': 3,
'on': 5
}
sentences = ['"{}" appears {} times.'.format(word, count) for word, count in word_counts.items()]
print(sentences)
x = range(10)
print(x)
print(type(x))
list(x)
x = [i for i in range(10)] # Brackets -> list
print(x)
x = (i for i in range(10)) # Parentheses -> generator
print(x)
d = {
'uga': 'University of Georgia',
'gt': 'Georgia Tech',
'upitt': 'University of Pittsburgh',
'cmu': 'Carnegie Mellon University'
}
for key, value in d.items():
print("'{}' stands for '{}'.".format(key, value))
first_names = ['Shannon', 'Jen', 'Natasha', 'Benjamin']
last_names = ['Quinn', 'Benoit', 'Romanov', 'Button']
fave_langs = ['Python', 'Java', 'Assembly', 'Go']
for fname, lname, lang in zip(first_names, last_names, fave_langs):
print("{} {}'s favorite language is {}.".format(fname, lname, lang))
for fname, lname, lang in zip(first_names, last_names, fave_langs):
print("{} {}'s favorite language is {}.".format(fname, lname, lang))
x = ['a', 'list', 'of', 'strings']
for index, element in enumerate(x):
print("Found '{}' at index {}.".format(element, index))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will now create the multi-group library using data directly from Appendix A of the C5G7 benchmark documentation. All of the data below will be created at 294K, consistent with the benchmark.
Step2: We will now add the scattering matrix data.
Step3: Now that the UO2 data has been created, we can move on to the remaining materials using the same process.
Step4: Generate 2-D C5G7 Problem Input Files
Step5: Now we can go ahead and produce a materials.xml file for use by OpenMC
Step6: Our next step will be to create the geometry information needed for our assembly and to write that to the geometry.xml file.
Step7: The next step is to take our universes (representing the different pin types) and lay them out in a lattice to represent the assembly types
Step8: Let's now create the core layout in a 3x3 lattice where each lattice position is one of the assemblies we just defined.
Step9: Before we commit to the geometry, we should view it using the Python API's plotting capability
Step10: OK, it looks pretty good, let's go ahead and write the file
Step11: We can now create the tally file information. The tallies will be set up to give us the pin powers in this notebook. We will do this with a mesh filter, with one mesh cell per pin.
Step12: With the geometry and materials finished, we now just need to define simulation parameters for the settings.xml file. Note the use of the energy_mode attribute of our settings_file object. This is used to tell OpenMC that we intend to run in multi-group mode instead of the default continuous-energy mode. If we didn't specify this but our cross sections file was not a continuous-energy data set, then OpenMC would complain.
Step13: Let's go ahead and execute the simulation! You'll notice that the output for multi-group mode is exactly the same as for continuous-energy. The differences are all under the hood.
Step14: Results Visualization
|
<ASSISTANT_TASK:>
Python Code:
import os
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import openmc
%matplotlib inline
# Create a 7-group structure with arbitrary boundaries (the specific boundaries are unimportant)
groups = openmc.mgxs.EnergyGroups(np.logspace(-5, 7, 8))
uo2_xsdata = openmc.XSdata('uo2', groups)
uo2_xsdata.order = 0
# When setting the data let the object know you are setting the data for a temperature of 294K.
uo2_xsdata.set_total([1.77949E-1, 3.29805E-1, 4.80388E-1, 5.54367E-1,
3.11801E-1, 3.95168E-1, 5.64406E-1], temperature=294.)
uo2_xsdata.set_absorption([8.0248E-03, 3.7174E-3, 2.6769E-2, 9.6236E-2,
3.0020E-02, 1.1126E-1, 2.8278E-1], temperature=294.)
uo2_xsdata.set_fission([7.21206E-3, 8.19301E-4, 6.45320E-3, 1.85648E-2,
1.78084E-2, 8.30348E-2, 2.16004E-1], temperature=294.)
uo2_xsdata.set_nu_fission([2.005998E-2, 2.027303E-3, 1.570599E-2, 4.518301E-2,
4.334208E-2, 2.020901E-1, 5.257105E-1], temperature=294.)
uo2_xsdata.set_chi([5.87910E-1, 4.11760E-1, 3.39060E-4, 1.17610E-7,
0.00000E-0, 0.00000E-0, 0.00000E-0], temperature=294.)
# The scattering matrix is ordered with incoming groups as rows and outgoing groups as columns
# (i.e., below the diagonal is up-scattering).
scatter_matrix = \
[[[1.27537E-1, 4.23780E-2, 9.43740E-6, 5.51630E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 3.24456E-1, 1.63140E-3, 3.14270E-9, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 4.50940E-1, 2.67920E-3, 0.00000E-0, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 4.52565E-1, 5.56640E-3, 0.00000E-0, 0.00000E-0],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 1.25250E-4, 2.71401E-1, 1.02550E-2, 1.00210E-8],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 1.29680E-3, 2.65802E-1, 1.68090E-2],
[0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 0.00000E-0, 8.54580E-3, 2.73080E-1]]]
scatter_matrix = np.array(scatter_matrix)
scatter_matrix = np.rollaxis(scatter_matrix, 0, 3)
uo2_xsdata.set_scatter_matrix(scatter_matrix, temperature=294.)
# Initialize the library
mg_cross_sections_file = openmc.MGXSLibrary(groups)
# Add the UO2 data to it
mg_cross_sections_file.add_xsdata(uo2_xsdata)
# And write to disk
mg_cross_sections_file.export_to_hdf5('mgxs.h5')
# For every cross section data set in the library, assign an openmc.Macroscopic object to a material
materials = {}
for xs in ['uo2', 'mox43', 'mox7', 'mox87', 'fiss_chamber', 'guide_tube', 'water']:
materials[xs] = openmc.Material(name=xs)
materials[xs].set_density('macro', 1.)
materials[xs].add_macroscopic(xs)
# Instantiate a Materials collection, register all Materials, and export to XML
materials_file = openmc.Materials(materials.values())
# Set the location of the cross sections file to our pre-written set
materials_file.cross_sections = 'c5g7.h5'
materials_file.export_to_xml()
# Create the surface used for each pin
pin_surf = openmc.ZCylinder(x0=0, y0=0, R=0.54, name='pin_surf')
# Create the cells which will be used to represent each pin type.
cells = {}
universes = {}
for material in materials.values():
# Create the cell for the material inside the cladding
cells[material.name] = openmc.Cell(name=material.name)
# Assign the half-spaces to the cell
cells[material.name].region = -pin_surf
# Register the material with this cell
cells[material.name].fill = material
# Repeat the above for the material outside the cladding (i.e., the moderator)
cell_name = material.name + '_moderator'
cells[cell_name] = openmc.Cell(name=cell_name)
cells[cell_name].region = +pin_surf
cells[cell_name].fill = materials['water']
# Finally add the two cells we just made to a Universe object
universes[material.name] = openmc.Universe(name=material.name)
universes[material.name].add_cells([cells[material.name], cells[cell_name]])
lattices = {}
# Instantiate the UO2 Lattice
lattices['UO2 Assembly'] = openmc.RectLattice(name='UO2 Assembly')
lattices['UO2 Assembly'].dimension = [17, 17]
lattices['UO2 Assembly'].lower_left = [-10.71, -10.71]
lattices['UO2 Assembly'].pitch = [1.26, 1.26]
u = universes['uo2']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['UO2 Assembly'].universes = \
[[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u],
[u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u],
[u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u]]
# Create a containing cell and universe
cells['UO2 Assembly'] = openmc.Cell(name='UO2 Assembly')
cells['UO2 Assembly'].fill = lattices['UO2 Assembly']
universes['UO2 Assembly'] = openmc.Universe(name='UO2 Assembly')
universes['UO2 Assembly'].add_cell(cells['UO2 Assembly'])
# Instantiate the MOX Lattice
lattices['MOX Assembly'] = openmc.RectLattice(name='MOX Assembly')
lattices['MOX Assembly'].dimension = [17, 17]
lattices['MOX Assembly'].lower_left = [-10.71, -10.71]
lattices['MOX Assembly'].pitch = [1.26, 1.26]
m = universes['mox43']
n = universes['mox7']
o = universes['mox87']
g = universes['guide_tube']
f = universes['fiss_chamber']
lattices['MOX Assembly'].universes = \
[[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m],
[m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
[m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
[m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
[m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
[m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, g, o, o, g, o, o, f, o, o, g, o, o, g, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, n, o, o, o, o, o, o, o, o, o, o, o, n, n, m],
[m, n, g, o, o, g, o, o, g, o, o, g, o, o, g, n, m],
[m, n, n, n, o, o, o, o, o, o, o, o, o, n, n, n, m],
[m, n, n, g, n, o, o, o, o, o, o, o, n, g, n, n, m],
[m, n, n, n, n, g, n, n, g, n, n, g, n, n, n, n, m],
[m, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, m],
[m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m]]
# Create a containing cell and universe
cells['MOX Assembly'] = openmc.Cell(name='MOX Assembly')
cells['MOX Assembly'].fill = lattices['MOX Assembly']
universes['MOX Assembly'] = openmc.Universe(name='MOX Assembly')
universes['MOX Assembly'].add_cell(cells['MOX Assembly'])
# Instantiate the reflector Lattice
lattices['Reflector Assembly'] = openmc.RectLattice(name='Reflector Assembly')
lattices['Reflector Assembly'].dimension = [1,1]
lattices['Reflector Assembly'].lower_left = [-10.71, -10.71]
lattices['Reflector Assembly'].pitch = [21.42, 21.42]
lattices['Reflector Assembly'].universes = [[universes['water']]]
# Create a containing cell and universe
cells['Reflector Assembly'] = openmc.Cell(name='Reflector Assembly')
cells['Reflector Assembly'].fill = lattices['Reflector Assembly']
universes['Reflector Assembly'] = openmc.Universe(name='Reflector Assembly')
universes['Reflector Assembly'].add_cell(cells['Reflector Assembly'])
lattices['Core'] = openmc.RectLattice(name='3x3 core lattice')
lattices['Core'].dimension= [3, 3]
lattices['Core'].lower_left = [-32.13, -32.13]
lattices['Core'].pitch = [21.42, 21.42]
r = universes['Reflector Assembly']
u = universes['UO2 Assembly']
m = universes['MOX Assembly']
lattices['Core'].universes = [[u, m, r],
[m, u, r],
[r, r, r]]
# Create boundary planes to surround the geometry
min_x = openmc.XPlane(x0=-32.13, boundary_type='reflective')
max_x = openmc.XPlane(x0=+32.13, boundary_type='vacuum')
min_y = openmc.YPlane(y0=-32.13, boundary_type='vacuum')
max_y = openmc.YPlane(y0=+32.13, boundary_type='reflective')
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = lattices['Core']
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y
# Create root Universe
root_universe = openmc.Universe(name='root universe', universe_id=0)
root_universe.add_cell(root_cell)
root_universe.plot(center=(0., 0., 0.), width=(3 * 21.42, 3 * 21.42), pixels=(500, 500),
color_by='material')
# Create Geometry and set root Universe
geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
geometry.export_to_xml()
tallies_file = openmc.Tallies()
# Instantiate a tally Mesh
mesh = openmc.Mesh()
mesh.type = 'regular'
mesh.dimension = [17 * 2, 17 * 2]
mesh.lower_left = [-32.13, -10.71]
mesh.upper_right = [+10.71, +32.13]
# Instantiate tally Filter
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate the Tally
tally = openmc.Tally(name='mesh tally')
tally.filters = [mesh_filter]
tally.scores = ['fission']
# Add tally to collection
tallies_file.append(tally)
# Export all tallies to a "tallies.xml" file
tallies_file.export_to_xml()
# OpenMC simulation parameters
batches = 150
inactive = 50
particles = 5000
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Tell OpenMC this is a multi-group problem
settings_file.energy_mode = 'multi-group'
# Set the verbosity to 6 so we dont see output for every batch
settings_file.verbosity = 6
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-32.13, -10.71, -1e50, 10.71, 32.13, 1e50]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
# Tell OpenMC we want to run in eigenvalue mode
settings_file.run_mode = 'eigenvalue'
# Export to "settings.xml"
settings_file.export_to_xml()
# Run OpenMC
openmc.run()
# Load the last statepoint file and keff value
sp = openmc.StatePoint('statepoint.' + str(batches) + '.h5')
# Get the OpenMC pin power tally data
mesh_tally = sp.get_tally(name='mesh tally')
fission_rates = mesh_tally.get_values(scores=['fission'])
# Reshape array to 2D for plotting
fission_rates.shape = mesh.dimension
# Normalize to the average pin power
fission_rates /= np.mean(fission_rates)
# Force zeros to be NaNs so their values are not included when matplotlib calculates
# the color scale
fission_rates[fission_rates == 0.] = np.nan
# Plot the pin powers and the fluxes
plt.figure()
plt.imshow(fission_rates, interpolation='none', cmap='jet', origin='lower')
plt.colorbar()
plt.title('Pin Powers')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a quantum circuit
Step2: Extract OpenQASM
Step3: Visualize Circuit
Step4: Use Stylesheet
Step5: Bundle Classical Registers
Step6: Show Index
Step7: Reduce Gap Between Gate
Step8: Fold a Long Circuit
Step9: Show Rotation Parameters in the unit of $\pi$
Step10: Use Emoji and LaTeX Symbols as Gate Symbols
Step11: Different style of gates
Step12: All Options
Step13: You can configure the color scheme. Composer style sheet is prepared as qx_color_scheme.
Step14: Save the circuit image to a file
|
<ASSISTANT_TASK:>
Python Code:
from math import pi
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer, qx_color_scheme
# We recommend the following options for Jupter notebook
%matplotlib inline
# Create a Quantum Register called "q" with 3 qubits
qr = QuantumRegister(3, 'q')
# Create a Classical Register called "c" with 3 bits
cr = ClassicalRegister(3, 'c')
# Create a Quantum Circuit called involving "qr" and "cr"
circuit = QuantumCircuit(qr, cr)
circuit.x(qr[0]).c_if(cr, 3)
circuit.z(qr[0])
circuit.u2(pi/2, 2*pi/3, qr[1])
circuit.cu1(pi, qr[0], qr[1])
# Barrier to seperator the input from the circuit
circuit.barrier(qr[0])
circuit.barrier(qr[1])
circuit.barrier(qr[2])
# Toffoli gate from qubit 0,1 to qubit 2
circuit.ccx(qr[0], qr[1], qr[2])
# CNOT (Controlled-NOT) gate from qubit 0 to qubit 1
circuit.cx(qr[0], qr[1])
circuit.swap(qr[0], qr[2])
# measure gate from qr to cr
circuit.measure(qr, cr)
QASM_source = circuit.qasm()
print(QASM_source)
drawer(circuit)
drawer(circuit, basis='u1,u2,u3,id,cx', scale=1.0)
my_style = {'plotbarrier': True}
drawer(circuit, style=my_style)
my_style = {'cregbundle': True}
drawer(circuit, style=my_style)
my_style = {'showindex': True}
drawer(circuit, style=my_style)
my_style = {'compress': True}
drawer(circuit, style=my_style)
my_style = {'fold': 6}
drawer(circuit, style=my_style)
my_style = {'usepiformat': True}
drawer(circuit, style=my_style)
qr = QuantumRegister(1, 'q')
circuit_xyz = QuantumCircuit(qr)
circuit_xyz.x(qr[0])
circuit_xyz.y(qr[0])
circuit_xyz.z(qr[0])
drawer(circuit_xyz)
my_style = {'displaytext': {'x': '😺', 'y': '\Sigma', 'z': '✈'}}
drawer(circuit_xyz, style=my_style)
qr = QuantumRegister(2, 'q')
circuit_cucz = QuantumCircuit(qr)
circuit_cucz.cz(qr[0], qr[1])
circuit_cucz.cu1(pi, qr[0], qr[1])
drawer(circuit_cucz)
my_style = {'latexdrawerstyle': False}
drawer(circuit_cucz, style=my_style)
qr = QuantumRegister(3, 'q')
cr = ClassicalRegister(3, 'c')
circuit_all = QuantumCircuit(qr, cr)
circuit_all.x(qr[0])
circuit_all.y(qr[0])
circuit_all.z(qr[0])
circuit_all.barrier(qr[0])
circuit_all.barrier(qr[1])
circuit_all.barrier(qr[2])
circuit_all.h(qr[0])
circuit_all.s(qr[0])
circuit_all.sdg(qr[0])
circuit_all.t(qr[0])
circuit_all.tdg(qr[0])
circuit_all.iden(qr[0])
circuit_all.reset(qr[0])
circuit_all.rx(pi, qr[0])
circuit_all.ry(pi, qr[0])
circuit_all.rz(pi, qr[0])
circuit_all.u0(pi, qr[0])
circuit_all.u1(pi, qr[0])
circuit_all.u2(pi, pi, qr[0])
circuit_all.u3(pi, pi, pi, qr[0])
circuit_all.swap(qr[0], qr[1])
circuit_all.cx(qr[0], qr[1])
circuit_all.cy(qr[0], qr[1])
circuit_all.cz(qr[0], qr[1])
circuit_all.ch(qr[0], qr[1])
circuit_all.cu1(pi, qr[0], qr[1])
circuit_all.cu3(pi, pi, pi, qr[0], qr[1])
circuit_all.crz(pi, qr[0], qr[1])
circuit_all.ccx(qr[0], qr[1], qr[2])
circuit_all.cswap(qr[0], qr[1], qr[2])
circuit_all.measure(qr, cr)
drawer(circuit_all)
cmp_style = qx_color_scheme()
cmp_style
drawer(circuit_all, style=cmp_style)
cmp_style.update({
'usepiformat': True,
'showindex': True,
'cregbundle': True,
'compress': True,
'fold': 17
})
drawer(circuit_all, filename='circuit.pdf', style=cmp_style)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us plot the first five examples of the train data (first row) and test data (second row).
Step2: Then we import shogun components and convert the data to shogun objects
Step3: Let's plot a few missclassified examples - I guess we all agree that these are notably harder to detect.
Step4: Now the question is - is 97.30% accuracy the best we can do? While one would usually re-train KNN with different values for k here and likely perform Cross-validation, we just use a small trick here that saves us lots of computation time
Step5: We have the prediction for each of the 13 k's now and can quickly compute the accuracies
Step6: So k=3 seems to have been the optimal choice.
Step7: So we can significantly speed it up. Let's do a more systematic comparison. For that a helper function is defined to run the evaluation for KNN
Step8: Evaluate KNN with and without Cover Tree. This takes a few seconds
Step9: Generate plots with the data collected in the evaluation
Step10: Although simple and elegant, KNN is generally very resource costly. Because all the training samples are to be memorized literally, the memory cost of KNN learning becomes prohibitive when the dataset is huge. Even when the memory is big enough to hold all the data, the prediction will be slow, since the distances between the query point and all the training points need to be computed and ranked. The situation becomes worse if in addition the data samples are all very high-dimensional. Leaving aside computation time issues, k-NN is a very versatile and competitive algorithm. It can be applied to any kind of objects (not just numerical data) - as long as one can design a suitable distance function. In pratice k-NN used with bagging can create improved and more robust results.
Step11: Let's apply the SVM to the same test data set to compare results
Step12: Since the SVM performs way better on this task - let's apply it to all data we did not use in training.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from scipy.io import loadmat, savemat
from numpy import random
from os import path
import matplotlib.pyplot as plt
%matplotlib inline
import shogun as sg
mat = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))
Xall = mat['data']
Yall = np.array(mat['label'].squeeze(), dtype=np.double)
# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1
random.seed(0)
subset = random.permutation(len(Yall))
Xtrain = Xall[:, subset[:5000]]
Ytrain = Yall[subset[:5000]]
Xtest = Xall[:, subset[5000:6000]]
Ytest = Yall[subset[5000:6000]]
Nsplit = 2
all_ks = range(1, 21)
print(Xall.shape)
print(Xtrain.shape)
print(Xtest.shape)
def plot_example(dat, lab):
for i in range(5):
ax=plt.subplot(1,5,i+1)
plt.title(int(lab[i]))
ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')
ax.set_xticks([])
ax.set_yticks([])
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xtrain, Ytrain)
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xtest, Ytest)
labels = sg.create_labels(Ytrain)
feats = sg.create_features(Xtrain)
k=3
dist = sg.create_distance('EuclideanDistance')
knn = sg.create_machine("KNN", k=k, distance=dist, labels=labels)
labels_test = sg.create_labels(Ytest)
feats_test = sg.create_features(Xtest)
knn.train(feats)
pred = knn.apply(feats_test)
print("Predictions", pred.get("labels")[:5])
print("Ground Truth", Ytest[:5])
evaluator = sg.create_evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(pred, labels_test)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(pred != Ytest)[0]
Xbad=Xtest[:,idx]
Ybad=Ytest[idx]
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xbad, Ybad)
knn.put('k', 13)
multiple_k=knn.get("classify_for_multiple_k")
print(multiple_k.shape)
for k in range(13):
print("Accuracy for k=%d is %2.2f%%" % (k+1, 100*np.mean(multiple_k[:,k]==Ytest)))
%%time
knn.put('k', 3)
knn.put('knn_solver', "KNN_BRUTE")
pred = knn.apply(feats_test)
# FIXME: causes SEGFAULT
# %%time
# knn.put('k', 3)
# knn.put('knn_solver', "KNN_COVER_TREE")
# pred = knn.apply(feats_test)
def evaluate(labels, feats, use_cover_tree=False):
import time
split = sg.create_splitting_strategy("CrossValidationSplitting", labels=labels, num_subsets=Nsplit)
split.build_subsets()
accuracy = np.zeros((Nsplit, len(all_ks)))
acc_train = np.zeros(accuracy.shape)
time_test = np.zeros(accuracy.shape)
for i in range(Nsplit):
idx_train = split.generate_subset_inverse(i)
idx_test = split.generate_subset_indices(i)
for j, k in enumerate(all_ks):
#print "Round %d for k=%d..." % (i, k)
feats.add_subset(idx_train)
labels.add_subset(idx_train)
dist = sg.create_distance('EuclideanDistance')
dist.init(feats, feats)
knn = sg.create_machine("KNN", k=k, distance=dist, labels=labels)
#knn.set_store_model_features(True)
#FIXME: causes SEGFAULT
if use_cover_tree:
continue
# knn.put('knn_solver', "KNN_COVER_TREE")
else:
knn.put('knn_solver', "KNN_BRUTE")
knn.train()
evaluator = sg.create_evaluation("MulticlassAccuracy")
pred = knn.apply()
acc_train[i, j] = evaluator.evaluate(pred, labels)
feats.remove_subset()
labels.remove_subset()
feats.add_subset(idx_test)
labels.add_subset(idx_test)
t_start = time.clock()
pred = knn.apply_multiclass(feats)
time_test[i, j] = (time.clock() - t_start) / labels.get_num_labels()
accuracy[i, j] = evaluator.evaluate(pred, labels)
feats.remove_subset()
labels.remove_subset()
return {'eout': accuracy, 'ein': acc_train, 'time': time_test}
labels = sg.create_labels(Ytest)
feats = sg.create_features(Xtest)
print("Evaluating KNN...")
wo_ct = evaluate(labels, feats, use_cover_tree=False)
# wi_ct = evaluate(labels, feats, use_cover_tree=True)
print("Done!")
fig = plt.figure(figsize=(8,5))
plt.plot(all_ks, wo_ct['eout'].mean(axis=0), 'r-*')
# plt.plot(all_ks, wo_ct['ein'].mean(axis=0), 'r--*')
plt.legend(["Test Accuracy", "Training Accuracy"])
plt.xlabel('K')
plt.ylabel('Accuracy')
plt.title('KNN Accuracy')
plt.tight_layout()
fig = plt.figure(figsize=(8,5))
plt.plot(all_ks, wo_ct['time'].mean(axis=0), 'r-*')
# plt.plot(all_ks, wi_ct['time'].mean(axis=0), 'b-d')
plt.xlabel("K")
plt.ylabel("time")
plt.title('KNN time')
plt.legend(["Plain KNN", "CoverTree KNN"], loc='center right')
plt.tight_layout()
width=80
C=1
gk=sg.create_kernel("GaussianKernel", width=width)
svm=sg.create_machine("GMNPSVM", C=C, kernel=gk, labels=labels)
_=svm.train(feats)
out=svm.apply(feats_test)
evaluator = sg.create_evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(out, labels_test)
print("Accuracy = %2.2f%%" % (100*accuracy))
Xrem=Xall[:,subset[6000:]]
Yrem=Yall[subset[6000:]]
feats_rem=sg.create_features(Xrem)
labels_rem=sg.create_labels(Yrem)
out=svm.apply(feats_rem)
evaluator = sg.create_evaluation("MulticlassAccuracy")
accuracy = evaluator.evaluate(out, labels_rem)
print("Accuracy = %2.2f%%" % (100*accuracy))
idx=np.where(out.get("labels") != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=plt.figure(figsize=(17,6))
plt.gray()
plot_example(Xbad, Ybad)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Stats
Step2: Machine Learning and Clustering
Step3: PCA
Step4: K-Means Clustering
|
<ASSISTANT_TASK:>
Python Code:
mons["AVERAGE_STAT"] = mons["STAT_TOTAL"]/6
gens = pd.Series([0 for i in range(len(mons.index))], index=mons.index)
for ID, mon in mons.iterrows():
if 0<mon.DEXID<=151:
gens[ID] = 1
elif 151<mon.DEXID<=251:
gens[ID] = 2
elif 251<mon.DEXID<=386:
gens[ID] = 3
elif 386<mon.DEXID<=493:
gens[ID] = 4
elif 493<mon.DEXID<=649:
gens[ID] = 5
elif 649<mon.DEXID<=721:
gens[ID] = 6
elif 721<mon.DEXID<=805:
gens[ID] = 7
else:
gens[ID] = 0
mons["GEN"] = gens
mons.to_csv("./data/pokemon_preUSUM_data.csv")
gen = {}
for i in range(1,8):
gen[i] = mons[mons.GEN == i]
plt.figure(100)
colors = sns.color_palette("colorblind", 7)
for i in range(1,8):
sns.distplot( mons[mons["GEN"] == i]["STAT_TOTAL"], hist=False,kde=True, color=colors[i-1], label=f"Gen {i}")
plt.legend()
plt.show()
stat_averages_by_gen = {i:gen[i].AVERAGE_STAT for i in range(1,8)}
testable_data = list(stat_averages_by_gen.values())
data = [list(gen) for gen in testable_data]
data = np.array(data)
averages = {i: stat_averages_by_gen[i].mean() for i in range(1,8)}
averages
stats.kruskal(*data)
recarray = mons.to_records()
test = comp.pairwise_tukeyhsd(recarray["AVERAGE_STAT"], recarray["GEN"])
test.summary()
np.random.seed(525_600)
stats_gens = mons[['HP', 'ATTACK', 'DEFENSE',
'SPECIAL_ATTACK', 'SPECIAL_DEFENSE', 'SPEED', 'GEN']]
X = np.c_[stats_gens]
pca = decomposition.PCA()
pca.fit(X)
pca.explained_variance_
pca.n_components = 3
X_reduced = pca.fit_transform(X)
X_reduced.shape
pca.get_params()
from sklearn import cluster
k_means = cluster.KMeans(n_clusters = 6)
k_means.fit(X)
mons["KMEANS_LABEL"] = pd.Series(k_means.labels_)
plotData = mons[["GEN", "STAT_TOTAL", "KMEANS_LABEL"]]
colors = sns.color_palette("colorblind", 7)
for i in range(1,8):
sns.distplot( plotData[plotData["GEN"] == i]["STAT_TOTAL"], color=colors[i-1])
plt.figure(925)
sns.boxplot(x="KMEANS_LABEL", y="STAT_TOTAL", data=plotData)
plt.show()
plt.figure(9050624)
sns.pairplot(plotData, kind="scatter", hue="GEN", palette=colors)
plt.show()
plotData.to_csv("./data/kmeans.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, max_error, explained_variance_score, mean_absolute_percentage_error
# initialise & fit Gradient Boosting Regressor
model = GradientBoostingRegressor(loss='squared_error',
n_estimators=100,
max_depth=None,
subsample=0.8,
random_state=101)
model.fit(X_train, y_train)
# create dictionary that contains feature importance
feature_importance= dict(zip(X_train.columns, model.feature_importances_))
print('Feature Importance',feature_importance)
# make prediction for test data & evaluate performance
y_pred = model.predict(X_test)
print('RMSE:',mean_squared_error(y_test, y_pred, squared = False))
print('MAE:',mean_absolute_error(y_test, y_pred))
print('MAPE:',mean_absolute_percentage_error(y_test, y_pred))
print('Max Error:',max_error(y_test, y_pred))
print('Explained Variance Score:',explained_variance_score(y_test, y_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Role of Predictions
Step3: Wow, flights arrive late 39.0% of the time! The problem is as big as it seems. But how late is the average flight?
Step5: Flights are 9.4 minutes late departing and 4.4 minutes late arriving on average. Why the constant tardiness? Are the airlines incompetent (as we often angrily suspect), or is the problem weather? Weather is presently out of human control, so that would let the airlines off the hook. Should we be mad at the airlines or angry with the god(s)? (Personally, I’m fearful of Zeus!)
Step7: An explanation of the different kinds of delay is available on the Federal Aviation Administration (FAA) website.
Step9: Our result isn’t perfect—the sources of delay don’t total to 100%. This is a result of our aforementioned simplification regarding arrival/departing delays. Nevertheless, we do get a sense of things; our sample is informative. Most delay is from previous delays with the same airplane, which have a cascading effect on the rest of the schedule. Of delays originating during a flight’s operations, most are carrier delays. Specifically, 29% of delays are carrier delays, versus 21% for air traffic control delays and only 4.5% for weather delays.
Step10: Looks like a few flights don’t have delay information. Let’s filter those, and sort the data before saving it as a single JSON file
Step12: DateTime Conversion
Step13: In practice, these functions were worked out iteratively over the course of an hour. Employing them is then simple
Step14: Always explicitly sort your data before vectorizing it. Don’t leave the sort up to the system. If you do so, a software version change or some other unknown cause might ultimately change the sort order of your training data as compared with your result data. This would be catastrophic and confusing and should be avoided at all costs. Explicitly sorting training data in a way that avoids arbitrary sorting is essential
Step15: Let’s copy the file into a JSON Lines file and check it out
Step16: Now take a look at the result
Step17: Looking good! Our features are now prepared for vectorization.
Step18: With our data loaded, now we need to prepare our data for classification.
Step19: If null values are found, we need only employ DataFrame.na.fill to fill them. Supply fillna with a dict with the column name as the key and the column’s fill value as the value, and it will fill in the column name with that value
Step20: In our dataset, no nulls are found, but there usually are some, so take note of this step for the future. It will save you trouble as you start engineering and vectorizing your features.
Step21: RDD Alternative RouteNum
Step22: Next, we extract the heights of the bars and the bin definitions from the tuple returned by histogram
Step23: Since bars are drawn from the left, we remove the rightmost item in the bins list
Step24: Next, we use a list comprehension to determine the range between the values defining the buckets, which gives us the width of the bars. We’ve decided that the bars should be as wide as the data they measure
Step25: Finally, we plot the bar chart, specifying our bar widths (they draw from the left) and coloring our bars blue
Step27: We can summarize the previous operations in a function called create_hist, which we will reuse to draw other histograms like this one
Step28: To start, let’s visualize the first set of buckets we considered
Step29: Wow. This is a very distorted distribution. We have created an imbalanced class set from one that should ideally be balanced. This is a problem, because imbalanced classes can produce classifiers that only predict the most common value, and yet still seem fairly accurate. At best, this label set would have made things hard for our classifier when there is no benefit to doing so. We need to rethink our labels.
Step30: Hmm... this looks better, but the leftmost and rightmost buckets look too small. Let’s combine the –87 to –30 and –30 to –15 buckets, and try again
Step31: This looks better! However, the 15–30 bucket seems too small. Let’s merge this bucket with the 0–15 bucket and try again
Step33: Ah-ha! That looks pretty good. The buckets end up being “very early” (> 15 minutes early), “early” (0–15 minutes early), “late” (0–30 minutes late), and “very late” (30+ minutes late). These aren’t perfect in terms of usability, but I think they can work. Ideally the distribution in the buckets would be equal, but they are close enough.
Step34: You can see that ArrDelay is mapped to ArrDelayBucket as we indicated.
Step35: You can see the result is the same as with our UDF buckets. Now that we’ve created the ArrDelayBucket fields, we’re ready to vectorize our features using tools from pyspark.ml.feature.
Step36: Then we need to index our nominal or categorical string columns into sets of vectors made up of binary variables for every unique value found in a given column. To achieve this, for each categorical column (be it a string or number), we need to
Step37: Having indexed our categorical features, now we combine them with our numeric features into a single feature vector for our classifier.
Step38: Now we’re ready to train our classifier!
Step39: Creating and Fitting a Model
Step40: Next, we need to evaluate the classifier we’ve created.
Step41: Not great, but good enough for now. Don’t worry, we’ll work on making the model more accurate in Chapter 9.
Step42: Now let’s see the distribution of the Prediction field, to verify we don’t have that same bug
Step43: These “sanity checks” seem okay!
Step44: Conclusion
|
<ASSISTANT_TASK:>
Python Code:
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
# Initialize PySpark with MongoDB and Elastic support
spark = (
SparkSession.builder.appName("Exploring Data with Reports")
# Load support for MongoDB and Elasticsearch
.config("spark.jars.packages", "org.mongodb.spark:mongo-spark-connector_2.12:3.0.1,org.elasticsearch:elasticsearch-spark-30_2.12:7.14.2")
# Add Configuration for MongopDB
.config("spark.mongodb.input.uri", "mongodb://mongo:27017/test.coll")
.config("spark.mongodb.output.uri", "mongodb://mongo:27017/test.coll")
.getOrCreate()
)
sc = spark.sparkContext
sc.setLogLevel("ERROR")
print("\nPySpark initialized...")
# Load the on-time Parquet file
on_time_dataframe = spark.read.parquet('../data/january_performance.parquet')
on_time_dataframe.createOrReplaceTempView("on_time_performance")
total_flights = on_time_dataframe.count()
# Flights that were late leaving...
late_departures = on_time_dataframe.filter(on_time_dataframe.DepDelayMinutes > 0)
total_late_departures = late_departures.count()
# Flights that were late arriving...
late_arrivals = on_time_dataframe.filter(on_time_dataframe.ArrDelayMinutes > 0)
total_late_arrivals = late_arrivals.count()
# Flights that left late but made up time to arrive on time...
on_time_heros = on_time_dataframe.filter(
(on_time_dataframe.DepDelayMinutes > 0)
&
(on_time_dataframe.ArrDelayMinutes <= 0)
)
total_on_time_heros = on_time_heros.count()
# Get the percentage of flights that are late, rounded to 1 decimal place
pct_late = round((total_late_arrivals / (total_flights * 1.0)) * 100, 1)
print("Total flights: {:,}".format(total_flights))
print("Late departures: {:,}".format(total_late_departures))
print("Late arrivals: {:,}".format(total_late_arrivals))
print("Recoveries: {:,}".format(total_on_time_heros))
print("Percentage Late: {}%".format(pct_late))
# Get the average minutes late departing and arriving
spark.sql(
SELECT
ROUND(AVG(DepDelay),1) AS AvgDepDelay,
ROUND(AVG(ArrDelay),1) AS AvgArrDelay
FROM on_time_performance
).show()
late_flights = spark.sql(
SELECT
FlightDate,
ArrDelayMinutes,
WeatherDelay,
CarrierDelay,
NASDelay,
SecurityDelay,
LateAircraftDelay
FROM
on_time_performance
WHERE
WeatherDelay IS NOT NULL
OR
CarrierDelay IS NOT NULL
OR
NASDelay IS NOT NULL
OR
SecurityDelay IS NOT NULL
OR
LateAircraftDelay IS NOT NULL
ORDER BY
FlightDate
)
late_flights.sample(0.1).show(10)
# Calculate the percentage contribution to delay for each source
total_delays = spark.sql(
SELECT
ROUND(SUM(WeatherDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_weather_delay,
ROUND(SUM(CarrierDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_carrier_delay,
ROUND(SUM(NASDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_nas_delay,
ROUND(SUM(SecurityDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_security_delay,
ROUND(SUM(LateAircraftDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_late_aircraft_delay
FROM on_time_performance
)
total_delays.show()
import sys, os, re
import iso8601
import datetime
# Load the on-time Parquet file
on_time_dataframe = spark.read.parquet('../data/january_performance.parquet')
on_time_dataframe.registerTempTable("on_time_performance")
on_time_dataframe = on_time_dataframe.filter(on_time_dataframe.Month == '1')
# Select a few features of interest
simple_on_time_features = spark.sql(
SELECT
FlightNum,
FlightDate,
DayOfWeek,
DayofMonth AS DayOfMonth,
CONCAT(Month, '-', DayofMonth) AS DayOfYear,
Carrier,
Origin,
Dest,
Distance,
DepDelay,
ArrDelay,
CRSDepTime,
CRSArrTime
FROM on_time_performance
WHERE FlightDate < '2015-02-01'
)
simple_on_time_features.limit(5).toPandas()
# Sample 10% to make executable inside the notebook
# simple_on_time_features = simple_on_time_features.sample(False, 0.1)
# Filter nulls, they can't help us
print(f"Original feature records: {simple_on_time_features.count():,}")
# Three ways to access a DataFrame Column in PySpark
# "ArrDelay", F.col("ArrDelay"), df.ArrDelay
filled_on_time_features = simple_on_time_features.filter(
simple_on_time_features.ArrDelay.isNotNull()
&
simple_on_time_features.DepDelay.isNotNull()
)
print(f"Non-null feature records: {filled_on_time_features.count():,}")
# We need to turn timestamps into timestamps, and not strings or numbers
def convert_hours(hours_minutes):
hours = hours_minutes[:-2]
minutes = hours_minutes[-2:]
if hours == '24':
hours = '23'
minutes = '59'
time_string = "{}:{}:00Z".format(hours, minutes)
return time_string
def compose_datetime(iso_date, time_string):
return "{} {}".format(iso_date, time_string)
def create_iso_string(iso_date, hours_minutes):
time_string = convert_hours(hours_minutes)
full_datetime = compose_datetime(iso_date, time_string)
return full_datetime
def create_datetime(iso_string):
return iso8601.parse_date(iso_string)
def convert_datetime(iso_date, hours_minutes):
iso_string = create_iso_string(iso_date, hours_minutes)
dt = create_datetime(iso_string)
return dt
def day_of_year(iso_date_string):
dt = iso8601.parse_date(iso_date_string)
doy = dt.timetuple().tm_yday
return doy
def alter_feature_datetimes(row):
Process the DateTimes to handle overnight flights and day of year
flight_date = iso8601.parse_date(row['FlightDate'])
scheduled_dep_time = convert_datetime(row['FlightDate'], row['CRSDepTime'])
scheduled_arr_time = convert_datetime(row['FlightDate'], row['CRSArrTime'])
# Handle overnight flights
if scheduled_arr_time < scheduled_dep_time:
scheduled_arr_time += datetime.timedelta(days=1)
doy = day_of_year(row['FlightDate'])
return {
'FlightNum': row['FlightNum'],
'FlightDate': flight_date,
'DayOfWeek': int(row['DayOfWeek']),
'DayOfMonth': int(row['DayOfMonth']),
'DayOfYear': doy,
'Carrier': row['Carrier'],
'Origin': row['Origin'],
'Dest': row['Dest'],
'Distance': row['Distance'],
'DepDelay': row['DepDelay'],
'ArrDelay': row['ArrDelay'],
'CRSDepTime': scheduled_dep_time,
'CRSArrTime': scheduled_arr_time,
}
from pyspark.sql import Row
timestamp_features = filled_on_time_features.rdd.map(alter_feature_datetimes)
timestamp_features.first()
timestamp_df = timestamp_features.map(lambda x: Row(**x)).toDF()
# **{"name": "Russell"}
# name="Russell"
# a = ["Russell", "Jurney"]
# df.select(*a)
# df.select("Russell", "Jurney")
timestamp_df.limit(3).toPandas()
timestamp_df
# Explicitly sort the data and keep it sorted throughout.
# Leave nothing to chance.
sorted_features = timestamp_df.sort(
timestamp_df.DayOfYear,
timestamp_df.Carrier,
timestamp_df.Origin,
timestamp_df.Dest,
timestamp_df.FlightNum,
timestamp_df.CRSDepTime,
timestamp_df.CRSArrTime,
)
# Store as a single JSON file and bzip2 it
sorted_features.write.mode("overwrite").json("../data/simple_flight_delay_features.jsonl")
%%bash
du -sh ../data/simple_flight_delay_features.json*
echo ""
head -5 ../data/simple_flight_delay_features.jsonl/part-0000*
#
# {
# "ArrDelay":5.0,"CRSArrTime":"2015-12-31T03:20:00.000-08:00",
# "CRSDepTime":"2015-12-31T03:05:00.000-08:00",
# "Carrier":"WN","DayOfMonth":31,"DayOfWeek":4,
# "DayOfYear":365,"DepDelay":14.0,"Dest":"SAN",
# "Distance":368.0, "FlightDate":"2015-12-30T16:00:00.000-08:00",
# "FlightNum":"6109","Origin":"TUS"
# }
#
from pyspark.sql.types import (
StringType, IntegerType, FloatType, DateType, TimestampType,
StructType, StructField
)
schema = StructType([
StructField("ArrDelay", FloatType(), True), # "ArrDelay":5.0
StructField("CRSArrTime", TimestampType(), True), # "CRSArrTime":"2015-12..."
StructField("CRSDepTime", TimestampType(), True), # "CRSDepTime":"2015-12..."
StructField("Carrier", StringType(), True), # "Carrier":"WN"
StructField("DayOfMonth", IntegerType(), True), # "DayOfMonth":31
StructField("DayOfWeek", IntegerType(), True), # "DayOfWeek":4
StructField("DayOfYear", IntegerType(), True), # "DayOfYear":365
StructField("DepDelay", FloatType(), True), # "DepDelay":14.0
StructField("Dest", StringType(), True), # "Dest":"SAN"
StructField("Distance", FloatType(), True), # "Distance":368.0
StructField("FlightDate", DateType(), True), # "FlightDate":"2015-12..."
StructField("FlightNum", StringType(), True), # "FlightNum":"6109"
StructField("Origin", StringType(), True), # "Origin":"TUS"
])
features = spark.read.json(
"../data/simple_flight_delay_features.jsonl",
schema=schema
)
print(features.first())
features.limit(5).toPandas()
null_counts = [(column, features.where(features[column].isNull()).count()) \
for column in features.columns]
cols_with_nulls = filter(lambda x: x[1] > 0, null_counts)
print(list(cols_with_nulls))
filled_features = features.na.fill({'DepDelay': 0})
#
# Add a Route variable to replace FlightNum
#
features_with_route = features.withColumn(
'Route',
F.concat(
features.Origin,
F.lit('-'),
features.Dest
)
)
features_with_route.select("Origin", "Dest", "Route").show(5)
%matplotlib inline
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Look at overall histogram
data_tuple = (
features
.select("ArrDelay")
.rdd
.flatMap(lambda x: x)
.histogram([-87.0, -60, -30, -15, 0, 15, 30, 60, 120])
)
data_tuple
arr_delay = features.select("ArrDelay")
sample = arr_delay.filter(arr_delay.ArrDelay < 120).sample(0.10)
sample.toPandas().hist('ArrDelay', bins=20)
heights = np.array(data_tuple[1])
# The bins are 1 > length than the values
full_bins = data_tuple[0]
heights, full_bins
# Bars are drawn from the left
mid_point_bins = full_bins[:-1]
mid_point_bins
# The width of a bar should be the range it maps in the data
widths = [abs(i - j) for i, j in zip(full_bins[:-1], full_bins[1:])]
widths
# And now the bars should plot nicely
bar = plt.bar(mid_point_bins, heights, width=widths, color='b')
def create_hist(rdd_histogram_data):
Given an RDD.histogram, plot a pyplot histogram
heights = np.array(rdd_histogram_data[1])
full_bins = rdd_histogram_data[0]
mid_point_bins = full_bins[:-1]
widths = [abs(i - j) for i, j in zip(full_bins[:-1], full_bins[1:])]
bar = plt.bar(mid_point_bins, heights, width=widths, color='b')
return bar
%matplotlib inline
buckets = [-87.0, 15, 60, 200]
rdd_histogram_data = features\
.select("ArrDelay")\
.rdd\
.flatMap(lambda x: x)\
.histogram(buckets)
create_hist(rdd_histogram_data)
%matplotlib inline
buckets = [-87.0, -30, -15, 0, 15, 30, 120]
rdd_histogram_data = (
features
.select("ArrDelay")
.rdd
.flatMap(lambda x: x)
.histogram(buckets)
)
create_hist(rdd_histogram_data)
%matplotlib inline
buckets = [-87.0, -15, 0, 15, 30, 120]
rdd_histogram_data = (
features
.select("ArrDelay")
.rdd
.flatMap(lambda x: x)
.histogram(buckets)
)
create_hist(rdd_histogram_data)
%matplotlib inline
buckets = [-87.0, -15, 0, 30, 120]
rdd_histogram_data = (
features
.select("ArrDelay")
.rdd
.flatMap(lambda x: x)
.histogram(buckets)
)
create_hist(rdd_histogram_data)
#
# Categorize or 'bucketize' the arrival delay field using a DataFrame UDF
#
@F.udf(StringType())
def bucketize_arr_delay(arr_delay: float) -> float:
Convert the numeric delays into buckets
bucket = None
if arr_delay <= -15.0:
bucket = 0.0
elif arr_delay > -15.0 and arr_delay <= 0.0:
bucket = 1.0
elif arr_delay > 0.0 and arr_delay <= 15.0:
bucket = 2.0
elif arr_delay > 15.0 and arr_delay <= 30.0:
bucket = 3.0
elif arr_delay > 30.0:
bucket = 4.0
return bucket
# Wrap the function in pyspark.sql.functions.udf with
# pyspark.sql.types.StructField information
from pyspark.sql.functions import udf
# dummy_function_udf = udf(bucketize_arr_delay, )
# Add a category column via pyspark.sql.DataFrame.withColumn
manual_bucketized_features = features_with_route.withColumn(
"ArrDelayBucket",
bucketize_arr_delay(features['ArrDelay'])
)
manual_bucketized_features.select("ArrDelay", "ArrDelayBucket").limit(10).toPandas()
#
# Use pysmark.ml.feature.Bucketizer to bucketize ArrDelay
#
from pyspark.ml.feature import Bucketizer
splits = [-float("inf"), -15.0, 0, 15.0, 30.0, float("inf")]
bucketizer = Bucketizer(
splits=splits,
inputCol="ArrDelay",
outputCol="ArrDelayBucket"
)
ml_bucketized_features = bucketizer.transform(features_with_route)
# Check the buckets out
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").limit(10).toPandas()
ml_bucketized_features.limit(3).toPandas()
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into categoric feature vectors, then drop
# intermediate fields
for column in ["Carrier", "DayOfMonth", "DayOfWeek", "DayOfYear",
"Origin", "Dest", "Route"]:
string_indexer = StringIndexer(
inputCol=column,
outputCol=column + "_index"
)
ml_bucketized_features = (
string_indexer.fit(ml_bucketized_features)
.transform(ml_bucketized_features)
)
# Check out the indexes
ml_bucketized_features.limit(5).toPandas()
# Handle continuous numeric fields by combining them into one feature vector
numeric_columns = ["DepDelay", "Distance"]
index_columns = [
"Carrier_index",
"DayOfMonth_index",
"DayOfWeek_index",
"DayOfYear_index",
"Origin_index",
"Origin_index",
"Dest_index",
"Route_index"
]
vector_assembler = VectorAssembler(
inputCols=numeric_columns + index_columns,
outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Drop the index columns
for column in index_columns:
final_vectorized_features = final_vectorized_features.drop(column)
# Check out the features
final_vectorized_features.limit(5).toPandas()
final_vectorized_features.select("Features_vec").show(10, False)
final_vectorized_features = final_vectorized_features.filter(final_vectorized_features.FlightDate < '2015-02-01')
# Test/train split
training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2], seed=31337)
training_data.count(), test_data.count()
# Instantiate and fit random forest classifier
from pyspark.ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
featuresCol="Features_vec",
labelCol="ArrDelayBucket",
maxBins=4657,
maxMemoryInMB=1024,
seed=31337
)
model = rfc.fit(training_data)
model
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Evaluate model using test data
predictions = model.transform(test_data)
(
predictions.select(
"ArrDelayBucket",
"Features_vec",
"rawPrediction",
"probability",
"prediction"
)
.sample(0.001)
.limit(5)
.toPandas()
)
evaluator = MulticlassClassificationEvaluator(
labelCol="ArrDelayBucket", metricName="accuracy"
)
accuracy = evaluator.evaluate(predictions)
print("Accuracy = {}".format(accuracy))
evaluator = MulticlassClassificationEvaluator(
labelCol="ArrDelayBucket", metricName="f1"
)
accuracy = evaluator.evaluate(predictions)
print("F1 = {}".format(accuracy))
# Sanity-check a sample
predictions.sample(False, 0.001, 18).orderBy("CRSDepTime").limit(5).toPandas()
predictions.groupBy("prediction").count().orderBy('prediction').toPandas()
predictions.sample(0.001).sort("CRSDepTime").select("ArrDelayBucket","prediction").limit(20).toPandas()
# Handle continuous numeric fields by combining them into one feature vector
numeric_columns = ["Distance"]
index_columns = [
"Carrier_index",
"DayOfMonth_index",
"DayOfWeek_index",
"DayOfYear_index",
"Origin_index",
"Origin_index",
"Dest_index",
"Route_index"
]
vector_assembler = VectorAssembler(
inputCols=numeric_columns + index_columns,
outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Drop the index columns
for column in index_columns:
final_vectorized_features = final_vectorized_features.drop(column)
# Check out the features
final_vectorized_features.limit(5).toPandas()
from pyspark.ml import Pipeline
from pyspark.ml.regression import GBTRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
# Train a GBT model.
gbt = GBTRegressor(
featuresCol="Features_vec",
labelCol="DepDelay",
maxIter=10,
maxBins=4657
)
gbt_model = gbt.fit(final_vectorized_features)
gbt_model
predictions = gbt_model.transform(final_vectorized_features)
error = predictions.select(
"Origin",
"Dest",
"Carrier",
"DepDelay",
"prediction",
(F.col("DepDelay") - F.col("prediction")).alias("Error")
)
(
error
.sample(False, 0.001, 10)
.orderBy("CRSDepTime")
.limit(10)
.toPandas()
)
import pyspark.sql.functions as F
(
error.select(
F.percentile_approx("Error", 0.5, 10000).alias("Median Error"),
F.stddev("Error").alias("STD Error"),
F.mean("Error").alias("Average Error")
)
.toPandas()
)
# Trim our columns and shorten our DataFrame/column names for brevity
p = predictions.select(
predictions.ArrDelayBucket.alias("actual"),
predictions.prediction
)
# Get a list of all labels in the training data
buckets_df = p.groupBy("actual").count()
buckets = buckets_df.rdd.map(lambda x: x.actual).collect()
buckets
# Now compute the confusion matrix, where: "Each element i,j of the matrix would be
# the number of items with true class i that were classified as being in class j."
rows = []
for actual in buckets:
column = []
for prediction in buckets:
value = p.filter(p.actual == actual).filter(p.prediction == prediction).count()
column.append(value)
rows.append(column)
rows
%matplotlib inline
conf_arr = np.array(rows)
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
fig = plt.figure(figsize=(8, 8))
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap='summer',
interpolation='nearest')
width, height = conf_arr.shape
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
cb = fig.colorbar(res)
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
plt.xticks(range(width), ['0','1','2','3','4'])#alphabet[:width])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Model Selection On The ASM Features Using GridSearchCV
Step2: 2.1 Gradient Boosting
Step3: 2.2.2 Grid Search
Step4: 2.3 Extra Trees Classifier
Step5: 2.3.2 Grid Search
Step6: 3. Model selection On The Byte Features Using GridSearchCV
Step7: 3.1 Ridge Classifier
Step8: 3.2 Support Vector Machine
Step9: 4. Model Selection On The Combined Training ASM/Byte Data Using GridSearchCV
Step10: 4.1 Ridge Classifier
Step11: 4.2 Extra Trees Classifier
Step12: 4.3 Gradient Boost
Step13: 5. Run ExtraTreeClassifiers With 10-Fold Cross Validation
Step14: 6. GridSearchCV with XGBoost on All Combined ASM and Call Graph Features.
Step15: 7. Summary Of Results
Step16: 8. Test/Experimental Code Only
|
<ASSISTANT_TASK:>
Python Code:
# First load the .asm and .byte training data and training labels
# sorted_train_data_asm = pd.read_csv('data/sorted-train-malware-features-asm-reduced.csv')
# sorted_train_data_byte = pd.read_csv('data/sorted-train-malware-features-byte.csv')
sorted_train_labels = pd.read_csv('data/sorted-train-labels.csv')
combined_train_data = pd.read_csv('data/final-combined-train-data-30percent.csv')
combined_test_data = pd.read_csv('data/final-combined-test-data-30percent.csv')
call_graph_features_train = pd.read_csv('data/final-call-graph-features-10percent.csv')
sorted_train_labels.head()
combined_train_data.head()
combined_test_data.head()
# Utility function to report best scores
from operator import itemgetter
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(score.mean_validation_score, np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
def run_cv(X,y, clf):
# Construct a kfolds object
kf = KFold(len(y),n_folds=10,shuffle=True)
y_prob = np.zeros((len(y),9))
y_pred = np.zeros(len(y))
# Iterate through folds
for train_index, test_index in kf:
print(test_index, train_index)
X_train = X.loc[train_index,:]
X_test = X.loc[test_index,:]
y_train = y[train_index]
clf.fit(X_train, y_train.flatten()) # use flatten to get rid of data conversion warnings
y_prob[test_index] = clf.predict_proba(X_test)
y_pred[test_index] = clf.predict(X_test)
#print(clf.get_params())
return y_prob, y_pred
# Assign asm data to X,y for brevity, then split the dataset in two equal parts.
X = combined_train_data.iloc[:,1:]
y = np.array(sorted_train_labels.iloc[:,1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
X_train.shape
plt.figure(figsize=(15,15))
plt.xlabel("EDX Register")
plt.ylabel("Malware Class")
xa = np.array(X['edx'])
xb = np.array(X['esi'])
ya = np.array(y)
plt.scatter(xa,ya,c=ya,cmap='brg')
plt.figure(figsize=(15,15))
plt.xlabel("EDX Register")
plt.ylabel("ESI Register")
xa = np.array(X['edx'])
xb = np.array(X['esi'])
ya = np.array(y)
plt.scatter(xa,xb,c=ya,cmap='brg')
X_means = X.mean()
X_std = X.std()
X_var = X.var()
X_cov = X.cov()
X_means.head()
X_std.head()
X_var.head()
X_cov.head()
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
print("# Tuning hyper-parameters for SVC")
print()
clfrand = RandomizedSearchCV(SVC(C=1), tuned_parameters, cv=10)
start = time()
clfrand.fit(X_train, y_train)
print("Best parameters set found on training set:")
print()
print(clfrand.best_params_)
print()
print("Grid scores on training set:")
print()
report(clfrand.grid_scores_)
print()
print("Classification report:")
print("SVC took {:.2f} seconds for {:d} candidates.".format(((time() - start), n_iter_search)))
print()
y_true, y_pred = y_test, clfrand.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
print("# Tuning hyper-parameters for SVC")
print()
clfgrid = GridSearchCV(SVC(C=1), tuned_parameters, cv=10, n_jobs=4)
start = time()
clfgrid.fit(X_train, y_train)
print("Best parameters set found on training set:")
print()
print(clfgrid.best_params_)
print()
print("Grid scores on training set:")
print()
report(clfgrid.grid_scores_)
print()
print("Classification report:")
print("SVC took {:.2f} seconds for {:d} candidates.".format(((time() - start), n_iter_search)))
print()
y_true, y_pred = y_test, clfgrid.predict(X_test)
print(classification_report(y_true, y_pred))
print()
clfextra1 = ExtraTreesClassifier(n_jobs=4)
# use a random grid over parameters, most important parameters are n_estimators (larger is better) and
# max_features (for classification best value is square root of the number of features)
# Reference: http://scikit-learn.org/stable/modules/ensemble.html
param_dist = {"n_estimators": [100, 500, 1000],
"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clfextra1, param_distributions=param_dist, n_iter=n_iter_search)
start = time()
random_search.fit(X_train, y_train)
print("ExtraTreesClassifier - RandomizedSearchCV:")
print(" ")
print("Best parameters set found on training set:")
print(" ")
print(random_search.best_params_)
print(" ")
print("Grid scores on training set:")
print(" ")
report(random_search.grid_scores_)
print(" ")
print("Classification report:")
print("RandomizedSearchCV took {:.2f} seconds for {:d} candidates.".format((time() - start), n_iter_search))
print(" ")
y_pred = random_search.predict(X_test)
print(classification_report(y_test, y_pred))
print(" ")
y_prob = random_search.predict_proba(X_test)
print("logloss = {:.3f}".format(log_loss(y_test, y_prob)))
print("score = {:.3f}".format(accuracy_score(y_test, y_pred)))
cm = confusion_matrix(y_test, y_pred)
print(cm)
clfextra2 = ExtraTreesClassifier(n_jobs=4)
# use a full grid over all parameters, most important parameters are n_estimators (larger is better) and
# max_features (for classification best value is square root of the number of features)
# Reference: http://scikit-learn.org/stable/modules/ensemble.html
param_grid = {"n_estimators": [100, 500, 1000, 2000],
"max_depth": [None],
"max_features": [20],
"min_samples_split": [1],
"min_samples_leaf": [1],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clfextra2, param_grid=param_grid, cv=10)
start = time()
grid_search.fit(X_train, y_train)
print("ExtraTreesClassifier - GridSearchCV:")
print(" ")
print("Best parameters set found on training set:")
print(" ")
print(grid_search.best_params_)
print(" ")
print("Grid scores on training set:")
print(" ")
report(grid_search.grid_scores_)
print(" ")
print("Classification report:")
print("GridSearchCV took {:.2f} seconds.".format((time() - start)))
print(" ")
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X_test)
print("logloss = {:.3f}".format(log_loss(y_test, y_prob)))
print("score = {:.3f}".format(accuracy_score(y_train, y_pred)))
cm = confusion_matrix(y_test, y_pred)
print(cm)
print("Classification report:")
print("GridSearchCV took {:.2f} seconds.".format(time() - start))
print(" ")
y_pred = grid_search.predict(X_test)
print(classification_report(y_test, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X_test)
print("logloss = {:.3f}".format(log_loss(y_test, y_prob)))
print("score = {:.3f}".format(accuracy_score(y_test, y_pred)))
cm = confusion_matrix(y_test, y_pred)
print(cm)
# Assign byte data to X,y for brevity, then split the dataset in two equal parts.
X = sorted_train_data_byte.iloc[:,1:]
y = np.array(sorted_train_labels.iloc[:,1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
plt.figure(figsize=(15,15))
plt.xlabel("File Entropy")
plt.ylabel("Malware Class")
xa = np.array(X['entropy'])
xb = np.array(X['filesize'])
ya = np.array(y)
plt.scatter(xa,ya,c=ya,cmap='brg')
plt.figure(figsize=(15,15))
plt.xlabel("File Size")
plt.ylabel("Malware Class")
plt.scatter(xb,ya,c=ya,cmap='brg')
plt.figure(figsize=(15,15))
plt.xlabel("File Size")
plt.ylabel("Shannon's Entropy")
#colors = cm.rainbow(np.linspace(0, 1, len(ya)))
plt.scatter(xb,xa,c=ya,cmap='brg')
clfridge = RidgeClassifierCV(cv=10)
clfridge.fit(X_train, y_train)
y_pred = clfridge.predict(X_test)
print(classification_report(y_test, y_pred))
print(" ")
print("score = {:.3f}".format(accuracy_score(y_train, y_pred)))
cm = confusion_matrix(y_test, y_pred)
print(cm)
clfextra = ExtraTreesClassifier(n_jobs=4)
# use a full grid over all parameters, most important parameters are n_estimators (larger is better) and
# max_features (for classification best value is square root of the number of features)
# Reference: http://scikit-learn.org/stable/modules/ensemble.html
param_grid = {"n_estimators": [1000, 2000],
"max_depth": [3, None],
"max_features": [1, 2],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clfextra, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("ExtraTreesClassifier - GridSearchCV:")
print(" ")
print("Best parameters set found on training set:")
print(" ")
print(grid_search.best_params_)
print(" ")
print("Grid scores on training set:")
print(" ")
report(grid_search.grid_scores_)
print(" ")
print("Classification report:")
print("GridSearchCV took {:.2f} seconds.".format((time() - start)))
print(" ")
y_pred = grid_search.predict(X)
print(classification_report(y, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X)
print("logloss = {:.3f}".format(log_loss(y, y_prob)))
print("score = {:.3f}".format(accuracy_score(y, y_pred)))
cm = confusion_matrix(y, y_pred)
print(cm)
# Assign byte data to X,y for brevity, then split the dataset in two equal parts.
X = combined_train_data.iloc[:,1:]
y = np.array(sorted_train_labels.iloc[:,1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)
from sklearn.linear_model import RidgeClassifierCV
clfridge = RidgeClassifierCV(cv=10)
clfridge.fit(X_train, y_train)
y_pred = clfridge.predict(X_test)
print(classification_report(y_test, y_pred))
print(" ")
print("score = {:.3f}".format(accuracy_score(y_train, y_pred)))
cm = confusion_matrix(y_test, y_pred)
print(cm)
clf1 = ExtraTreesClassifier(n_estimators=1000, max_features=None, min_samples_leaf=1, min_samples_split=9, n_jobs=4, criterion='gini')
p1, pred1 = run_cv(X,y,clf1)
print("logloss = {:.3f}".format(log_loss(y, p1)))
print("score = {:.3f}".format(accuracy_score(y, pred1)))
cm = confusion_matrix(y, pred1)
print(cm)
X = combined_train_data.iloc[:,1:]
ylabels = sorted_train_labels.iloc[:,1:]
y = np.array(ylabels - 1)
y = y.flatten()
y
xgclf = xgb.XGBClassifier(objective="multi:softprob", nthread=4)
params = {"n_estimators": [1000, 2000],
"max_depth": [5, 10],
"learning_rate": [0.1, 0.05]}
# run grid search
grid_search = GridSearchCV(xgclf, param_grid=params)
start = time()
grid_search.fit(X, y)
print("XGBoost Classifier - GridSearchCV:")
print(" ")
print("Best parameters set found on training set:")
print(" ")
print(grid_search.best_params_)
print(" ")
print("Grid scores on training set:")
print(" ")
report(grid_search.grid_scores_)
print(" ")
print("Classification report:")
print("GridSearchCV took {:.2f} seconds.".format((time() - start)))
print(" ")
y_pred = grid_search.predict(X)
print(classification_report(y, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X)
print("logloss = {:.3f}".format(log_loss(y, y_prob)))
print("score = {:.3f}".format(accuracy_score(y, y_pred)))
cm = confusion_matrix(y, y_pred)
print(cm)
print("GridSearchCV took {:.2f} seconds.".format((time() - start)))
print(" ")
y_pred = grid_search.predict(X)
print(classification_report(y, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X)
print("logloss = {:.3f}".format(log_loss(y, y_prob)))
print("score = {:.3f}".format(accuracy_score(y, y_pred)))
cm = confusion_matrix(y, y_pred)
print(cm)
# Now try with best parameters and 50/50 train-test split
xgclf = xgb.XGBClassifier(n_estimators=1000, max_depth=10, learning_rate=0.01,objective="multi:softprob", nthread=4)
prob1, pred1 = run_cv(X_train, y_train, xgclf)
print("logloss = {:.3f}".format(log_loss(y_train, prob1)))
print("score = {:.3f}".format(accuracy_score(y_train, pred1)))
cm = confusion_matrix(y_train, pred1)
print(cm)
pred2 = xgclf.predict(X_test)
prob2 = xgclf.predict_proba(X_test)
print("logloss = {:.3f}".format(log_loss(y_test, prob2)))
print("score = {:.3f}".format(accuracy_score(y_test, pred2)))
cm = confusion_matrix(y_test, pred2)
print(cm)
xgclf = xgb.XGBClassifier(n_estimators=1000, max_depth=10, learning_rate=0.1,objective="multi:softprob", nthread=4)
prob1, pred1 = run_cv(X,y,xgclf)
print("logloss = {:.3f}".format(log_loss(y, prob1)))
print("score = {:.3f}".format(accuracy_score(y, pred1)))
cm = confusion_matrix(y, pred1)
print(cm)
help(xgb)
help(ExtraTreesClassifier)
ytrain = np.array(y)
X = data_reduced.iloc[:,1:]
X.shape
clf1 = ExtraTreesClassifier(n_estimators=1000, max_features=None, min_samples_leaf=1, min_samples_split=9, n_jobs=4, criterion='gini')
p1, pred1 = run_cv(X,ytrain,clf1)
print "logloss = %.3f" % log_loss(y, p1)
print "score = %.3f" % accuracy_score(ytrain, pred1)
cm = confusion_matrix(y, pred1)
print(cm)
clf2 = ExtraTreesClassifier(n_estimators=500, max_features=None, min_samples_leaf=1, min_samples_split=9, n_jobs=4, criterion='gini')
p2, pred2 = run_cv(X,ytrain,clf2)
print "logloss = %.3f" % log_loss(y, p2)
print "score = %.3f" % accuracy_score(ytrain, pred2)
cm = confusion_matrix(y, pred2)
print(cm)
clf3 = ExtraTreesClassifier(n_estimators=250, max_features=None, min_samples_leaf=1, min_samples_split=9, n_jobs=4, criterion='gini')
p3, pred3 = run_cv(X,ytrain,clf3)
print "logloss = %.3f" % log_loss(y, p3)
print "score = %.3f" % accuracy_score(ytrain, pred3)
cm = confusion_matrix(y, pred3)
print(cm)
clf4 = ExtraTreesClassifier(n_estimators=2000, max_features=None, min_samples_leaf=2, min_samples_split=3, n_jobs=4, criterion='gini')
p4, pred4 = run_cv(X,ytrain,clf4)
print "logloss = %.3f" % log_loss(y, p4)
print "score = %.3f" % accuracy_score(ytrain, pred4)
cm = confusion_matrix(y, pred4)
print(cm)
clf5 = ExtraTreesClassifier(n_estimators=1000, n_jobs=4, criterion='gini')
p5, pred5 = run_cv(X,ytrain,clf5)
print "logloss = %.4f" % log_loss(y, p5)
print "score = %.4f" % accuracy_score(ytrain, pred5)
cm = confusion_matrix(y, pred5)
print(cm)
clf6 = ExtraTreesClassifier(n_estimators=2000, n_jobs=4, criterion='gini')
p6, pred6 = run_cv(X,ytrain,clf6)
print "logloss = %.4f" % log_loss(y, p6)
print "score = %.4f" % accuracy_score(ytrain, pred6)
cm = confusion_matrix(y, pred6)
print(cm)
data = pd.read_csv('data/all-combined-train-data-final.csv')
labels = pd.read_csv('data/sorted-train-labels.csv')
data.head(20)
X = data.iloc[:,1:]
ylabels = labels.iloc[:,1:].values
y = np.array(ylabels - 1).flatten() # numpy arrays are unloved in many places.
y
labels.head()
xgclf = xgb.XGBClassifier(objective="multi:softprob", nthread=4)
params = {"n_estimators": [1000, 2000],
"max_depth": [5, 10],
"learning_rate": [0.1, 0.05]}
# run grid search
grid_search = GridSearchCV(xgclf, param_grid=params)
start = time()
grid_search.fit(X, y)
print("XGBoost Classifier - GridSearchCV:")
print(" ")
print("Best parameters set found on training set:")
print(" ")
print(grid_search.best_params_)
print(" ")
print("Grid scores on training set:")
print(" ")
report(grid_search.grid_scores_)
print(" ")
print("Classification report:")
print("GridSearchCV took {:.2f} seconds.".format((time() - start)))
print(" ")
y_pred = grid_search.predict(X)
print(classification_report(y, y_pred))
print(" ")
y_prob = grid_search.predict_proba(X)
print("logloss = {:.3f}".format(log_loss(y, y_prob)))
print("score = {:.3f}".format(accuracy_score(y, y_pred)))
cm = confusion_matrix(y, y_pred)
print(cm)
# TODO:
# go through the features and delete any that sum to less than 200
colsum = X.sum(axis=0, numeric_only=True)
zerocols = colsum[(colsum[:] == 0)]
zerocols
zerocols = colsum[(colsum[:] < 110)]
zerocols.shape
reduceX = X
for col in reduceX.columns:
if sum(reduceX[col]) < 100:
del reduceX[col]
reduceX.shape
skb = SelectKBest(chi2, k=20)
X_kbestnew = skb.fit_transform(X, y)
X_kbestnew.shape
combined_train_data.loc[combined_train_data['filename'] == '4jKA1GUDv6TMNpPuIxER',:]
# Get an array of labels in the same order as the asm filenames
# y = [0]*labels.shape[0]
# fnames = train_data_asm['filename']
# for i in range(len(y)):
# fname = train_data_asm.loc[i,'filename']
# row = labels[labels['Id'] == fname]
# y[i] = row.iloc[0,1]
train_data_byte[train_data_byte.loc[:,'filename']=='4jKA1GUDv6TMNpPuIxER']
count = 0
for i in range(len(y)):
if y[i] == 0:
count += 1
print(count)
count = 0
for i in range(len(sorted_train_labels)):
if sorted_train_labels.iloc[i,1] == 0:
count += 1
print(count)
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
type(X)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PHOEBE allows you to check for online updates to local install passbands. If we add a new atmosphere table or feature (extinction as in this release, for example) to a passband table, you can now update directly from the python interface of PHOEBE.
Step2: If there are updates available that you'd like to apply, you can apply them all via phoebe.update_all_passbands
Step3: To update a single passband to the latest online version with the same contents as the locally installed version (or with new tables, see the content argument), call phoebe.update_passband.
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
print(phoebe.list_installed_passbands())
print(phoebe.list_all_update_passbands_available())
print(phoebe.update_passband_available('Johnson:V'))
phoebe.update_all_passbands()
phoebe.update_passband('Johnson:V')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Obtaining the sequence annotation
Step2: Obtaining the clone sequence
Step3: Functions to plot interesting positions and gene boundaries
Step4: Functions to detect variants that differ from the clone and increase in frequency
Step5: Reading all data
Step6: Positions that increase in frequency
Step7: Control, replicate D
Step8: Control, replicate E
Step9: TLR3 treatment
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
from array import array
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats import linregress
%matplotlib inline
begins=[]
ends=[]
names =[]
with open ("sequence.gb") as f:
in_pep = False
for l in f:
if "mat_peptide" in l:
begins.append(int(l.split()[1].split("..")[0]))
ends.append(int(l.split()[1].split("..")[1]))
in_pep = True
elif in_pep :
names.append(l.split("=")[1])
in_pep = False
print(begins)
print(ends)
print(names)
file = "cloneSequence/SP6-ZIKV_seq_only.txt"
clone = ""
with open (file) as f:
for l in f:
if ">" in l:
pass
else:
clone +=l.strip()
# Interesting positions
positions=[316,1670,1785,2340,5935,7172,8449,9165]
def plot_positions():
for x in positions:
plt.axvline(x=x, linewidth=1, linestyle=':')
def plot_genes():
for i in range(len(begins)):
plt.plot([begins[i], begins[i]], [0.99,1.0], linewidth=2, linestyle='-', color="black")
if i%2==0:
plt.text (begins[i] + ((ends[i] - begins[i])/10), 1.005, (names[i].replace('"', ''))[0:3], size='xx-small')
else:
plt.text (begins[i] + ((ends[i] - begins[i])/10), 1.015, (names[i].replace('"', ''))[0:3], size='xx-small')
plt.plot([ends[-1], ends[-1]], [0.99,1.0], linewidth=2, linestyle='-', color="black")
def is_increasing(minor_frequencies):
#print(minor_frequencies)
previous = minor_frequencies[0]
for m in range(1,len(minor_frequencies)):
if previous < minor_frequencies[m]:
#print(str(previous) + " < " + str(minor_frequencies[m]))
previous = minor_frequencies[m]
else:
return False
return True
def get_variant_frequency(variant, table, i):
sum_of_bases = table['As_quality_corrected'][i]+table['Cs_quality_corrected'][i]+table['Gs_quality_corrected'][i]+table['Ts_quality_corrected'][i]+table['Ns_quality_corrected'][i]
if variant == "A":
return table["As_quality_corrected"][i] / sum_of_bases
elif variant == "C":
return table["Cs_quality_corrected"][i] / sum_of_bases
elif variant == "G":
return table["Gs_quality_corrected"][i] / sum_of_bases
elif variant == "T":
return table["Ts_quality_corrected"][i] / sum_of_bases
else:
return np.nan
def get_increasing_variants(tables, clone):
num_tables = len(tables)
first = tables[0]
last = tables[num_tables-1]
major = ""
minor = ""
major_frequencies = array('d',[0.0]*num_tables)
minor_frequencies = array('d',[0.0]*num_tables)
increasingVariants = dict()
for i in first["Position"]:
major = clone[i] #first["Major_variant"][i]
#print(last['Major_variant_frequency_quality_corrected'][i])
major_frequencies[0] = get_variant_frequency(major, first, i)
if major == last["Major_variant"][i]:
minor = last["Second_variant"][i]
else:
minor = last["Major_variant"][i]
minor_frequencies[0] = get_variant_frequency(minor, first, i)
for table_id in range(1, num_tables):
major_frequencies[table_id] = get_variant_frequency(major, tables[table_id], i)
minor_frequencies[table_id] = get_variant_frequency(minor, tables[table_id], i)
if is_increasing(minor_frequencies):
increasingVariants[i] = [major, minor, major_frequencies.tolist(), minor_frequencies.tolist()]
return increasingVariants
def print_variants(dict_variants):
print("Position\tclone base\tincreasing variant\tFinal frequency")
for k in dict_variants.keys():
print(str(k)+"\t"+dict_variants[k][0]+"\t"+dict_variants[k][1]+"\t"+str(dict_variants[k][3][-1]))
# CirSeq initial sample
cirseq = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1CirseqD3_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
# Control runs, replicate A
DD3_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD3A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD6_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD6A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD9_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD9A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD12_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD12A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD24_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD24A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD51_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD51A_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
DD51_A_no_reamp = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD51Anoreamplification_1_sequence.txt.assembled.fastq_mapped_AA.csv", na_values=" -nan")
# Control runs, replicate D
DD3_D = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD3D_1_sequence.txt.assembled.fastq_mapped_AA.csv")
DD6_D = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD6D_1_sequence.txt.assembled.fastq_mapped_AA.csv")
DD9_D = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD9D_1_sequence.txt.assembled.fastq_mapped_AA.csv")
DD12_D = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD12D_1_sequence.txt.assembled.fastq_mapped_AA.csv")
DD24_D = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD24D_1_sequence.txt.assembled.fastq_mapped_AA.csv")
# Control runs, replicate E
DD6_E = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD6E_1_sequence.txt.assembled.fastq_mapped_AA.csv")
DD9_E = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1DD9E_1_sequence.txt.assembled.fastq_mapped_AA.csv")
# TLR3 activation runs, replicate A
TD9_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1TD9A_1_sequence.txt.assembled.fastq_mapped_AA.csv")
TD12_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1TD12A_1_sequence.txt.assembled.fastq_mapped_AA.csv")
TD24_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1TD24A_1_sequence.txt.assembled.fastq_mapped_AA.csv")
TD51_A = pd.read_csv ("HV5GLBCXY_ZIKV_17s006139-1-1_DREUX_lane1TD51A_1_sequence.txt.assembled.fastq_mapped_AA.csv")
#DD3_A.describe(include='all')
tables_A = [DD3_A, DD6_A, DD9_A, DD12_A, DD24_A, DD51_A]
increasing_A = get_increasing_variants(tables_A, clone)
print("There are "+str(len(increasing_A))+" positions that rise in frequency.")
print("Those are:")
print_variants(increasing_A)
tables_D = [DD3_D, DD6_D, DD9_D, DD12_D, DD24_D]
increasing_D = get_increasing_variants(tables_D, clone)
print("There are "+str(len(increasing_D))+" positions that rise in frequency.")
print("Those are:")
print_variants(increasing_D)
tables_E = [DD6_E, DD9_E]
increasing_E = get_increasing_variants(tables_E, clone)
print("There are "+str(len(increasing_E))+" positions that rise in frequency.")
print("There are too many of them, we choose not to print them.")
tables_TA = [TD9_A, TD12_A, TD24_A, TD51_A]
increasing_TA = get_increasing_variants(tables_TA, clone)
print("There are "+str(len(increasing_TA))+" positions that rise in frequency.")
print("Those are:")
print_variants(increasing_TA)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters, categories and scores
Step2: Weighting
Step3: The new_snow_24_72h_scores are used to weight the new_snow_24h_scores prior to multiplying it with wind_speed_score.
Step4: New snow 24-72 h
Step5: Wind speed
Step6: New snow vs. wind speed
Step7: ToDo
Step8: the dict is not sorted and the comparison less than is random...
|
<ASSISTANT_TASK:>
Python Code:
# -*- coding: utf-8 -*-
%matplotlib inline
from __future__ import print_function
import pylab as plt
import datetime
import numpy as np
plt.rcParams['figure.figsize'] = (14, 6)
# New snow amount last 24 h 0-60 cm [10 cm intervals]
new_snow_24h_cat = np.array([0, 10, 20, 30, 40, 50, 60])
new_snow_24h_score = np.array([0.1, 0.5, 1.1, 1.3, 1.4, 1.8, 2.0])
# Wind speed 0-100 km/h [0,10,20,30,40,50,60,80,100]
wind_speed_km_cat = np.array([-5, 0, 10, 20, 30, 40, 50, 60, 80, 100, 150])
wind_speed_cat = wind_speed_km_cat / 3.6 # m/s
wind_speed_score = np.array([0.0, 1.8, 2.8, 3.3, 2.6, 1.2, 0.6, 0.3, 0.15, 0.07, 0.0])
# New snow amount last 24-72h 0-100 cm [0,10,20,30,40,50,60,80,100]
new_snow_24_72h_cat = np.array([0, 10, 20, 30, 40, 50, 60, 80, 100])
new_snow_24_72h_score = np.array([0.8, 1.0, 1.1, 1.2, 1.4, 1.6, 1.8, 2.1, 2.5]) # a weight for new_snow_24h
# Evolution of temperature
evolution_temperature_cat = ["constant very cold", "constant cold", "constant warm", "rise towards 0 deg after snowfall", "substantial cooling after snowfall"]
# Bonding to existing snowpack
bonding_existing_snowpack_cat = ["favorable", "moderate", "poor"]
# Type of new snow
type_new_snow_cat = ["loose-powder", "soft", "packed", "packed and moist"]
new_snow_24h_fit = np.polyfit(new_snow_24h_cat, new_snow_24h_score, 2)
score_new_snow_24h = np.poly1d(new_snow_24h_fit)
x = np.arange(0, 60.0)
res = score_new_snow_24h(x)
plt.scatter(new_snow_24h_cat, new_snow_24h_score)
plt.plot(x, res)
new_snow_24_72h_fit = np.polyfit(new_snow_24_72h_cat, new_snow_24_72h_score, 1)
score_new_snow_24_72h = np.poly1d(new_snow_24_72h_fit)
x = np.arange(0, 100.0)
res = score_new_snow_24_72h(x)
plt.scatter(new_snow_24_72h_cat, new_snow_24_72h_score)
plt.plot(x, res)
wind_speed_fit = np.polyfit(wind_speed_cat, wind_speed_score, 5)
score_wind_speed = np.poly1d(wind_speed_fit)
x = np.arange(-5, 150.0 / 3.6)
res = score_wind_speed(x)
plt.scatter(wind_speed_cat, wind_speed_score)
plt.plot(x, res)
new_snow = np.matrix(np.arange(0, 60.0))
sns = score_new_snow_24h(new_snow)
# weighted by new snow amount of the previous two days
new_snow_72 = 40
ns_weight = score_new_snow_24_72h(new_snow_72)
sns *= ns_weight
wind_speed = np.matrix(np.arange(0, 100.0 / 3.6))
swp = score_wind_speed(wind_speed)
M = np.multiply(sns, swp.T)
#print(M)
plt.contourf(M)#np.flipud(M.T))
print("Min {0}; Max {1}".format(np.amin(M), np.amax(M)))
plt.colorbar()
plt.xlabel("New snow last 24h [cm]")
plt.ylabel("Wind speed [m/s]")
new_snow_cat = ["0-5", "5-10", "10-15", "15-20"]
new_snow_thres = {(0, 5): 0.2, (5, 10): 0.5, (10, 15): 1, (15, 20): 3}
wind_cat = ["0-3", "4-7", "8-10", "10-15", "16-30"]
wind_thres = {(0, 3): 0.2, (3, 7): 1, (7, 10): 2, (10, 15): 0.2, (15, 30): 0.01}
new_snow_region = np.array([[0, 4, 6, 18],
[0, 4, 6, 18],
[0, 4, 6, 18]])
wind_region = np.array([[0, 4, 12, 18],
[4, 0, 18, 6],
[18, 12, 6, 0]])
def get_score(a, score_dict):
for key, value in score_dict.items():
if key[0] <= a < key[1]:
# if a < key:
return value
break
return None
new_snow_region_score = [get_score(a, new_snow_thres) for a in new_snow_region.flatten()]
new_snow_region_score = np.array(new_snow_region_score).reshape(new_snow_region.shape)
print(new_snow_region_score)
wind_region_score = [get_score(a, wind_thres) for a in wind_region.flatten()]
wind_region_score = np.array(wind_region_score).reshape(wind_region.shape)
print(wind_region_score)
print(wind_region_score * new_snow_region_score)
X = np.matrix(np.arange(0, 11.0))
Y = np.matrix(np.arange(10.0, 21.0))
Z = np.multiply(X, Y.T)
print(X)
print(Y.T)
print(Z)
plt.imshow(Z)
print("Min {0}; Max {1}".format(np.amin(Z), np.amax(Z)))
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calculating Molar Fluorescence (MF) of Free Ligand
Step2: Curve-fitting to binding saturation curve
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from lxml import etree
import pandas as pd
import os
import matplotlib.cm as cm
import seaborn as sns
%pylab inline
# Get read and position data of each fluorescence reading section
def get_wells_from_section(path):
reads = path.xpath("*/Well")
wellIDs = [read.attrib['Pos'] for read in reads]
data = [(float(s.text), r.attrib['Pos'])
for r in reads
for s in r]
datalist = {
well : value
for (value, well) in data
}
welllist = [
[
datalist[chr(64 + row) + str(col)]
if chr(64 + row) + str(col) in datalist else None
for row in range(1,9)
]
for col in range(1,13)
]
return welllist
file_lig1="MI_FLU_hsa_lig1_20150922_150518.xml"
file_name = os.path.splitext(file_lig1)[0]
label = file_name[0:25]
print label
root = etree.parse(file_lig1)
#find data sections
Sections = root.xpath("/*/Section")
much = len(Sections)
print "****The xml file " + file_lig1 + " has %s data sections:****" % much
for sect in Sections:
print sect.attrib['Name']
#Work with topread
TopRead = root.xpath("/*/Section")[0]
welllist = get_wells_from_section(TopRead)
df_topread = pd.DataFrame(welllist, columns = ['A - HSA','B - Buffer','C - HSA','D - Buffer', 'E - HSA','F - Buffer','G - HSA','H - Buffer'])
df_topread.transpose()
# To generate cvs file
# df_topread.transpose().to_csv(label + Sections[0].attrib['Name']+ ".csv")
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
%matplotlib inline
def model(x,slope,intercept):
''' 1D linear model in the format scipy.optimize.curve_fit expects: '''
return x*slope + intercept
# generate some data
#X = np.random.rand(1000)
#true_slope=1.0
#true_intercept=0.0
#noise = np.random.randn(len(X))*0.1
#Y = model(X,slope=true_slope,intercept=true_intercept) + noise
#ligand titration
lig1=np.array([200.0000,86.6000,37.5000,16.2000,7.0200, 3.0400, 1.3200, 0.5700, 0.2470, 0.1070, 0.0462, 0.0200])
lig1
# Since I have 4 replicates
L=np.concatenate((lig1, lig1, lig1, lig1))
len(L)
# Fluorescence read
df_topread.loc[:,("B - Buffer", "D - Buffer", "F - Buffer", "H - Buffer")]
B=df_topread.loc[:,("B - Buffer")]
D=df_topread.loc[:,("D - Buffer")]
F=df_topread.loc[:,("F - Buffer")]
H=df_topread.loc[:,("H - Buffer")]
Y = np.concatenate((B.as_matrix(),D.as_matrix(),F.as_matrix(),H.as_matrix()))
(MF,BKG),_ = optimize.curve_fit(model,L,Y)
print('MF: {0:.3f}, BKG: {1:.3f}'.format(MF,BKG))
print('y = {0:.3f} * L + {1:.3f}'.format(MF, BKG))
def model2(x,kd,fr):
''' 1D linear model in the format scipy.optimize.curve_fit expects: '''
# lr =((x+rtot+kd)-((x+rtot+kd)**2-4*x*rtot)**(1/2))/2
# y = bkg + mf*(x - lr) + fr*mf*lr
bkg = 86.2
mf = 2.517
rtot = 0.5
return bkg + mf*(x - ((x+rtot+kd)-((x+rtot+kd)**2-4*x*rtot)**(1/2))/2) + fr*mf*(((x+rtot+kd)-((x+rtot+kd)**2-4*x*rtot)**(1/2))/2)
# Total HSA concentration (uM)
Rtot = 0.5
#Total ligand titration
X = L
len(X)
# Fluorescence read
df_topread.loc[:,("A - HSA", "C - HSA", "E - HSA", "G - HSA")]
A=df_topread.loc[:,("A - HSA")]
C=df_topread.loc[:,("C - HSA")]
E=df_topread.loc[:,("E - HSA")]
G=df_topread.loc[:,("G - HSA")]
Y = np.concatenate((A.as_matrix(),C.as_matrix(),E.as_matrix(),G.as_matrix()))
len(Y)
(Kd,FR),_ = optimize.curve_fit(model2, X, Y, p0=(5,1))
print('Kd: {0:.3f}, Fr: {1:.3f}'.format(Kd,FR))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What we have just done is create a dog who can only feel what's in his location (since he's blind), and can eat or drink. Let's see if he's alive...
Step2: This is our dog. How cool is he? Well, he's hungry and needs to go search for food. For him to do this, we need to give him a program. But before that, let's create a park for our dog to play in.
Step3: Wumpus Environment
Step4: PROGRAM
Step5: That's how easy it is to implement an agent, its program, and environment. But that was a very simple case. What if our environment was 2-Dimentional instead of 1? And what if we had multiple agents?
|
<ASSISTANT_TASK:>
Python Code:
from agents import *
class BlindDog(Agent):
def eat(self, thing):
print("Dog: Ate food at {}.".format(self.location))
def drink(self, thing):
print("Dog: Drank water at {}.".format( self.location))
dog = BlindDog()
print(dog.alive)
class Food(Thing):
pass
class Water(Thing):
pass
class Park(Environment):
def percept(self, agent):
'''prints & return a list of things that are in our agent's location'''
things = self.list_things_at(agent.location)
print(things)
return things
def execute_action(self, agent, action):
'''changes the state of the environment based on what the agent does.'''
if action == "move down":
agent.movedown()
elif action == "eat":
items = self.list_things_at(agent.location, tclass=Food)
if len(items) != 0:
if agent.eat(items[0]): #Have the dog pick eat the first item
self.delete_thing(items[0]) #Delete it from the Park after.
elif action == "drink":
items = self.list_things_at(agent.location, tclass=Water)
if len(items) != 0:
if agent.drink(items[0]): #Have the dog drink the first item
self.delete_thing(items[0]) #Delete it from the Park after.
def is_done(self):
'''By default, we're done when we can't find a live agent,
but to prevent killing our cute dog, we will or it with when there is no more food or water'''
no_edibles = not any(isinstance(thing, Food) or isinstance(thing, Water) for thing in self.things)
dead_agents = not any(agent.is_alive() for agent in self.agents)
return dead_agents or no_edibles
from ipythonblocks import BlockGrid
from agents import *
color = {"Breeze": (225, 225, 225),
"Pit": (0,0,0),
"Gold": (253, 208, 23),
"Glitter": (253, 208, 23),
"Wumpus": (43, 27, 23),
"Stench": (128, 128, 128),
"Explorer": (0, 0, 255),
"Wall": (44, 53, 57)
}
def program(percepts):
'''Returns an action based on it's percepts'''
print(percepts)
return input()
w = WumpusEnvironment(program, 7, 7)
grid = BlockGrid(w.width, w.height, fill=(123, 234, 123))
def draw_grid(world):
global grid
grid[:] = (123, 234, 123)
for x in range(0, len(world)):
for y in range(0, len(world[x])):
if len(world[x][y]):
grid[y, x] = color[world[x][y][-1].__class__.__name__]
def step():
global grid, w
draw_grid(w.get_world())
grid.show()
w.step()
step()
class BlindDog(Agent):
location = 1
def movedown(self):
self.location += 1
def eat(self, thing):
'''returns True upon success or False otherwise'''
if isinstance(thing, Food):
print("Dog: Ate food at {}.".format(self.location))
return True
return False
def drink(self, thing):
''' returns True upon success or False otherwise'''
if isinstance(thing, Water):
print("Dog: Drank water at {}.".format(self.location))
return True
return False
def program(percepts):
'''Returns an action based on it's percepts'''
for p in percepts:
if isinstance(p, Food):
return 'eat'
elif isinstance(p, Water):
return 'drink'
return 'move down'
park = Park()
dog = BlindDog(program)
dogfood = Food()
water = Water()
park.add_thing(dog, 0)
park.add_thing(dogfood, 5)
park.add_thing(water, 7)
park.run(10)
class Park(XYEnvironment):
def percept(self, agent):
'''prints & return a list of things that are in our agent's location'''
things = self.list_things_at(agent.location)
print(things)
return things
def execute_action(self, agent, action):
'''changes the state of the environment based on what the agent does.'''
if action == "move down":
agent.movedown()
elif action == "eat":
items = self.list_things_at(agent.location, tclass=Food)
if len(items) != 0:
if agent.eat(items[0]): #Have the dog pick eat the first item
self.delete_thing(items[0]) #Delete it from the Park after.
elif action == "drink":
items = self.list_things_at(agent.location, tclass=Water)
if len(items) != 0:
if agent.drink(items[0]): #Have the dog drink the first item
self.delete_thing(items[0]) #Delete it from the Park after.
def is_done(self):
'''By default, we're done when we can't find a live agent,
but to prevent killing our cute dog, we will or it with when there is no more food or water'''
no_edibles = not any(isinstance(thing, Food) or isinstance(thing, Water) for thing in self.things)
dead_agents = not any(agent.is_alive() for agent in self.agents)
return dead_agents or no_edibles
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup a connection with local MariaDB
Step2: Read SQL tables directly into Pandas DataFrame
Step3: Ooops! We get the original bytearray objects (e.g. bytearray(b'se-bbr')) from the WLM database storage formate out. We need to convert them.
Step4: OK. Now we've got those strings in a proper readable format.
|
<ASSISTANT_TASK:>
Python Code:
# Boring prerequisites
import pandas as pd # Beautiful tool for data wrangling! e.g. '!pip install pandas' from a Notebook
import datetime
import numpy as np
pd.set_option("display.max_rows",25) # Useful when having large Pandas DataFrames like we do here
import mysql.connector as mariadb # See https://mariadb.com/blog/how-connect-python-programs-mariadb e.g. '!pip install mysql' from Notebook
import csv
# Replace 'mos' with your username
conn = mariadb.connect(user='mos', password='', database='monuments_db',buffered=True)
cursor = conn.cursor()
cursor.execute("SELECT * FROM monuments_all WHERE country='se-bbr'")
all_bbr = pd.io.sql.read_sql('select * from monuments_all WHERE country="se-bbr"', conn)
all_bbr.shape
all_bbr.keys()
all_bbr.dtypes
all_bbr.to_csv("./files/all_bbr.csv")
!head -n3 ./files/all_bbr.csv
# First fill na values
all_bbr["country"] = all_bbr.country.str.decode("utf-8").astype("str")
all_bbr["lang"] = all_bbr.lang.str.decode("utf-8").astype("str")
all_bbr["adm0"] = all_bbr.adm0.str.decode("utf-8")
all_bbr["adm1"] = all_bbr.adm1.str.decode("utf-8")
# We also want lat_int and lon_int to be integers, just to be correct
all_bbr["lat_int"] = all_bbr.lat_int.fillna(0).astype("int64")
all_bbr["lon_int"] = all_bbr.lon_int.fillna(0).astype("int64")
all_bbr.dtypes
all_bbr.to_csv("./files/all_bbr.csv")
!head -n 3 ./files/all_bbr.csv
table_name = "se_bbr" # I've renamed monuments_se-bbr_(se) to 'se_bbr' in local database, change to correct name
se_bbr = pd.io.sql.read_sql('select * from se_bbr', conn)
se_bbr.shape
se_bbr.keys()
se_bbr.dtypes
se_bbr.to_csv("./files/se_bbr.csv")
!head -n3 ./files/se_bbr.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate Features And Target
Step2: Split Data Intro Training And Test Sets
Step3: Training Binary Classifier
Step4: Create Predicted Probabilities
Step5: Plot Receiving Operating Characteristic Curve
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Create feature matrix and target vector
X, y = make_classification(n_samples=10000,
n_features=10,
n_classes=2,
n_informative=3,
random_state=3)
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1)
# Create classifier
clf = LogisticRegression()
# Train model
clf.fit(X_train, y_train)
# Get predicted probabilities
y_score = clf.predict_proba(X_test)[:,1]
# Create true and false positive rates
false_positive_rate, true_positive_rate, threshold = roc_curve(y_test, y_score)
# Plot ROC curve
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate)
plt.plot([0, 1], ls="--")
plt.plot([0, 0], [1, 0] , c=".7"), plt.plot([1, 1] , c=".7")
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Local, individual load of updated data set (with weather data integrated) into training, development, and test subsets.
Step2: Logistic Regression
Step3: LR with L1-Penalty Hyperparameter Tuning
Step4: Dataframe for Coefficients
Step5: Plot for Coefficients
Step6: LR with L2-Penalty Hyperparameter Tuning
Step7: Dataframe for Coefficients
Step8: Plot of Coefficients
|
<ASSISTANT_TASK:>
Python Code:
# Additional Libraries
%matplotlib inline
import matplotlib.pyplot as plt
# Import relevant libraries:
import time
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import log_loss
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
# Import Meta-estimators
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
# Import Calibration tools
from sklearn.calibration import CalibratedClassifierCV
# Set random seed and format print output:
np.random.seed(0)
np.set_printoptions(precision=3)
# Data path to your local copy of Kalvin's "x_data.csv", which was produced by the negated cell above
data_path = "./data/x_data_3.csv"
df = pd.read_csv(data_path, header=0)
x_data = df.drop('category', 1)
y = df.category.as_matrix()
# Impute missing values with mean values:
#x_complete = df.fillna(df.mean())
x_complete = x_data.fillna(x_data.mean())
X_raw = x_complete.as_matrix()
# Scale the data between 0 and 1:
X = MinMaxScaler().fit_transform(X_raw)
# Shuffle data to remove any underlying pattern that may exist. Must re-run random seed step each time:
np.random.seed(0)
shuffle = np.random.permutation(np.arange(X.shape[0]))
X, y = X[shuffle], y[shuffle]
print(np.where(y == 'TREA'))
print(np.where(y == 'PORNOGRAPHY/OBSCENE MAT'))
## Due to difficulties with log loss and set(y_pred) needing to match set(labels), we will remove the extremely rare
## crimes from the data for quality issues.
#X_minus_trea = X[np.where(y != 'TREA')]
#y_minus_trea = y[np.where(y != 'TREA')]
#X_final = X_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
#y_final = y_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
## Separate training, dev, and test data:
#test_data, test_labels = X_final[800000:], y_final[800000:]
#dev_data, dev_labels = X_final[700000:800000], y_final[700000:800000]
#train_data, train_labels = X_final[100000:700000], y_final[100000:700000]
#calibrate_data, calibrate_labels = X_final[:100000], y_final[:100000]
test_data, test_labels = X[800000:], y[800000:]
dev_data, dev_labels = X[700000:800000], y[700000:800000]
#train_data, train_labels = X[100000:700000], y[100000:700000]
train_data, train_labels = X[:700000], y[:700000]
#calibrate_data, calibrate_labels = X[:100000], y[:100000]
# Create mini versions of the above sets
#mini_train_data, mini_train_labels = X_final[:20000], y_final[:20000]
#mini_calibrate_data, mini_calibrate_labels = X_final[19000:28000], y_final[19000:28000]
#mini_dev_data, mini_dev_labels = X_final[49000:60000], y_final[49000:60000]
#mini_train_data, mini_train_labels = X[:20000], y[:20000]
mini_train_data, mini_train_labels = X[:200000], y[:200000]
#mini_calibrate_data, mini_calibrate_labels = X[19000:28000], y[19000:28000]
mini_dev_data, mini_dev_labels = X[430000:480000], y[430000:480000]
## Create list of the crime type labels. This will act as the "labels" parameter for the log loss functions that follow
#crime_labels = list(set(y_final))
#crime_labels_mini_train = list(set(mini_train_labels))
#crime_labels_mini_dev = list(set(mini_dev_labels))
#crime_labels_mini_calibrate = list(set(mini_calibrate_labels))
#print(len(crime_labels), len(crime_labels_mini_train), len(crime_labels_mini_dev),len(crime_labels_mini_calibrate))
crime_labels = list(set(y))
crime_labels_mini_train = list(set(mini_train_labels))
crime_labels_mini_dev = list(set(mini_dev_labels))
#crime_labels_mini_calibrate = list(set(mini_calibrate_labels))
#print(len(crime_labels), len(crime_labels_mini_train), len(crime_labels_mini_dev),len(crime_labels_mini_calibrate))
print(len(crime_labels), len(crime_labels_mini_train), len(crime_labels_mini_dev))
print(len(train_data),len(train_labels))
print(len(dev_data),len(dev_labels))
print(len(mini_train_data),len(mini_train_labels))
print(len(mini_dev_data),len(mini_dev_labels))
print(len(test_data),len(test_labels))
#print(len(mini_calibrate_data),len(mini_calibrate_labels))
#print(len(calibrate_data),len(calibrate_labels))
#log_reg = LogisticRegression(penalty='l1').fit(mini_train_data, mini_train_labels)
#log_reg = LogisticRegression().fit(mini_train_data, mini_train_labels)
#eval_prediction_probabilities = log_reg.predict_proba(mini_dev_data)
#eval_predictions = log_reg.predict(mini_dev_data)
#print("Multi-class Log Loss:", log_loss(y_true = mini_dev_labels, y_pred = eval_prediction_probabilities, labels = crime_labels_mini_dev), "\n\n")
#columns = ['hour_of_day','dayofweek',\
# 'x','y','bayview','ingleside','northern',\
# 'central','mission','southern','tenderloin',\
# 'park','richmond','taraval','HOURLYDRYBULBTEMPF',\
# 'HOURLYRelativeHumidity','HOURLYWindSpeed',\
# 'HOURLYSeaLevelPressure','HOURLYVISIBILITY',\
# 'Daylight']
##print(len(columns))
#allCoefs = pd.DataFrame(index=columns)
#for a in range(len(log_reg.coef_)):
# #print(crime_labels_mini_dev[a])
# #print(pd.DataFrame(log_reg.coef_[a], index=columns))
# allCoefs[crime_labels_mini_dev[a]] = log_reg.coef_[a]
# #print()
#allCoefs
#%matplotlib inline
#import matplotlib.pyplot as plt
#
#f = plt.figure(figsize=(15,8))
#allCoefs.plot(kind='bar', figsize=(15,8))
#plt.legend(loc='center left', bbox_to_anchor=(1.0,0.5))
#plt.show()
lr_param_grid_1 = {'C': [0, 0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 5.0, 10.0]}
#lr_param_grid_1 = {'C': [0.0001, 0.01, 0.5, 5.0, 10.0]}
LR_l1 = GridSearchCV(LogisticRegression(penalty='l1'), param_grid=lr_param_grid_1, scoring='neg_log_loss')
LR_l1.fit(train_data, train_labels)
print('L1: best C value:', str(LR_l1.best_params_['C']))
LR_l1_prediction_probabilities = LR_l1.predict_proba(dev_data)
LR_l1_predictions = LR_l1.predict(dev_data)
print("L1 Multi-class Log Loss:", log_loss(y_true = dev_labels, y_pred = LR_l1_prediction_probabilities, labels = crime_labels), "\n\n")
columns = ['hour_of_day','dayofweek',\
'x','y','bayview','ingleside','northern',\
'central','mission','southern','tenderloin',\
'park','richmond','taraval','HOURLYDRYBULBTEMPF',\
'HOURLYRelativeHumidity','HOURLYWindSpeed',\
'HOURLYSeaLevelPressure','HOURLYVISIBILITY',\
'Daylight']
allCoefsL1 = pd.DataFrame(index=columns)
for a in range(len(LR_l1.coef_)):
allCoefsL1[crime_labels[a]] = LR_l1.coef_[a]
allCoefsL1
f = plt.figure(figsize=(15,8))
allCoefsL1.plot(kind='bar', figsize=(15,8))
plt.legend(loc='center left', bbox_to_anchor=(1.0,0.5))
plt.show()
lr_param_grid_2 = {'C': [0, 0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 5.0, 10.0], \
'solver':['liblinear','newton-cg','lbfgs', 'sag']}
LR_l2 = GridSearchCV(LogisticRegression(penalty='l2'), param_grid=lr_param_grid_2, scoring='neg_log_loss')
LR_l2.fit(train_data, train_labels)
print('L2: best C value:', str(LR_l2.best_params_['C']))
print('L2: best solver:', str(LR_l2.best_params_['solver']))
LR_l2_prediction_probabilities = LR_l2.predict_proba(dev_data)
LR_l2_predictions = LR_l2.predict(dev_data)
print("L2 Multi-class Log Loss:", log_loss(y_true = dev_labels, y_pred = LR_l2_prediction_probabilities, labels = crime_labels), "\n\n")
columns = ['hour_of_day','dayofweek',\
'x','y','bayview','ingleside','northern',\
'central','mission','southern','tenderloin',\
'park','richmond','taraval','HOURLYDRYBULBTEMPF',\
'HOURLYRelativeHumidity','HOURLYWindSpeed',\
'HOURLYSeaLevelPressure','HOURLYVISIBILITY',\
'Daylight']
allCoefsL2 = pd.DataFrame(index=columns)
for a in range(len(LR_l2.coef_)):
allCoefsL2[crime_labels[a]] = LR_l2.coef_[a]
allCoefsL2
f = plt.figure(figsize=(15,8))
allCoefsL2.plot(kind='bar', figsize=(15,8))
plt.legend(loc='center left', bbox_to_anchor=(1.0,0.5))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Population Statistics and Visualization
Step2: We'll define the function hist to plot a histogram of a population. Our hist wraps plt.hist, but with some specific keyword values
Step3: Transactions
Step4: Interactions
Step5: Simulation
Step6: Here is a simple example of simulating a population of 4 actors for 8 time steps
Step7: SImulation Visualization
Step8: There are three parts to this output
Step9: Now we can easily make an initial population from a distribution function. I'll start with a uniform distribution
Step10: And try a constant distribution, where everyone starts out the same
Step11: The resulting histogram looks different, but only because the starting distribution is so narrow and tall; the end distribution has a Gini coefficient of about 1/2 and standard deviation of about 100, just like we get from the other starting distributions.
Step12: Surprise
Step13: Now the results look very different
Step14: Another surprise
Step15: The status_quo transaction increases inequality from the initial population, but not as much as the other transaction functions.
Step16: Surprise
Step17: It is still surprising that we still have no efect from restricting trade.
Step18: Let's see what it looks like
Step19: Hey—that looks like the beta distribution. Let's compare
|
<ASSISTANT_TASK:>
Python Code:
import random
N = 5000 # Default size of the population
MU = 100. # Default mean of the population
population = [random.gauss(mu=MU, sigma=MU/5) for actor in range(N)]
def gini(y):
"Compute the Gini coefficient (a measure of equality/inequality) in a population, y."
y = sorted(y)
n = len(y)
numer = 2 * sum((i+1) * y[i] for i in range(n))
denom = n * sum(y)
return (numer / denom) - (n + 1) / n
%matplotlib inline
import matplotlib.pyplot as plt
def hist(population, label='pop', **kwargs):
"A custom version of `hist` with better defaults."
label = label + ': G=' + str(round(gini(population), 2))
h = plt.hist(list(population), bins=30, alpha=0.5, label=label, **kwargs)
plt.xlabel('wealth'); plt.ylabel('count'); plt.grid(True)
plt.legend()
hist(population)
def random_split(A, B):
"Take all the money uin the pot and divide it randomly between the two actors."
pot = A + B
share = random.uniform(0, pot)
return share, pot - share
random_split(100, 100)
def anyone(N): return random.sample(range(N), 2)
anyone(N)
def simulate(population, T, transaction=random_split, interaction=anyone):
"Run simulation on population for T transactions; yield (t, pop) at each time step."
population = population.copy()
yield population
for t in range(1, T + 1):
i, j = interaction(len(population))
population[i], population[j] = transaction(population[i], population[j])
yield population
for pop in simulate([100] * 4, 8):
print(pop)
import statistics
def show(population, k=40, percentiles=(1, 10, 50, 90, 99), **kwargs):
"Run a simulation for k*N steps, printing statistics and displaying a plot and histogram."
N = len(population)
start = list(population)
results = [(t, sorted(pop)) # Sort results so that percentiles work
for (t, pop) in enumerate(simulate(population, k * N, **kwargs))
if t % (N / 10) == 0]
times = [t for (t, pop) in results]
# Printout:
print(' t Gini stdev' + (' {:3d}%' * len(percentiles)).format(*percentiles))
print('------- ---- -----' + ' ----' * len(percentiles))
fmt = '{:7,d} {:.2f} {:5.1f}' + ' {:4.0f}' * len(percentiles)
for (t, pop) in results:
if t % (4 * N) == 0:
data = [percent(pct, pop) for pct in percentiles]
print(fmt.format(t, gini(pop), statistics.stdev(pop), *data))
# Plot:
plt.hold(True); plt.xlabel('wealth'); plt.ylabel('time'); plt.grid(True)
for pct in percentiles:
line = [percent(pct, pop) for (t, pop) in results]
plt.plot(line, times)
plt.show()
# Histogram:
R = (min(pop+start), max(pop+start))
hist(start, 'start', range=R)
hist(pop, 'end', range=R)
def percent(pct, items):
"The item that is pct percent through the sorted list of items."
return items[min(len(items)-1, len(items) * pct // 100)]
show(population)
def samples(distribution, *args, n=N, mu=MU):
"Sample from the distribution n times, then normalize results to have mean mu."
numbers = [distribution(*args) for _ in range(N)]
return normalize(numbers, mu)
def normalize(numbers, mu):
"Make the numbers non-negative, and scale them so they have mean mu."
numbers = [max(0, n) for n in numbers]
factor = len(numbers) * mu / sum(numbers)
return [x * factor for x in numbers]
show(samples(random.uniform, 0, 200))
def constant(mu=MU): return mu
show(samples(constant))
def beta(): return random.betavariate(0.9, 12)
show(samples(beta))
def winner_take_all(A, B): return random.choice(([A + B, 0], [0, A + B]))
show(population, transaction=winner_take_all)
def redistribute(A, B, rate=0.31):
"Tax both parties at rate; split the tax revenue evenly, and randomly split the rest."
tax = rate * (A + B)
Arand, Brand = random_split(A + B - tax, 0)
return tax / 2 + Arand, tax / 2 + Brand
show(population, transaction=redistribute)
def status_quo(A, B):
"A transaction that is most likely to leave things unchanged, but could move any amount of wealth around."
a = random.triangular(0, (A + B) / 2, A / 2)
return (A / 2 + a), (A + B) - (A / 2 + a)
show(population, transaction=status_quo)
def neighborhood(n, width=5):
"Choose two agents in the same neighborhood"
i = random.randrange(n - width)
return random.sample(range(i, i + width + 1), 2)
show(population, interaction=neighborhood)
def adjacent(n): return neighborhood(n, 1)
show(population, interaction=adjacent)
USA_table = [
(0, 10, 7.63),
(10, 20, 19.20),
(20, 30, 30.50),
(30, 40, 41.08),
(40, 50, 49.95),
(50, 60, 57.73),
(60, 70, 64.56),
(70, 80, 70.39),
(80, 90, 75.02),
(90, 100, 79.02),
(100, 110, 82.57),
(110, 120, 85.29),
(120, 130, 87.60),
(130, 140, 89.36),
(140, 150, 90.95),
(150, 160, 92.52),
(160, 170, 93.60),
(170, 180, 94.55),
(180, 190, 95.23),
(190, 200, 95.80),
(200, 250, 97.70),
(250, 300, 100.0)]
def USA():
"Sample from the USA distribution."
p = random.uniform(0, 100)
for (lo, hi, cum_pct) in USA_table:
if p <= cum_pct:
return random.uniform(lo, hi)
hist(samples(USA), label='USA')
hist(samples(beta), label='beta')
hist(samples(USA), label='USA')
show(samples(USA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import math
def decBinary(arr , n ) :
k = int(math . log2(n ) )
while(n > 0 ) :
arr[k ] = n % 2
k = k - 1
n = n // 2
def binaryDec(arr , n ) :
ans = 0
for i in range(0 , n ) :
ans = ans +(arr[i ] <<(n - i - 1 ) )
return ans
def concat(m , n ) :
k = int(math . log2(m ) ) + 1
l = int(math . log2(n ) ) + 1
a =[0 for i in range(0 , k ) ]
b =[0 for i in range(0 , l ) ]
c =[0 for i in range(0 , k + l ) ]
decBinary(a , m ) ;
decBinary(b , n ) ;
iin = 0
for i in range(0 , k ) :
c[iin ] = a[i ]
iin = iin + 1
for i in range(0 , l ) :
c[iin ] = b[i ]
iin = iin + 1
return(binaryDec(c , k + l ) )
m = 4
n = 5
print(concat(m , n ) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set the parameters, create the cluster and the channels, etc
Step2: Calculate the distances between each user and each base station
Step3: Plot the distances
Step4: Calculate the Transmit power of each user
Step5: Path Loss of the Interference Links relative to the direct link
Step6: Question
Step7: However, notice that the relative path loss is only one side of the coint. We still need to account the transmit power of each interfering base station when compared with the transmit power of our own base station. That is, if the relative path loss to some interference link is 25dB, but that particular base station transmit with 10dB more power than our own base station then the relative interference power of that link when compared with the desired signal would be 15dB.
Step8: Some of the precoders may have 0 energy.
Step9: Pos Processing SINR for each Stream
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ipywidgets.widgets import Image
from IPython.display import display, display_svg, display_png
# xxxxxxxxxx Add the parent folder to the python path. xxxxxxxxxxxxxxxxxxxx
import sys
parent_dir = "../"
sys.path.append(parent_dir)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Import Statements xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
from time import time
from time import sleep
import numpy as np
from pprint import pprint
from matplotlib import pyplot as plt
from pyphysim.simulations.runner import SimulationRunner
from pyphysim.simulations.parameters import SimulationParameters
from pyphysim.simulations.results import SimulationResults, Result
from pyphysim.simulations.simulationhelpers import simulate_do_what_i_mean
from pyphysim.simulations.runner import get_common_parser
from pyphysim.modulators.fundamental import PSK
from pyphysim.channels import pathloss
from pyphysim.channels import multiuser
from pyphysim.util.conversion import dB2Linear, linear2dB
from pyphysim.cell.cell import Cluster
from pyphysim.util import misc
from pyphysim.util.conversion import dBm2Linear
from pyphysim.ia import algorithms
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
multiUserChannel = multiuser.MultiUserChannelMatrix()
#ia_solver = algorithms.MMSEIASolver(multiUserChannel)
ia_solver = algorithms.MaxSinrIASolver(multiUserChannel)
#ia_solver = algorithms.AlternatingMinIASolver(multiUserChannel)
ia_solver.max_iterations = 500
modulator = PSK(4)
include_path_loss_bool = True
M = 4
NSymbs = 100
K = 19
Nr = 4
Nt = 8
Ns = 2
SNR = 30.0
users_per_cell = 1
# Cluter variables
cell_radius = 1.0
# Dependent parameters
#noise_var = 1 / dB2Linear(SNR)
noise_var = dBm2Linear(-116.4)
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# xxxxxxxxxx Create the cluster of cells xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
cluster = Cluster(cell_radius=cell_radius, num_cells=19)
cluster.create_wrap_around_cells(include_users_bool=True)
cluster.add_random_users(cell_ids=range(1,20), num_users=users_per_cell)
#cluster.add_border_users(cell_ids=range(1,20), angles=0, ratios=0.01)
cluster.figsize = (7,7)
cluster.fill_face_bool = True
cluster.fill_opacity = 0.3
cluster
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
multiUserChannel.randomize(Nr, Nt, K)
if include_path_loss_bool is True:
# xxxxxxxxxx Calculate the pathloss for each link xxxxxxxxxxxxxxxxxxxxx
dists = cluster.calc_dist_all_users_to_each_cell()
#pl_obj = pathloss.PathLossFreeSpace()
pl_obj = pathloss.PathLoss3GPP1()
#pl_obj = pathloss.PathLossOkomuraHata()
# Calculate the path loss in linear scale
pl = pl_obj.calc_path_loss(dists)
#pl = np.eye(19)
multiUserChannel.set_pathloss(pl)
else:
# Path loss will be equal to 1.0 for all links
pl = np.ones([K,K])
multiUserChannel.set_pathloss(None)
# fig, axs = plt.subplots(10,2, figsize=(12,40))
# for row in range(10):
# for col in range(2):
# index = 2 * row + col
# if index > 18:
# break
# # print "{0}: {1}x{2}".format(index, row, col)
# axs[row, col].set_xlim([0, 20])
# axs[row, col].grid()
# axs[row, col].plot(range(1,20), dists[index])
# axs[row, col].plot(index+1, dists[index][index], 'or')
# axs[row, col].xaxis.set_ticks(np.arange(1,20))
# plt.show()
# Calculate the transmit power of each user
snr = dB2Linear(SNR)
Pt = snr * noise_var/pl.diagonal()
pl_dB = -linear2dB(pl)
np.set_printoptions(precision=1, linewidth=140)
for index in range(19):
pl_dB[index] = pl_dB[index] - pl_dB[index][index]
print('Relative Direct Channel Path Loss:\n{0}'.format(pl_dB.diagonal()))
print('Relative Max Path Loss:\n{0}'.format(pl_dB.max(axis=1)))
print('Relative Min Path Loss:\n{0}'.format(np.sort(pl_dB, axis=1)[:,1]))
print()
np.set_printoptions(precision=4, linewidth=120)
# print np.mean(np.sort(pl_dB, axis=1)[:,1])
# print np.mean(pl_dB.max(axis=1))
[(sum(pl_dB[i] < 30) - 1) for i in range(19)]
noise_var
assert abs(ia_solver.noise_var - noise_var) < 1e-12
ia_solver.clear()
runned_reps = ia_solver.solve(Ns=Ns, P=Pt)
print(runned_reps)
data = []
for f in ia_solver.full_F:
data.append(np.linalg.norm(f, 'fro')**2)
print(data[-1])
print
data = np.array(data)
#print("Number of non zero links: {0}".format(len(data[data > 1e-3])))
print("Sum Capacity: {0}\nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx".format(ia_solver.calc_sum_capacity()))
print("Input SNR: {0}".format(SNR))
print("Post-processing SINRs")
for v in ia_solver.calc_SINR_in_dB():
print(v)
# # If any of the Nr, Nt or Ns variables were integers (meaning all
# # users have the same value) we will convert them by numpy arrays
# # with correct size (K).
# Nr = ia_solver.Nr
# Nt = ia_solver.Nt
# Ns = ia_solver.Ns
# cumNs = np.cumsum(ia_solver.Ns)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Input Data xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # inputData has the data of all users (vertically stacked)
# inputData = np.random.randint(0, M, [np.sum(Ns), NSymbs])
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Modulate input data xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # modulatedData has the data of all users (vertically stacked)
# modulatedData = modulator.modulate(inputData)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxxxxxxx Perform the Interference Alignment xxxxxxxxxxxxxxxxxxx
# # Split the data. transmit_signal will be a list and each element
# # is a numpy array with the data of a user
# transmit_signal = np.split(modulatedData, cumNs[:-1])
# transmit_signal_precoded = map(np.dot, ia_solver.full_F, transmit_signal)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Pass through the channel xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# multi_user_channel = ia_solver._multiUserChannel
# # received_data is an array of matrices, one matrix for each receiver.
# #import pudb; pudb.set_trace() ## DEBUG ##
# received_data = multi_user_channel.corrupt_data(
# transmit_signal_precoded, noise_var)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Perform the Interference Cancelation xxxxxxxxxxxxxxxxxxxxxx
# received_data_no_interference = map(np.dot,
# ia_solver.full_W_H, received_data)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Demodulate Data xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# received_data_no_interference = np.vstack(received_data_no_interference)
# demodulated_data = modulator.demodulate(received_data_no_interference)
# # demodulated_data = map(modulator.demodulate, received_data_no_interference)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxx Calculates the symbol and bit error rates xxxxxxxxxxxxxxxxx
# symbolErrors = np.sum(inputData != demodulated_data)
# bitErrors = misc.count_bit_errors(inputData, demodulated_data)
# numSymbols = inputData.size
# numBits = inputData.size * modulators.level2bits(M)
# ia_cost = ia_solver.get_cost()
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # xxxxxxxxxx Calculates the Sum Capacity xxxxxxxxxxxxxxxxxxxxxxxxxx
# sirn_all_k = ia_solver.calc_SINR()
# calc_capacity = lambda sirn: np.sum(np.log2(1 + sirn))
# # Array with the sum capacity of each user
# sum_capacity = map(calc_capacity, sirn_all_k)
# # Total sum capacity
# total_sum_capacity = np.sum(sum_capacity)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# #xxxxxxxxxx Number of iterations of the IA algorithm xxxxxxxxxxxxxx
# ia_runned_iterations = ia_solver.runned_iterations
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # Print the results
# print("SumCapacity: {0}".format(total_sum_capacity))
# print("BER: {0}".format(float(bitErrors)/numBits))
# # xxxxx Return the simulation results xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# symbolErrorsResult = Result.create(
# "symbol_errors", Result.SUMTYPE, symbolErrors)
# numSymbolsResult = Result.create(
# "num_symbols", Result.SUMTYPE, numSymbols)
# bitErrorsResult = Result.create("bit_errors", Result.SUMTYPE, bitErrors)
# numBitsResult = Result.create("num_bits", Result.SUMTYPE, numBits)
# berResult = Result.create("ber", Result.RATIOTYPE, bitErrors, numBits,
# accumulate_values=False)
# serResult = Result.create(
# "ser", Result.RATIOTYPE, symbolErrors, numSymbols, accumulate_values=False)
# ia_costResult = Result.create(
# "ia_cost", Result.RATIOTYPE, ia_cost, 1, accumulate_values=False)
# sum_capacityResult = Result.create(
# "sum_capacity", Result.RATIOTYPE, total_sum_capacity, 1,
# accumulate_values=False)
# ia_runned_iterationsResult = Result.create(
# "ia_runned_iterations", Result.RATIOTYPE, ia_runned_iterations, 1, accumulate_values=False)
# #import pudb; pudb.set_trace() ## DEBUG ##
# simResults = SimulationResults()
# simResults.add_result(symbolErrorsResult)
# simResults.add_result(numSymbolsResult)
# simResults.add_result(bitErrorsResult)
# simResults.add_result(numBitsResult)
# simResults.add_result(berResult)
# simResults.add_result(serResult)
# simResults.add_result(ia_costResult)
# simResults.add_result(sum_capacityResult)
# simResults.add_result(ia_runned_iterationsResult)
# # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# # return simResults
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.3 From file to XML object
Step2: As you can see, we obtained an instance of type lxml.etree._ElementTree. It means the xml markup has been transformed into something Python understands.
Step3: From the documentation of the XMLParser function, here are some arguments that might be useful for you
Step4: DIY
Step6: 1.3.2 Errors and understanding them
Step10: What error did we raise trying to parse this XML ? We got an XMLSyntaxError. It can happen for various reasons, including when entities cannot be parsed. Can you try to find another way to raise an XMLSyntaxError ?
Step11: As you can see, errors are detailed enough so you can correct your own XML, at least manually.
Step13: You can do plenty of things using lxml and access properties or methods of nodes, here is an overview of reading functionalities offered by lxml
Step14: If we want to retrieve the attributes of our div, we can do as follow
Step15: Great ! We accessed our first information using lxml ! Now, how about getting somewhere other than the root tag ? To do so, there are two ways
Step16: Now that we have access to our children, we can have access to their text
Step17: Ok, we are now able to get some stuff done. Remember the namespace naming ? Sometimes it's useful to retrieve namespaces and their prefix
Step19: What you've learned
Step20: As you can see, the xpath returns a list. This behaviour is intended, since an xpath can retrieve more than one item
Step21: You see ? The xpath //l returns two elements, just like python does in a list. Now, let's apply some xpath to the children and see what happens
Step22: As you can see, you can do xpath from any node in lxml. One important thing though
Step24: Xpath with namespaces and prefix
Step27: What you have learned
Step28: Did you see what happened ? We used xslt(xml). etree.XSLT() transforms a xsl document into a function, which then takes one parameter (in this case an xml document). But can you figure out what this returns ? Let's ask Python
Step29: The result is not of the same type of element we usually have, even though it does share most of its methods and attributes
Step30: And has something more
Step33: XSLT is more complex than just inputing xml. You can do XSLT using parameters as well. In this case, your parameters will be accessibles as a named argument to the generated function. If your XSL has a name xsl-param, the function given back by etree.XSLT will have a name argument
Step34: 2. Using ElementTree
Step35: 2.1 Traversing the Parsed Tree
Step36: 2.1.1 Finding Nodes in a Document¶
Step37: Another version can take advantage of the fact that the outline nodes are only nested two levels deep. Changing the search path to .//outline/outline mean the loop will process only the second level of outline nodes.
Step38: 2.1.2 Parsed Node Attributes
Step39: 2.1.3 Parsing Strings
|
<ASSISTANT_TASK:>
Python Code:
from lxml import etree
# We open our file
with open("data/books.xml") as file:
# We use the etree.parse property
parsed = etree.parse(file)
# We print the object
print(parsed)
# We initiate a new parser from etree, asking it to remove nodes of text which are empty
parser = etree.XMLParser(remove_blank_text=True)
# We open the file
with open("data/books.xml") as file:
# And we parse using the new parser
parsed = etree.parse(file, parser)
# We print the object
print(parsed)
# We open the file
xml = '<root xmlns:a="xmlns1" xmlns:b="xmlns2"><tag xmlns:c="xmlns3" /><tag xmlns:a="xmlns1" /><tag /></root>'
parsed = etree.fromstring(xml)
print(parsed)
# Put your code here
xml =
<fileDesc>
<titleStmt>
<title>Aeneid</title>
<title type="sub">Machine readable text</title>
<author n="Verg.">P. Vergilius Maro</author>
<editor role="editor" n="Greenough">J. B. Greenough</editor>
</titleStmt>
<extent>about 505Kb</extent>
<!-- &Perseus.publish;-->
<sourceDesc>
<biblStruct>
<monogr>
<author>Vergil</author>
<title>Bucolics, Aeneid, and Georgics Of Vergil</title>
<editor role="editor">J. B. Greenough</editor>
<imprint>
<pubPlace>Boston</pubPlace>
<publisher>Ginn & Co.</publisher>
<date>1900</date>
</imprint>
</monogr>
</biblStruct>
</sourceDesc>
</fileDesc>
etree.fromstring(xml)
#Write your xml in xml variable
# invalid
xml =
#
xml2 =
<start>this is a text</start>
#
xml3 =
<start attr="test"/>
etree.fromstring(xml3)
# With no namespace
print(etree.fromstring("<root />"))
# With namespace
print(etree.fromstring("<root xmlns='http://localhost' />"))
# First, we will need some xml
xml =
<div type="Book" n="1">
<l n="1">Arma virumque cano, Troiae qui primus ab oris</l>
<tei:l n="2" xmlns:tei="http://www.tei-c.org/ns/1.0">Italiam, fato profugus, Laviniaque venit</tei:l>
<l n="3">litora, multum ille et terris iactatus et alto</l>
<l n="4">vi superum saevae memorem Iunonis ob iram;</l>
<l n="5">multa quoque et bello passus, dum conderet urbem,</l>
<l n="6">inferretque deos Latio, genus unde Latinum,</l>
<l n="7">Albanique patres, atque altae moenia Romae.</l>
</div>
div = etree.fromstring(xml)
print(parsed)
type_div = div.get("type")
print(type_div)
print(div.get("n"))
# If we want a dictionary of attributes
print(div.attrib)
attributes_div = dict(div.attrib)
print(attributes_div)
# Of if we want a list
list_attributes_div = div.items()
print(list_attributes_div)
children = div.getchildren()
print(children)
line_1 = children[0] # Because it's a list we can access children through index
print(line_1)
print(line_1.text)
# <tei:l n="2" xmlns:tei="http://www.tei-c.org/ns/1.0">Italiam, fato profugus, Laviniaque venit</tei:l>
line_2 = children[1]
print(line_2.nsmap)
print(line_2.prefix)
print(line_2.tag)
# We generate some xml and parse it
## TODO
xml = <div>
<l n="1">
<p>Text</p>
<p>new p</p>
followed
<test>
<p>p3</p>
</test>
</l>
<l n="2">
by line two
</l>
<p>test</p>
<p><l n="3"> line 3</l></p>
</div>
div = etree.fromstring(xml)
print(div)
# When doing an xpath, the results will be a list
print("-"*20)
ps = div.xpath("/div/l")
for p in ps:
print(p)
print("-"*20)
# print(ps)
print([value.values()[0] for value in ps])
print(ps[0].text == "Text")
print(div.xpath("//l"))
# We assign our first line to a variable
line_1 = div.xpath("//l")[0]
#print(dir(line_1))
print(line_1.attrib['n'])
# We look for p
print(line_1.xpath("p")) # This works
print(line_1.xpath("./p")) # This works too
print(line_1.xpath(".//p")) # This still works
print(line_1.xpath("//p")) # entire doc
root.xpath("wrong:xpath:never:works")
# We create a valid xml object
xml = <root>
<tag xmlns="http://localhost">Text</tag>
<tei:tag xmlns:tei="http://www.tei-c.org/ns/1.0">Other text</tei:tag>
<teiTwo:tag xmlns:teiTwo="http://www.tei-c.org/ns/2.0">Other text</teiTwo:tag>
</root>
root = etree.fromstring(xml)
# We register every namespaces in a dictionary using prefix as keys :
ns = {
"local" : "http://localhost", # Even if this namespace had no prefix, we can register one for it
"tei" : "http://www.tei-c.org/ns/1.0",
"two": "http://www.tei-c.org/ns/2.0"
}
print([d.text for namespace in ns
for d in root.xpath("//{namespace}:tag".format(namespace=namespace),
namespaces=ns) ])
# Here is an xml containing an xsl: for each text node of an xml file in the xpath /humanities/field,
# this will return a node <name> with the text inside
xslt_root = etree.fromstring(
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<fields><xsl:apply-templates /></fields>
</xsl:template>
<xsl:template match="/humanities/field">
<name><xsl:value-of select="./text()" /></name>
</xsl:template>
</xsl:stylesheet>)
# We transform our document to an xsl
xslt = etree.XSLT(xslt_root)
# We create some xml we need to change
xml = <humanities>
<field>History</field>
<field>Classics</field>
<field>French</field>
<field>German</field>
</humanities>
parsed_xml = etree.fromstring(xml)
# And now we process our xml :
transformed = xslt(parsed_xml)
print(transformed)
print(type(transformed))
print(type(parsed_xml))
print(transformed.xpath("//name"))
string_result = str(transformed)
print(string_result)
# Here is an xml containing an xsl: for each text node of an xml file in the xpath /humanities/field,
# this will return a node <name> with the text inside
xslt_root = etree.fromstring(
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:param name="n" />
<xsl:template match="/humanities">
<fields>
<xsl:attribute name="n">
<xsl:value-of select="$n"/>
</xsl:attribute>
<xsl:apply-templates select="field"/>
</fields>
</xsl:template>
<xsl:template match="/humanities/field">
<name><xsl:value-of select="./text()" /></name>
</xsl:template>
</xsl:stylesheet>)
# We transform our document to an xsl
xslt = etree.XSLT(xslt_root)
# We create some xml we need to change
xml = <humanities>
<category>Humanities</category>
<field>History</field>
<field>Classics</field>
<field>French</field>
<field>German</field>
</humanities>
parsed_xml = etree.fromstring(xml)
# And now we process our xml :
transformed = xslt(parsed_xml, n="'Humanities'") # Note that for a string, we encapsulate it within single quotes
print(transformed)
# Be aware that you can use xpath as a value for the argument, though it can be rather complex sometimes
transformed = xslt(parsed_xml, n=etree.XPath("//category/text()"))
print(transformed)
from xml.etree import ElementTree
with open('data/books.xml', 'rt') as f:
tree = ElementTree.parse(f)
print(tree)
from xml.etree import ElementTree
with open('data/books.xml', 'r') as f:
tree = ElementTree.parse(f)
# print(dir(tree))
for node in tree.iter():
print (node.tag, node.attrib)
print("-----")
# from xml.etree import ElementTree
# with open('data/books.xml', 'r') as f:
# tree = ElementTree.parse(f)
# # print(dir(tree))
# for node in tree.iter():
# print (node.tag, node.attrib)
# print("-----")
### To print only the groups of names and feed URLs for the podcasts,
# leaving out of all of the data in the header section by iterating
# over only the outline nodes and print the text and xmlUrl attributes.
from xml.etree import ElementTree
with open('data/podcasts.opml', 'rt') as f:
tree = ElementTree.parse(f)
print(len( list(tree.iter('outline'))))
for node in tree.iter('outline'):
name = node.attrib.get('text')
url = node.attrib.get('xmlUrl')
if name and url:
print ('\t%s :: %s' % (name, url))
else:
print (name)
for node in tree.findall('.//outline'):
url = node.attrib.get('xmlUrl')
if url:
print( url)
else:
print(node.attrib.get("text"))
print(dir(tree))
print(tree.getroot)
for node in tree.findall('.//outline/outline'):
url = node.attrib.get('xmlUrl')
print (url)
from xml.etree import ElementTree
with open('data/data.xml', 'rt') as f:
tree = ElementTree.parse(f)
node = tree.find('./with_attributes')
print (node.tag)
for name, value in sorted(node.attrib.items()):
print (' %-4s = "%s"' % (name, value))
for path in [ './child', './child_with_tail' ]:
node = tree.find(path)
print(node.tag)
print (' child node text:', node.text)
print (' and tail text :', node.tail)
from xml.etree.ElementTree import XML
parsed = XML('''
<root>
<group>
<child id="a">This is child "a".</child>
<child id="b">This is child "b".</child>
</group>
<group>
<child id="c">This is child "c".</child>
</group>
</root>
''')
print ('parsed =', parsed)
for elem in parsed:
print (elem.tag)
if elem.text is not None and elem.text.strip():
print (' text: "%s"' % elem.text)
if elem.tail is not None and elem.tail.strip():
print (' tail: "%s"' % elem.tail)
for name, value in sorted(elem.attrib.items()):
print(' %-4s = "%s"' % (name, value))
print
from xml.etree.ElementTree import Element, tostring
top = Element('top')
children = [
Element('child', num=str(i))
for i in range(3)
]
top.extend(children)
print(top)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The qutip.OptPulseProcessor is a noisy quantum device simulator integrated with the optimal pulse algorithm from the qutip.control module. It is a subclass of qutip.Processor and is equipped with a method to find the optimal pulse sequence (hence the name OptPulseProcessor) for a qutip.QubitCircuit or a list of qutip.Qobj. For the user guide of qutip.Processor, please refer to the introductory notebook.
Step2: The method load_circuit calls qutip.control.optimize_pulse_unitary and returns the pulse coefficients.
Step3: Like the Processor, the simulation is calculated with a QuTiP solver. The method run_state calls mesolve and returns the result. One can also add noise to observe the change in the fidelity, e.g. the t1 decoherence time.
Step4: Multi-qubit gate
Step5: We have single-qubit control $\sigma_x$ and $\sigma_z$, with the argument cyclic_permutation=True, it creates 3 operators each targeted on one qubit.
Step6: The interaction is generated by $\sigma_x\sigma_x$ between the qubit 0 & 1 and qubit 1 & 2. expand_operator can be used to expand the operator to a larger dimension with given target qubits.
Step8: Use the above defined control Hamiltonians, we now find the optimal pulse for the Toffoli gate with 6 time slots. Instead of a QubitCircuit, a list of operators can also be given as an input.
Step9: Merging a quantum circuit
Step10: In the above figure, the pulses from $t=0$ to $t=3$ are for the CNOT gate while the rest for are the two single qubits gates. The difference in the frequency of change is merely a result of our choice of evo_time. Here we can see that the three gates are carried out in sequence.
Step11: In this figure there are no different stages, the three gates are first merged and then the algorithm finds the optimal pulse for the resulting unitary evolution.
|
<ASSISTANT_TASK:>
Python Code:
from numpy import pi
from qutip import sigmaz, sigmax, identity, basis, fidelity, tensor
from qutip_qip.device import OptPulseProcessor
from qutip_qip.circuit import QubitCircuit
from qutip_qip.operations import expand_operator, toffoli
num_qubits = 1
# Drift Hamiltonian
H_d = sigmaz()
# The (single) control Hamiltonian
H_c = sigmax()
processor = OptPulseProcessor(num_qubits, drift=H_d)
processor.add_control(H_c, 0)
qc = QubitCircuit(num_qubits)
qc.add_gate("SNOT", 0)
# This method calls optimize_pulse_unitary
tlist, coeffs = processor.load_circuit(qc, min_grad=1e-20, init_pulse_type='RND',
num_tslots=6, evo_time=1, verbose=True)
processor.plot_pulses(title="Control pulse for the Hadamard gate", use_control_latex=False);
rho0 = basis(2,1)
plus = (basis(2,0) + basis(2,1)).unit()
minus = (basis(2,0) - basis(2,1)).unit()
result = processor.run_state(init_state=rho0)
print("Fidelity:", fidelity(result.states[-1], minus))
# add noise
processor.t1 = 40.0
result = processor.run_state(init_state=rho0)
print("Fidelity with qubit relaxation:", fidelity(result.states[-1], minus))
toffoli()
N = 3
H_d = tensor([identity(2)] * 3)
test_processor = OptPulseProcessor(N, H_d)
test_processor.add_control(sigmaz(), cyclic_permutation=True)
test_processor.add_control(sigmax(), cyclic_permutation=True)
sxsx = tensor([sigmax(),sigmax()])
sxsx01 = expand_operator(sxsx, 3, targets=[0,1])
sxsx12 = expand_operator(sxsx, 3, targets=[1,2])
test_processor.add_control(sxsx01)
test_processor.add_control(sxsx12)
def get_control_latex():
Get the labels for each Hamiltonian.
It is used in the method``plot_pulses``.
It is a 2-d nested list, in the plot,
a different color will be used for each sublist.
return ([[r"$\sigma_z^%d$" % n for n in range(test_processor.num_qubits)],
[r"$\sigma_x^%d$" % n for n in range(test_processor.num_qubits)],
[r"$g_01$", r"$g_12$" ]])
test_processor.model.get_control_latex = get_control_latex
test_processor.dims = [2,2,2]
test_processor.load_circuit([toffoli()], num_tslots=6, evo_time=1, verbose=True);
test_processor.plot_pulses(title="Contorl pulse for toffoli gate");
qc = QubitCircuit(3)
qc.add_gate("CNOT", controls=0, targets=2)
qc.add_gate("RX", targets=2, arg_value=pi/4)
qc.add_gate("RY", targets=1, arg_value=pi/8)
setting_args = {"CNOT": {"num_tslots": 20, "evo_time": 3},
"RX": {"num_tslots": 2, "evo_time": 1},
"RY": {"num_tslots": 2, "evo_time": 1}}
test_processor.load_circuit(qc, merge_gates=False, setting_args=setting_args, verbose=True);
fig, axes = test_processor.plot_pulses(title="Control pulse for a each gate in the circuit", show_axis=True);
axes[-1].set_xlabel("time");
qc = QubitCircuit(3)
qc.add_gate("CNOT", controls=0, targets=2)
qc.add_gate("RX", targets=2, arg_value=pi/4)
qc.add_gate("RY", targets=1, arg_value=pi/8)
test_processor.load_circuit(qc, merge_gates=True, verbose=True, num_tslots=20, evo_time=5);
test_processor.plot_pulses(title="Control pulse for a merged unitary evolution");
import qutip_qip
print("qutip-qip version:", qutip_qip.version.version)
from qutip.ipynbtools import version_table
version_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
sns.distplot(dataset[columns], kde = False, bins = 30, color = 'red', hist_kws=dict(edgecolor="k", linewidth=1) )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Your guess
Step2: Your guess
Step3: Your guess
Step4: Your guess
Step5: Your guess
Step6: Your guess
Step7: Your guess
Step8: Your guess
Step9: Your guess
Step10: Your guess
Step11: Your guess
Step12: Your guess
Step13: Your guess
Step14: Your guess
Step15: Your guess
Step16: (A) (1pt) Replace the 12th element in geneNames with the string "Camk2a".
Step17: (B) (1pt) Prompt the user to input a sentence using raw_input(). Print each word of their sentence on a separate line.
Step18: Try running this code yourself and observe the output. Everything prints out on its own line, which isn't what we want -- we'd usually prefer a matrix-type format. Try this slightly modified code
Step19: This gives us the matrix format we want. Make sure you understand how this code works. FYI, the "\t" is a tab character, and much like "\n", it is invisible once it's printed (it becomes a tab). The comma at the very end of the print statement suppresses the \n that print usually adds on to the end.
Step20: So this outputs the distance between the two strings, where the distance is defined as the fraction of the sequence length that is mismatched.
Step21: (C) Below is a loop that creates a list. Do the same thing but with a list comprehension instead.
|
<ASSISTANT_TASK:>
Python Code:
ages = [65, 34, 96, 47]
print len(ages)
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
ages = [65, 34, 96, 47]
print len(ages) == len(names)
ages = [65, 34, 96, 47]
for hippopotamus in ages:
print hippopotamus
ages = [65, 34, 96, 47]
print ages[1:3]
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
if "Willard" not in names:
names.append("Willard")
print names
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
ages = [65, 34, 96, 47]
for i in range(len(names)):
print names[i],"is",ages[i]
ages = [65, 34, 96, 47]
ages.sort()
print ages
ages = [65, 34, 96, 47]
ages = ages.sort()
print ages
ages = [65, 34, 96, 47]
print max(ages)
cat = "Mitsworth"
for i in range(len(cat)):
print cat[i]
cat = "Mitsworth"
print cat[:4]
str1 = "Good morning, Mr. Mitsworth."
parts = str1.split()
print parts
print str1
str1 = "Good morning, Mr. Mitsworth."
parts = str1.split(",")
print parts
names = ["Wilfred", "Manfred", "Wadsworth", "Jeeves"]
print names[-1]
oldList = [2, 2, 6, 1, 2, 6]
newList = []
for item in oldList:
if item not in newList:
newList.append(item)
print newList
# run this first!
geneNames = ["Ppia", "Gria2", "Mecp2", "Omd", "Zfp410", "Hsp1", "Mtap1a", "Cfl1",
"Slc25a40", "Dync1i1", "V1ra4", "Fmnl1", "Mtap2", "Atp5b", "Olfr259",
"Atf3", "Vapb", "Dhx8", "Slc22a15", "Orai3", "Ifitm7", "Kcna2", "Timm23", "Shank1"]
magicWords = "abracadabra"
things = [1, 2, 5, 10, 25, 50]
for i in range(len(things)):
for j in range(len(things)):
print abs(things[i] - things[j]) #absolute value of the difference
things = [1, 2, 5, 10, 25, 50]
for i in range(len(things)):
for j in range(len(things)):
print abs(things[i] - things[j]), "\t",
print ""
str1 = "Wilfred"
str2 = "Manfred"
diffs = 0
for k in range(len(str1)):
if str1[k] != str2[k]: #compare the two strings at the same index
diffs = diffs + 1
print "dist =", round(float(diffs) / len(str1), 2)
# Loop version:
import random
randomNums = []
for i in range(100):
randomNums.append(random.randint(0,10))
print randomNums
# Your list comprehension version here:
# Loop version:
import random
randomNums = []
for i in range(100):
randNum = random.randint(0,10)
if (randNum % 2) == 0:
randNumStr = str(randNum)
randomNums.append(randNumStr)
print randomNums
# Your list comprehension version here:
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
data = np.array([[4, 2, 5, 6, 7],
[ 5, 4, 3, 5, 7]])
bin_size = 3
bin_data_mean = data[:,:(data.shape[1] // bin_size) * bin_size].reshape(data.shape[0], -1, bin_size).mean(axis=-1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scrape Pageview Mobile App traffic (current api)
Step2: Scrape Pageview Desktop Site traffic (current api)
Step3: Scrape Pageview All Site traffic (current api)
Step4: Scrape Pagecount desktop Site traffic (legacy api)
Step5: Scrape Pagecount all Site traffic (legacy api)
Step6: Scrape Pagecount mobile Site traffic (legacy api)
Step7: Make a dictionary and add relevant info. timestamp = key, counts = value. Repeat for all
|
<ASSISTANT_TASK:>
Python Code:
#current
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-web',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017091000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pageview_mobile_site = api_call.json()
print(pageview_mobile_site)
#current
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-app',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017091000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pageview_mobile_app = api_call.json()
print(pageview_mobile_app)
#current
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'desktop',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017091000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pageview_desktop_site = api_call.json()
print(pageview_desktop_site)
#current
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'all-access',
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2015070100',
'end' : '2017091000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pageview_all_site = api_call.json()
print(pageview_all_site)
#Legacy
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'desktop-site',
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016071000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pagecounts_desktop_site = api_call.json()
print(pagecounts_desktop_site)
#Legacy
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'all-sites',
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016071000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pagecounts_all_sites = api_call.json()
print(pagecounts_all_sites)
#Legacy
import requests
endpoint = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access}/{granularity}/{start}/{end}'
headers={'User-Agent' : 'https://github.com/your_github_username', 'From' : 'abhiv@uw.edu'}
params = {'project' : 'en.wikipedia.org',
'access' : 'mobile-site',
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016071000'#use the first day of the following month to ensure a full month of data is collected
}
api_call = requests.get(endpoint.format(**params))
pagecounts_mobile_site = api_call.json()
print(pagecounts_mobile_site)
countresponse['items']
countresponse['items'][0]['count']
viewresponse['items'][3]['views']
pageview_mobile_site['items'][3]['timestamp']
#pageview_mobile_site
i = 0
dict_pageview_mobile_site={}
for x in pageview_mobile_site['items']:
dict_pageview_mobile_site[pageview_mobile_site['items'][i]['timestamp']] = pageview_mobile_site['items'][i]['views']
i+=1
dict_pageview_mobile_site
#pageview_desktop_site
i = 0
dict_pageview_desktop_site={}
for x in pageview_desktop_site['items']:
dict_pageview_desktop_site[pageview_desktop_site['items'][i]['timestamp']] = pageview_desktop_site['items'][i]['views']
i+=1
dict_pageview_desktop_site
#pageview_all_site
i = 0
dict_pageview_all_site={}
for x in pageview_all_site['items']:
dict_pageview_all_site[pageview_all_site['items'][i]['timestamp']] = pageview_all_site['items'][i]['views']
i+=1
dict_pageview_all_site
#pagecount_all_site
i = 0
dict_pagecounts_all_sites={}
for x in pagecounts_all_sites['items']:
dict_pagecounts_all_sites[pagecounts_all_sites['items'][i]['timestamp']] = pagecounts_all_sites['items'][i]['count']
i+=1
dict_pagecounts_all_sites
i = 0
dict_pagecounts_desktop_site={}
for x in pagecounts_desktop_site['items']:
dict_pagecounts_desktop_site[pagecounts_desktop_site['items'][i]['timestamp']] = pagecounts_desktop_site['items'][i]['count']
i+=1
i = 0
dict_pagecounts_mobile_site={}
for x in pagecounts_mobile_site['items']:
dict_pagecounts_mobile_site[pagecounts_mobile_site['items'][i]['timestamp']] = pagecounts_mobile_site['items'][i]['count']
i+=1
i = 0
dict_pageview_mobile_app={}
for x in pageview_mobile_app['items']:
dict_pageview_mobile_app[pageview_mobile_app['items'][i]['timestamp']] = pageview_mobile_app['items'][i]['views']
i+=1
dict_pageview_mobile = {}
i=0
for x in pageview_mobile_app['items']:
dict_pageview_mobile[pageview_mobile_app['items'][i]['timestamp']] = pageview_mobile_app['items'][i]['views']+pageview_mobile_site['items'][i]['views']
i+=1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Motivating Support Vector Machines
Step2: A discriminative classifier attempts to draw a line between the two sets of data. Immediately we see a problem
Step3: These are three very different separaters which perfectly discriminate between these samples. Depending on which you choose, a new data point will be classified almost entirely differently!
Step4: Notice here that if we want to maximize this width, the middle fit is clearly the best.
Step6: To better visualize what's happening here, let's create a quick convenience function that will plot SVM decision boundaries for us
Step7: Notice that the dashed lines touch a couple of the points
Step8: Let's use IPython's interact functionality to explore how the distribution of points affects the support vectors and the discriminative fit.
Step9: Notice the unique thing about SVM is that only the support vectors matter
Step10: Clearly, no linear discrimination will ever separate these data.
Step11: If we plot this along with our data, we can see the effect of it
Step12: We can see that with this additional dimension, the data becomes trivially linearly separable!
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
plt.style.use('seaborn')
from sklearn.datasets.samples_generator import make_blobs
X, y = make_blobs(n_samples=50, centers=2,
random_state=0, cluster_std=0.60)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring');
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b in [(1, 0.65), (0.5, 1.6), (-0.2, 2.9)]:
plt.plot(xfit, m * xfit + b, '-k')
plt.xlim(-1, 3.5);
xfit = np.linspace(-1, 3.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
for m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:
yfit = m * xfit + b
plt.plot(xfit, yfit, '-k')
plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)
plt.xlim(-1, 3.5);
from sklearn.svm import SVC # "Support Vector Classifier"
clf = SVC(kernel='linear')
clf.fit(X, y)
def plot_svc_decision_function(clf, ax=None):
Plot the decision function for a 2D SVC
if ax is None:
ax = plt.gca()
x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)
y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)
Y, X = np.meshgrid(y, x)
P = np.zeros_like(X)
for i, xi in enumerate(x):
for j, yj in enumerate(y):
P[i, j] = clf.decision_function([[xi, yj]])
# plot the margins
ax.contour(X, Y, P, colors='k',
levels=[-1, 0, 1], alpha=0.5,
linestyles=['--', '-', '--'])
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
from ipywidgets import interact
def plot_svm(N=10):
X, y = make_blobs(n_samples=200, centers=2,
random_state=0, cluster_std=0.60)
X = X[:N]
y = y[:N]
clf = SVC(kernel='linear')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plt.xlim(-1, 4)
plt.ylim(-1, 6)
plot_svc_decision_function(clf, plt.gca())
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none')
interact(plot_svm, N=[10, 200], kernel='linear');
from sklearn.datasets.samples_generator import make_circles
X, y = make_circles(100, factor=.1, noise=.1)
clf = SVC(kernel='linear').fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf);
r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))
from mpl_toolkits import mplot3d
def plot_3D(elev=30, azim=30):
ax = plt.subplot(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')
ax.view_init(elev=elev, azim=azim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('r')
interact(plot_3D, elev=(-90, 90), azip=(-180, 180));
clf = SVC(kernel='rbf')
clf.fit(X, y)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
plot_svc_decision_function(clf)
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=200, facecolors='none');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We generate some expert trajectories, that the discriminator needs to distinguish from the learner's trajectories.
Step2: Now we are ready to set up our AIRL trainer.
Step3: When we look at the histograms of rewards before and after learning, we can see that the learner is not perfect yet, but it made some progress at least.
|
<ASSISTANT_TASK:>
Python Code:
from stable_baselines3 import PPO
from stable_baselines3.ppo import MlpPolicy
import gym
import seals
env = gym.make("seals/CartPole-v0")
expert = PPO(
policy=MlpPolicy,
env=env,
seed=0,
batch_size=64,
ent_coef=0.0,
learning_rate=0.0003,
n_epochs=10,
n_steps=64,
)
expert.learn(1000) # Note: set to 100000 to train a proficient expert
from imitation.data import rollout
from imitation.data.wrappers import RolloutInfoWrapper
from stable_baselines3.common.vec_env import DummyVecEnv
rollouts = rollout.rollout(
expert,
DummyVecEnv([lambda: RolloutInfoWrapper(gym.make("seals/CartPole-v0"))] * 5),
rollout.make_sample_until(min_timesteps=None, min_episodes=60),
)
from imitation.algorithms.adversarial.airl import AIRL
from imitation.rewards.reward_nets import BasicShapedRewardNet
from imitation.util.networks import RunningNorm
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
import gym
import seals
venv = DummyVecEnv([lambda: gym.make("seals/CartPole-v0")] * 8)
learner = PPO(
env=venv,
policy=MlpPolicy,
batch_size=64,
ent_coef=0.0,
learning_rate=0.0003,
n_epochs=10,
)
reward_net = BasicShapedRewardNet(
venv.observation_space, venv.action_space, normalize_input_layer=RunningNorm
)
airl_trainer = AIRL(
demonstrations=rollouts,
demo_batch_size=1024,
gen_replay_buffer_capacity=2048,
n_disc_updates_per_round=4,
venv=venv,
gen_algo=learner,
reward_net=reward_net,
)
learner_rewards_before_training, _ = evaluate_policy(
learner, venv, 100, return_episode_rewards=True
)
airl_trainer.train(20000) # Note: set to 300000 for better results
learner_rewards_after_training, _ = evaluate_policy(
learner, venv, 100, return_episode_rewards=True
)
import matplotlib.pyplot as plt
import numpy as np
print(np.mean(learner_rewards_after_training))
print(np.mean(learner_rewards_before_training))
plt.hist(
[learner_rewards_before_training, learner_rewards_after_training],
label=["untrained", "trained"],
)
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic python string manipulation
Step2: CHALLENGE
Step3: Introducing TextBlob
Step4: Summarizing/keywording text
Step5: Challenge
Step6: We could continue to add on stopwords as we try to make these keywords better. But it's kind of like playing whack-a-mole
Step7: For each word $ w $ in a given document $ D $, we can multiply the term frequency
Step8: Note that TF-IDF can be tweaked in lots of other ways if you aren't getting good results.
Step9: Seeing words in context
Step10: Sentiment Analysis
|
<ASSISTANT_TASK:>
Python Code:
csvfile = open(
reader =
data = []
for line in reader:
line[3] = line[3].decode('utf-8')
# getting the number of rows
#taking a look at the first row
comment_text =
comment_text
# strings are like lists of characters
# use a colon for start:end indexes
# they can be stuck together easily
# and split apart
comment_text
split_on_questions =
# it's easy to strip whitespace off of them
for string in split_on_questions:
# and cast them to one case
cleaned =
cleaned
# join them back together
# and look for substring inside them
for row in data:
comment_text = row[-1]
blob = TextBlob(data[80][-1])
blob
# we can get lists of sentences
# lists of words
# lists of "tokens" (punctuation included)
# even parts of speech and noun phrases
word_count = collections.Counter(
word_count
stopwords = nltk.corpus.stopwords.words('english')
nltk.download()
for key in word_count.keys():
N_documents = float(len(data))
word_document_counts =
word_idf = {}
for row in data[1:]:
blob = TextBlob(row[-1].lower())
# calculate IDFs
comment = data[80][-1]
blob = TextBlob(comment.lower())
num_words_in_comment = len(blob.words)
word_count = blob.word_counts
tf_scores = {}
for word, count in word_count.iteritems():
if word not in stopwords and len(word) > 2:
tf_scores[word] =
tf_idf = {}
for word, tf in tf_scores.iteritems():
tf_idf[word] =
sorted(tf_idf.iteritems(), key=lambda k: k[1], reverse=True)[:5]
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
print stemmer.stem('political')
print stemmer.stem('politics')
print stemmer.stem('politician')
from nltk.text import Text
tokens = TextBlob(data[80][-1]).tokens
text_object = Text(tokens)
text_object.concordance('Hilary')
blob = TextBlob(data[41][-1])
blob
blob.sentiment
blob.sentences[1].sentiment
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step12: Required functions for text preprocessing
Step13: Data
Step17: Encoder-decoder
Step19: Encoder
Step21: Decoder
Step23: Loss function
Step25: We now use this to create a weight mask of 0s and 1s, where 0 corresponds to invalid locations. When we compute the cross entropy loss, we multiply by this weight mask, thus ignoring invalid locations.
Step26: As an example, let us create a prediction tensor of all ones of size (3,4,10) and a target label tensor of all ones of size (3,4). We specify the valud lengths to (4,2,0). Thus the first loss should be twice the second. And the third loss should be 0.
Step39: Training
Step41: Prediction
Step43: Evaluation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from IPython import display
try:
import torch
except ModuleNotFoundError:
%pip install -qq torch
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import data
import collections
import re
import random
import os
import requests
import zipfile
import hashlib
import time
np.random.seed(seed=1)
torch.manual_seed(1)
!mkdir figures # for saving plots
# Required functions for downloading data
def download(name, cache_dir=os.path.join("..", "data")):
Download a file inserted into DATA_HUB, return the local filename.
assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
url, sha1_hash = DATA_HUB[name]
os.makedirs(cache_dir, exist_ok=True)
fname = os.path.join(cache_dir, url.split("/")[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, "rb") as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # Hit cache
print(f"Downloading {fname} from {url}...")
r = requests.get(url, stream=True, verify=True)
with open(fname, "wb") as f:
f.write(r.content)
return fname
def download_extract(name, folder=None):
Download and extract a zip/tar file.
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == ".zip":
fp = zipfile.ZipFile(fname, "r")
elif ext in (".tar", ".gz"):
fp = tarfile.open(fname, "r")
else:
assert False, "Only zip/tar files can be extracted."
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
def read_data_nmt():
Load the English-French dataset.
data_dir = download_extract("fra-eng")
with open(os.path.join(data_dir, "fra.txt"), "r") as f:
return f.read()
def preprocess_nmt(text):
Preprocess the English-French dataset.
def no_space(char, prev_char):
return char in set(",.!?") and prev_char != " "
# Replace non-breaking space with space, and convert uppercase letters to
# lowercase ones
text = text.replace("\u202f", " ").replace("\xa0", " ").lower()
# Insert space between words and punctuation marks
out = [" " + char if i > 0 and no_space(char, text[i - 1]) else char for i, char in enumerate(text)]
return "".join(out)
def tokenize_nmt(text, num_examples=None):
Tokenize the English-French dataset.
source, target = [], []
for i, line in enumerate(text.split("\n")):
if num_examples and i > num_examples:
break
parts = line.split("\t")
if len(parts) == 2:
source.append(parts[0].split(" "))
target.append(parts[1].split(" "))
return source, target
class Vocab:
Vocabulary for text.
def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
if tokens is None:
tokens = []
if reserved_tokens is None:
reserved_tokens = []
# Sort according to frequencies
counter = count_corpus(tokens)
self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True)
# The index for the unknown token is 0
self.unk, uniq_tokens = 0, ["<unk>"] + reserved_tokens
uniq_tokens += [token for token, freq in self.token_freqs if freq >= min_freq and token not in uniq_tokens]
self.idx_to_token, self.token_to_idx = [], dict()
for token in uniq_tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):
Count token frequencies.
# Here `tokens` is a 1D list or 2D list
if len(tokens) == 0 or isinstance(tokens[0], list):
# Flatten a list of token lists into a list of tokens
tokens = [token for line in tokens for token in line]
return collections.Counter(tokens)
reduce_sum = lambda x, *args, **kwargs: x.sum(*args, **kwargs)
astype = lambda x, *args, **kwargs: x.type(*args, **kwargs)
def build_array_nmt(lines, vocab, num_steps):
Transform text sequences of machine translation into minibatches.
lines = [vocab[l] for l in lines]
lines = [l + [vocab["<eos>"]] for l in lines]
array = torch.tensor([truncate_pad(l, num_steps, vocab["<pad>"]) for l in lines])
valid_len = reduce_sum(astype(array != vocab["<pad>"], torch.int32), 1)
return array, valid_len
def load_array(data_arrays, batch_size, is_train=True):
Construct a PyTorch data iterator.
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
def truncate_pad(line, num_steps, padding_token):
Truncate or pad sequences.
if len(line) > num_steps:
return line[:num_steps] # Truncate
return line + [padding_token] * (num_steps - len(line))
def load_data_nmt(batch_size, num_steps, num_examples=600):
Return the iterator and the vocabularies of the translation dataset.
text = preprocess_nmt(read_data_nmt())
source, target = tokenize_nmt(text, num_examples)
src_vocab = Vocab(source, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
tgt_vocab = Vocab(target, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
data_iter = load_array(data_arrays, batch_size)
return data_iter, src_vocab, tgt_vocab
DATA_HUB = dict()
DATA_URL = "http://d2l-data.s3-accelerate.amazonaws.com/"
DATA_HUB["fra-eng"] = (DATA_URL + "fra-eng.zip", "94646ad1522d915e7b0f9296181140edcf86a4f5")
batch_size, num_steps = 64, 10
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size, num_steps)
class Encoder(nn.Module):
The base encoder interface for the encoder-decoder architecture.
def __init__(self, **kwargs):
super(Encoder, self).__init__(**kwargs)
def forward(self, X, *args):
raise NotImplementedError
class Decoder(nn.Module):
The base decoder interface for the encoder-decoder architecture.
def __init__(self, **kwargs):
super(Decoder, self).__init__(**kwargs)
def init_state(self, enc_outputs, *args):
raise NotImplementedError
def forward(self, X, state):
raise NotImplementedError
class EncoderDecoder(nn.Module):
The base class for the encoder-decoder architecture.
def __init__(self, encoder, decoder, **kwargs):
super(EncoderDecoder, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def forward(self, enc_X, dec_X, *args):
enc_outputs = self.encoder(enc_X, *args)
dec_state = self.decoder.init_state(enc_outputs, *args)
return self.decoder(dec_X, dec_state)
class Seq2SeqEncoder(Encoder):
The RNN encoder for sequence to sequence learning.
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs):
super(Seq2SeqEncoder, self).__init__(**kwargs)
# Embedding layer
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout)
def forward(self, X, *args):
# The output `X` shape: (`batch_size`, `num_steps`, `embed_size`)
X = self.embedding(X)
# In RNN models, the first axis corresponds to time steps
X = X.permute(1, 0, 2)
# When state is not mentioned, it defaults to zeros
output, state = self.rnn(X)
# `output` shape: (`num_steps`, `batch_size`, `num_hiddens`)
# `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)
return output, state
encoder = Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
encoder.eval()
batch_size = 4
num_steps = 7
X = torch.zeros((batch_size, num_steps), dtype=torch.long)
output, state = encoder(X)
print(output.shape)
print(state.shape)
class Seq2SeqDecoder(Decoder):
The RNN decoder for sequence to sequence learning.
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs):
super(Seq2SeqDecoder, self).__init__(**kwargs)
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout)
self.dense = nn.Linear(num_hiddens, vocab_size)
def init_state(self, enc_outputs, *args):
return enc_outputs[1]
def forward(self, X, state):
# The output `X` shape: (`num_steps`, `batch_size`, `embed_size`)
X = self.embedding(X).permute(1, 0, 2)
# Broadcast `context` so it has the same `num_steps` as `X`
context = state[-1].repeat(X.shape[0], 1, 1)
X_and_context = torch.cat((X, context), 2)
output, state = self.rnn(X_and_context, state)
output = self.dense(output).permute(1, 0, 2)
# `output` shape: (`batch_size`, `num_steps`, `vocab_size`)
# `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)
return output, state
decoder = Seq2SeqDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2)
decoder.eval()
state = decoder.init_state(encoder(X))
output, state = decoder(X, state)
print(output.shape) # (batch size, number of time steps, vocabulary size)
print(state.shape) # (num layers, batch size, num hiddens)
def sequence_mask(X, valid_len, value=0):
Mask irrelevant entries in sequences.
maxlen = X.size(1)
mask = torch.arange((maxlen), dtype=torch.float32, device=X.device)[None, :] < valid_len[:, None]
X[~mask] = value
return X
X = torch.tensor([[1, 2, 3], [4, 5, 6]])
sequence_mask(X, torch.tensor([1, 2]))
class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
The softmax cross-entropy loss with masks.
# `pred` shape: (`batch_size`, `num_steps`, `vocab_size`)
# `label` shape: (`batch_size`, `num_steps`)
# `valid_len` shape: (`batch_size`,)
def forward(self, pred, label, valid_len):
weights = torch.ones_like(label)
weights = sequence_mask(weights, valid_len)
self.reduction = "none"
unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(pred.permute(0, 2, 1), label)
weighted_loss = (unweighted_loss * weights).mean(dim=1)
return weighted_loss
loss = MaskedSoftmaxCELoss()
loss(torch.ones(3, 4, 10), torch.ones((3, 4), dtype=torch.long), torch.tensor([4, 2, 0]))
class Animator:
For plotting data in animation.
def __init__(
self,
xlabel=None,
ylabel=None,
legend=None,
xlim=None,
ylim=None,
xscale="linear",
yscale="linear",
fmts=("-", "m--", "g-.", "r:"),
nrows=1,
ncols=1,
figsize=(3.5, 2.5),
):
# Incrementally plot multiple lines
if legend is None:
legend = []
display.set_matplotlib_formats("svg")
self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1:
self.axes = [
self.axes,
]
# Use a lambda function to capture arguments
self.config_axes = lambda: set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
# Add multiple data points into the figure
if not hasattr(y, "__len__"):
y = [y]
n = len(y)
if not hasattr(x, "__len__"):
x = [x] * n
if not self.X:
self.X = [[] for _ in range(n)]
if not self.Y:
self.Y = [[] for _ in range(n)]
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
class Timer:
Record multiple running times.
def __init__(self):
self.times = []
self.start()
def start(self):
Start the timer.
self.tik = time.time()
def stop(self):
Stop the timer and record the time in a list.
self.times.append(time.time() - self.tik)
return self.times[-1]
def avg(self):
Return the average time.
return sum(self.times) / len(self.times)
def sum(self):
Return the sum of time.
return sum(self.times)
def cumsum(self):
Return the accumulated time.
return np.array(self.times).cumsum().tolist()
class Accumulator:
For accumulating sums over `n` variables.
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
Set the axes for matplotlib.
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
def grad_clipping(net, theta):
Clip the gradient.
if isinstance(net, nn.Module):
params = [p for p in net.parameters() if p.requires_grad]
else:
params = net.params
norm = torch.sqrt(sum(torch.sum((p.grad**2)) for p in params))
if norm > theta:
for param in params:
param.grad[:] *= theta / norm
def try_gpu(i=0):
Return gpu(i) if exists, otherwise return cpu().
if torch.cuda.device_count() >= i + 1:
return torch.device(f"cuda:{i}")
return torch.device("cpu")
def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):
Train a model for sequence to sequence.
def xavier_init_weights(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
if type(m) == nn.GRU:
for param in m._flat_weights_names:
if "weight" in param:
nn.init.xavier_uniform_(m._parameters[param])
net.apply(xavier_init_weights)
net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
loss = MaskedSoftmaxCELoss()
net.train()
animator = Animator(xlabel="epoch", ylabel="loss", xlim=[10, num_epochs])
for epoch in range(num_epochs):
timer = Timer()
metric = Accumulator(2) # Sum of training loss, no. of tokens
for batch in data_iter:
X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
bos = torch.tensor([tgt_vocab["<bos>"]] * Y.shape[0], device=device).reshape(-1, 1)
dec_input = torch.cat([bos, Y[:, :-1]], 1) # Teacher forcing
Y_hat, _ = net(X, dec_input, X_valid_len)
l = loss(Y_hat, Y, Y_valid_len)
l.sum().backward() # Make the loss scalar for `backward`
grad_clipping(net, 1)
num_tokens = Y_valid_len.sum()
optimizer.step()
with torch.no_grad():
metric.add(l.sum(), num_tokens)
if (epoch + 1) % 10 == 0:
animator.add(epoch + 1, (metric[0] / metric[1],))
print(f"loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} " f"tokens/sec on {str(device)}")
embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1
lr, num_epochs, device = 0.005, 300, try_gpu()
encoder = Seq2SeqEncoder(len(src_vocab), embed_size, num_hiddens, num_layers, dropout)
decoder = Seq2SeqDecoder(len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout)
net = EncoderDecoder(encoder, decoder)
train_seq2seq(net, train_iter, lr, num_epochs, tgt_vocab, device)
def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps, device, save_attention_weights=False):
Predict for sequence to sequence.
# Set `net` to eval mode for inference
net.eval()
src_tokens = src_vocab[src_sentence.lower().split(" ")] + [src_vocab["<eos>"]]
enc_valid_len = torch.tensor([len(src_tokens)], device=device)
src_tokens = truncate_pad(src_tokens, num_steps, src_vocab["<pad>"])
# Add the batch axis
enc_X = torch.unsqueeze(torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)
enc_outputs = net.encoder(enc_X, enc_valid_len)
dec_state = net.decoder.init_state(enc_outputs, enc_valid_len)
# Add the batch axis
dec_X = torch.unsqueeze(torch.tensor([tgt_vocab["<bos>"]], dtype=torch.long, device=device), dim=0)
output_seq, attention_weight_seq = [], []
for _ in range(num_steps):
Y, dec_state = net.decoder(dec_X, dec_state)
# We use the token with the highest prediction likelihood as the input
# of the decoder at the next time step
dec_X = Y.argmax(dim=2)
pred = dec_X.squeeze(dim=0).type(torch.int32).item()
# Save attention weights (to be covered later)
if save_attention_weights:
attention_weight_seq.append(net.decoder.attention_weights)
# Once the end-of-sequence token is predicted, the generation of the
# output sequence is complete
if pred == tgt_vocab["<eos>"]:
break
output_seq.append(pred)
return " ".join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq
def bleu(pred_seq, label_seq, k):
Compute the BLEU.
pred_tokens, label_tokens = pred_seq.split(" "), label_seq.split(" ")
len_pred, len_label = len(pred_tokens), len(label_tokens)
score = math.exp(min(0, 1 - len_label / len_pred))
for n in range(1, k + 1):
num_matches, label_subs = 0, collections.defaultdict(int)
for i in range(len_label - n + 1):
label_subs["".join(label_tokens[i : i + n])] += 1
for i in range(len_pred - n + 1):
if label_subs["".join(pred_tokens[i : i + n])] > 0:
num_matches += 1
label_subs["".join(pred_tokens[i : i + n])] -= 1
score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
return score
# prediction
engs = ["go .", "i lost .", "he's calm .", "i'm home ."]
fras = ["va !", "j'ai perdu .", "il est calme .", "je suis chez moi ."]
data = []
for eng, fra in zip(engs, fras):
translation, attention_weight_seq = predict_seq2seq(net, eng, src_vocab, tgt_vocab, num_steps, device)
score = bleu(translation, fra, k=2)
data.append((eng, fra, translation, score))
df = pd.DataFrame.from_records(data, columns=["English", "Truth", "Prediction", "Bleu"])
display.display(df)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make sure to restart your kernel to ensure this change has taken place.
Step2: To start, we'll create our list of users, movies and features. While the users and movies represent elements in our database, for a content-based filtering method the features of the movies are likely hand-engineered and rely on domain knowledge to provide the best embedding space. Here we use the categories of Action, Sci-Fi, Comedy, Cartoon, and Drama to describe our movies (and thus our users).
Step3: Initialize our users, movie ratings and features
Step4: Computing the user feature matrix
Step5: Next we normalize each user feature vector to sum to 1. Normalizing isn't strictly neccesary, but it makes it so that rating magnitudes will be comparable between users.
Step6: Ranking feature relevance for each user
Step7: Determining movie recommendations.
Step8: The computation above finds the similarity measure between each user and each movie in our database. To focus only on the ratings for new movies, we apply a mask to the all_users_ratings matrix.
Step9: Finally let's grab and print out the top 2 rated movies for each user
|
<ASSISTANT_TASK:>
Python Code:
!pip install tensorflow==2.1
import numpy as np
import tensorflow as tf
print(tf.__version__)
users = ['Ryan', 'Danielle', 'Vijay', 'Chris']
movies = ['Star Wars', 'The Dark Knight', 'Shrek', 'The Incredibles', 'Bleu', 'Memento']
features = ['Action', 'Sci-Fi', 'Comedy', 'Cartoon', 'Drama']
num_users = len(users)
num_movies = len(movies)
num_feats = len(features)
num_recommendations = 2
# each row represents a user's rating for the different movies
users_movies = tf.constant([
[4, 6, 8, 0, 0, 0],
[0, 0, 10, 0, 8, 3],
[0, 6, 0, 0, 3, 7],
[10, 9, 0, 5, 0, 2]],dtype=tf.float32)
# features of the movies one-hot encoded
# e.g. columns could represent ['Action', 'Sci-Fi', 'Comedy', 'Cartoon', 'Drama']
movies_feats = tf.constant([
[1, 1, 0, 0, 1],
[1, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[1, 0, 1, 1, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1]],dtype=tf.float32)
users_feats = #TODO
users_feats
users_feats = users_feats/tf.reduce_sum(users_feats,axis=1,keepdims=True)
users_feats
top_users_features = tf.nn.top_k(users_feats, num_feats)[1]
top_users_features
for i in range(num_users):
feature_names = [features[index] for index in top_users_features[i]]
print('{}: {}'.format(users[i],feature_names))
users_ratings = #TODO
users_ratings
users_ratings_new = tf.where(tf.equal(users_movies, tf.zeros_like(users_movies)),
users_ratings,
tf.zeros_like(tf.cast(users_movies, tf.float32)))
users_ratings_new
top_movies = tf.nn.top_k(users_ratings_new, num_recommendations)[1]
top_movies
for i in range(num_users):
movie_names = [movies[index] for index in top_movies[i]]
print('{}: {}'.format(users[i],movie_names))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read Data and Select Seed Point(s)
Step2: ConnectedThreshold
Step3: ConfidenceConnected
Step4: VectorConfidenceConnected
Step5: Clean up, Clean up...
Step6: And now we compare the original segmentation to the segmentation after clean up (using the GUI you can zoom in on the region of interest for a closer look).
|
<ASSISTANT_TASK:>
Python Code:
# To use interactive plots (mouse clicks, zooming, panning) we use the notebook back end. We want our graphs
# to be embedded in the notebook, inline mode, this combination is defined by the magic "%matplotlib notebook".
%matplotlib notebook
import SimpleITK as sitk
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
import gui
# Using an external viewer (ITK-SNAP or 3D Slicer) we identified a visually appealing window-level setting
T1_WINDOW_LEVEL = (1050, 500)
img_T1 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT1.nrrd"))
# Rescale the intensities and map them to [0,255], these are the default values for the output
# We will use this image to display the results of segmentation
img_T1_255 = sitk.Cast(
sitk.IntensityWindowing(
img_T1,
windowMinimum=T1_WINDOW_LEVEL[1] - T1_WINDOW_LEVEL[0] / 2.0,
windowMaximum=T1_WINDOW_LEVEL[1] + T1_WINDOW_LEVEL[0] / 2.0,
),
sitk.sitkUInt8,
)
point_acquisition_interface = gui.PointDataAquisition(img_T1, window_level=(1050, 500))
# preselected seed point in the left ventricle
point_acquisition_interface.set_point_indexes([(132, 142, 96)])
initial_seed_point_indexes = point_acquisition_interface.get_point_indexes()
seg_explicit_thresholds = sitk.ConnectedThreshold(
img_T1, seedList=initial_seed_point_indexes, lower=100, upper=170
)
# Overlay the segmentation onto the T1 image
gui.MultiImageDisplay(
image_list=[sitk.LabelOverlay(img_T1_255, seg_explicit_thresholds)],
title_list=["connected threshold result"],
)
seg_implicit_thresholds = sitk.ConfidenceConnected(
img_T1,
seedList=initial_seed_point_indexes,
numberOfIterations=0,
multiplier=2,
initialNeighborhoodRadius=1,
replaceValue=1,
)
gui.MultiImageDisplay(
image_list=[sitk.LabelOverlay(img_T1_255, seg_implicit_thresholds)],
title_list=["confidence connected result"],
)
img_T2 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT2.nrrd"))
img_multi = sitk.Compose(img_T1, img_T2)
seg_implicit_threshold_vector = sitk.VectorConfidenceConnected(
img_multi, initial_seed_point_indexes, numberOfIterations=2, multiplier=4
)
gui.MultiImageDisplay(
image_list=[sitk.LabelOverlay(img_T1_255, seg_implicit_threshold_vector)],
title_list=["vector confidence connected result"],
)
vectorRadius = (1, 1, 1)
kernel = sitk.sitkBall
seg_implicit_thresholds_clean = sitk.BinaryMorphologicalClosing(
seg_implicit_thresholds, vectorRadius, kernel
)
gui.MultiImageDisplay(
image_list=[
sitk.LabelOverlay(img_T1_255, seg_implicit_thresholds),
sitk.LabelOverlay(img_T1_255, seg_implicit_thresholds_clean),
],
shared_slider=True,
title_list=["before morphological closing", "after morphological closing"],
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
tf.random.set_seed(10)
def get_values():
A = tf.random.normal([100,100])
B = tf.random.normal([100,100])
return A,B
@tf.function
def compute():
A,B = get_values()
return tf.reduce_sum(tf.matmul(A,B))
result = compute()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: Variables needed for analysis
Step3: Factory of different models
Step4: Define classifiers (that will be compared)
Step5: Create a copy of the factory with all classifiers
Step6: Training
Step7: Everybody loves plots!
Step8: Plot importances of features
Step9: Now feature_importances is object that can be plotted
Step10: Plot learning curves to see possible overfitting of trained classifier
Step11: Plot correlation between features
Step12: Plot data information
Step13: Plot distribution for each feature
Step14: Plot predictions distributions
Step15: ROC curves (receiver operating characteristic)
Step16: Plot 'flatness' of classifier prediction
Step17: Quality on different metrics
Step18: The same using plotly
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
!cd toy_datasets; wget -O MiniBooNE_PID.txt -nc MiniBooNE_PID.txt https://archive.ics.uci.edu/ml/machine-learning-databases/00199/MiniBooNE_PID.txt
import numpy, pandas
from rep.utils import train_test_split
import numpy, pandas
from rep.utils import train_test_split
from sklearn.metrics import roc_auc_score
data = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep='\s*', skiprows=[0], header=None, engine='python')
labels = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep=' ', nrows=1, header=None)
labels = [1] * labels[1].values[0] + [0] * labels[2].values[0]
data.columns = ['feature_{}'.format(key) for key in data.columns]
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, train_size=0.5)
train_variables = ["feature_new01: feature_0/feature_1", "feature_2", "feature_26", "feature_12", "feature_24",
"feature_25", "feature_16",]
plot_variables = train_variables + ['feature_3']
from rep.metaml import ClassifiersFactory
from rep.estimators import TMVAClassifier, SklearnClassifier, XGBoostClassifier
from sklearn.ensemble import AdaBoostClassifier
factory = ClassifiersFactory()
# There are different ways to add classifiers to Factory:
factory.add_classifier('tmva', TMVAClassifier(NTrees=50, features=train_variables, Shrinkage=0.05))
factory.add_classifier('ada', AdaBoostClassifier(n_estimators=10))
factory['xgb'] = XGBoostClassifier(features=train_variables)
from copy import deepcopy
factory_copy = deepcopy(factory)
%time factory.fit(train_data, train_labels, features=train_variables)
pass
factory.predict_proba(train_data)
%time factory_copy.fit(train_data, train_labels)
pass
report = factory.test_on(test_data, test_labels)
features_importances = report.feature_importance()
features_importances.plot()
features_importances.plot_plotly('importances', figsize=(15, 6))
from rep.report.metrics import RocAuc
learning_curve = report.learning_curve(RocAuc(), metric_label='ROC AUC', steps=1)
learning_curve.plot(new_plot=True)
# plotting the same curve (without recomputing) using different plotting library
learning_curve.plot_plotly(plotly_filename='learning curves', figsize=(18, 8))
correlation_pairs = []
correlation_pairs.append((plot_variables[0], plot_variables[1]))
correlation_pairs.append((plot_variables[0], plot_variables[2]))
report.scatter(correlation_pairs, alpha=0.01).plot()
# plot correlations between variables for signal-like and bck-like events
report.features_correlation_matrix(features=plot_variables).plot(new_plot=True, show_legend=False, figsize=(7, 5))
report.features_correlation_matrix_by_class(features=plot_variables).plot(new_plot=True, show_legend=False, figsize=(15, 5))
# plot correlations between variables just for bck-like events
corr = report.features_correlation_matrix_by_class(features=plot_variables[:4], labels_dict={0: 'background'}, grid_columns=1)
corr.plot_plotly(plotly_filename='correlations', show_legend=False, fontsize=8, figsize=(8, 6))
# use just common features for all classifiers
report.features_pdf().plot()
# use all features in data
report.features_pdf(data.columns).plot_plotly('distributions')
report.prediction_pdf().plot(new_plot=True, figsize = (9, 4))
report.prediction_pdf(labels_dict={0: 'background'}, size=5).plot_plotly('models pdf')
report.roc().plot(xlim=(0.5, 1))
report.roc(physical_notion=False).plot(ylim=(0.5, 1))
# plot the same distribution using interactive plot
report.roc().plot_plotly(plotly_filename='ROC')
efficiencies = report.efficiencies(['feature_3'], ignored_sideband=0.01)
efficiencies_with_errors = report.efficiencies(['feature_3'], errors=True, bins=15, ignored_sideband=0.01)
efficiencies.plot(figsize=(18, 25), fontsize=12, show_legend=False)
efficiencies_with_errors.plot(figsize=(18, 25), fontsize=12, show_legend=False)
efficiencies.plot_plotly("efficiencies", show_legend=False, figsize=(18, 20))
efficiencies_with_errors.plot_plotly("efficiencies error", show_legend=False, figsize=(18, 20))
# define metric functions of interest
def AMS(s, b):
b_reg = 0.01
radicand = 2 *( (s+b+b_reg) * numpy.log (1.0 + s/(b+b_reg)) - s)
return numpy.sqrt(radicand)
metrics = report.metrics_vs_cut(AMS, metric_label='AMS')
metrics.plot(new_plot=True, figsize=(15, 6))
from rep.report.metrics import significance
metrics = report.metrics_vs_cut(significance, metric_label='significance')
metrics.plot(new_plot=True, figsize=(15, 6))
metrics.plot_plotly('metrics')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using paths
Step2: A more complex example
Step3: Setting up the database
Step4: Sessions
Step5: We'd usually only want to do this metadata creation once-ever; this setup procedure can be recorded by changing the database stage
Step6: Users
Step7: Test how fast we can write into the database
Step8: Post-processing
Step10: SQL format
Step11: Sync points
|
<ASSISTANT_TASK:>
Python Code:
# import sqlexperiment as sqle
# from sqlexperiment import experimentlog
from explogger import ExperimentLog
# log some JSON data
e = ExperimentLog(":memory:", ntp_sync=False)
e.log("mouse", data={"x":0, "y":0})
e.log("mouse", data={"x":0, "y":1})
e.log("mouse", data={"x":0, "y":2})
e.close()
# from experimentlog import ExperimentLog
from explogger import extract
import logging
e = ExperimentLog(":memory:", ntp_sync=False)
## shows how to add the SQL logger
sql_handler = e.get_logger()
sql_handler.setLevel(logging.INFO)
log_formatter = logging.Formatter(fmt="%(asctime)s [%(levelname)-5.5s] %(message)s",
datefmt='%m-%d %H:%M')
sql_handler.setFormatter(log_formatter)
logging.getLogger().addHandler(sql_handler)
# use the logger
logging.info("Some information")
logging.info("Some more information")
# get the extracted logs as a line-separated single string
print("All logs")
print(extract.get_logs(e.cursor))
print("Just the log for this run")
print(extract.get_logs(e.cursor, run=e.run_id))
e.close()
e = ExperimentLog(":memory:", ntp_sync=False)
e.cd("/Experiment/Condition1")
e.log("mouse", data={"x":0, "y":0})
e.log("mouse", data={"x":0, "y":1})
e.log("mouse", data={"x":0, "y":2})
e.cd("/Experiment/Condition2")
e.log("mouse", data={"x":0, "y":0})
e.log("mouse", data={"x":0, "y":1})
e.log("mouse", data={"x":0, "y":2})
e.close()
import IPython.nbconvert
from IPython.core.display import HTML
def md_html(md):
return HTML(IPython.nbconvert.filters.markdown.markdown2html(md))
from explogger import report
md_html(report.string_report(e.cursor))
# from experimentlog import ExperimentLog, np_to_str, str_to_np
import numpy as np
## open a connection to a database; will be created if it does not exist.
# here we use a memory database so the results are not stored to disk
e = ExperimentLog(":memory:", ntp_sync=False)
# check if we've already set everything up
# note we use the special .meta field to access persistent metadata
if e.meta.stage=="init":
e.create("STREAM", name="mouse", description="A time series of x,y cursor positions",
# the data is optional, and can contain anything you want
data={
"sample_rate": 60,
"dpi": 3000,
"mouse_device":"Logitech MX600"})
# and a post-condition questionnaire
e.create("STREAM", name="satisfaction",
description="A simple satisfaction score",
# here, we store the questions used for future reference
data={
"questions":["How satisfied were you with your performance?",
"How satisfied were you with the interface?"]}
)
if e.meta.stage=="init":
# We'll register an experiment, with three different conditions
e.create("SESSION", "Experiment", description="The main experiment",
data={"target_size":40.0, "cursor_size":5.0})
e.create("SESSION","ConditionA",description="Condition A:circular targets",
data={"targets":["circle"]})
e.create("SESSION","ConditionB", description="Condition B:square targets",
data={"targets":["square"]})
e.create("SESSION","ConditionC", description="Condition C:mixed targets",
data={"targets":["circle","square"]})
# mark the database as ready to log data
# meta is a special field that looks like an object, but is actually backed
# onto the database. Any field can be read or written to, as long as the value
# can be dumped to JSON
e.meta.stage="setup"
from explogger import pseudo
user = pseudo.get_pseudo()
print(user)
# now register the user with the database
e.create("USER", name=user, data={"age":30, "leftright":"right"})
# note that passing the session="" parameter automatically
# binds to that session prototype at the start of the session
e.enter("Experiment", session="Experiment")
# attach the user to this experiment, and thus to all conditions, etc.
e.bind("USER", user)
e.enter("ConditionA", session="ConditionA")
# calling enter() without any argument creates a numbered repetition (in this case, 0)
e.enter()
print(e.session_path)
print(e.bindings)
# log some data
e.log("mouse", data={"x":0, "y":10})
e.log("mouse", data={"x":0, "y":20})
%%timeit -n 50000
e.log("mouse", data={"x":20, "y":20})
# log questionnaire output
e.log("satisfaction", data={"q1":4,"q2":5})
# leave this repetition
e.leave()
# move out of condition A
e.leave()
e.enter("ConditionB")
# could log more stuff...
from explogger import ExperimentLog, np_to_str#, str_to_np
x = np.random.uniform(-1,1,(16,16))
# if we need to attach binary data to a log file (e.g. an image), we can do this:
# in general, it is best to avoid using blobs unless absolutely necessary
i = e.log("important_matrix", binary=np_to_str({"matrix":(x)}))
# back to the root -- here we mark this session (ConditionB) as being invalid.
e.leave(valid=False)
e.leave()
# end the run; normally you would not need to do this, since
# e.close() does this automatically -- but here we keep the DB
# open to make it quicker to demo querying it
e.end()
# print some results with raw SQL queries
mouse_log = e.cursor.execute("SELECT time, json FROM mouse", ())
print("\n".join([str(m) for m in mouse_log.fetchone()]))
from explogger import report
import IPython.nbconvert
from IPython.core.display import HTML
def md_html(md):
return HTML(IPython.nbconvert.filters.markdown.markdown2html(md))
md_html(report.string_report(e.cursor))
# should only do this when all data is logged; otherwise there may be
# a performance penalty
e.add_indices()
# make the new table -- must have a reference to the main
# log table
e.execute(CREATE TABLE accelerometer
(id INTEGER PRIMARY KEY, device INT, x REAL, y REAL, z REAL, log INT,
FOREIGN KEY(log) REFERENCES log(id))
)
# register a new stream
e.create("STREAM", name="acc", description="A time series of accelerometer values")
# now log a new value, put it into the separate accelerometer table and link
# it to the main log
def log_acc(dev,x,y,z):
log_id = e.log("acc")
e.execute("INSERT INTO accelerometer VALUES (?,?,?,?,?)",
(dev, x, y, z, log_id))
e.meta.title="TestSet-1"
e.meta.institution="University of Glasgow"
e.meta.funder="ABC:XXX:101"
e.meta.ethics="CSEnnnn"
e.meta.authors="John Williamson"
e.meta.license="CC-BY-SA 2.0"
e.meta.confidential="no"
e.meta.paper="'A good paper', Williamson J., Proceedings of Things International 2016, pp.44-46"
e.meta.description="A study of the experimental logging process. Includes numerous repetitive examples of simple logged data."
e.meta.short_description="A quick logging test."
e.meta.doi= "DOI:xxxxxx"
print(dir(e.meta))
md_html(report.string_readme(e.cursor))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the first file
Step2: and the seconed one
Step3: The contour plot of the joint distribution of two variables (columns) is colored with a custom colorscale
Step4: The function kde_scipy returns data for Plotly contour plot of the estimated 2D distribution
Step5: Contour plot of the joint distribution of data from the first file
Step6: Contour plot of the joint distribution of data from the second file
Step7: One notices that the second contourplot illustrates a mixture of two bivariate
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import seaborn as sns
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
%matplotlib inline
xl = pd.ExcelFile("Data/CSCEng.xls")
dfc = xl.parse("Sheet1")
dfc.columns
xl = pd.ExcelFile("Data/SystEng.xls")
dfi = xl.parse("Sheet1")
dfi.columns
cubehelix_cs=[[0.0, '#fcf9f7'],
[0.16666666666666666, '#edcfc9'],
[0.3333333333333333, '#daa2ac'],
[0.5, '#bc7897'],
[0.6666666666666666, '#925684'],
[0.8333333333333333, '#5f3868'],
[1.0, '#2d1e3e']]
def kde_scipy( vals1, vals2, (a,b), (c,d), N ):
#vals1, vals2 are the values of two variables (columns)
#(a,b) interval for vals1; usually larger than (np.min(vals1), np.max(vals1))
#(c,d) -"- vals2
x=np.linspace(a,b,N)
y=np.linspace(c,d,N)
X,Y=np.meshgrid(x,y)
positions = np.vstack([Y.ravel(), X.ravel()])
values = np.vstack([vals1, vals2])
kernel = st.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
return [x, y, Z]
import plotly.plotly as py
from plotly.graph_objs import *
def make_kdeplot(varX, varY, (a,b), (c,d), N, colorsc, title):
#varX, varY are lists, 1d numpy.array(s), or dataframe columns, storing the values of two variables
x, y, Z = kde_scipy(varY, varX, (a,b), (c,d), N )
data = Data([
Contour(
z=Z,
x=x,
y=y,
colorscale=colorsc,
#reversescale=True,
opacity=0.9,
contours=Contours(
showlines=False)
),
])
layout = Layout(
title= title,
font= Font(family='Georgia, serif', color='#635F5D'),
showlegend=False,
autosize=False,
width=650,
height=650,
xaxis=XAxis(
range=[a,b],
showgrid=False,
nticks=7
),
yaxis=YAxis(
range=[c,d],
showgrid=False,
nticks=7
),
margin=Margin(
l=40,
r=40,
b=85,
t=100,
),
)
return Figure( data=data, layout=layout )
N=200
a,b=(5,11)
fig=make_kdeplot(dfc['multiannual'], dfc['bachelor-th'], (a,b), (a,b),
N, cubehelix_cs,'kde plot of two sets of data' )
py.sign_in('empet', 'my_api_key')
py.iplot(fig, filename='kde-2D-CSCE')
a, b=(4,12)
fig=make_kdeplot(dfi['multiannual'], dfi['bachelor-th'], (a,b), (a,b),
N, cubehelix_cs, 'kde plot of two sets of data')
py.iplot(fig, filename='kde-2D-SE')
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv')
virginica = df.loc[df.Name == "Iris-virginica"]
a, b=(5,8.5)
c,d=(2,4)
N=100
fig=make_kdeplot(virginica.SepalLength, virginica.SepalWidth, (a,b), (c,d),
N, cubehelix_cs, 'kde plot of joint distribution for virginica SepalLength and SepalWidth')
py.iplot(fig, filename='virginica-sepal-length-vs-width')
from IPython.core.display import HTML
def css_styling():
styles = open("./custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1. Install and import libraries
Step3: 2. Load dataset, in this case MovieLens data
Step4: 3. Convert dataset to Dataframe
Step6: 4. Determine characteristics of the MovieLens data (optional)
Step7: This figure shows that the average rating of a movie is actually slightly higher than 3.
Step8: 6. Calculate similarity and find nearest neighbors
Step10: 6.A.1. Pearson's User-Based Approach
Step11: find nearest neighbors
Step13: Compute Predictions #1
Step14: 6.A.1. Pearson's Item-Based Approach
Step15: 6.A.2. Calculate Constraint Pearson Correlation
Step16: 7.A. Implement Bayes
Step17: [((2.0, 593.0), (1.0, 5.0)), ((2.0, 1955.0), (1.0, 4.0)), ((5.0, 3476.0), (1.0, 3.0)), ((5.0, 1093.0), (1.0, 2.0)), ((6.0, 3508.0), (1.0, 3.0))]
Step18: accuracy = (predicted_rating == actual_rating)/total_num_ratings = 0.162442085039
Step19: actual
Step20: Output example
Step21: Output example
Step22: 1000209
Step23: [(1.0, (1, 1197)), (1.0, (1, 938))]
Step24: [(1, 1197, 1.0), (1, 938, 1.0)]
Step25: [(1.0, (2, 593)), (1.0, (2, 1955))]
Step26: [(2, 593, 1.0), (2, 1955, 1.0)]
Step27: [((1, 3.0), 1), ((1, 4.0), 1)]
Step28: 1000209
Step29: 30200
Step30: [((1, 3.0), 1), ((1, 4.0), 1)]
Step31: [((3577, 5.0), 29), ((1260, 2.0), 13)]
Step32: [(3577, (5.0, 29)), (1260, (2.0, 13))]
Step33: [(1, 53), (2, 129)]
Step34: [(2850, (43, (4.0, 12))), (2850, (43, (1.0, 5)))]
Step35: [(2850, 1.0, 0.11627906976744186), (2850, 3.0, 0.18604651162790697)]
Step36: [((1197, 3.0), 1), ((938, 4.0), 1)]
Step37: 1000209
Step38: 18530
Step39: [((1197, 3.0), 1), ((938, 4.0), 1)]
Step42: [((3577, 5.0), 3), ((1260, 2.0), 6)]
Step43: [(391, (3.0, 18)), (518, (4.0, 22))]
Step44: [(1, 2077), (2, 701)]
Step45: [(3808, (44, (5.0, 17))), (3808, (44, (3.0, 8)))]
Step46: [(3808, 5.0, 0.38636363636363635), (3808, 4.0, 0.36363636363636365)]
Step47: 1000209
Step48: [(1.0, 0.05616226208722377), (2.0, 0.1075345252842156)]
Step49: [(1.0, ((1, 914), 0.05616226208722377)), (1.0, ((1, 594), 0.05616226208722377))]
Step50: [((327, 1.0), ((1248, 0.05616226208722377), 0.038135593220338986)), ((327, 1.0), ((1254, 0.05616226208722377), 0.038135593220338986))]
Step51: [((1644, 5.0), ((1605, 0.22626271109338147, 0.038381742738589214), 0.056842105263157895)), ((1644, 5.0), ((1451, 0.22626271109338147, 0.3022636484687084), 0.056842105263157895))]
Step52: [((2168, 135, 4.0), 0.13697613242692502), ((4808, 135, 4.0), 0.12445827900425674)]
Step53: umrCombo_test.count() = 501170
Step54: [((4335, 1588), [(3.0, 0.5498862085999521), (1.0, 0.016548382705956422), (2.0, 0.13615664002520045), (4.0, 0.32236074697317796), (5.0, 0.025783030822306607)]), ((4728, 1894), [(5.0, 0.01634723322124617), (3.0, 0.7342812664378788), (4.0, 0.256444827101078), (1.0, 0.005091044955245684), (2.0, 0.1289571243789302)])]
Step55: 7.A.1. Implementing Bayes MAP
Step56: [((4335, 1588), 0), ((4728, 1894), 2)]
Step57: [((3491, 3699), (4.0, 0)), ((1120, 1654), (4.0, 0))]
Step58: 900042
Step59: [(2, 1955, 4.0)]
Step60: actual
Step61: 7.A.2. Implementing Bayes MSE
Step62: [((4335, 1588), 3.3568784305604584), ((4728, 1894), 3.5733645675372854)]
Step63: [((1120, 1654), (4.0, 3.44226621556054)), ((4439, 3005), (3.0, 3.360422113015879))]
Step64: actual
Step65: 7.A.3. Implementing Bayes MAE
Step66: [((4335, 1588), 4), ((4728, 1894), 2)]
Step67: [((1120, 1654), (4.0, 2)), ((4169, 2723), (3.0, 3))]
Step68: actual
|
<ASSISTANT_TASK:>
Python Code:
import datetime, time
# timestamp is not correct; it is 8 hours ahead
print (datetime.datetime.now() - datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
import importlib
import pip
def _install(package):
pip.main(['install', package])
def _import(package):
importlib.import_module(package)
def install_and_import(package):
try:
_import(package)
except ImportError:
_install(package)
# install PyMC
install_and_import("git+git://github.com/pymc-devs/pymc.git")
# zip up source_dir located in GitHub remote_url's remote_branch and add it to Spark's source context
remote_url = "https://github.com/lab41/hermes.git"
remote_branch = "master"
source_dir = "src"
debug = True
# helper functions
import os
import functools
def _list_all_in_dir(dir_path):
for path, subdirs, files in os.walk(dir_path):
for filename in files:
print os.path.join(path, filename)
def _zip_dir(srcdir_path, zipfile_handler):
try:
zipfile_handler.writepy(srcdir_path)
finally:
zipfile_handler.close()
def trackcalls(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
wrapper.has_been_called = True
return func(*args, **kwargs)
wrapper.has_been_called = False
return wrapper
@trackcalls
def _add_zipfile_to_sc(zipfile_path):
sc.addPyFile(zipfile_path)
import git
import os
import tempfile
import shutil
import zipfile
# create a temporary directory
tmpdir_path = tempfile.mkdtemp()
if debug: print "temporary directory: %s\n" % tmpdir_path
# ensure file is read/write by creator only
saved_umask = os.umask(0077)
# create a zipfile handler to zip the necessary files
ziptmpdir_path = tempfile.mkdtemp()
if debug: print "temporary directory for zip file: %s\n" % ziptmpdir_path
zipfile_path = ziptmpdir_path + "/hermes_src_2.zip"
if debug: print "zip file's path: %s\n" % zipfile_path
zipfile_handler = zipfile.PyZipFile(zipfile_path, "w")
# make zipfile handler verbose for debugging
zipfile_handler.debug = 3
try:
# clone "framework" branch from GitHub into temporary directory
local_branch = git.Repo.clone_from(remote_url, tmpdir_path, branch=remote_branch)
if debug: print "current branch: %s\n" % local_branch.head.ref
if debug: print "list all in %s:" % tmpdir_path; _list_all_in_dir(tmpdir_path); print "\n"
# zip "hermes" directory
if debug: print "zipping: %s\n" % os.path.join(tmpdir_path, source_dir)
_zip_dir(os.path.join(tmpdir_path, source_dir), zipfile_handler)
# check zip file
if debug: print "Is zip file %s valid? %s\n" % (zipfile_path, zipfile.is_zipfile(zipfile_path))
# add zip to SparkContext
# note: you can only add zip to SparkContext one time
if not _add_zipfile_to_sc.has_been_called:
if debug: print "add zip file %s into spark context\n" % zipfile_path
_add_zipfile_to_sc(zipfile_path)
else:
if debug: print "zip file %s is already added into spark context; will not re-add\n" % zipfile_path
except IOError as e:
raise e
else:
os.remove(zipfile_path)
finally:
os.umask(saved_umask)
shutil.rmtree(tmpdir_path)
shutil.rmtree(ziptmpdir_path)
# import the required modules from Hermes
from src.algorithms import performance_metrics as pm
from src.data_prep import movieLens_vectorize as mv
from src.utils import save_load as sl
# import other modules
import os
import time
class Timer(object):
To time how long a particular function runs.
Example:
import Timer
with Timer() as t:
somefunction()
print("somefunction() takes %s seconds" % t.secs)
print("somefunction() takes %s milliseconds" % t.msecs)
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000
# ratings_json_path
# movies_json_path
def convert_dataset_to_dataframe(dataset_path):
df = sqlCtx.read.json(dataset_path, None)
df = df.repartition(sc.defaultParallelism * 3)
return df
# obtaining ratings dataframe
ratingsdf = convert_dataset_to_dataframe(ratings_json_path)
# obtaining movies dataframe
moviesdf = convert_dataset_to_dataframe(movies_json_path)
# extract most commonly used vectors to be used later on
# 1. using ratingsdf
# a. [(user_id, movie_id, rating)]
umr = ratingsdf.map(lambda row: (row.user_id, row.movie_id, row.rating))
# b. [(user_id, movie_id, rating)] where rating >= 3
umr_weighted = umr.filter(lambda (user_id, movie_id, rating): rating >= 3)
print "-" * 80
print "format: [(user_id, movie_id, rating)]\n"
print "umr:\n", umr.take(2)
print "umr_weighted:\n", umr_weighted.take(2)
print "-" * 80
print "\nTo identify user-to-user similarity:"
print "format: [(movie_id, (user_id, rating))]\n"
# c. [(movie_id, (user_id, rating)] -> to identify user-to-user similarity
m_ur = ratingsdf.map(lambda row: (row.movie_id, (row.user_id, row.rating)))
# d. [(movie_id, (user_id, rating)] where rating >= 3
m_ur_weighted = m_ur.filter(lambda (movie_id, (user_id, rating)): rating >= 3)
print "m_ur:\n", m_ur.take(2)
print "m_ur_weighted (aka rating >=3):\n", m_ur_weighted.take(2)
print "-" * 80
print "\nTo identify movie-to-movie similarity:"
print "format: [(user_id, (movie_id, rating))]\n"
# e. [(user_id, (movie_id, rating))] -> to identify movie-to-movie similarity
u_mr = ratingsdf.map(lambda row: (row.user_id, (row.movie_id, row.rating)))
# f. [(user_id, (movie_id, rating))] where rating >= 3
u_mr_weighted = u_mr.filter(lambda (user_id, (movie_id, rating)): rating >= 3)
print "um_r:\n", u_mr.take(2)
print "um_r_weighted (aka rating >=3):\n", u_mr_weighted.take(2)
print "-" * 80
# total number of distinct users
num_distinct_users = ratingsdf.map(lambda row: row.user_id).distinct().count()
num_users = ratingsdf.map(lambda row: row.user_id).count()
print "total number of distinct users = ", num_distinct_users
print "total number of users = ", num_users
# total number of ratings
# should be the same as num_users
num_ratings = ratingsdf.map(lambda row: row.rating).count()
print "total number of ratings = ", num_ratings
# total number of distinct movies
num_distinct_movies = moviesdf.map(lambda row: row.movie_id).distinct().count()
num_movies = moviesdf.map(lambda row: row.movie_id).count()
print "total number of distinct movies = ", num_distinct_movies
print "total number of movies = ", num_movies
# what is the average number of ratings a user rates = number of ratings / number of users
# round it to the fourth digit
avg_num_ratings_per_user = round(float(num_ratings) / float(num_distinct_users), 4)
print "average number of ratings a user rates = ", avg_num_ratings_per_user
# what is the average number of ratings a movie receives = number of ratings / number of movies
avg_num_ratings_per_movie = round(float(num_ratings) / float(num_distinct_movies), 4)
print "average number of ratings a movie receives = ", avg_num_ratings_per_movie
# completeness = number of ratings / (number of users * number of movies)
completeness = round(float(num_ratings) / (float(num_distinct_users) * float(num_distinct_movies)), 4)
print "completeness = ", completeness
# mean rating
mean_rating = ratingsdf.map(lambda row: row.rating).mean()
print "mean rating = ", mean_rating
# mean rating per movie
# [(movie_id, rating)]
movie_rating_pair = ratingsdf.map(lambda row: (row.movie_id, row.rating))
combineByKey() requires 3 functions:
* createCombiner: first aggregation step for each key
-> lambda first_rating: (first_rating, 1)
* mergeValue: what to do when a combiner is given a new value
-> lambda x, first_rating: x[0] + first_rating, x[1] + 1
-> lambda thisNewRating_thisNumRating, firstRating: thisNewRating + firstRating, thisNumRating + 1
* mergeCombiner: how to merge two combiners
-> lambda x, y: (x[0] + y[0], x[1] + y[1])
-> lambda sumRating1_numRating1, sumRating2_numRating2: (sumRating1 + sumRating2, numRating1 + numRating2)
# [(movie_id, (sum_rating, num_rating))]
movie_sumRating_numRating_pair = movie_rating_pair.combineByKey(
lambda first_rating: (first_rating, 1),
lambda x, first_rating: (x[0] + first_rating, x[1] + 1),
lambda x, y: (x[0] + y[0], x[1] + y[1]))
# [(movie_id, mean_rating)]
movie_meanRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (movie_id, sum_rating/num_rating))
movie_meanRating_pair.take(3)
# meanRating_numRating_pair will be used in plotting in the next cell
# where _1 = mean rating of the movie
# _2 = number of users who review the movie
# [(mean_rating, num_rating)]
meanRating_numRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (sum_rating/num_rating, num_rating))
meanRating_numRating_pair_df = meanRating_numRating_pair.toDF()
meanRating_numRating_pair_df.show()
# plot mean rating per movie
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
meanRating_numRating_pair = movie_sumRating_numRating_pair.map(lambda (movie_id, (sum_rating, num_rating)): (sum_rating/num_rating, num_rating))
meanRating_numRating_pair_df = meanRating_numRating_pair.toDF()
meanRating_numRating_pair_panda_df = meanRating_numRating_pair_df.toPandas()
plot = meanRating_numRating_pair_panda_df.plot(
x="_2", \
y="_1", \
kind="hexbin", \
xscale="log", \
cmap="YlGnBu", \
gridsize=12, \
mincnt=1, \
title="Mean vs Number of Reviewers")
plot.set_xlabel("Number of Reviewers Per Movie")
plot.set_ylabel("Mean Rating Per Movie")
plt.show()
weights = [0.9, 0.1, 0]
seed = 41
# 1. using ratingsdf
# a. [(user_id, movie_id, rating)]
umr_train, umr_test, umr_validation = umr.randomSplit(weights, seed)
# b. [(user_id, movie_id, rating)] where rating >= 3
umr_weighted_train, umr_weighted_test, umr_weighted_validation = umr_weighted.randomSplit(weights, seed)
# c. [(movie_id, (user_id, rating)]
m_ur_train, m_ur_test, m_ur_validation = m_ur.randomSplit(weights, seed)
# d. [(movie_id, (user_id, rating)] where rating >= 3
m_ur_weighted_train, m_ur_weighted_test, m_ur_weighted_validation = m_ur_weighted.randomSplit(weights, seed)
# e. [(user_id, (movie_id, rating)]
u_mr_train, u_mr_test, u_mr_validation = u_mr.randomSplit(weights, seed)
# f. [(user_id, (movie_id, rating)] where rating >= 3
u_mr_weighted_train, u_mr_weighted_test, u_mr_weighted_validation = u_mr_weighted.randomSplit(weights, seed)
# helper functions
from scipy.stats import pearsonr
import math
# filter out duplicate pairs
# user-based approach:
# input and output: [( movie_id, ((user_id_1, rating_1), (user_id_2, rating_2)) )]
# item-based approach:
# input and output: [( user_id, ((movie_id_1, rating_1), (movie_id_2, rating_2)) )]
def removeDuplicates((key_id, ratings)):
(value_id_1, rating_1) = ratings[0]
(value_id_2, rating_2) = ratings[1]
return value_id_1 < value_id_2
# rearrange so that it will be in the format of pairs
# user-based approach:
# input: [( movie_id, ((user_id_1, rating_1), (user_id_2, rating_2)) )]
# output: [( movie_id, ((user_id_1, user_id_2), (rating_1, rating_2)) )]
# item-based approach:
# input: [( user_id, ((movie_id_1, movie_id_2), (rating_1, rating2)) )]
# output: [( user_id, ((movie_id_1, movie_id_2), (rating_1, rating2)) )]
def createPairs((key_id, ratings)):
(value_id_1, rating_1) = ratings[0]
(value_id_2, rating_2) = ratings[1]
return ((value_id_1, value_id_2), (rating_1, rating_2))
# aggregate pairs using combineByKey() instead of groupByKey()
# [( test_user_id, train_user_id), (test_rating_1, train_rating_1), (test_rating_2, train_rating_2), ...]
def aggregatePairs(keyPairs):
return keyPairs.combineByKey(
lambda firstRatingPair: ((firstRatingPair),),
lambda newRatingPair, firstRatingPair: newRatingPair + ((firstRatingPair),),
lambda tupleRatingPairs1, tupleRatingPairs2: tupleRatingPairs1 + tupleRatingPairs2)
# calculate pearson correlation when you passed in the values of
# user-based approach:
# input: values of [(user_id_1, user_id_2), ((rating_1, rating_2), (rating_1, rating_2)...)]
# output: values of [(user_id_1, user_id_2), (pearson_correlation, num_rating_pairs, p_value)]
# item-based approach:
# input: values of [(movie_id_1, movie_id_2), ((rating_1, rating_2), (rating_1, rating_2)...)]
# output: values of [(movie_id_1, movie_id_2), (pearson_correlation, num_rating_pairs, p_value)]
# NOTE: ignore p_value
def calculatePearson(ratingPairs):
rating1s = [rating1 for (rating1, _) in ratingPairs]
rating2s = [rating2 for (_, rating2) in ratingPairs]
pearson_correlation, p_value = pearsonr(rating1s, rating2s)
return (pearson_correlation, len(ratingPairs))
#((user_id, movie_id), rating)
a = sc.parallelize([ ((1, 2), 3), ((2, 2), 4) ])
#((user_id, movie_id), predicted_rating)
b = sc.parallelize([ ((1, 2), 2), ((2, 2), 5) ])
#((user_id, movie_id), (rating, predicted_rating)
c = a.join(b)
c.collect()
# combine test and train together so that
# [movie_id, ( (test_user_id, test_rating), (train_user_id, train_rating) )]
M_testUR_trainUR = m_ur_test.join(m_ur_train)
print M_testUR_trainUR.count()
M_testUR_trainUR.take(5)
# remove duplicates
M_testUR_trainUR = M_testUR_trainUR.filter(removeDuplicates)
print M_testUR_trainUR.count()
M_testUR_trainUR.take(2)
# rearrange so that it will be in the format
# [(test_user_id, train_user_id), (test_rating, train_rating)]
userPairs = M_testUR_trainUR.map(createPairs)
print userPairs.count()
userPairs.take(2)
# congregate all ratings for each user pair so that it will be in the format of:
# [( test_user_id, train_user_id), (test_rating_1, train_rating_1), (test_rating_2, train_rating_2), ...]
# instead of using groupByKey(), use combineByKey() instead.
# Implemented using groupByKey():
with Timer() as t:
aggUserPairs = userPairs.groupByKey()
print "aggregate user pairs approach #1: %s seconds" % t.secs
print aggUserPairs.count()
aggUserPairs.take(5)
-----------------------------------------------------------------
# Output:
aggregate user pairs: 0.0353801250458 seconds
10728120
Out[20]:
[((1274, 2736), <pyspark.resultiterable.ResultIterable at 0x7f180eb55350>),
((2117, 5393), <pyspark.resultiterable.ResultIterable at 0x7f180eb55510>),
((1422, 3892), <pyspark.resultiterable.ResultIterable at 0x7f180eb55550>),
((1902, 5636), <pyspark.resultiterable.ResultIterable at 0x7f180eb55590>),
((3679, 5555), <pyspark.resultiterable.ResultIterable at 0x7f180eb555d0>)]
-----------------------------------------------------------------
output = aggUserPairs.mapValues(lambda iterable: tuple(iterable))
output.take(2)
-----------------------------------------------------------------
# Output:
[((3848, 4390), ((5.0, 5.0),)),
((897, 2621), ((4.0, 5.0), (4.0, 4.0), (2.0, 2.0)))]
-----------------------------------------------------------------
with Timer() as t:
aggUserPairs = aggregatePairs(userPairs)
print "aggregate user pairs: %s seconds" % t.secs
print aggUserPairs.count()
aggUserPairs.take(2)
# calculate pearson correlation to figure out user-to-user similarity in the format of:
# [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )]
userPairSimilarities = aggUserPairs.mapValues(calculatePearson)
userPairSimilarities.sortByKey()
print userPairSimilarities.count()
userPairSimilarities.take(5)
# 1.
# a. select neighbors whose similarity correlation is greater than the threshold of 0.5
# b. remove user pairs that do not share a minimum of 5 reviews
# output: number of user pairs that passes minPearson = 1692207
# number of user pairs that passes both minPearson and minSimilarReviews = 533407
minPearson = 0.5
minSimilarReviews = 5
userPairPassThreshold = userPairSimilarities.filter(
lambda (userPair, (pearson_correlation, num_rating_pairs)):
pearson_correlation > minPearson and
num_rating_pairs >= minSimilarReviews
)
print userPairPassThreshold.count()
userPairPassThreshold.take(5)
# 2. select top n neighbors for each test user
from pyspark.rdd import RDD
import heapq
def takeOrderedByKey(self, topN, sortValueFn=None, ascending=False):
def base(a):
return [a]
def combiner(agg, a):
agg.append(a)
return getTopN(agg)
def merger(x, y):
agg = x + y
return getTopN(agg)
def getTopN(agg):
if ascending == True:
return heapq.nsmallest(topN, agg, sortValueFn)
else:
return heapq.nlargest(topN, agg, sortValueFn)
return self.combineByKey(base, combiner, merger)
# add takeOrderedByKey() function to RDD class
RDD.takeOrderedByKey = takeOrderedByKey
# convert
# [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )]
# to
# [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )]
# so that you can sort by test_user_id after sorting the highest pearson correlation per test_user_id
testU_testUtrainU_sim = userPairPassThreshold.map(
lambda ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)):
(test_user_id, ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)))
)
print testU_testUtrainU_sim.count()
testU_testUtrainU_sim.take(5)
# for each test user, take the top N neighbors and ordering with the highest pearson correlation first
# [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )]
topN = 20
testUserTopNeighbors = testU_testUtrainU_sim.takeOrderedByKey(
topN,
sortValueFn=lambda ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)): (pearson_correlation, num_rating_pairs),
ascending=False)
# note: testUserTopNeighbors.count() should be less than the number of users
print testUserTopNeighbors.count()
testUserTopNeighbors.take(5)
num_distinct_test_users = m_ur_test.map(lambda (movie_id, (user_id, rating)): user_id).distinct().count()
num_distinct_test_users_pass_threshold = userPairPassThreshold.map(lambda ((test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)): test_user_id).distinct().count()
num_test_users_in_top_neighbors = testUserTopNeighbors.count()
print "num_distinct_test_users = ", num_distinct_test_users
print "num_distinct_test_users that passes the threshold check (aka pearson > 0.5, minReviews >= 5) = ", num_distinct_test_users_pass_threshold
print "num_test_users in testUserTopNeighbors = ", num_test_users_in_top_neighbors
# flattened version, meaning
# convert
# [( test_user_id, [(test_user_id, train_user_id), (pearson_correlation, num_rating_pairs)] )]
# to
# [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )]
testUserTopNeighborsFlattened = testUserTopNeighbors.flatMap(lambda (test_user_id, rest): rest)
print testUserTopNeighborsFlattened.count()
testUserTopNeighborsFlattened.take(5)
# determine mean rating of each test user (aka find M)
# output: [(user_id, mean_rating)]
# convert to [(user_id, rating)]
ur = m_ur.map(lambda (movie_id, (user_id, rating)): (user_id, rating))
# [(user_id, (sum_rating, num_rating))]
u_sumRating_numRating = ur.combineByKey(
lambda first_rating: (first_rating, 1),
lambda x, first_rating: (x[0] + first_rating, x[1] + 1),
lambda x, y: (x[0] + y[0], x[1] + y[1]))
# [(test_user_id, mean_rating)]
u_meanRating = u_sumRating_numRating.map(
lambda (user_id, (sum_rating, num_rating)): (user_id, sum_rating/num_rating))
u_meanRating.take(5)
# for each movie i,
# determine pearson correlation of user a and all other users who rates movie i
# determine rating of each user u on movie i - mean rating of user u
# testUserTopNeighborsFlattened == [( (test_user_id, train_user_id), (pearson_correlation, num_rating_pairs) )]
# M_testUR_trainUR == # [movie_id, ( (test_user_id, test_rating), (train_user_id, train_rating) )]
# movie_id, (for every users who rate movie_id, add all pearson correlation * rating of user u on movie i - mean rating of user u)
# compute predictions #2
# using a weighted sum of deviations from the mean
sum of user u has rated(pearson correlation of user a and user u) * (rating of user u on movie i - mean rating of user u)
divided by
# list all ratings in the format:
# [user_id, (movie_id, rating)]
print u_mr.count()
u_mr.take(5)
# list all combinations of movies rated by the same user in the format:
# [user_id, ( (movie_id_1, rating_1), (movie_id_2, rating_2) )]
# this is to find movie's similarity with each other
sameUserRatingsCombo = u_mr.join(u_mr)
print sameUserRatingsCombo.count()
sameUserRatingsCombo.take(5)
# filter out duplicate pairs
def removeDuplicates((user_id, ratings)):
(movie_id_1, rating_1) = ratings[0]
(movie_id_2, rating_2) = ratings[1]
return movie_id_1 < movie_id_2
sameUserRatingsCombo = sameUserRatingsCombo.filter(removeDuplicates)
print sameUserRatingsCombo.count()
sameUserRatingsCombo.take(5)
# rearrange so that it will be in the format of movie pairs:
# [(movie_id_1, movie_id_2), (rating_1, rating2)]
def createMoviePairs((user_id, ratings)):
(movie_id_1, rating_1) = ratings[0]
(movie_id_2, rating_2) = ratings[1]
return ((movie_id_1, movie_id_2), (rating_1, rating_2))
moviePairs = sameUserRatingsCombo.map(createMoviePairs)
print moviePairs.count()
moviePairs.take(5)
# congregate all ratings for each movie pair so that it will be in the format of:
# [( movie_id_1, movie_id_2), (rating_1, rating_2), (rating_1, rating_2), ...]
moviePairRatings = moviePairs.groupByKey()
print moviePairRatings.count()
moviePairRatings.take(5)
# calculate pearson correlation approach #1
# using udemy's approach
# I prefer approach #2
import math
def computePearsonCorrelationCoefficient(ratingPairs):
numPairs = 0
if not ratingPairs:
return (0, 0)
muX = sum(1.*ratingX for (ratingX, _) in ratingPairs)/len(ratingPairs)
muY = sum(1.*ratingY for (_, ratingY) in ratingPairs)/len(ratingPairs)
cov = sum_sqdev_x = sum_sqdev_y = 0
for ratingX, ratingY in ratingPairs:
dev_x = ratingX - muX
dev_y = ratingY - muY
cov += dev_x * dev_y
sum_sqdev_x += dev_x**2
sum_sqdev_y += dev_y**2
numPairs += 1
numerator = cov
denominator = math.sqrt(sum_sqdev_x) * math.sqrt(sum_sqdev_y)
score = 0
if (denominator):
score = (numerator / (float(denominator)))
return (score, numPairs)
moviePairSimilarities = moviePairRatings.mapValues(computePearsonCorrelationCoefficient).cache()
moviePairSimilarities.sortByKey()
moviePairSimilarities.take(5)
print moviePairRatings.count()
print moviePairSimilarities.count()
# calculate pearson correlation approach #2
# using scipy
# note: you cannot use pyspark.mllib.stat.Statistics's corr() function within the map function
from scipy.stats import pearsonr
def calculatePearson(ratingPairsPerMoviePairResultIterable):
ratingPairsPerMoviePair = tuple(ratingPairsPerMoviePairResultIterable)
rating1s = [rating1 for (rating1, _) in ratingPairsPerMoviePair]
rating2s = [rating2 for (_, rating2) in ratingPairsPerMoviePair]
pearson_correlation, p_value = pearsonr(rating1s, rating2s)
return (pearson_correlation, len(ratingPairsPerMoviePair))
moviePairSimilarities2 = moviePairRatings.mapValues(calculatePearson).cache()
moviePairSimilarities2.sortByKey()
moviePairSimilarities2.take(5)
print moviePairRatings.count()
print moviePairSimilarities2.count()
# divide movielens data into 10 parts to perform 10-fold cross-validation
# training model using 9 parts
# test model using last part
# results are better when default ratings are based on item information than when they are based on user information
# using mean rating is better than using majority rating
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.regression import LabeledPoint
# To use MLlib's Naive Bayes model, it requires the input to be in a format of a LabeledPoint
# therefore, convert dataset so that it will be in the following format:
# [(rating, (user_id, movie_id))]
r_um = ratingsdf.map(lambda row: LabeledPoint(row.rating, (row.user_id, row.movie_id)))
# split the data
r_um_train, r_um_test, r_um_validation = r_um.randomSplit(weights, seed)
# train a Naive Bayes model
naiveBayesModel = NaiveBayes.train(r_um_train, lambda_=1.0)
# save this Naive Bayes model
#naiveBayesModel.save(sc, "NaiveBayes_MovieLens1M_UserUser")
# load this Naive Bayes model into the SparkContext
#sameNaiveBayesModel = NaiveBayesModel.load(sc, "NaiveBayes_MovieLens1M_UserUser")
# make prediction
# [((test_user_id, test_movie_id), (predicted_rating, actual_rating))]
r_um_predicted = r_um_test.map(
lambda p: ( (p.features[0], p.features[1]), (naiveBayesModel.predict(p.features), p.label) )
)
print r_um_predicted.take(5)
# test accuracy
sameRating = r_um_predicted.filter(
lambda ((test_user_id, test_movie_id), (predicted_rating, actual_rating)): predicted_rating == actual_rating)
accuracy = 1.0 * sameRating.count() / r_um_test.count()
print "accuracy = (predicted_rating == actual_rating)/total_num_ratings = ", accuracy
# calculate RMSE and MAE
# convert into two vectors where
# one vector describes the actual ratings in the format [(user_id, movie_id, actual_rating)]
# second vector describes the predicted ratings in the format [(user_id, movie_id, predicted_rating)]
actual = r_um_predicted.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
predicted = r_um_predicted.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating)
)
print "actual:\n", actual.take(5)
print "predicted:\n", predicted.take(5)
rmse = pm.calculate_rmse_using_rdd(actual, predicted)
print "rmse = ", rmse
mae = pm.calculate_mae_using_rdd(actual, predicted)
print "mae = ", mae
# determine min and max of ratings
minRating = ratingsdf.map(lambda row: row.rating).min()
maxRating = ratingsdf.map(lambda row: row.rating).max()
print "minRating = ", minRating
print "maxRating = ", maxRating
# create RDD for the range of ratings
# [(1, 2, 3, 4, 5)]
rangeOfRatings = sc.parallelize( list(range(int(minRating), int(maxRating + 1))) )
print rangeOfRatings.collect()
print rangeOfRatings.count()
# [(user_id, movie_id, rating)]
umr = ratingsdf.map(lambda row: (row.user_id, row.movie_id, row.rating))
umr.count()
# since we have to determine the probability of rating r for each user_id and movie_id,
# we have to create a RDD with [(rating, (user_id, movie_id))] for each rating
# ie. (rating_1, (user_id, movie_id)), (rating_2, (user_id, movie_id)), ..., (rating_5, (user_id, movie_id))
um = umr.map(lambda (user_id, movie_id, rating): (user_id, movie_id))
rCombo_um = rangeOfRatings.cartesian(um).map(lambda (rating, (user_id, movie_id)): (float(rating), (user_id, movie_id)))
print rCombo_um.take(2)
print rCombo_um.count() # == umr.count() * 5
umrCombo = rCombo_um.map(lambda (rating, (user_id, movie_id)): (user_id, movie_id, rating))
print umrCombo.take(2)
print umrCombo.count()
# since we have to determine the probability of rating r for each user_id and movie_id,
# we have to create a RDD with [(rating, (user_id, movie_id))] for each rating
# ie. (rating_1, (user_id, movie_id)), (rating_2, (user_id, movie_id)), ..., (rating_5, (user_id, movie_id))
um_test = umr_test.map(lambda (user_id, movie_id, rating): (user_id, movie_id))
rCombo_um_test = rangeOfRatings.cartesian(um_test).map(lambda (rating, (user_id, movie_id)): (float(rating), (user_id, movie_id)))
print rCombo_um_test.take(2)
print rCombo_um_test.count() # == umr.count() * 5
umrCombo_test = rCombo_um_test.map(lambda (rating, (user_id, movie_id)): (user_id, movie_id, rating))
print umrCombo_test.take(2)
print umrCombo_test.count()
# [((user_id, rating), 1)]
ur_1 = umr.map(lambda (user_id, movie_id, rating): ((user_id, rating), 1))
ur_1.take(2)
ur_1.count()
# [(((user_id, rating_1), 0), ((user_id, rating_2), 0), ..., ((user_id, rating_5), 0))]
urCombo_0 = umrCombo.map(lambda (user_id, movie_id, rating): ((user_id, rating), 0)).distinct()
#print urCombo_0.sortByKey().collect()
print urCombo_0.count()
ur_1Or0 = ur_1.union(urCombo_0)
print ur_1Or0.take(2)
print ur_1Or0.count()
# ur_1Or0.count() == ur_1.count() + urCombo_0.count()
# 1000209 + 30200
# 1030409
ur_1Or0.sortByKey().collect()
from operator import add
# [(user_id, rating), (num_rating)]
ur_numRating = ur_1Or0.reduceByKey(add)
print ur_numRating.take(2)
print ur_numRating.count()
# [(user_id, (rating, num_rating))]
u_r_numRating = ur_numRating.map(lambda ((user_id, rating), num_rating): (user_id, (rating, num_rating)))
print u_r_numRating.take(2)
print u_r_numRating.count()
# [(user_id, total_rating)]
u_totalRating = sc.parallelize(umr.map(lambda (user_id, movie_id, rating): (user_id, rating)).countByKey().items())
print u_totalRating.take(2)
print u_totalRating.count()
# [(user_id, (total_rating, (rating, num_rating)))]
u_componentsOfProb = u_totalRating.join(u_r_numRating)
print u_componentsOfProb.take(2)
print u_componentsOfProb.count()
# [(user_id, rating, probRU)]
probRU = u_componentsOfProb.map(lambda (user_id, (total_rating, (rating, num_rating))):
(user_id, rating, float(num_rating)/float(total_rating))
)
print probRU.take(2)
print probRU.count()
# [((movie_id, rating), 1)]
mr_1 = umr.map(lambda (user_id, movie_id, rating): ((movie_id, rating), 1))
mr_1.take(2)
mr_1.count()
# [(((user_id, rating_1), 0), ((user_id, rating_2), 0), ..., ((user_id, rating_5), 0))]
mrCombo_0 = umrCombo.map(lambda (user_id, movie_id, rating): ((movie_id, rating), 0)).distinct()
#print mrCombo_0.sortByKey().collect()
print mrCombo_0.count()
mr_1Or0 = mr_1.union(mrCombo_0)
print mr_1Or0.take(2)
print mr_1Or0.count()
# ur_1Or0.count() == ur_1.count() + urCombo_0.count()
# 1000209 + 18530
# 1018739
# [(movie_id, rating), (num_rating)]
mr_numRating = mr_1Or0.reduceByKey(add)
print mr_numRating.take(2)
print mr_numRating.count()
# OPTION instead of using union() and then reduceByKey()
mr_1Or0 = mr_1.reduceByKey(add).rightOuterJoin(mrCombo_0)
print mr_1Or0.take(2)
print mr_1Or0.count()
[((2001, 5.0), (129, 0)), ((3654, 4.0), (266, 0))]
18530
# [(movie_id, (rating, num_rating))]
m_r_numRating = mr_numRating.map(lambda ((movie_id, rating), num_rating): (movie_id, (rating, num_rating)))
print m_r_numRating.take(2)
print m_r_numRating.count()
# [(movie_id, total_rating)]
m_totalRating = sc.parallelize(umr.map(lambda (user_id, movie_id, rating): (movie_id, rating)).countByKey().items())
print m_totalRating.take(2)
print m_totalRating.count()
# [(user_id, (total_rating, (rating, num_rating)))]
m_componentsOfProb = m_totalRating.join(m_r_numRating)
print m_componentsOfProb.take(2)
print m_componentsOfProb.count()
# [(movie_id, rating, probRI)]
probRI = m_componentsOfProb.map(lambda (movie_id, (total_rating, (rating, num_rating))):
(movie_id, rating, float(num_rating)/float(total_rating))
)
print probRI.take(2)
print probRI.count()
totalRatings = umr.count()
print totalRatings
# [(rating, 1)]
r_1 = umr.map(lambda (user_id, movie_id, rating): (rating, 1))
# [(rating, num_rating)]
r_numRating = r_1.reduceByKey(add)
# [(rating, probR)]
probR = r_numRating.mapValues(lambda num_rating: float(num_rating)/float(totalRatings))
probR.take(2)
# add probR to user_id, movie_id, rating
components = rCombo_um.join(probR)
print components.take(2)
print components.count()
# add probRU to user_id, movie_id, rating, probR
tmp_a = components.map(lambda (rating, ((user_id, movie_id), prob_r)): ((user_id, rating), (movie_id, prob_r)))
tmp_b = probRU.map(lambda (user_id, rating, prob_ru): ((user_id, rating), prob_ru))
components = tmp_a.join(tmp_b)
print components.take(2)
print components.count()
# add probRI to user_id, movie_id, rating, probR, probRU
tmp_a = components.map(lambda ( (user_id, rating), ((movie_id, prob_r), prob_ru) ):
( (movie_id, rating), (user_id, prob_r, prob_ru) )
)
tmp_b = probRI.map(lambda (movie_id, rating, prob_ri): ((movie_id, rating), prob_ri))
components = tmp_a.join(tmp_b)
print components.take(2)
print components.count()
# re-format
# [((user_id, movie_id, rating), bayes_probability)]
componentsReformat = components.map(lambda ((movie_id, rating), ((user_id, prob_r, prob_ru), prob_ri)):
((user_id, movie_id, rating), (prob_r, prob_ru, prob_ri))
)
# calculate bayes probability
bayesProb = componentsReformat.mapValues(lambda (prob_r, prob_ru, prob_ri): prob_ru * prob_ri / prob_r)
print bayesProb.take(2)
print "umr = ", umr.count()
print "probR = ", probR.count()
print "probRU = ", probRU.count()
print "probRI = ", probRI.count()
print "bayesProb = ", bayesProb.count()
# note: bayesProb.count() = umr.count() * 5
# bayesProb = umr_train * 5
1000209 * 5
# extract only user_id, movie_id in umr_test from bayes_prob
# remember that we have to extract the bayes_prob for each rating too
# [(user_id, movie_id, rating)]
print "umrCombo_test.count() = ", umrCombo_test.count()
# [((user_id, movie_id, rating), bayes_prob)]
print "bayesProb.count() = ", bayesProb.count()
# [((user_id, movie_id), (rating, bayes_prob))]
tmp_a = umrCombo_test.map(lambda (user_id, movie_id, rating): ((user_id, movie_id, rating), 1))
tmp_b = bayesProb
bayesProb_test = tmp_a.join(tmp_b).map(
lambda ((user_id, movie_id, rating), (_, bayes_prob)): ((user_id, movie_id), (rating, bayes_prob)))
print bayesProb_test.take(2)
print bayesProb_test.count() # == umrCombo_test.count()
# [((user_id, movie_id), [(rating_1, bayes_prob_1), (rating_2, bayes_prob_2), ..., (rating_5, bayes_prob_5)])]
um_allBayesProb = bayesProb_test.mapValues(lambda value: [value]).reduceByKey(lambda a, b: a + b)
print um_allBayesProb.take(2)
print um_allBayesProb.count() # == bayesProb_test.count()/5 == umr_test.count() == 100234
um_allBayesProb = um_allBayesProb.mapValues(lambda value: sorted(value, key=lambda(rating, bayes_prob): rating))
print um_allBayesProb.take(2)
print um_allBayesProb.count()
def calculate_bayes_map(value):
# extract the bayes_prob
bayesProbList = [x[1] for x in value]
# define the argmax, return the index
argmax = bayesProbList.index(max(bayesProbList))
return argmax
predicted_bayes_map = um_allBayesProb.mapValues(calculate_bayes_map)
print predicted_bayes_map.take(2)
print predicted_bayes_map.count()
# [(test_user_id, test_movie_id), (actual_rating, predicted_rating)]
tmp_a = umr_test.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_map
um_testBayesMap = tmp_a.join(tmp_b)
print um_testBayesMap.take(2)
print um_testBayesMap.count()
# [(train_user_id, train_movie_id), (actual_rating, predicted_rating)]
tmp_a = umr_train.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_map
um_trainBayesMap = tmp_a.join(tmp_b)
a, b, c = umr.randomSplit(weights, seed)
print a.count()
print b.count()
print c.count()
print a.take(1)
print b.take(1)
# calculate RMSE and MAE
# convert into two vectors where
# one vector describes the actual ratings in the format [(user_id, movie_id, actual_rating)]
# second vector describes the predicted ratings in the format [(user_id, movie_id, predicted_rating)]
actual = um_testBayesMap.map(
lambda((test_user_id, test_movie_id), (actual_rating, predicted_rating)): (test_user_id, test_movie_id, actual_rating)
)
predicted = um_testBayesMap.map(
lambda((test_user_id, test_movie_id), (actual_rating, predicted_rating)): (test_user_id, test_movie_id, predicted_rating)
)
print "actual:\n", actual.take(5)
print "predicted:\n", predicted.take(5)
rmse = pm.calculate_rmse_using_rdd(actual, predicted)
print "rmse = ", rmse
mae = pm.calculate_mae_using_rdd(actual, predicted)
print "mae = ", mae
# y_test
y_test = um_testBayesMap.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_train
y_train = um_trainBayesMap.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_predicted
y_predicted = um_testBayesMap.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating)
)
pm_results_bayes_map = pm.get_perform_metrics(y_test, y_train, y_predicted, content_array, sqlCtx)
from pprint import pprint
pprint(pm_results_bayes_map)
def calculate_bayes_mse(value):
predicted = 0.
for rating, bayes_prob in value:
predicted += rating * bayes_prob
return predicted
predicted_bayes_mse = um_allBayesProb.mapValues(calculate_bayes_mse)
print predicted_bayes_mse.take(2)
print predicted_bayes_mse.count()
# [(test_user_id, test_movie_id), (predicted_rating, actual_rating)]
tmp_a = umr_test.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_mse
um_testBayesMse = tmp_a.join(tmp_b)
print um_testBayesMse.take(2)
print um_testBayesMse.count()
# calculate RMSE and MAE
# convert into two vectors where
# one vector describes the actual ratings in the format [(user_id, movie_id, actual_rating)]
# second vector describes the predicted ratings in the format [(user_id, movie_id, predicted_rating)]
actual = um_testBayesMse.map(
lambda((test_user_id, test_movie_id), (actual_rating, predicted_rating)): (test_user_id, test_movie_id, actual_rating)
)
predicted = um_testBayesMse.map(
lambda((test_user_id, test_movie_id), (actual_rating, predicted_rating)): (test_user_id, test_movie_id, predicted_rating)
)
print "actual:\n", actual.take(5)
print "predicted:\n", predicted.take(5)
rmse = pm.calculate_rmse_using_rdd(actual, predicted)
print "rmse = ", rmse
mae = pm.calculate_mae_using_rdd(actual, predicted)
print "mae = ", mae
# y_test
y_test = um_testBayesMse.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_train
tmp_a = umr_train.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_mse
um_trainBayesMse = tmp_a.join(tmp_b)
y_train = um_trainBayesMse.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_predicted
y_predicted = um_testBayesMse.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating)
)
pm_results_bayes_mse = pm.get_perform_metrics(y_test, y_train, y_predicted, content_array, sqlCtx)
from pprint import pprint
pprint(pm_results_bayes_mse)
# TODO: fix this the same as argmax
def calculate_bayes_mae(value):
sumOfProductList = []
for rating, bayes_prob in value:
sumOfProduct = 0.
for i in range(1, 6):
sumOfProduct += bayes_prob * abs(rating - i)
sumOfProductList.append(sumOfProduct)
argmin = sumOfProductList.index(min(sumOfProductList))
return argmin
predicted_bayes_mae = um_allBayesProb.mapValues(calculate_bayes_mae)
print predicted_bayes_mae.take(2)
print predicted_bayes_mae.count()
# [(test_user_id, test_movie_id), (predicted_rating, actual_rating)]
tmp_a = umr_test.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_map
um_testBayesMae = tmp_a.join(tmp_b)
print um_testBayesMae.take(2)
print um_testBayesMae.count()
# calculate RMSE and MAE
from src.algorithms import performance_metrics as pm
# convert into two vectors where
# one vector describes the actual ratings in the format [(user_id, movie_id, actual_rating)]
# second vector describes the predicted ratings in the format [(user_id, movie_id, predicted_rating)]
actual = um_testBayesMae.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
predicted = um_testBayesMae.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating)
)
print "actual:\n", actual.take(5)
print "predicted:\n", predicted.take(5)
rmse = pm.calculate_rmse_using_rdd(actual, predicted)
print "rmse = ", rmse
mae = pm.calculate_mae_using_rdd(actual, predicted)
print "mae = ", mae
# y_test
y_test = um_testBayesMae.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_train
tmp_a = umr_train.map(lambda (user_id, movie_id, rating): ((user_id, movie_id), rating))
tmp_b = predicted_bayes_mae
um_trainBayesMae = tmp_a.join(tmp_b)
y_train = um_trainBayesMae.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, actual_rating)
)
# y_predicted
y_predicted = um_testBayesMae.map(
lambda((test_user_id, test_movie_id), (predicted_rating, actual_rating)): (test_user_id, test_movie_id, predicted_rating)
)
pm_results_bayes_mae = get_results(y_test, y_train, y_predicted, content_array, sqlCtx)
pprint(pm_results_bayes_mae)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once again, read() accepts a string containing a path to either a single DfR dataset, or a directory containing several. Here, "DfR dataset" refers to the folder containing the file "citations.xml", and the contents of that folder.
Step2: Combining DfR and WoS data
Step3: Both of these datasets are for the Journal of the History of Biology. But note that the WoS and DfR corpora have different numbers of Papers
Step4: Then import merge() from tethne.readers
Step5: We then create a new Corpus by passing both Corpus objects to merge(). If there is conflicting information in the two corpora, the first Corpus gets priority.
Step6: merge() has combined data where possible, and discarded any duplicates in the original datasets.
Step7: FeatureSets
Step8: Note that citations and authors are also FeatureSets. In fact, the majority of network-building functions in Tethne operate on FeatureSets -- including the coauthors() and bibliographic_coupling() functions that we used in the WoS notebook.
Step9: The index contains our "vocabulary"
Step10: We can use the feature_distribution() method of our Corpus to look at the distribution of words over time. In the example below I used MatPlotLib to visualize the distribution.
Step11: If we add the argument mode='documentCounts', we get the number of documents in which 'evolutionary' occurs.
Step12: Note that we can look how documents themselves are distributed using the distribution() method.
Step13: So, putting these together, we can normalize our feature_distribution() data to get a sense of the relative use of the word 'evolution'.
Step14: Topic Modeling with DfR wordcounts
Step15: We then need to define what elements to keep, and what elements to discard. We will use a function that will evaluate whether or not a word is in our stoplist. The function should take three arguments
Step16: We apply the stoplist using the transform() method. FeatureSets are not modified in place; instead, a new FeatureSet is generated that reflects the specified changes. We'll call the new FeatureSet 'wordcounts_filtered'.
Step17: There should be significantly fewer words in our new "wordcounts_filtered" FeatureSet.
Step18: The LDA topic model
Step19: Now we'll create a new LDAModel for our Corpus. The featureset_name parameter tells the LDAModel which FeatureSet we want to use. We'll use our filtered wordcounts.
Step20: Next we'll fit the model. We need to tell MALLET how many topics to fit (the hyperparameter Z), and how many iterations (max_iter) to perform. This step may take a little while, depending on the size of your corpus.
Step21: You can inspect the inferred topics using the model's print_topics() method. By default, this will print the top ten words for each topic.
Step22: We can also look at the representation of a topic over time using the topic_over_time() method. In the example below we'll print the first five of the topics on the same plot.
Step23: Generating networks from topic models
Step24: The terms function generates a network of words connected on the basis of shared affinity with a topic. If two words i and j are both associated with a topic z with $\Phi(i|z) >= 0.01$ and $\Phi(j|z) >= 0.01$, then an edge is drawn between them.
Step25:
|
<ASSISTANT_TASK:>
Python Code:
from tethne.readers import dfr
dfr_corpus = dfr.read('/Users/erickpeirson/Dropbox/HSS ThatCamp Workshop/sample_data/DfR')
from tethne.readers import wos
wos_corpus = wos.read('/Users/erickpeirson/Dropbox/HSS ThatCamp Workshop/sample_data/wos')
len(dfr_corpus), len(wos_corpus)
from tethne.readers import merge
corpus = merge(dfr_corpus, wos_corpus)
len(corpus)
corpus.features
corpus.features['wordcounts'].features.items()[0] # Just show data for the first Paper.
print 'There are %i words in the wordcounts featureset' % len(corpus.features['wordcounts'].index)
plt.figure(figsize=(10, 5))
plt.bar(*corpus.feature_distribution('wordcounts', 'evolutionary')) # <-- The action.
plt.ylabel('Frequency of the word ``evolutionary`` in this Corpus')
plt.xlabel('Publication Date')
plt.show()
plt.figure(figsize=(10, 5))
plt.bar(*corpus.feature_distribution('wordcounts', 'evolutionary', mode='documentCounts')) # <-- The action.
plt.ylabel('Documents containing ``evolutionary``')
plt.xlabel('Publication Date')
plt.show()
plt.figure(figsize=(10, 5))
plt.bar(*corpus.distribution()) # <-- The action.
plt.ylabel('Number of Documents')
plt.xlabel('Publication Date')
plt.show()
dates, N_evolution = corpus.feature_distribution('wordcounts', 'evolutionary', mode='documentCounts')
dates, N = corpus.distribution()
normalized_frequency = [f/N[i] for i, f in enumerate(N_evolution)]
plt.figure(figsize=(10, 5))
plt.bar(dates, normalized_frequency) # <-- The action.
plt.ylabel('Proportion of documents containing ``evolutionary``')
plt.xlabel('Publication Date')
plt.show()
from nltk.corpus import stopwords
stoplist = stopwords.words()
def apply_stoplist(f, v, c, dc):
if f in stoplist or dc > 500 or dc < 3 or len(f) < 4:
return None # Discard the element.
return v
corpus.features['wordcounts_filtered'] = corpus.features['wordcounts'].transform(apply_stoplist)
print 'There are %i words in the wordcounts featureset' % len(corpus.features['wordcounts'].index)
print 'There are %i words in the wordcounts_filtered featureset' % len(corpus.features['wordcounts_filtered'].index)
from tethne import LDAModel
model = LDAModel(corpus, featureset_name='wordcounts_filtered')
model.fit(Z=50, max_iter=500)
model.print_topics()
plt.figure(figsize=(15, 5))
for k in xrange(5): # Generates numbers k in [0, 4].
x, y = model.topic_over_time(k) # Gets topic number k.
plt.plot(x, y, label='topic {0}'.format(k), lw=2, alpha=0.7)
plt.legend(loc='best')
plt.show()
from tethne.networks import topics
termGraph = topics.terms(model, threshold=0.01)
termGraph.order(), termGraph.size()
termGraph.name = ''
from tethne.writers.graph import to_graphml
to_graphml(termGraph, '/Users/erickpeirson/Desktop/topic_terms.graphml')
topicCoupling = topics.topic_coupling(model, threshold=0.2)
print '%i nodes and %i edges' % (topicCoupling.order(), topicCoupling.size())
to_graphml(topicCoupling, '/Users/erickpeirson/Desktop/lda_topicCoupling.graphml')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span id="plat_prod">Choose Platforms and Products ▴</span>
Step2: Choose products
Step3: <span id="extents">Get the Extents of the Cube ▴</span>
Step4: Visualize the available area
Step5: <span id="define_extents">Define the Extents of the Analysis ▴</span>
Step6: Visualize the selected area
Step7: <span id="calc_cloud_coverage">Calculate the Cloud Coverage Percentage for Each Pixel▴</span>
Step8: <span id="create_cloud_cov_table">Create a Table of Cloud Coverage Percentage for Each Date▴</span>
Step9: <span id="plot_cloud_cov">Create a Plot of Cloud Coverage Percentage for Each Date▴</span>
Step10: <span id="pct_clear_img">Create an Image of the Percent of Clear Views Per Pixel for the Entire Time Period▴</span>
Step11: <span id="rgb_time_slice">Review an RGB Scene for a Selected Time Slice▴</span>
Step12: <p style="color
|
<ASSISTANT_TASK:>
Python Code:
# Enable importing of utilities.
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
# Load Data Cube Configuration
import datacube
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi()
dc = api.dc
from datacube.utils.aws import configure_s3_access
configure_s3_access(requester_pays=True)
# Get available products
products_info = dc.list_products()
# List LANDSAT 7 products
print("LANDSAT 7 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_7"]
# List LANDSAT 8 products
print("LANDSAT 8 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_8"]
# These are the platforms (satellites) and products (datacube sets)
# used for this demonstration. Uncomment only 1 set.
platform = 'LANDSAT_8'
product = 'ls8_usgs_sr_scene'
collection = 'c1'
level = 'l2'
# platform = 'LANDSAT_8'
# product = 'ls8_l2_c2'
# collection = 'c2'
# level = 'l2'
band_no_data_values = dc.list_measurements().loc[product, 'nodata']
from utils.data_cube_utilities.dc_load import get_product_extents
from utils.data_cube_utilities.dc_time import dt_to_str
full_lat, full_lon, min_max_dates = get_product_extents(api, platform, product)
# Print the extents of the data.
print("Latitude Extents:", full_lat)
print("Longitude Extents:", full_lon)
print("Time Extents:", list(map(dt_to_str, (min_max_dates[0], min_max_dates[1]))))
from utils.data_cube_utilities.dc_display_map import display_map
display_map(full_lat, full_lon)
# Select an analysis region (Lat-Lon) within the extents listed above.
# Select a time period (Min-Max) within the extents listed above (Year-Month-Day)
# This region and time period will be used for the cloud assessment
# Nairobi, Kenya
latitude = (-1.3407, -1.2809)
longitude = (36.7640, 36.9206)
# Mombasa, Kenya
# latitude = (-4.12, -3.975)
# longitude = (39.55, 39.7)
# Mau Forest - Western Kenya
# latitude = (-0.13406, 0.21307)
# longitude = (35.28322, 35.56681)
# Dar es Salaam, Tanzania
# latitude = (-7.0, -6.7)
# longitude = (39.1, 39.4)
# Lake Sulunga, Tanzania
# latitude = (-6.2622, -5.8822)
# longitude = (34.9802, 35.3602)
# Freetown, Sierra Leone
# latitude = (8.3267, 8.5123)
# longitude = (-13.3109, -13.1197 )
# Vietnam
# latitude = (10.9358, 11.0358)
# longitude = (107.1899, 107.2899)
# Ghanas
# latitude = (5.5, 5.7) # Accra
# longitude = (-0.4, 0.0) # Accra
# Time Period
time_extents = ('2016-01-01', '2016-01-31')
display_map(latitude,longitude)
from utils.data_cube_utilities.clean_mask import landsat_clean_mask_invalid, landsat_qa_clean_mask
def build_cloud_coverage_table_landsat(product,
platform,
collection,
level,
latitude,
longitude,
time = None,
dc = None,
extra_band = 'green',
band_no_data_values = None):
dc = dc if dc is not None else datacube.Datacube(app = "")
load_params = dict(platform=platform,
product=product,
latitude = latitude,
longitude = longitude,
measurements = [extra_band, 'pixel_qa'],
group_by='solar_day')
if time is not None:
load_params["time"] = time
landsat_dataset = dc.load(**load_params)
clean_mask = landsat_qa_clean_mask(landsat_dataset, platform=platform,
collection=collection, level=level) & \
landsat_clean_mask_invalid(landsat_dataset, platform, collection, level)
data_mask = xr.full_like(clean_mask, True)
if band_no_data_values is not None:
for data_var_name in landsat_dataset.data_vars:
band_data_mask = landsat_dataset[data_var_name] != band_no_data_values[data_var_name]
data_mask = data_mask & band_data_mask
clean_data_mask = clean_mask & data_mask
landsat_dataset = landsat_dataset.where(clean_data_mask)
times = list(landsat_dataset.time.values)
scene_slice_list = list(map(lambda t: landsat_dataset.sel(time = str(t)), times))
clean_data_mask_list = [clean_data_mask.sel(time=str(time)).values for time in clean_data_mask.time.values]
# Calculate the percentage of all pixels which are not cloud.
percentage_list = [clean_data_mask.mean()*100 for clean_data_mask in clean_data_mask_list]
clean_pixel_count_list = list(map(np.sum, clean_data_mask_list))
data = {"times": times,
"clean_percentage": percentage_list,
"clean_count": clean_pixel_count_list }
return landsat_dataset, pd.DataFrame(data=data, columns=["times", "clean_percentage", "clean_count"]), \
clean_mask, data_mask, clean_data_mask
extra_band = 'green'
landsat_dataset, coverage_table, clean_mask, data_mask, clean_data_mask = \
build_cloud_coverage_table_landsat(product = product,
platform = platform,
collection = collection,
level = level,
latitude = latitude,
longitude = longitude,
time = time_extents,
extra_band=extra_band,
band_no_data_values=band_no_data_values)
pd.set_option('display.max_rows', len(coverage_table))
coverage_table
plt.figure(figsize = (15,5))
plt.plot(coverage_table["times"].values, coverage_table["clean_percentage"].values, 'bo', markersize=8)
plt.title("Percentage of Clean (not cloud) Pixels for Each Time Slice")
plt.show()
# We are really plotting the fraction of times that are not no_data which are clear.
# This is done to account for regions filled with no_data - such as when querying across path/rows.
clear_and_data_per_px = clean_data_mask.sum(dim='time')
data_per_px = data_mask.sum(dim='time')
frac_clear_per_data_per_px = clear_and_data_per_px / data_per_px
num_cbar_ticks = 8 # The number of ticks to use for the colorbar.
quad_mesh = (frac_clear_per_data_per_px).plot(figsize=(12,10),cmap = "RdYlGn", vmin=0, vmax=1)
plt.show()
print("Percent of pixels with data: {:.2%}".format(data_mask.mean().values))
print("Percent of pixels that are clear: {:.2%}".format(clean_mask.mean().values))
print("Percent of pixels that are clear and have data: {:.2%}".format(clean_data_mask.mean().values))
(frac_clear_per_data_per_px == 0).sum() / frac_clear_per_data_per_px.count()
print("Number of pixels which have no non-cloud data:", (frac_clear_per_data_per_px == 0).sum().values)
print("Total number of pixels:", frac_clear_per_data_per_px.count().values)
# Load the data to create an RGB image
landsat_dataset = dc.load(latitude = latitude,
longitude = longitude,
platform = platform,
time = time_extents,
product = product,
measurements = ['red', 'green', 'blue'],
group_by='solar_day')
from utils.data_cube_utilities.dc_rgb import rgb
# CHANGE HERE >>>>>>>>>>>>>>
time_ind = 0 # The acquisition to select. The first acquisition has index 0.
# Select one of the time slices and create an RGB image.
# Time slices are numbered from 0 to x and shown in the table above
# Review the clean_percentage values above to select scenes with few clouds
# Clouds will be visible in WHITE and cloud-shadows will be visible in BLACK
rgb(landsat_dataset.isel(time=time_ind), width=12)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The source dataset
Step2: Create the training data table
Step3: Verify table creation
Step4: Baseline Model
Step5: Note, the query takes several minutes to complete. After the first iteration is complete, your model (baseline_model) appears in the navigation panel of the BigQuery web UI. Because the query uses a CREATE MODEL statement to create a model, you do not see query results.
Step6: NOTE
Step7: Model 1
Step8: Once the training is done, visit the BigQuery Cloud Console and look at the model that has been trained. Then, come back to this notebook.
Step9: Here we run a SQL query to take the SQRT() of the mean squared error as your loss metric for evaluation for the benchmark_model.
Step10: Model 2
Step11: Model 3
|
<ASSISTANT_TASK:>
Python Code:
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
%%bash
# Create a BigQuery dataset for feat_eng if it doesn't exist
datasetexists=$(bq ls -d | grep -w feat_eng)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: feat_eng"
bq --location=US mk --dataset \
--description 'Taxi Fare' \
$PROJECT:feat_eng
echo "\nHere are your current datasets:"
bq ls
fi
%%bigquery
CREATE OR REPLACE TABLE
feat_eng.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
passenger_count*1.0 AS passengers,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat
FROM
`nyc-tlc.yellow.trips`
WHERE
MOD(ABS(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING))), 10000) = 1
AND fare_amount >= 2.5
AND passenger_count > 0
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
%%bigquery
# LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT
*
FROM
feat_eng.feateng_training_data
LIMIT
0
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.baseline_model OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
# Eval statistics on the held out data.
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.baseline_model)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.baseline_model)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.baseline_model)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_1 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
pickup_datetime,
EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS dayofweek,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
feat_eng.feateng_training_data
%%bigquery
SELECT
*,
SQRT(loss) AS rmse
FROM
ML.TRAINING_INFO(MODEL feat_eng.model_1)
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_1)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_2 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS dayofweek,
EXTRACT(HOUR
FROM
pickup_datetime) AS hourofday,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_2)
%%bigquery
CREATE OR REPLACE MODEL
feat_eng.model_3 OPTIONS (model_type='linear_reg',
input_label_cols=['fare_amount']) AS
SELECT
fare_amount,
passengers,
#pickup_datetime,
#EXTRACT(DAYOFWEEK FROM pickup_datetime) AS dayofweek,
#EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
CONCAT(CAST(EXTRACT(DAYOFWEEK
FROM
pickup_datetime) AS STRING), CAST(EXTRACT(HOUR
FROM
pickup_datetime) AS STRING)) AS hourofday,
pickuplon,
pickuplat,
dropofflon,
dropofflat
FROM
`feat_eng.feateng_training_data`
%%bigquery
SELECT
*
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
%%bigquery
SELECT
SQRT(mean_squared_error) AS rmse
FROM
ML.EVALUATE(MODEL feat_eng.model_3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: List of data files
Step4: Data load
Step5: Laser alternation selection
Step6: We need to define some parameters
Step7: We should check if everithing is OK with an alternation histogram
Step8: If the plot looks good we can apply the parameters with
Step9: Measurements infos
Step10: Or check the measurements duration
Step11: Compute background
Step12: Burst search and selection
Step13: Fret fit
Step14: Weighted mean of $E$ of each burst
Step15: Gaussian fit (no weights)
Step16: Gaussian fit (using burst size as weights)
Step17: Stoichiometry fit
Step18: The Maximum likelihood fit for a Gaussian population is the mean
Step19: Computing the weighted mean and weighted standard deviation we get
Step20: Save data to file
Step21: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step22: This is just a trick to format the different variables
|
<ASSISTANT_TASK:>
Python Code:
ph_sel_name = "None"
data_id = "27d"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
data_id
d = loader.photon_hdf5(filename=files_dict[data_id])
d.ph_times_t, d.det_t
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
d_orig = d
d = bext.burst_search_and_gate(d, m=10, F=7)
assert d.dir_ex == 0
assert d.leakage == 0
print(d.ph_sel)
dplot(d, hist_fret);
# if data_id in ['7d', '27d']:
# ds = d.select_bursts(select_bursts.size, th1=20)
# else:
# ds = d.select_bursts(select_bursts.size, th1=30)
ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)
n_bursts_all = ds.num_bursts[0]
def select_and_plot_ES(fret_sel, do_sel):
ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)
ds_do = ds.select_bursts(select_bursts.ES, **do_sel)
bpl.plot_ES_selection(ax, **fret_sel)
bpl.plot_ES_selection(ax, **do_sel)
return ds_fret, ds_do
ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)
if data_id == '7d':
fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)
do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '12d':
fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '17d':
fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '22d':
fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '27d':
fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
bandwidth = 0.03
n_bursts_fret = ds_fret.num_bursts[0]
n_bursts_fret
dplot(ds_fret, hist2d_alex, scatter_alpha=0.1);
nt_th1 = 50
dplot(ds_fret, hist_size, which='all', add_naa=False)
xlim(-0, 250)
plt.axvline(nt_th1)
Th_nt = np.arange(35, 120)
nt_th = np.zeros(Th_nt.size)
for i, th in enumerate(Th_nt):
ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)
nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th
plt.figure()
plot(Th_nt, nt_th)
plt.axvline(nt_th1)
nt_mean = nt_th[np.where(Th_nt == nt_th1)][0]
nt_mean
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')
E_fitter = ds_fret.E_fitter
E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))
E_fitter.fit_res[0].params.pretty_print()
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(E_fitter, ax=ax[0])
mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))
display(E_fitter.params*100)
# ds_fret.add(E_fitter = E_fitter)
# dplot(ds_fret, hist_fret_kde, weights='size', bins=np.r_[-0.2:1.2:bandwidth], bandwidth=bandwidth);
# plt.axvline(E_pr_fret_kde, ls='--', color='r')
# print(ds_fret.ph_sel, E_pr_fret_kde)
ds_fret.fit_E_m(weights='size')
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')
E_kde_w = E_fitter.kde_max_pos[0]
E_gauss_w = E_fitter.params.loc[0, 'center']
E_gauss_w_sig = E_fitter.params.loc[0, 'sigma']
E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))
E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr
E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)
S_fitter = ds_fret.S_fitter
S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(S_fitter, ax=ax[0])
mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))
display(S_fitter.params*100)
S_kde = S_fitter.kde_max_pos[0]
S_gauss = S_fitter.params.loc[0, 'center']
S_gauss_sig = S_fitter.params.loc[0, 'sigma']
S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))
S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr
S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
S = ds_fret.S[0]
S_ml_fit = (S.mean(), S.std())
S_ml_fit
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)
S_mean = np.dot(weights, S)/weights.sum()
S_std_dev = np.sqrt(
np.dot(weights, (S - S_mean)**2)/weights.sum())
S_wmean_fit = [S_mean, S_std_dev]
S_wmean_fit
sample = data_id
variables = ('sample n_bursts_all n_bursts_fret '
'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '
'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '
'nt_mean\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-PR-raw-AND-gate.csv', 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utility Methods
Step2: Preparing data set sweep
Step3: Preparing the method/parameter combinations and generating commands
Step4: Preparing the pipelines
Step5: Test
Step6: Do the Sweep
Step7: A quick sanity check never hurt anyone...
Step8: Generate per-method biom tables
Step9: Move result files to repository
|
<ASSISTANT_TASK:>
Python Code:
from os.path import join, exists, split, sep, expandvars
from os import makedirs, getpid
from glob import glob
from shutil import rmtree
import csv
import json
import tempfile
from itertools import product
from qiime2.plugins import feature_classifier
from qiime2 import Artifact
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from q2_feature_classifier.classifier import spec_from_pipeline
from q2_types.feature_data import DNAIterator
from pandas import DataFrame
from tax_credit.framework_functions import (
gen_param_sweep, generate_per_method_biom_tables, move_results_to_repository)
project_dir = expandvars('$HOME/Desktop/projects/short-read-tax-assignment/')
analysis_name = 'mock-community'
data_dir = join(project_dir, 'data', analysis_name)
reference_database_dir = expandvars("$HOME/Desktop/ref_dbs/")
results_dir = expandvars("$HOME/Desktop/projects/mock-community/")
# *** one glaring flaw here is that generate_pipeline_sweep iterates
# *** through all method_parameters_combinations and reference_dbs
# *** and hence will generate training sets for each combo even if
# *** not all are called by commands in sweep. This is not an issue
# *** if sweep uses all classifiers but is inconvenient if attempting
# *** to test on a subset of sweep. Need to explicitly set all inputs!
def train_and_run_classifier(method_parameters_combinations, reference_dbs,
pipelines, sweep, verbose=False, n_jobs=4):
'''Train and run q2-feature-classifier across a parameter sweep.
method_parameters_combinations: dict of dicts of lists
Classifier methods to run and their parameters/values to sweep
Format: {method_name: {'parameter_name': [parameter_values]}}
reference_dbs: dict of tuples
Reference databases to use for classifier training.
Format: {database_name: (ref_seqs, ref_taxonomy)}
pipelines: dict
Classifier pipelines to use for training each method.
Format: {method_name: sklearn.pipeline.Pipeline}
sweep: list of tuples
output of gen_param_sweep(), format:
(parameter_output_dir, input_dir, reference_seqs, reference_tax, method, params)
n_jobs: number of jobs to run in parallel.
'''
# train classifier once for each pipeline param combo
for method, db, pipeline_param, subsweep in generate_pipeline_sweep(
method_parameters_combinations, reference_dbs, sweep):
ref_reads, ref_taxa = reference_dbs[db]
# train classifier
classifier = train_classifier(
ref_reads, ref_taxa, pipeline_param, pipelines[method], verbose=verbose)
# run classifier. Only run in parallel once classifier is trained,
# to minimize memory usage (don't want to train large refs in parallel)
Parallel(n_jobs=n_jobs)(delayed(run_classifier)(
classifier, output_dir, input_dir, split_params(params)[0], verbose=verbose)
for output_dir, input_dir, rs, rt, mt, params in subsweep)
def generate_pipeline_sweep(method_parameters_combinations, reference_dbs, sweep):
'''Generate pipeline parameters for each classifier training step'''
# iterate over parameters
for method, params in method_parameters_combinations.items():
# split out pipeline parameters
classifier_params, pipeline_params = split_params(params)
# iterate over reference dbs
for db, refs in reference_dbs.items():
# iterate over all pipeline parameter combinations
for param_product in product(*[params[id_] for id_ in pipeline_params]):
# yield parameter combinations to use for a each classifier
pipeline_param = dict(zip(pipeline_params, param_product))
subsweep = [p for p in sweep if split_params(p[5])[1]
== pipeline_param and p[2] == refs[0]]
yield method, db, pipeline_param, subsweep
def train_classifier(ref_reads, ref_taxa, params, pipeline, verbose=False):
ref_reads = Artifact.load(ref_reads)
ref_taxa = Artifact.load(ref_taxa)
pipeline.set_params(**params)
spec = json.dumps(spec_from_pipeline(pipeline))
if verbose:
print(spec)
classifier = feature_classifier.methods.fit_classifier(ref_reads, ref_taxa, spec)
#return classifier.classifier
def run_classifier(classifier, output_dir, input_dir, params, verbose=False):
# Classify the sequences
rep_seqs = Artifact.load(join(input_dir, 'rep_seqs.qza'))
if verbose:
print(output_dir)
classification = feature_classifier.methods.classify(rep_seqs, classifier, **params)
# Save the results
makedirs(output_dir, exist_ok=True)
output_file = join(output_dir, 'rep_set_tax_assignments.txt')
dataframe = classification.classification.view(DataFrame)
dataframe.to_csv(output_file, sep='\t', header=False)
def split_params(params):
classifier_params = feature_classifier.methods.\
classify.signature.parameters.keys()
pipeline_params = {k:v for k, v in params.items()
if k not in classifier_params}
classifier_params = {k:v for k, v in params.items()
if k in classifier_params}
return classifier_params, pipeline_params
dataset_reference_combinations = [
('mock-1', 'gg_13_8_otus'), # formerly S16S-1
('mock-2', 'gg_13_8_otus'), # formerly S16S-2
('mock-3', 'gg_13_8_otus'), # formerly Broad-1
('mock-4', 'gg_13_8_otus'), # formerly Broad-2
('mock-5', 'gg_13_8_otus'), # formerly Broad-3
# ('mock-6', 'gg_13_8_otus'), # formerly Turnbaugh-1
('mock-7', 'gg_13_8_otus'), # formerly Turnbaugh-2
('mock-8', 'gg_13_8_otus'), # formerly Turnbaugh-3
('mock-9', 'unite_20.11.2016_clean_fullITS'), # formerly ITS1
('mock-10', 'unite_20.11.2016_clean_fullITS'), # formerly ITS2-SAG
('mock-12', 'gg_13_8_otus'), # Extreme
# ('mock-13', 'gg_13_8_otus_full16S'), # kozich-1
# ('mock-14', 'gg_13_8_otus_full16S'), # kozich-2
# ('mock-15', 'gg_13_8_otus_full16S'), # kozich-3
('mock-16', 'gg_13_8_otus'), # schirmer-1
]
reference_dbs = {'gg_13_8_otus' : (join(reference_database_dir, 'gg_13_8_otus/rep_set/99_otus_515f-806r.qza'),
join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),
# 'gg_13_8_otus_full16S' : (join(reference_database_dir, 'gg_13_8_otus/rep_set/99_otus.qza'),
# join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza')),
'unite_20.11.2016_clean_fullITS' : (join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_refs_qiime_ver7_99_20.11.2016_dev_clean.qza'),
join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_taxonomy_qiime_ver7_99_20.11.2016_dev_clean.qza')),
# 'unite_20.11.2016' : (join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_refs_qiime_ver7_99_20.11.2016_dev_BITSf-B58S3r_trim250.qza'),
# join(reference_database_dir, 'sh_qiime_release_20.11.2016/developer/sh_taxonomy_qiime_ver7_99_20.11.2016_dev.qza'))
}
method_parameters_combinations = {
'q2-multinomialNB': {'confidence': [0.0, 0.2, 0.4, 0.6, 0.8],
'classify__alpha': [0.001, 0.01, 0.1],
'feat_ext__ngram_range': [[8,8], [12,12], [20,20]]},
'q2-logisticregression': {'classify__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag']},
'q2-randomforest': {'classify__max_features': ['sqrt', 'None'],
'classify__n_estimators': [5, 10, 100]}
}
# pipeline params common to all classifiers are set here
hash_params = dict(
analyzer='char_wb', n_features=8192, non_negative=True, ngram_range=[8, 8])
# any params common to all classifiers can be set here
classify_params = dict()
def build_pipeline(classifier, hash_params, classify_params):
return Pipeline([
('feat_ext', HashingVectorizer(**hash_params)),
('classify', classifier(**classify_params))])
# Now fit the pipelines.
pipelines = {'q2-multinomialNB': build_pipeline(
MultinomialNB, hash_params, {'fit_prior': False}),
'q2-logisticregression': build_pipeline(
LogisticRegression, hash_params, classify_params),
'q2-randomforest': build_pipeline(
RandomForestClassifier, hash_params, classify_params)}
dataset_reference_combinations = [
('mock-3', 'gg_13_8_otus'), # formerly Broad-1
]
method_parameters_combinations = {
'q2-randomforest': {'classify__max_features': ['sqrt'],
'classify__n_estimators': [5]}
}
reference_dbs = {'gg_13_8_otus' : (join(reference_database_dir, 'gg_13_8_otus/rep_set/99_otus_515f-806r.qza'),
join(reference_database_dir, 'gg_13_8_otus/taxonomy/99_otu_taxonomy.qza'))}
sweep = gen_param_sweep(data_dir, results_dir, reference_dbs,
dataset_reference_combinations,
method_parameters_combinations)
sweep = list(sweep)
print(len(sweep))
sweep[0]
train_and_run_classifier(method_parameters_combinations, reference_dbs, pipelines, sweep, verbose=True, n_jobs=4)
taxonomy_glob = join(results_dir, '*', '*', '*', '*', 'rep_set_tax_assignments.txt')
generate_per_method_biom_tables(taxonomy_glob, data_dir)
precomputed_results_dir = join(project_dir, "data", "precomputed-results", analysis_name)
method_dirs = glob(join(results_dir, '*', '*', '*', '*'))
move_results_to_repository(method_dirs, precomputed_results_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'miroc6', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
DOC.set_value("Other: concentration, ice thickness, snow thickness, ice temperature, horizontal velocity")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
DOC.set_value("Ocean grid")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
DOC.set_value("Other: 5")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
DOC.set_value("Other: linear remapping")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
DOC.set_value("Other: hibler (1979) type, p*=2.0e2 n/m")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
DOC.set_value("Ridging")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
DOC.set_value("Other: evp")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
DOC.set_value("Other: multi-layer on a regular vertical grid")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
DOC.set_value("Other: parametrized (calculated in seaice)")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("From open water, bottom, and lateral")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
DOC.set_value("Other: no")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
DOC.set_value("Snow-ice")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
DOC.set_value("Other: one layer")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
DOC.set_value("Other: 0.85-0.95 for visible, 0.65-0.8 for near infrared, depends on temperature")
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Import helper functions
Step3: Change matplotlib graph style
Step4: Constants
Step5: Get number of classes
Step6: Fetch and decode data
Step7: Normal labels
Step8: One hot encoded labels
Step9: Load the testing dataset.
Step10: The CIFAR-10 dataset has been loaded and consists of a total of 60,000 images and corresponding labels.
Step11: Build model
Step12: Train model on the training data
Step13: Configure the model for training
Step14: For more information on categorical cross entropy loss function see - https
Step15: Evaluate the model
Step16: Model accuracy and loss plots
Step17: Predictions
Step18: Get the index of the largest element in each vector
Step19: Check which labels have been predicted correctly
Step20: Calculate accuracy using manual calculation
Step21: Show some mis-classifications
Step22: Plot the first 9 mis-classified images
|
<ASSISTANT_TASK:>
Python Code:
%%bash
source ~/kerai/bin/activate
%matplotlib inline
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
from helper import get_class_names, get_train_data, get_test_data, plot_images, plot_model
matplotlib.style.use('ggplot')
class_names = get_class_names()
print(class_names)
num_classes = len(class_names)
print(num_classes)
# Hight and width of the images
IMAGE_SIZE = 32
# 3 channels, Red, Green and Blue
CHANNELS = 3
images_train, labels_train, class_train = get_train_data()
print(labels_train)
print(class_train)
images_test, labels_test, class_test = get_test_data()
print("Training set size:\t",len(images_train))
print("Testing set size:\t",len(images_test))
def cnn_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(IMAGE_SIZE,IMAGE_SIZE,CHANNELS)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
return model
model = cnn_model()
checkpoint = ModelCheckpoint('best_model_simple.h5', # model filename
monitor='val_loss', # quantity to monitor
verbose=0, # verbosity - 0 or 1
save_best_only= True, # The latest best model will not be overwritten
mode='auto') # The decision to overwrite model is made
# automatically depending on the quantity to monitor
model.compile(loss='categorical_crossentropy', # Better loss function for neural networks
optimizer=Adam(lr=1.0e-4), # Adam optimizer with 1.0e-4 learning rate
metrics = ['accuracy']) # Metrics to be evaluated by the model
model_details = model.fit(images_train, class_train,
batch_size = 128, # number of samples per gradient update
epochs = 100, # number of iterations
validation_data= (images_test, class_test),
callbacks=[checkpoint],
verbose=1)
scores = model.evaluate(images_test, class_test, verbose=0)
print("Accuracy: %.2f%%" % (scores[1]*100))
plot_model(model_details)
class_pred = model.predict(images_test, batch_size=32)
print(class_pred[0])
labels_pred = np.argmax(class_pred,axis=1)
print(labels_pred)
correct = (labels_pred == labels_test)
print(correct)
print("Number of correct predictions: %d" % sum(correct))
num_images = len(correct)
print("Accuracy: %.2f%%" % ((sum(correct)*100)/num_images))
incorrect = (correct == False)
# Images of the test-set that have been incorrectly classified.
images_error = images_test[incorrect]
# Get predicted classes for those images
labels_error = labels_pred[incorrect]
# Get true classes for those images
labels_true = labels_test[incorrect]
plot_images(images=images_error[0:9],
labels_true=labels_true[0:9],
class_names=class_names,
labels_pred=labels_error[0:9])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame.from_dict({'id': ['A', 'B', 'A', 'C', 'D', 'B', 'C'],
'val': [1,2,-3,1,5,6,-2],
'stuff':['12','23232','13','1234','3235','3236','732323']})
def g(df):
df['cumsum'] = df.groupby('id')['val'].transform(pd.Series.cumsum)
return df
df = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What is it?
Step2: Define a Function to Read and Preprocess Text
Step3: Let's take a look at the training corpus
Step4: And the testing corpus looks like this
Step5: Notice that the testing corpus is just a list of lists and does not contain any tags.
Step6: Build a Vocabulary
Step7: Essentially, the vocabulary is a dictionary (accessible via model.wv.vocab) of all of the unique words extracted from the training corpus along with the count (e.g., model.wv.vocab['penalty'].count for counts for the word penalty).
Step8: Inferring a Vector
Step9: Assessing Model
Step10: Let's count how each document ranks with respect to the training corpus
Step11: Basically, greater than 95% of the inferred documents are found to be most similar to itself and about 5% of the time it is mistakenly most similar to another document. the checking of an inferred-vector against a training-vector is a sort of 'sanity check' as to whether the model is behaving in a usefully consistent manner, though not a real 'accuracy' value.
Step12: Notice above that the most similar document is has a similarity score of ~80% (or higher). However, the similarity score for the second ranked documents should be significantly lower (assuming the documents are in fact different) and the reasoning becomes obvious when we examine the text itself
Step13: Testing the Model
|
<ASSISTANT_TASK:>
Python Code:
import gensim
import os
import collections
import smart_open
import random
# Set file names for train and test data
test_data_dir = '{}'.format(os.sep).join([gensim.__path__[0], 'test', 'test_data'])
lee_train_file = test_data_dir + os.sep + 'lee_background.cor'
lee_test_file = test_data_dir + os.sep + 'lee.cor'
def read_corpus(fname, tokens_only=False):
with smart_open.smart_open(fname, encoding="iso-8859-1") as f:
for i, line in enumerate(f):
if tokens_only:
yield gensim.utils.simple_preprocess(line)
else:
# For training data, add tags
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [i])
train_corpus = list(read_corpus(lee_train_file))
test_corpus = list(read_corpus(lee_test_file, tokens_only=True))
train_corpus[:2]
print(test_corpus[:2])
model = gensim.models.doc2vec.Doc2Vec(size=50, min_count=2, iter=55)
model.build_vocab(train_corpus)
%time model.train(train_corpus, total_examples=model.corpus_count, epochs=model.iter)
model.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])
ranks = []
second_ranks = []
for doc_id in range(len(train_corpus)):
inferred_vector = model.infer_vector(train_corpus[doc_id].words)
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
rank = [docid for docid, sim in sims].index(doc_id)
ranks.append(rank)
second_ranks.append(sims[1])
collections.Counter(ranks) # Results vary due to random seeding and very small corpus
print('Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
# Pick a random document from the test corpus and infer a vector from the model
doc_id = random.randint(0, len(train_corpus))
# Compare and print the most/median/least similar documents from the train corpus
print('Train Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
sim_id = second_ranks[doc_id]
print('Similar Document {}: «{}»\n'.format(sim_id, ' '.join(train_corpus[sim_id[0]].words)))
# Pick a random document from the test corpus and infer a vector from the model
doc_id = random.randint(0, len(test_corpus))
inferred_vector = model.infer_vector(test_corpus[doc_id])
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
# Compare and print the most/median/least similar documents from the train corpus
print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id])))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span style="color
Step2: <span style="color
Step3: Python's built-in mathematical operators include +, -, *, **, for exponentiation, / for division, // for integer division (without rest), % for division but giving you the rest.
Step4: And here is how to use an existing variable
Step5: <span style="color
Step6: When a command requires input arguments, as for math.sqrt(put a number here), a handy keyboard shortcut is Shift-Tab.
Step7: Note how we used the # character to write comments. Whereas Python discards the comments completely, adding comments in the code is important when the code is to be read by other humans (including yourself in the future).
Step8: Python tries to tell you where it stopped understanding, but in the above examples, each program is only 1 line long.
Step9: The Written Word, i.e. strings
Step10: Strings are surrounded by quotes. Without the quotes Hello by itself would be viewed as a variable name.
Step11: Strings in Python are a bit more complicated because the operations on them aren't just + and * (though those are valid operations).
Step12: Lists
Step13: Note the syntax we used to create the list
Step14: Now, let's compute the sum of all elements in the list. Python provides a built-in function for this
Step15: We can also access individual elements in the list, using the following syntax
Step16: Note that indexing starts at 0 in Python
Step17: We can access sublists with the following syntax
Step18: Here, 1
Step19: The keys in a dictionary can be anything including numbers
Step20: <span style="color
Step21: Note that the for loop steps in sequence through the numbers in the list.
Step22: There are several things to note here
Step23: This is called a list comprehension. A new list is created here; it contains the squares of all numbers in the list. This concise syntax leads to highly readable and Pythonic code.
|
<ASSISTANT_TASK:>
Python Code:
# ignore the first two lines: they allow you to show multiple outputs per cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import this
print("The classic view of the central dogma of biology states that \
'the coded genetic information hard-wired into DNA is transcribed into \
individual transportable cassettes, composed of messenger RNA (mRNA); \
each mRNA cassette contains the program for synthesis of a particular \
protein (or small number of proteins).'")
2 * 2
3/2
3//2
6%2
8%3
2**5
avogadro = 6e23
print('How many molecules are contained in a mole? Answer:',avogadro,'molecules.')
print('How many molecules in a 2 moles? Answer:',2*avogadro,'molecules')
import math # load some common mathematical operations
?math.sqrt(2)
somefloat = 3.1415
sometext = 'pi is about' # You can also use double quotes.
print(sometext, somefloat) # Display several variables.
I_am_true = False
I_am_true
gibberish
*adsflf_
print('Hello'
1v34
2000 / 0
x = 'One definition of systems biology: the study of the interactions between the components of biological systems, \
and how these interactions give rise to the function and behavior of that system (for example, the enzymes and \
metabolites in a metabolic pathway or the heart beats)' # string
print(type(x))
print(x)
print()
x = [0.5,2,24] # list
print(type(x))
print(x)
print('The (rough) cell cycle time of e.coli =',x[0],'hrs, of yeast =',x[1],'hrs, of a human cell',x[2],'hrs')
print()
x = {'e. coli':5,'yeast':12,'human':2.9e3} # dictionary
print(type(x))
print(x)
print('The genome size of e.coli =',x['e. coli'],'Mbp, of yeast =',x['yeast'],'Mbp, of a human cell',x['human'],'Mbp')
"Hello, World!"
your_name = "James Watson"
print("Hello,",your_name)
your_string = 'something'
your_string
items = [1, 3, 0, 4, 1]
len(items)
sum(items)
items[0]
items[-1]
items[1] = 9
items
items[1:3]
my_dict = {'a': 1, 'b': 2, 'c': 3}
print('a:', my_dict['a'])
list(my_dict.keys())
my_dict = {18: 1, 23: 2, 0: 3}
my_dict[18]
a_list = [1,2,3,4,5,6]
for number in a_list:
number
genome_sizes = {'e. coli':5,'yeast':12,'human':2.9e3} # dictionary
print('A list of genome-sizes in #Mbp:')
for organism in genome_sizes.keys():
print(organism,genome_sizes[organism])
items = [1,2,3,4,5,6]
squares = [item * item for item in items]
squares
even = [item for item in items if item % 2 == 0]
even
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Fifty Books
Step2: Cleaning up the Data
Step3: Next, we iterate through all of the words, strip all characters that are not upper or lower-case letters. If the the resulting word is considered, non-empty, we throw it out. Else, we add the word in all lowercase stripped of all non-ASCII letters to our list of words for that book.
Step4: Determining Frequencies
Step5: Top 20 Words
Step6: Creating the 20-dimensional vectors
Step7: Creating the Elbow Graph
Step8: We can see that the best k is 3 or 6.
Step9: Do the clusters make sense?
Step10: We can see the data clusters well and the most important words are i and the based on them having the standard deviation. This is based on the concept of PCA.fracs aligning to the variance based on this documentation
Step11: Now, let's do k-means based on the previously determined k.
Step12: We can see that the new book is the black square above. In addition, it makes sense it fits into that cluster especially when we compare it to Jane Eyre.
Step13: We can see that our k could be 3 or 7. Let's choose 7.
|
<ASSISTANT_TASK:>
Python Code:
## Imports!
%matplotlib inline
import os
import re
import string
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.mlab import PCA
from scipy.cluster.vq import kmeans, vq
os.chdir("/home/ryan/School/scientific_computing/labs/lab4/books")
filenames = os.listdir()
books = []
for name in filenames:
with open(name) as f:
books.append(f.read())
def get_title(text):
pattern = "\*\*\*\s*START OF (THIS|THE) PROJECT GUTENBERG EBOOK ([A-Z,;' ]*)\*\*\*"
m = re.search(pattern, text)
if m:
return m.group(2).strip()
return None
def remove_gutenberg_info(text):
pattern = "\*\*\*\s*START OF (THIS|THE) PROJECT GUTENBERG EBOOK ([A-Z,;' ]*)\*\*\*"
start = re.search(pattern, text).end()
pattern = "\*\*\*\s*END OF (THIS|THE) PROJECT GUTENBERG EBOOK ([A-Z,;' ]*)\*\*\*"
end = re.search(pattern, text).start()
return text[start:end]
cut_off_books = { get_title(book):remove_gutenberg_info(book) for book in books}
pd.DataFrame(cut_off_books, index=["Book's Text"]).T.head()
def strip_word(word, alphabet):
ret = ""
for c in word:
if c in alphabet:
ret += c.lower()
if len(ret) == 0:
return None
else:
return ret
def get_words(book):
alphabet = set(string.ascii_letters)
b = book.split()
words = []
for word in b:
w = strip_word(word, alphabet)
if w:
words.append(w)
return words
cut_books = {name:get_words(book) for name, book in cut_off_books.items()}
def get_word_freq(words):
word_counts = {}
for word in words:
if word in word_counts:
word_counts[word] += 1
else:
word_counts[word] = 1
return word_counts
book_freqs = {}
for name, words in cut_books.items():
book_freqs[name] = get_word_freq(words)
total_word_count = {}
for dicts in book_freqs.values():
for word, count in dicts.items():
if word in total_word_count:
total_word_count[word] += count
else:
total_word_count[word] = count
a, b = zip(*total_word_count.items())
tuples = list(zip(b, a))
tuples.sort()
tuples.reverse()
tuples[:20]
_, top_20_words = zip(*tuples[:20])
top_20_words
def filter_frequencies(frequencies, words):
d = {}
for word, freq in frequencies.items():
if word in words:
d[word] = freq
return d
labels = {}
for name, freqs in book_freqs.items():
labels[name] = filter_frequencies(freqs, top_20_words)
df = pd.DataFrame(labels).fillna(0)
df = (df / df.sum()).T
df.head()
kvals = []
dists = []
for k in range(2, 11):
centroids, distortion = kmeans(df, k)
kvals.append(k)
dists.append(distortion)
plt.plot(kvals, dists)
plt.show()
centroids, _ = kmeans(df, 3)
idx, _ = vq(df, centroids)
clusters = {}
for i, cluster in enumerate(idx):
if cluster in clusters:
clusters[cluster].append(df.iloc[i].name)
else:
clusters[cluster] = [df.iloc[i].name]
clusters
m = PCA(df)
fig, ax = plt.subplots()
for i in range(len(idx)):
plt.plot(m.Y[idx==i, 0], m.Y[idx==i, 1], "o", alpha=.75)
for index, (x, y) in enumerate(zip(m.Y[:, 0], m.Y[:, 1])):
plt.text(x, y, df.index[index])
fig.set_size_inches(36,40)
plt.show()
m.sigma.sort_values()[-2:]
with open("../pg45.txt") as f:
anne = f.read()
get_title(anne)
anne_cut = remove_gutenberg_info(anne)
anne_words = get_words(anne_cut)
anne_freq = {get_title(anne):filter_frequencies(get_word_freq(anne_words), top_20_words)}
anne_frame = pd.DataFrame(anne_freq).fillna(0)
anne_frame = (anne_frame / anne_frame.sum()).T
anne_frame
df_with_anne = df.append(anne_frame).sort_index()
centroids, _ = kmeans(df_with_anne, 3)
idx2, _ = vq(df_with_anne, centroids)
clusters = {}
for i, cluster in enumerate(idx2):
if cluster in clusters:
clusters[cluster].append(df_with_anne.iloc[i].name)
else:
clusters[cluster] = [df_with_anne.iloc[i].name]
clusters
coords = m.project(np.array(anne_frame).flatten())
fig, _ = plt.subplots()
plt.plot(coords[0], coords[1], "s", markeredgewidth=5)
for i in range(len(idx)):
plt.plot(m.Y[idx==i, 0], m.Y[idx==i, 1], "o", alpha=.75)
for index, (x, y) in enumerate(zip(m.Y[:, 0], m.Y[:, 1])):
plt.text(x, y, df.index[index])
fig.set_size_inches(36,40)
plt.show()
stop_words_text = open("../common-english-words.txt").read()
stop_words = stop_words_text.split(",")
stop_words[:5]
word_counts_without_stop = [t for t in tuples if t[1] not in stop_words]
word_counts_without_stop[:20]
_, top_20_without_stop = zip(*word_counts_without_stop[:20])
top_20_without_stop
no_stop_labels = {}
for name, freqs in book_freqs.items():
no_stop_labels[name] = filter_frequencies(freqs, top_20_without_stop)
df_without_stop = pd.DataFrame(no_stop_labels).fillna(0)
df_without_stop = (df_without_stop / df_without_stop.sum()).T
df_without_stop.head()
kvals = []
dists = []
for k in range(2, 11):
centroids, distortion = kmeans(df_without_stop, k)
kvals.append(k)
dists.append(distortion)
plt.plot(kvals, dists)
plt.show()
centroids, _ = kmeans(df_without_stop, 7)
idx3, _ = vq(df, centroids)
clusters = {}
for i, cluster in enumerate(idx3):
if cluster in clusters:
clusters[cluster].append(df_without_stop.iloc[i].name)
else:
clusters[cluster] = [df_without_stop.iloc[i].name]
clusters
m2 = PCA(df_without_stop)
fig, _ = plt.subplots()
for i in range(len(idx3)):
plt.plot(m2.Y[idx3==i, 0], m2.Y[idx3==i, 1], "o", alpha=.75)
for index, (x, y) in enumerate(zip(m2.Y[:, 0], m2.Y[:, 1])):
plt.text(x, y, df_without_stop.index[index])
fig.set_size_inches(36,40)
plt.show()
m2.sigma.sort_values()[-2:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We've selected four features that cover a range of properties. Each of these features also has a high MI score with the target, price. We'll standardize the data since these features aren't naturally on the same scale.
Step2: Now we can fit scikit-learn's PCA estimator and create the principal components. You can see here the first few rows of the transformed dataset.
Step3: After fitting, the PCA instance contains the loadings in its components_ attribute. (Terminology for PCA is inconsistent, unfortunately. We're following the convention that calls the transformed columns in X_pca the components, which otherwise don't have a name.) We'll wrap the loadings up in a dataframe.
Step4: Recall that the signs and magnitudes of a component's loadings tell us what kind of variation it's captured. The first component (PC1) shows a contrast between large, powerful vehicles with poor gas milage, and smaller, more economical vehicles with good gas milage. We might call this the "Luxury/Economy" axis. The next figure shows that our four chosen features mostly vary along the Luxury/Economy axis.
Step5: Let's also look at the MI scores of the components. Not surprisingly, PC1 is highly informative, though the remaining components, despite their small variance, still have a significant relationship with price. Examining those components could be worthwhile to find relationships not captured by the main Luxury/Economy axis.
Step6: The third component shows a contrast between horsepower and curb_weight -- sports cars vs. wagons, it seems.
Step7: To express this contrast, let's create a new ratio feature
|
<ASSISTANT_TASK:>
Python Code:
#$HIDE_INPUT$
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from IPython.display import display
from sklearn.feature_selection import mutual_info_regression
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
plt.rc(
"axes",
labelweight="bold",
labelsize="large",
titleweight="bold",
titlesize=14,
titlepad=10,
)
def plot_variance(pca, width=8, dpi=100):
# Create figure
fig, axs = plt.subplots(1, 2)
n = pca.n_components_
grid = np.arange(1, n + 1)
# Explained variance
evr = pca.explained_variance_ratio_
axs[0].bar(grid, evr)
axs[0].set(
xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0)
)
# Cumulative Variance
cv = np.cumsum(evr)
axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-")
axs[1].set(
xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0)
)
# Set up figure
fig.set(figwidth=8, dpi=100)
return axs
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
df = pd.read_csv("../input/fe-course-data/autos.csv")
features = ["highway_mpg", "engine_size", "horsepower", "curb_weight"]
X = df.copy()
y = X.pop('price')
X = X.loc[:, features]
# Standardize
X_scaled = (X - X.mean(axis=0)) / X.std(axis=0)
from sklearn.decomposition import PCA
# Create principal components
pca = PCA()
X_pca = pca.fit_transform(X_scaled)
# Convert to dataframe
component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
X_pca = pd.DataFrame(X_pca, columns=component_names)
X_pca.head()
loadings = pd.DataFrame(
pca.components_.T, # transpose the matrix of loadings
columns=component_names, # so the columns are the principal components
index=X.columns, # and the rows are the original features
)
loadings
# Look at explained variance
plot_variance(pca);
mi_scores = make_mi_scores(X_pca, y, discrete_features=False)
mi_scores
# Show dataframe sorted by PC3
idx = X_pca["PC3"].sort_values(ascending=False).index
cols = ["make", "body_style", "horsepower", "curb_weight"]
df.loc[idx, cols]
df["sports_or_wagon"] = X.curb_weight / X.horsepower
sns.regplot(x="sports_or_wagon", y='price', data=df, order=2);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-aerchem', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Option 2
Step2: Option 3
Step3: Step 3. Perform the NDVI calculation
Step4: As a quick check of our calculations, let's print the minimum and maximum values in our calculated ndvi. Because we're using the NDVI formula to normalize the input bands, we know that our expected values should fall within -1.0 to +1.0.
Step5: Assuming your min & max values are in-range -- congratulations! You have performed what is known as raster band math. Well done. This skill has many applications beyond the NDVI you're calculating in this exercise
Step7: Step 6. Apply a color scheme to visualize the NDVI values on the image
Step8: 7. Generate a histogram of NDVI values
|
<ASSISTANT_TASK:>
Python Code:
# To use Planet's CLI from this Notebook, begin your line as follows:
!planet data
# Here is an example of using Planet's CLI to search for a known item id:
# !planet data download --item-type PSScene --asset-type ortho_analytic_4b_sr --dest data --string-in id 20160831_180302_0e26
# To use Planet's API, you'll probably begin by importing your favorite HTTP toolkit, e.g.:
import requests
from requests.auth import HTTPBasicAuth
# Your Planet API key is available in this Notebook as an env variable, e.g.:
import os
PLANET_API_KEY = os.getenv('PL_API_KEY')
import rasterio
filename = "data/20160831_180302_0e26_3B_AnalyticMS_SR.tif"
# Load red and NIR bands - note all PlanetScope 4-band images have band order BGRN
with rasterio.open(filename) as src:
band_red = src.read(3)
with rasterio.open(filename) as src:
band_nir = src.read(4)
# allow division by zero without throwing a warning
import numpy
numpy.seterr(divide='ignore', invalid='ignore')
# Calculate NDVI - remember, bands read via rasterio are just numpy arrays
ndvi = (band_nir.astype(float) - band_red.astype(float)) / (band_nir + band_red)
# check range NDVI values, excluding NaN
print(numpy.nanmin(ndvi))
print(numpy.nanmax(ndvi))
# get the metadata of original GeoTIFF:
meta = src.meta
print(meta)
# get the dtype of our NDVI array:
ndvi_dtype = ndvi.dtype
print(ndvi_dtype)
# set the source metadata as kwargs we'll use to write the new data:
kwargs = meta
# update the 'dtype' value to match our NDVI array's dtype:
kwargs.update(dtype=ndvi_dtype)
# update the 'count' value since our output will no longer be a 4-band image:
kwargs.update(count=1)
# Finally, use rasterio to write new raster file 'data/ndvi.tif':
with rasterio.open('data/ndvi.tif', 'w', **kwargs) as dst:
dst.write(ndvi, 1)
from matplotlib import colors
# Credit: Joe Kington
class MidpointNormalize(colors.Normalize):
Normalize the colorbar so that diverging bars work there way either side from a prescribed midpoint value
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return numpy.ma.masked_array(numpy.interp(value, x, y), numpy.isnan(value))
# Begin by pulling in pyplot
import matplotlib.pyplot as plt
# Set min/max values from NDVI range for image
# HINT: refer back to earlier, when we verified our min & max values were within expected range
min=numpy.nanmin(ndvi)
max=numpy.nanmax(ndvi)
# Set our custom midpoint for most effective NDVI analysis
mid=0.1
# Set your favorite diverging color scheme
# You can use https://matplotlib.org/users/colormaps.html as a reference
colormap = plt.cm.RdYlGn
# Call MidPointNormalize with our min, max, and custom midpoint
norm = MidpointNormalize(vmin=min, vmax=max, midpoint=mid)
# Create a pyplot figure, in which we'll display our colorized NDVI
fig = plt.figure(figsize=(20,10))
# Add a subplot to our figure, which will contain the colorbar
ax = fig.add_subplot(111)
# Use 'imshow' to specify the input data, colormap, min, max, and norm for the colorbar
cbar_plot = ax.imshow(ndvi, cmap=colormap, vmin=min, vmax=max, norm=norm)
# Turn off the display of axis labels
ax.axis('off')
# Set a title
ax.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')
# Configure the colorbar
cbar = fig.colorbar(cbar_plot, orientation='horizontal', shrink=0.65)
# Call 'savefig' to save this plot to an image file
fig.savefig("data/ndvi-fig.png", dpi=200, bbox_inches='tight', pad_inches=0.7)
# Finally - let's take a look!
plt.show()
# Define a new figure
fig2 = plt.figure(figsize=(20,10))
# Give this new figure a subplot, which will contain the histogram itself
ax = fig2.add_subplot(111)
# Add a title & (x,y) labels to the plot
plt.title("NDVI Histogram", fontsize=18, fontweight='bold')
plt.xlabel("NDVI values", fontsize=14)
plt.ylabel("Number of pixels", fontsize=14)
# For the x-axis, we want to count every pixel that is not an empty value
x = ndvi[~numpy.isnan(ndvi)]
# Define the number of bins to divide the data into
bins = 20
# Define a color for the histogram
# You can use https://matplotlib.org/2.0.0/examples/color/named_colors.html as a reference
color = 'lightgreen'
# call 'hist` with our x-axis, bins, and color details
ax.hist(x,bins,color=color)
# Save the generated figure to an external image file
fig2.savefig("data/ndvi-histogram.png", dpi=200, bbox_inches='tight', pad_inches=0.7)
# Finally - let's take a look!
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: n = 10
Step2: n=100
Step3: converting binary to decimal
Step4: testing more binary to decimal conversions
|
<ASSISTANT_TASK:>
Python Code:
n = 1
print n.bit_length()
a = n.bit_length()
print bin(n)
print '%0*d' % (a, int(bin(n)[2:]))
print '{0:08b}'.format(n)
n = 10
print n.bit_length()
a = n.bit_length()
print bin(n)
print '%0*d' % (a, int(bin(n)[2:]))
print '{0:08b}'.format(n)
n = 10
print n.bit_length()
a = n.bit_length()
print bin(n)
print '%0*d' % (a, int(bin(n)[2:]))
print '{0:08b}'.format(n)
n = 157
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(n.bit_length()):
print count
if n % 2:
count_one +=1
n = n /2
elif count_one == 1:
count_gap += 1
print "count gap: ", count_gap
n = n / 2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
print "binary gap", binarygap
count_one = 1
count_gap = 0
print binarygap
157 /2
78/2
n = 37
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(n.bit_length()):
print count
print "n", n
if n % 2:
count_one +=1
n = n /2
elif count_one == 1:
count_gap += 1
print "count gap: ", count_gap
n = n / 2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
print "binary gap", binarygap
count_one = 1
count_gap = 0
print binarygap
n = 142
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(n.bit_length()):
print count
print "n", n
if n % 2:
count_one +=1
n = n /2
elif count_one == 1:
count_gap += 1
print "count gap: ", count_gap
n = n / 2
else:
n = n/2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
print "binary gap", binarygap
count_one = 1
count_gap = 0
print binarygap
142 / 2
71/2
35/2
n = 488
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(n.bit_length()):
print count
print "n", n
if n % 2:
count_one +=1
n = n /2
elif count_one == 1:
count_gap += 1
print "count gap: ", count_gap
n = n / 2
else:
n = n/2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
print "binary gap", binarygap
count_one = 1
count_gap = 0
print binarygap
n = 181
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(n.bit_length()):
print count
print "n", n
if n % 2:
count_one +=1
n = n /2
elif count_one == 1:
count_gap += 1
print "count gap: ", count_gap
n = n / 2
else:
n = n/2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
print "binary gap", binarygap
count_one = 1
count_gap = 0
print binarygap
def solution(N):
count_one = 0
count_gap = 0
binarygap = 0
for count in xrange(N.bit_length()):
if N % 2:
count_one +=1
N = N /2
elif count_one == 1:
count_gap += 1
N = N / 2
else:
N = N/2
if count_one > 1:
if binarygap < count_gap:
binarygap = count_gap
count_one = 1
count_gap = 0
return binarygap
print solution(181)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install useful extensions
Step2: Then need to edit the file
Step3: and add an IPython load statment
Step4: The following command will download and copy the extension to the appropriate location.
Step5: Usability tools from the IPython-notebook extensions project
Step6: Activate Extensions
Step8: The following command will install the 4 extensions.
|
<ASSISTANT_TASK:>
Python Code:
# Initialize profile
import IPython
import os.path as path
# Create a profile
profile, = !ipython locate profile
if not profile:
!ipython profile create
print 'Your profile is located at:\t', profile
# Get local paths for later use
ip = IPython.get_ipython()
nbext = path.join(ip.ipython_dir, 'nbextensions')
custom = path.join(profile, 'static/custom/custom.js')
print "Extension files should be placed in: ", nbext
print "Extension settings should be added to: ", custom
print nbext
print custom
from IPython.display import YouTubeVideo
YouTubeVideo('o4xCp3b4oCw')
!ipython install-nbextension https://bitbucket.org/ipre/calico/downloads/calico-spell-check-1.0.zip --user
print nbext
%%bash -c "mydir=$nbext bash"
cd $HOME/Downloads
wget https://github.com/ipython-contrib/IPython-notebook-extensions/archive/master.zip --quiet
unzip -q master.zip
mv ./IPython-notebook-extensions-master/usability $mydir/
print custom
settings=require(["base/js/events"], function (events) {
events.on("app_initialized.NotebookApp", function () {
IPython.load_extensions('calico-spell-check');
IPython.load_extensions('usability/runtools/main');
IPython.load_extensions('usability/codefolding/main');
IPython.load_extensions('usability/python-markdown/main');
});
});
with open(custom, 'w') as OUT:
OUT.write(settings)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Working with Data
Step2: What can we do from here?
|
<ASSISTANT_TASK:>
Python Code:
def cdf_to_dataframe(netcdf_file, exclude_qc=True):
Takes in a netCDF object and returns a pandas DataFrame object
# import packages
from netCDF4 import Dataset
import pandas as pd
import datetime
with Dataset(netcdf_file, 'r') as D:
# create an empty dictionary for the netCDF variables
ncvars = {}
for v in D.variables.keys():
time_check = (D.variables[v].dimensions
== D.variables['time'].dimensions)
if exclude_qc:
qc_check = 'qc_' not in v
var_check = qc_check and time_check
else:
var_check = time_check
if var_check:
ncvars[v] = D.variables[v][:]
D = pd.DataFrame(ncvars,
index = (datetime.datetime.utcfromtimestamp(D.variables['base_time'][:])+
pd.to_timedelta(D.variables['time'][:], unit='s')))
return D
import os
file_path = os.path.abspath('enametC1.b1.20140531.000000.cdf')
DATA = cdf_to_dataframe(file_path)
import pandas as pd
import numpy as np
hourly = pd.TimeGrouper('1H')
T = DATA['temp_mean'].groupby(hourly).agg([np.min, np.mean, np.max])
T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's tackle the list of cafes first. In the cell below, write some code that creates a list of dictionaries with information about each cafe, assigning it to the variable cafe_list. I've written some of the code for you; you just need to fill in the rest. The list should end up looking like this
Step2: Great! In the following cell, write some code that creates a list of cats from the <table> tag on the page, storing them as a list of dictionaries in a variable called cat_list. Again, I've written a bit of the code for you. Expected output
Step3: Problem set #2
Step4: Here's a cell you can run if something goes wrong and you need to rollback the current query session
Step8: In the cell below, you're going to create three tables, necessary to represent the data you scraped above. I've given the basic framework of the Python code and SQL statements to create these tables. I've given the entire CREATE TABLE statement for the cafe table, but for the other two, you'll need to supply the field names and the data types for each column. If you're unsure what to call the fields, or what fields should be in the tables, consult the queries in "The Queries" below. Hints
Step9: After executing the above cell, issuing a \d command in psql should yield something that looks like the following
Step10: Issuing SELECT * FROM cafe in the psql client should yield something that looks like this
Step11: The dictionary maps the name of the cat cafe to its ID in the database. You'll need these values later when you're adding records to the linking table (cat_cafe).
Step12: Issuing a SELECT * FROM cat LIMIT 10 in psql should yield something that looks like this
Step14: In which zip codes can I find a lilac-colored tabby?
Step16: What's the average weight of cats currently residing at all locations?
Step18: Which location has the most cats with tortoiseshell coats?
|
<ASSISTANT_TASK:>
Python Code:
from bs4 import BeautifulSoup
from urllib.request import urlopen
html = urlopen("http://static.decontextualize.com/cats.html").read()
document = BeautifulSoup(html, "html.parser")
cafe_list = list()
cafe_table = document.find('table', {'class': 'cafe-list'})
tbody = cafe_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
name = tr_tag.find('td', {'class':'name'}).string
Zip = tr_tag.find('td', {'class':'zip'}).string
cafe_list.append({'name': name, 'zip': Zip})
cafe_list
cat_list = list()
cat_table = document.find('table', {'class': 'cat-list'})
tbody = cat_table.find('tbody')
for tr_tag in tbody.find_all('tr'):
name = tr_tag.find('td', {'class':'name'}).string
birthdate = tr_tag.find('td', {'class':'birthdate'}).string
weight = float(tr_tag.find('td', {'class':'weight'}).string)
coat_color = tr_tag.find('td', {'class':'color'}).string
where = tr_tag.find('td', {'class':'locations'}).string.split(', ')
coat_pattern = tr_tag.find('td', {'class':'pattern'}).string
cat_dict={
'birthdate': birthdate,
'color': coat_color,
'locations': where,
'name': name,
'pattern': coat_pattern,
'weight': weight}
cat_list.append(cat_dict)
cat_list
import pg8000
conn = pg8000.connect(database="catcafes")
conn.rollback()
cursor = conn.cursor()
cursor.execute(
CREATE TABLE cafe (
id serial,
name varchar(40),
zip varchar(5)
)
)
cursor.execute(
CREATE TABLE cat (
id serial,
name varchar(40),
birthdate varchar(10),
weight numeric(10,2),
color varchar(40),
pattern varchar(40)
)
)
cursor.execute(
CREATE TABLE cat_cafe (
cat_id int,
cafe_id int,
active boolean
)
)
conn.commit()
# cursor class: Allows Python code to execute PostgreSQL command in a database session.
cafe_name_id_map = {}
for item in cafe_list:#insert into SQL statement
cursor.execute("INSERT INTO cafe (name, zip) VALUES (%s, %s) RETURNING id",
[str(item['name']), str(item['zip'])])# %s: special replacement character sequence, string representation
rowid = cursor.fetchone()[0]
#cursor.fetchone(): retrieve the next row of a query result set and return a single sequence
#fetchone always returns a list of values, even if there's only one column;
cafe_name_id_map[str(item['name'])] = rowid
conn.commit()
cafe_name_id_map
conn.rollback()
import re
cat="INSERT INTO cat (name, birthdate, weight, color, pattern) VALUES (%s,%s,%s,%s,%s) RETURNING id"
cat_cafe="INSERT INTO cat_cafe(cat_id, cat_id, active) VALUES(%s,%s,%s)"
for cat in cat_list:
cursor.execute(cat,str(cat['name']),str(cat['birthdate']),float(cat['weight']),str(cat['color']),str(cat['pattern']))
rowid=cursor.fetchone()[0]
for item in cat['location']:
if re.search("\*",item):
location=item.replace("*","")
cursor.execute(cat_cafe,[rowid,cafe_name_id_map[location],True])
else:
cursor.execute(catcafe_insert,[rowid,cafe_name_id_map[loc],False])
conn.commit()
cursor.execute("SELECT max(birthdate) FROM cat")
birthdate = cursor.fetchone()[0]
cursor.execute("SELECT name FROM cat WHERE birthdate = %s", [birthdate])
print(cursor.fetchone()[0])
cursor.execute(SELECT DISTINCT(cafe.zip)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat.color = 'lilac' AND cat.pattern = 'tabby' AND cat_cafe.active = true
)
print(', '.join([x[0] for x in cursor.fetchall()]))
cursor.execute(
SELECT cafe.name, avg(cat.weight)
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true
GROUP BY cafe.name
)
for rec in cursor.fetchall():
print(rec[0]+":", "%0.2f" % rec[1])
cursor.execute(
SELECT cafe.name
FROM cat
JOIN cat_cafe ON cat.id = cat_cafe.cat_id
JOIN cafe ON cafe.id = cat_cafe.cafe_id
WHERE cat_cafe.active = true AND cat.pattern = 'tortoiseshell'
GROUP BY cafe.name
ORDER BY count(cat.name) DESC
LIMIT 1
)
print(cursor.fetchone()[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: If you inspect the dictionary closely, you will find that node 19 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
Step3: Paths in a Network
Step5: Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a breadth-first search (http
Step6: And testing the function on a few test cases
Step7: Meanwhile... thankfully, NetworkX has a function for us to use, titled has_path, so we don't have to always implement this on our own.
Step8: NetworkX also has other shortest path algorithms implemented.
Step9: nx.shortest_path(G, source, target) gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.)
Step11: Incidentally, the node list is in order as well - we will travel through 19 and 17 in that order to get from 14 from 4.
Step13: Exercise
Step15: Challenge Exercises (optional)
Step16: Hubs Revisited
Step17: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# Let's find out the number of neighbors that individual #7 has.
G.neighbors(7)
nx.degree_centrality(G)
# Your answer here.
nx.draw(G, with_labels=True)
def path_exists(node1, node2, G):
This function checks whether a path exists between two nodes (node1, node2) in graph G.
path_exists(18, 5, G)
path_exists(29, 26, G)
nx.has_path(G, 18, 5)
nx.draw(G, with_labels=True)
nx.shortest_path(G, 4, 14)
# Possible Answer:
def extract_path_edges(G, source, target):
Fill in the code below.
# Test your function with the following block of code.
newG = extract_path_edges(G, 1, 14)
nx.draw(newG, with_labels=True)
# Possible Answer
def extract_neighbor_edges(G, node):
Fill in code below.
# Test your function with the following block of code.
fig = plt.figure(0)
newG = extract_neighbor_edges(G, 19)
nx.draw(newG, with_labels=True)
# Your answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
Fill in code below.
# Test with the following line of code.
compute_transmission_time(G, 14, 4)
# Your answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
# Your answer to Question 3:
# You may want to use the Counter object from collections, as well as combinations from itertools.
from collections import Counter
from itertools import combinations
# Your answer to Question 4:
# Hint: You may want to use bar graphs or histograms.
plt.bar(totals.keys(), totals.values())
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(btws.keys(), btws.values())
nx.draw(nx.barbell_graph(5, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: data is now a "structured" numpy array. We can access it like a normal 2d array
Step2: but we can also access columns using their names
Step3: And we can work with these columns just like a standard numpy array
Step4: But what happens if we try to add new columns? It'd be nice if it behaved like a dict
Step5: Okay. That's not a huge problem. There are alternatives, but it can be ugly.
Step6: Pandas version
Step7: Similar to astropy tables, DataFrames have nice ways display an overview of the data
Step8: We can access columns through their names (like with structured arrays) or we can access columns as an object attribute
Step9: We can add new columns
Step10: It also provides nice wrapper functions for visualizing an entire dataset
Step11: We can create groups based on the values of a column
Step12: And we can plot using these groups. It's much less verbose than when we did it in numpy
Step13: You can also do fancier plots. But sometimes the defaults aren't very pretty (e.g. the plots below are missing x-axis labels, and aren't labeled by cut).
Step14: Pandas with SQL Databases
Step15: Time Domain
Step16: Let's take the average weekly temperature (resample on a weekly timescale)
Step17: That's an ugly plot. It'd be nicer if we just smooth the plot, instead of down-sampling
|
<ASSISTANT_TASK:>
Python Code:
url = "https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/ggplot2/diamonds.csv"
data = np.genfromtxt(url, delimiter=",", dtype=None, names=True)
data
data[0][2]
data[0]["cut"]
data["price"].mean()
data["price_per_carat"] = data["price"] / data["carat"]
cuts = set(data["cut"])
for cut in cuts:
plt.figure()
plt.title(cut.decode())
plt.hist(data[data["cut"]==cut]["price"])
url = 'https://github.com/vincentarelbundock/Rdatasets/raw/master/csv/ggplot2/diamonds.csv'
df = pd.read_csv(url, index_col=0)
type(df)
df.head()
df.describe()
df["price"].std()
df.price.max()
df["price_per_carat"] = df["price"] / df.carat
df.hist(figsize=(15,15))
df.groupby("cut").price.std()
df.hist(column="price", by="cut", figsize=(20,20))
df.groupby("cut").plot.hexbin("price", "carat", gridsize=20, title="")
df_from_table = pd.read_sql_table("status", "sqlite:///sample_database.db")
df_from_table = df_from_table.set_index("id")
df_from_table.head()
df_from_table[df_from_table["status"] == "Running"]
df_from_table.loc["cb33250c-7c9a-490a-be79-903e8bb8e338"]
url = "http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?station=WVI&data=tmpf&year1=2014&month1=1&day1=1&year2=2014&month2=12&day2=31&tz=America%2FLos_Angeles&format=comma&latlon=no&direct=no"
df = pd.read_csv(url,
comment="#",
names=["Station", "Time", "Temp"],
parse_dates=True,
header=1,
na_values="M",
index_col="Time")
df.head()
df.plot()
df.resample("w").plot()
pd.rolling_mean(df, freq="d", window=7).plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow Federated での画像分類
Step2: load_data() によって返されるデータセットは、tff.simulation.datasets.ClientData という、ユーザーのセットを列挙して、特定のユーザーのデータを表現する tf.data.Dataset を構築し、個別の要素の構造をクエリするインターフェースのインスタンスです。
Step3: 非 iid データを調べる
Step5: データを前処理する
Step6: 機能していることを確認します。
Step7: 以下は、トレーニングまたは評価のラウンドへの入力として特定のユーザーセットからデータセットのリストを作成する単純なヘルパー関数です。
Step8: では、どのようにしてクライアントを選択すればよいのでしょうか?
Step9: Keras でモデルを作成する
Step10: Keras による集中型トレーニング
Step11: Keras モデルを使用した連合トレーニング
Step12: 連合データでモデルをトレーニングする
Step13: ここでは、TFF は、連合計算のペアを構築し、それらを tff.templates.IterativeProcessに パッケージ化しました。これらの計算は、initialize と next のプロパティのペアとして使用できます。
Step14: 2 つの連合計算の 2 つ目の next は、Federated Averaging の 1 つのラウンドを表します。これには、クライアントへのサーバー状態(モデルパラメータを含む)のプッシュ、ローカルデータのオンデバイストレーニング、モデル更新の収集と平均、およびサーバーでの新しい更新モデルの作成が含まれます。
Step15: 数ラウンド実行します。前述のように、通常、この時点では各ラウンドでランダムに選択された新しい各ユーザーのサンプルからシミュレーションデータのサブセットを選択します。これは、ユーザーが継続的に出入りする現実的なデプロイをシミュレートするためです。ただし、このインタラクティブなノートブックのデモでは、システムが迅速に収束するように同じユーザーを再利用します。
Step16: 連合トレーニングの各ラウンドの後、トレーニングの損失は減少し、モデルが収束していることを示しています。これらのトレーニングメトリクスにはいくつかの重要な注意事項があります。このチュートリアルの後半にある評価のセクションを参照してください。
Step17: 同じサマリーライターを使用して、関連するスカラーメトリクスをプロットします。
Step18: 上記で指定したルートログディレクトリを使用して TensorBoard を起動します。データの読み込みには数秒かかる場合があります。
Step19: 同じ方法で評価メトリクスを表示するには、"logs/scalars/eval" のような別のフォルダを作成して、TensorBoard に書き込むことができます。
Step20: 次に、連合データのテストサンプルをコンパイルして、テストデータの評価を返しましょう。データは、ユーザーの異なるサンプルから取得されますが、別に保持されていたデータセットから取得されます。
Step22: チュートリアルは以上です。異なるパラメーター(バッチサイズ、ユーザー数、エポック、学習率など)を試して、上記のコードを変更し、各ラウンドでユーザーのランダムサンプルのトレーニングをシミュレートしてみてください。また、他のチュートリアルも参照してください。
Step23: モデルを準備する
Step24: この Keras モデルを tff.learning.Model としてラップします。
Step25: FL アルゴリズムをカスタマイズする
Step26: この関数は適切なようですが、後でわかるように、TFF 計算にするために、少しの変更を行う必要があります。
Step28: これらの 4 つのコンポーネントを個別に実装することに専念します。まず、純粋な TensorFlow に実装可能な部分に焦点を当てることにします。クライアントの更新ステップとサーバーの更新ステップです。
Step30: サーバーの更新
Step31: mean_client_weights を返せばよいだけなので、上記のコードスニペットは明らかに行き過ぎています。ただし、Federated Averaging の実装がより高度になれば、運動量や適合性などのより洗練されたテクニックで mean_client_weights を使用することができます。
Step32: 連合型は、連合メンバーの型 T(例
Step33: なぜ配置にこだわるのでしょうか。TFF の主要目標は、実際の分散システムにデプロイできるコードを記述できるようにすることです。つまり、デバイスの度のサブセットがどのコードを実行し、データの異なるピースがどこに存在するかを理由づけることが重要なのです。
Step34: これが TensorFlow の tf.function デコレータとどのように異なるのか疑問に思うかもしれません。ここで重要なのは、tff.federated_computation が生成するコードは、TensorFlow コードでも Python コードでもないということです。つまり、これは内部プラットフォーム非依存型のグルー言語による分散システムの仕様です。
Step35: この tff.federated_computation は、連合型 <float>@CLIENTS の引数を受け入れ、連合型 <float>@SERVER の出力を返します。連合計算もサーバーからクライアント、クライアントからクライアント、またはサーバーからサーバーに移動することができます。また、型シグネチャが一致する限り、通常の関数のように作成することができます。
Step36: 非 eager 計算と TensorFlow
Step37: これらにも型シグネチャがありますが、位置付けされていません。たとえば、以下を呼び出すことができます。
Step38: ここでは、tff.federated_computation と tff.tf_computation の重要な違いがわかります。前者は明示的な位置づけがあり、後者にはありません。
Step39: この関数はほぼ add_half と同じですが、tff.CLIENTS に配置されている値のみを受け入れ、同じ配置の値を返します。これは型シグネチャで確認できます。
Step40: 要約
Step41: 次に、tff.federated_value を使用して、これを直接連合計算に渡します。
Step42: next_fn を作成する
Step43: データセットの型シグネチャを確認しましょう。28 x 28 の画像(整数のラベル付き)を取得して、平坦化したことを思い出してください。
Step44: また、上記の server_init 関数を使用して、モデルの重みの型を抽出することもできます。
Step45: 型シグネチャを調べると、モデルのアーキテクチャを確認できます!
Step46: 次に、クライアントの更新用の tff.tf_computation を作成します。
Step47: サーバー更新バージョンの tff.tf_computation は、すでに抽出した型を使用して、同じようにして定義することができます。
Step48: 最後に、このすべてをまとめる tff.federated_computation を作成する必要があります。この関数は、サーバーの重みに対応する値(配置が tff.SERVER のもの)とクライアントデータセットに対応する値(配置が tff.CLIENTS のもの)の 2 つの連合値を受け入れます。
Step49: FL アルゴリズムの 4 つの要素を覚えていますか?
Step50: 両方のアルゴリズム初期化と、アルゴリズムの 1 つのステップの実行を行うめの tff.federated_computation を用意できました。このアルゴリズムを終了するために、これらを tff.templates.IterativeProcess に渡します。
Step51: 反復プロセスの initialize と next 関数の型シグネチャを見てみましょう。
Step52: これは、federated_algorithm.initialize が単一レイヤーモデル(784 x10 の重み行列と 10 バイアスユニット)を返す引数なし関数であることを反映しています。
Step53: ここでは、federated_algorithm.next がサーバーモデルとクライアントデータを受け入れて、更新されたサーバーモデルを返すことがわかります。
Step54: 次に、サーバーの状態を受け入れる関数を記述し、Keras を使用してテストデータセットで評価します。tf.Keras の使用に慣れているのであれば、これも見慣れているかもしれませんが、set_weights の使用に注意してください!
Step55: では、アルゴリズムを初期化して、テストセットで評価してみましょう。
Step56: 数ラウンド程度トレーニングし、何かが変化するかどうかを確認しましょう。
|
<ASSISTANT_TASK:>
Python Code:
#@title Upgrade tensorflow_federated and load TensorBoard
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
#@title
import collections
from matplotlib import pyplot as plt
from IPython.display import display, HTML, IFrame
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
np.random.seed(0)
def greetings():
display(HTML('<b><font size="6" color="#ff00f4">Greetings, virtual tutorial participants!</font></b>'))
return True
l = tff.federated_computation(greetings)()
# Code for loading federated data from TFF repository
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
len(emnist_train.client_ids)
# Let's look at the shape of our data
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
example_dataset.element_spec
# Let's select an example dataset from one of our simulated clients
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
# Your code to get an example element from one client:
example_element = next(iter(example_dataset))
example_element['label'].numpy()
plt.imshow(example_element['pixels'].numpy(), cmap='gray', aspect='equal')
plt.grid(False)
_ = plt.show()
## Example MNIST digits for one client
f = plt.figure(figsize=(20,4))
j = 0
for e in example_dataset.take(40):
plt.subplot(4, 10, j+1)
plt.imshow(e['pixels'].numpy(), cmap='gray', aspect='equal')
plt.axis('off')
j += 1
# Number of examples per layer for a sample of clients
f = plt.figure(figsize=(12,7))
f.suptitle("Label Counts for a Sample of Clients")
for i in range(6):
ds = emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[i])
k = collections.defaultdict(list)
for e in ds:
k[e['label'].numpy()].append(e['label'].numpy())
plt.subplot(2, 3, i+1)
plt.title("Client {}".format(i))
for j in range(10):
plt.hist(k[j], density=False, bins=[0,1,2,3,4,5,6,7,8,9,10])
# Let's play around with the emnist_train dataset.
# Let's explore the non-iid charateristic of the example data.
for i in range(5):
ds = emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[i])
k = collections.defaultdict(list)
for e in ds:
k[e['label'].numpy()].append(e['pixels'].numpy())
f = plt.figure(i, figsize=(12,5))
f.suptitle("Client #{}'s Mean Image Per Label".format(i))
for j in range(10):
mn_img = np.mean(k[j],0)
plt.subplot(2, 5, j+1)
plt.imshow(mn_img.reshape((28,28)))#,cmap='gray')
plt.axis('off')
# Each client has different mean images -- each client will be nudging the model
# in their own directions.
NUM_CLIENTS = 10
NUM_EPOCHS = 5
BATCH_SIZE = 20
SHUFFLE_BUFFER = 100
PREFETCH_BUFFER=10
def preprocess(dataset):
def batch_format_fn(element):
Flatten a batch `pixels` and return the features as an `OrderedDict`.
return collections.OrderedDict(
x=tf.reshape(element['pixels'], [-1, 784]),
y=tf.reshape(element['label'], [-1, 1]))
return dataset.repeat(NUM_EPOCHS).shuffle(SHUFFLE_BUFFER).batch(
BATCH_SIZE).map(batch_format_fn).prefetch(PREFETCH_BUFFER)
preprocessed_example_dataset = preprocess(example_dataset)
sample_batch = tf.nest.map_structure(lambda x: x.numpy(),
next(iter(preprocessed_example_dataset)))
sample_batch
def make_federated_data(client_data, client_ids):
return [
preprocess(client_data.create_tf_dataset_for_client(x))
for x in client_ids
]
sample_clients = emnist_train.client_ids[0:NUM_CLIENTS]
# Your code to get the federated dataset here for the sampled clients:
federated_train_data = make_federated_data(emnist_train, sample_clients)
print('Number of client datasets: {l}'.format(l=len(federated_train_data)))
print('First dataset: {d}'.format(d=federated_train_data[0]))
def create_keras_model():
return tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
## Centralized training with keras ---------------------------------------------
# This is separate from the TFF tutorial, and demonstrates how to train a
# Keras model in a centralized fashion (contrasting training in a federated env)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
mod = create_keras_model()
mod.compile(
optimizer=tf.keras.optimizers.RMSprop(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
h = mod.fit(
x_train,
y_train,
batch_size=64,
epochs=2
)
# ------------------------------------------------------------------------------
def model_fn():
# We _must_ create a new model here, and _not_ capture it from an external
# scope. TFF will call this within different graph contexts.
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=preprocessed_example_dataset.element_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
iterative_process = tff.learning.build_federated_averaging_process(
model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
# Add server optimizer here!
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
state = iterative_process.initialize()
# Run one single round of training.
state, metrics = iterative_process.next(state, federated_train_data)
print('round 1, metrics={}'.format(metrics['train']))
NUM_ROUNDS = 11
for round_num in range(2, NUM_ROUNDS):
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics['train']))
#@test {"skip": true}
import os
import shutil
logdir = "/tmp/logs/scalars/training/"
if os.path.exists(logdir):
shutil.rmtree(logdir)
# Your code to create a summary writer:
summary_writer = tf.summary.create_file_writer(logdir)
state = iterative_process.initialize()
#@test {"skip": true}
with summary_writer.as_default():
for round_num in range(1, NUM_ROUNDS):
state, metrics = iterative_process.next(state, federated_train_data)
for name, value in metrics['train'].items():
tf.summary.scalar(name, value, step=round_num)
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
# Construct federated evaluation computation here:
evaluation = tff.learning.build_federated_evaluation(model_fn)
import random
shuffled_ids = emnist_test.client_ids.copy()
random.shuffle(shuffled_ids)
sample_clients = shuffled_ids[0:NUM_CLIENTS]
federated_test_data = make_federated_data(emnist_test, sample_clients)
len(federated_test_data), federated_test_data[0]
# Run evaluation on the test data here, using the federated model produced from
# training:
test_metrics = evaluation(state.model, federated_test_data)
str(test_metrics)
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
NUM_CLIENTS = 10
BATCH_SIZE = 20
def preprocess(dataset):
def batch_format_fn(element):
Flatten a batch of EMNIST data and return a (features, label) tuple.
return (tf.reshape(element['pixels'], [-1, 784]),
tf.reshape(element['label'], [-1, 1]))
return dataset.batch(BATCH_SIZE).map(batch_format_fn)
client_ids = np.random.choice(emnist_train.client_ids, size=NUM_CLIENTS, replace=False)
federated_train_data = [preprocess(emnist_train.create_tf_dataset_for_client(x))
for x in client_ids
]
def create_keras_model():
return tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
def model_fn():
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=federated_train_data[0].element_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
def initialize_fn():
model = model_fn()
return model.weights.trainable
def next_fn(server_weights, federated_dataset):
# Broadcast the server weights to the clients.
server_weights_at_client = broadcast(server_weights)
# Each client computes their updated weights.
client_weights = client_update(federated_dataset, server_weights_at_client)
# The server averages these updates.
mean_client_weights = mean(client_weights)
# The server updates its model.
server_weights = server_update(mean_client_weights)
return server_weights
@tf.function
def client_update(model, dataset, server_weights, client_optimizer):
Performs training (using the server model weights) on the client's dataset.
# Initialize the client model with the current server weights.
client_weights = model.weights.trainable
# Assign the server weights to the client model.
tf.nest.map_structure(lambda x, y: x.assign(y),
client_weights, server_weights)
# Use the client_optimizer to update the local model.
for batch in dataset:
with tf.GradientTape() as tape:
# Compute a forward pass on the batch of data
outputs = model.forward_pass(batch)
# Compute the corresponding gradient
grads = tape.gradient(outputs.loss, client_weights)
grads_and_vars = zip(grads, client_weights)
# Apply the gradient using a client optimizer.
client_optimizer.apply_gradients(grads_and_vars)
return client_weights
@tf.function
def server_update(model, mean_client_weights):
Updates the server model weights as the average of the client model weights.
model_weights = model.weights.trainable
# Assign the mean client weights to the server model.
tf.nest.map_structure(lambda x, y: x.assign(y),
model_weights, mean_client_weights)
return model_weights
federated_float_on_clients = tff.type_at_clients(tf.float32)
str(federated_float_on_clients)
@tff.federated_computation(tff.type_at_clients(tf.float32))
def get_average_temperature(client_temperatures):
return tff.federated_mean(client_temperatures)
str(get_average_temperature.type_signature)
get_average_temperature([68.5, 70.3, 69.8])
@tff.tf_computation(tf.float32)
def add_half(x):
return tf.add(x, 0.5)
str(add_half.type_signature)
@tff.federated_computation(tff.type_at_clients(tf.float32))
def add_half_on_clients(x):
return tff.federated_map(add_half, x)
str(add_half_on_clients.type_signature)
@tff.tf_computation
def server_init():
model = model_fn()
return model.weights.trainable
@tff.federated_computation
def initialize_fn():
return tff.federated_value(server_init(), tff.SERVER)
whimsy_model = model_fn()
tf_dataset_type = tff.SequenceType(whimsy_model.input_spec)
str(tf_dataset_type)
model_weights_type = server_init.type_signature.result
str(model_weights_type)
@tff.tf_computation(tf_dataset_type, model_weights_type)
def client_update_fn(tf_dataset, server_weights):
model = model_fn()
client_optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
return client_update(model, tf_dataset, server_weights, client_optimizer)
@tff.tf_computation(model_weights_type)
def server_update_fn(mean_client_weights):
model = model_fn()
return server_update(model, mean_client_weights)
federated_server_type = tff.type_at_server(model_weights_type)
federated_dataset_type = tff.type_at_clients(tf_dataset_type)
@tff.federated_computation(federated_server_type, federated_dataset_type)
def next_fn(server_weights, federated_dataset):
# Broadcast the server weights to the clients.
server_weights_at_client = tff.federated_broadcast(server_weights)
# Each client computes their updated weights.
client_weights = tff.federated_map(
client_update_fn, (federated_dataset, server_weights_at_client))
# The server averages these updates.
mean_client_weights = tff.federated_mean(client_weights)
# The server updates its model.
server_weights = tff.federated_map(server_update_fn, mean_client_weights)
return server_weights
federated_algorithm = tff.templates.IterativeProcess(
initialize_fn=initialize_fn,
next_fn=next_fn
)
str(federated_algorithm.initialize.type_signature)
str(federated_algorithm.next.type_signature)
central_emnist_test = emnist_test.create_tf_dataset_from_all_clients().take(1000)
central_emnist_test = preprocess(central_emnist_test)
def evaluate(server_state):
keras_model = create_keras_model()
keras_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
keras_model.set_weights(server_state)
keras_model.evaluate(central_emnist_test)
server_state = federated_algorithm.initialize()
evaluate(server_state)
for round in range(15):
server_state = federated_algorithm.next(server_state, federated_train_data)
evaluate(server_state)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Opgave 2. Nu je de programma's hebt om de conversie van een willekeurig talstelsel
Step2: Opgave 3. Pas het programma uit opgave 2 aan zodat de gebruiker de beide bases
|
<ASSISTANT_TASK:>
Python Code:
number = "3DB"
base = 16
result = 0
digits = "0123456789ABCDEF"
power = 0
for digit in reversed(number):
result += digits.index(digit) * base**power
power += 1
print("Resultaat: {}".format(tgt_number))
# Van collega Peter kreeg ik een mooie recursieve oplossing
# die ik jullie toch niet wil onthouden.
# In periode 3 gaan we recursieve definities bekijken,
# maar hier vast een voorproefje.
# Kun je achterhalen waarom dit werkt?
# To understand recursion you gotta understand recursion
def base2dec(number, base):
digits = "0123456789ABCDEF"
if number < base:
return digits[number]
else:
return base2dec(number // base, base) + digits[number % base]
print(base2dec(3021, 16))
# To understand recursion you gotta understand recursion
def dec2base(number, base):
digits = "0123456789ABCDEF"
if len(number) > 0:
digit = number.pop(0)
position = digits.index(digit)
power = len(number)
result = position * base**power
return result + dec2base(number, base)
else:
return 0
print(dec2base("BCD", 16))
org_base = 8
org_number = "4607"
tgt_base = 16
digits = "0123456789ABCDEF"
## converteer org_number (basis org_base) naar dec_number (10-tallig)
dec_number = 0
power = 0
for digit in reversed(org_number):
dec_number += digits.index(digit) * org_base**power
power += 1
## converteer dec_number (10-tallig) naar tgt_number (basis tgt_base)
tgt_number = ""
while dec_number > 0:
remainder = dec_number % tgt_base
tgt_number = str(remainder) + tgt_number
dec_number = dec_number // tgt_base
## druk tgt_number af
print("Resultaat: {}".format(tgt_number))
org_base = int(input("Geef het talstelsel (2..16) voor het originele getal: "))
org_number = input("Geef het originele getal: ")
tgt_base = int(input("Geef het gewenste talstelsel: "))
digits = "0123456789ABCDEF"
## converteer org_number (basis org_base) naar dec_number (10-tallig)
dec_number = 0
power = 0
for digit in reversed(org_number):
dec_number += digits.index(digit) * org_base**power
power += 1
## converteer dec_number (10-tallig) naar tgt_number (basis tgt_base)
tgt_number = ""
while dec_number > 0:
remainder = dec_number % tgt_base
tgt_number = str(remainder) + tgt_number
dec_number = dec_number // tgt_base
## druk tgt_number af
print("Resultaat: {}".format(tgt_number))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Read epochs for the channel of interest
Step3: Find the FieldTrip neighbor definition to setup sensor connectivity
Step4: Compute permutation statistic
Step5: Note. The same functions work with source estimate. The only differences
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_connectivity
from mne.viz import plot_compare_evokeds
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg='mag', eog=True)
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, reject=reject, preload=True)
epochs.drop_channels(['EOG 061'])
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id] # as 3D matrix
X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering
connectivity, ch_names = find_ch_connectivity(epochs.info, ch_type='mag')
print(type(connectivity)) # it's a sparse matrix!
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
# set cluster threshold
threshold = 50.0 # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
threshold=threshold, tail=1,
n_jobs=1, buffer_size=None,
connectivity=connectivity)
T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# organize data for plotting
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
# unpack cluster information, get unique indices
time_inds, space_inds = np.squeeze(clusters[clu_idx])
ch_inds = np.unique(space_inds)
time_inds = np.unique(time_inds)
# get topography for F stat
f_map = T_obs[time_inds, ...].mean(axis=0)
# get signals at the sensors contributing to the cluster
sig_times = epochs.times[time_inds]
# create spatial mask
mask = np.zeros((f_map.shape[0], 1), dtype=bool)
mask[ch_inds, :] = True
# initialize figure
fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
# plot average test statistic and mark significant sensors
image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo, cmap='Reds',
vmin=np.min, vmax=np.max, show=False)
# create additional axes (for ERF and colorbar)
divider = make_axes_locatable(ax_topo)
# add axes for colorbar
ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
plt.colorbar(image, cax=ax_colorbar)
ax_topo.set_xlabel(
'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
# add new axis for time courses and plot time courses
ax_signals = divider.append_axes('right', size='300%', pad=1.2)
title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
if len(ch_inds) > 1:
title += "s (mean)"
plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
colors=colors, linestyles=linestyles, show=False,
split_legend=True, truncate_yaxis='auto')
# plot temporal cluster extent
ymin, ymax = ax_signals.get_ylim()
ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
color='orange', alpha=0.3)
# clean up viz
mne.viz.tight_layout(fig=fig)
fig.subplots_adjust(bottom=.05)
plt.show()
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.