markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
Load training history
To generate the models and training history used in this notebook, run the following commands:
./train.py --seed 1 --task repeat-copy --checkpoint_interval 500
./train.py --seed 10 --task repeat-copy --checkpoint_interval 500
./train.py --seed 100 --task repeat-copy --checkpoint_interval 500
./train.py --seed 1000 --task repeat-copy --checkpoint_interval 500
|
# Load the metrics saved at batch `batch_num` for every training seed
batch_num = 120000
files = glob("./repeat-copy/*-{}.json".format(batch_num))
files
# Read the metrics from the .json files
history = [json.loads(open(fname, "rt").read()) for fname in files]
# Stack into an array of shape (num_seeds, 3 metrics, num_batches)
training = np.array([(x['cost'], x['loss'], x['seq_lengths']) for x in history])
print("Training history (seed x metric x sequence) =", training.shape)
# Average every dv values across each (seed, metric)
dv = 2000
training = training.reshape(len(files), 3, -1, dv).mean(axis=3)
print(training.shape)
# Average the seeds
training_mean = training.mean(axis=0)
training_std = training.std(axis=0)
print(training_mean.shape)
print(training_std.shape)
fig = plt.figure(figsize=(14, 5))
# X axis is normalized to thousands
x = np.arange(dv / 1000, (batch_num / 1000) + (dv / 1000), dv / 1000)
# Plot the cost
# plt.plot(x, training_mean[0], 'o-', linewidth=2, label='Cost')
plt.errorbar(x, training_mean[0], yerr=training_std[0], fmt='o-', elinewidth=2, linewidth=2, label='Cost')
plt.grid()
plt.yticks(np.arange(0, training_mean[0][0]+10, 10))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Sequence (thousands)')
plt.title('Training Convergence', fontsize=16)
# Inset axes in the upper-right area showing the BCE loss curve
ax = plt.axes([.57, .55, .25, .25], facecolor=(0.97, 0.97, 0.97))
plt.title("BCELoss")
plt.plot(x, training_mean[1], 'r-', label='BCE Loss')
plt.yticks(np.arange(0, training_mean[1][0]+0.2, 0.2))
plt.grid()
plt.show()
# Second figure: cost broken down per sequence length (first seed only)
loss = history[0]['loss']
cost = history[0]['cost']
seq_lengths = history[0]['seq_lengths']
unique_sls = set(seq_lengths)
all_metric = list(zip(range(1, batch_num+1), seq_lengths, loss, cost))
fig = plt.figure(figsize=(12, 5))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Iteration (thousands)')
plt.title('Training Convergence (Per Sequence Length)', fontsize=16)
# NOTE(review): the loop body's indentation was lost in extraction; the lines
# down to plt.plot(...) belong inside this for loop.
for sl in unique_sls:
sl_metrics = [i for i in all_metric if i[1] == sl]
x = [i[0] for i in sl_metrics]
y = [i[3] for i in sl_metrics]
# Smooth each curve by averaging consecutive 50-point chunks
num_pts = len(x) // 50
total_pts = num_pts * 50
x_mean = [i.mean()/1000 for i in np.split(np.array(x)[:total_pts], num_pts)]
y_mean = [i.mean() for i in np.split(np.array(y)[:total_pts], num_pts)]
plt.plot(x_mean, y_mean, label='Seq-{}'.format(sl))
plt.yticks(np.arange(0, 80, 5))
plt.legend(loc=0)
plt.show()
|
notebooks/repeat-copy-task-plots.ipynb
|
loudinthecloud/pytorch-ntm
|
bsd-3-clause
|
Evaluate
|
import torch
from IPython.display import Image as IPythonImage
from PIL import Image, ImageDraw, ImageFont
import io
from tasks.repeatcopytask import dataloader
from train import evaluate
from tasks.repeatcopytask import RepeatCopyTaskModelTraining
# Build the repeat-copy model and restore the seed-10, batch-120000 checkpoint
model = RepeatCopyTaskModelTraining()
model.net.load_state_dict(torch.load("./repeat-copy/repeat-copy-task-10-batch-120000.model"))
def cmap(value):
    """Map an intensity in [0, 1] to an 8-bit gray level compressed into [64, 240]."""
    low, high = 64, 240
    # Fraction of the full 0-255 range that remains after clipping both ends
    usable = (255 - low - (255 - high)) / 255
    return int(low + value * 255 * usable)
def draw_sequence(y, u=12):
    """Render a sequence tensor as a grayscale PIL image, one shaded cell per bit.

    :param y: tensor of shape (seq_len, 1, seq_width) with values in [0, 1]
              (assumed from the indexing pattern y[i, 0, j] -- TODO confirm)
    :param u: edge length of one cell in pixels
    :return: PIL.Image in mode 'L'
    """
    seq_len = y.size(0)
    seq_width = y.size(2)
    inset = u // 8   # gap between neighbouring cells
    pad = u // 2     # margin around the whole grid
    width = seq_len * u + 2 * pad
    height = seq_width * u + 2 * pad
    im = Image.new('L', (width, height))
    draw = ImageDraw.ImageDraw(im)
    draw.rectangle([0, 0, width, height], fill=250)
    for i in range(seq_len):
        for j in range(seq_width):
            # .item() replaces the deprecated `.data[0]`, which raises on
            # 0-dim tensors in PyTorch >= 0.4. Value is inverted so that
            # 1-bits render dark.
            val = 1 - y[i, 0, j].item()
            draw.rectangle([pad + i*u + inset,
                            pad + j*u + inset,
                            pad + (i+1)*u - inset,
                            pad + (j+1)*u - inset], fill=cmap(val))
    return im
def im_to_png_bytes(im):
    """Encode a PIL image as PNG and return the raw bytes."""
    buffer = io.BytesIO()
    im.save(buffer, 'PNG')
    return buffer.getvalue()
def im_vconcat(im1, im2, pad=8):
    """Stack two equally-sized grayscale images vertically, separated by
    `pad` rows of white pixels."""
    assert im1.size == im2.size
    w, h = im1.size
    canvas = Image.new('L', (w, h * 2 + pad), color=255)
    canvas.paste(im1, (0, 0))
    canvas.paste(im2, (0, h + pad))
    return canvas
def make_eval_plot(y, y_out, u=12):
    """Render targets above model outputs in one labelled comparison image.

    :param y: target sequence tensor
    :param y_out: model output sequence tensor
    :param u: cell size in pixels passed through to draw_sequence
    :return: PIL.Image with "Targets"/"Outputs" labels on the left margin
    """
    target_im = draw_sequence(y, u)
    output_im = draw_sequence(y_out, u)
    stacked = im_vconcat(target_im, output_im, u // 2)
    w, h = stacked.size
    label_pad = u * 7  # left margin reserved for the text labels
    canvas = Image.new('L', (w + label_pad, h), color=255)
    canvas.paste(stacked, (label_pad, 0))
    # Add text
    font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
    draw = ImageDraw.ImageDraw(canvas)
    draw.text((u, 4 * u), "Targets", font=font)
    draw.text((u, 13 * u), "Outputs", font=font)
    return canvas
def visualize(model, seq_len, max_reps):
    """Evaluate `model` on one random repeat-copy batch and render the result.

    :param model: model training container exposing .net and .criterion
    :param seq_len: sequence length to sample (used as both min and max)
    :param max_reps: maximum number of repeats to sample
    :return: tuple (input image, target/output comparison image, cost)
    """
    # Bug fix: the original body shadowed the `seq_len` parameter with a
    # hard-coded `seq_len = 8`, silently ignoring the caller's argument.
    # The existing caller passes 8, so behavior there is unchanged.
    _, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len, 1, max_reps)))
    result = evaluate(model.net, model.criterion, x, y)
    inp_im = draw_sequence(x, u=10)
    eval_im = make_eval_plot(y, result['y_out'], u=10)
    return inp_im, eval_im, result['cost']
# Render one evaluation example (seq_len=8, up to 10 repeats) and show it inline
inp_im, eval_im, cost = visualize(model, 8, 10)
print("Cost:", cost)
IPythonImage(im_to_png_bytes(inp_im))
IPythonImage(im_to_png_bytes(eval_im))
|
notebooks/repeat-copy-task-plots.ipynb
|
loudinthecloud/pytorch-ntm
|
bsd-3-clause
|
Create an animated GIF
Let's see how the prediction looks at each checkpoint that we saved.
|
# Fix one random repeat-copy example and evaluate it at every saved checkpoint
seq_len = 10
_, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len, 1, 10)))
frames = []
font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
# NOTE(review): the loop body's indentation was lost in extraction; the lines
# down to `frames += [frame_seq]` belong inside this for loop.
for batch_num in range(500, 20500, 500):
# Restore the checkpoint saved at this batch number
model = RepeatCopyTaskModelTraining()
model.net.load_state_dict(torch.load("./repeat-copy/repeat-copy-task-10-batch-{}.model".format(batch_num)))
result = evaluate(model.net, model.criterion, x, y)
y_out = result['y_out']
frame = make_eval_plot(y, y_out, u=10)
w, h = frame.size
# Add a 40px banner above the plot for the caption text
frame_seq = Image.new('L', (w, h+40), color=255)
frame_seq.paste(frame, (0, 40))
draw = ImageDraw.ImageDraw(frame_seq)
draw.text((10, 10), "Sequence Num: {} (Cost: {})".format(batch_num, result['cost']), font=font)
frames += [frame_seq]
# Write all frames out as an animated GIF (750 ms per frame, loops forever)
im = frames[0]
im.save("./repeat-copy-train-10.gif", save_all=True, append_images=frames[1:], loop=0, duration=750)
|
notebooks/repeat-copy-task-plots.ipynb
|
loudinthecloud/pytorch-ntm
|
bsd-3-clause
|
Explore the Data
Play around with view_sentence_range to view different parts of the data.
|
# Slice of parallel sentences to display below (start, end)
view_sentence_range = (30, 40)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
# "Roughly" because tokens are split on whitespace only, so punctuation stays attached
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
|
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    source_id_text = [
        [source_vocab_to_int[word] for word in line.split()]
        for line in source_text.split('\n')
    ]
    # Target sentences additionally get an <EOS> id appended so the decoder
    # can learn where each sentence ends.
    eos_id = target_vocab_to_int['<EOS>']
    target_id_text = [
        [target_vocab_to_int[word] for word in line.split()] + [eos_id]
        for line in target_text.split('\n')
    ]
    return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoder_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Target sequence length placeholder named "target_sequence_length" with rank 1
Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.
Source sequence length placeholder named "source_sequence_length" with rank 1
Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
|
def model_inputs():
    """
    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
    :return: Tuple (input, targets, learning rate, keep probability, target sequence length,
    max target sequence length, source sequence length)
    """
    inputs = tf.placeholder(tf.int32, shape=[None, None], name='input')
    targets = tf.placeholder(tf.int32, shape=[None, None])
    lr = tf.placeholder(tf.float32)
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    target_seq_len = tf.placeholder(tf.int32, shape=[None], name='target_sequence_length')
    # Derived scalar tensor, not a placeholder: the longest target length in the batch
    max_target_len = tf.reduce_max(target_seq_len, name='max_target_len')
    source_seq_len = tf.placeholder(tf.int32, shape=[None], name='source_sequence_length')
    return (inputs, targets, lr, keep_prob,
            target_seq_len, max_target_len, source_seq_len)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Process Decoder Input
Implement process_decoder_input by removing the last word id from each batch in target_data and concatenating the GO ID to the beginning of each batch.
|
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for encoding
    :param target_data: Target Placehoder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    # Drop the last word id of every sequence in the batch...
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # ...and prepend a column of <GO> ids in its place.
    go_column = tf.fill([batch_size, 1], target_vocab_to_int['<GO>'])
    return tf.concat([go_column, trimmed], 1)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_encoding_input(process_decoder_input)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Encoding
Implement encoding_layer() to create a Encoder RNN layer:
* Embed the encoder input using tf.contrib.layers.embed_sequence
* Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper
* Pass cell and embedded input to tf.nn.dynamic_rnn()
|
from imp import reload
reload(tests)
def build_cell(lstm_size, keep_prob):
    """
    Build a basic LSTM cell with dropout
    :param lstm_size: Number of LSTM units
    :param keep_prob: Dropout keep value
    """
    # Dropout is applied only to the cell outputs, not the recurrent state
    return tf.contrib.rnn.DropoutWrapper(
        tf.contrib.rnn.BasicLSTMCell(lstm_size),
        output_keep_prob=keep_prob)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :param source_sequence_length: a list of the lengths of each sequence in the batch
    :param source_vocab_size: vocabulary size of source data
    :param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
    # Embed the integer word ids before feeding them to the RNN
    embedded = tf.contrib.layers.embed_sequence(
        rnn_inputs,
        vocab_size=source_vocab_size,
        embed_dim=encoding_embedding_size)
    # Stack num_layers dropout-wrapped LSTM cells
    stacked_cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(rnn_size, keep_prob) for _ in range(num_layers)])
    # dynamic_rnn returns (outputs, final_state)
    return tf.nn.dynamic_rnn(stacked_cell, embedded,
                             sequence_length=source_sequence_length,
                             dtype=tf.float32)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Decoding - Training
Create a training decoding layer:
* Create a tf.contrib.seq2seq.TrainingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
|
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         target_sequence_length, max_summary_length,
                         output_layer, keep_prob):
    """
    Create a decoding layer for training
    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_summary_length: The length of the longest sequence in the batch
    :param output_layer: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing training logits and sample_id
    """
    # Teacher forcing: the ground-truth target embeddings are fed at each step
    training_helper = tf.contrib.seq2seq.TrainingHelper(
        dec_embed_input, target_sequence_length)
    training_decoder = tf.contrib.seq2seq.BasicDecoder(
        dec_cell, training_helper, encoder_state, output_layer=output_layer)
    outputs, _ = tf.contrib.seq2seq.dynamic_decode(
        training_decoder, impute_finished=True,
        maximum_iterations=max_summary_length)
    return outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Decoding - Inference
Create inference decoder:
* Create a tf.contrib.seq2seq.GreedyEmbeddingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
|
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    """
    Create a decoding layer for inference
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param max_target_sequence_length: Maximum length of target sequences
    :param vocab_size: Size of decoder/target vocabulary
    :param output_layer: Function to apply the output layer
    :param batch_size: Batch size
    :param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
    # One <GO> start token per batch element
    start_tokens = tf.tile(tf.constant([start_of_sequence_id]), [batch_size])
    # Greedy decoding: each step embeds the previous output and feeds it back in,
    # stopping when <EOS> is produced
    greedy_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        dec_embeddings, start_tokens, end_of_sequence_id)
    inference_decoder = tf.contrib.seq2seq.BasicDecoder(
        dec_cell, greedy_helper, encoder_state, output_layer=output_layer)
    outputs, _ = tf.contrib.seq2seq.dynamic_decode(
        inference_decoder, impute_finished=True,
        maximum_iterations=max_target_sequence_length)
    return outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Embed the target sequences
Construct the decoder LSTM cell (just like you constructed the encoder cell above)
Create an output layer to map the outputs of the decoder to the elements of our vocabulary
Use the your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
|
def decoding_layer(dec_input, encoder_state,
                   target_sequence_length, max_target_sequence_length,
                   rnn_size,
                   num_layers, target_vocab_to_int, target_vocab_size,
                   batch_size, keep_prob, decoding_embedding_size):
    """
    Create decoding layer
    :param dec_input: Decoder input
    :param encoder_state: Encoder state
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: The size of the batch
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Embedding matrix: a lookup on it feeds the training decoder, while the
    # whole matrix goes to the inference decoder so it can embed its own output.
    embeddings = tf.Variable(
        tf.random_uniform([target_vocab_size, decoding_embedding_size], -1, 1))
    embedded_input = tf.nn.embedding_lookup(embeddings, dec_input)
    cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(rnn_size, keep_prob) for _ in range(num_layers)])
    # Dense projection from RNN outputs to vocabulary logits, shared by both decoders
    output_layer = Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
    with tf.variable_scope('decode') as scope:
        # Training decoder
        train_output = decoding_layer_train(
            encoder_state, cell, embedded_input, target_sequence_length,
            max_target_sequence_length, output_layer, keep_prob)
        # Reuse the same variables for the inference decoder
        scope.reuse_variables()
        infer_output = decoding_layer_infer(
            encoder_state, cell, embeddings, target_vocab_to_int['<GO>'],
            target_vocab_to_int['<EOS>'], max_target_sequence_length,
            target_vocab_size, output_layer, batch_size, keep_prob)
    return train_output, infer_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Build the Neural Network
Apply the functions you implemented above to:
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size).
Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function.
Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function.
|
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  source_sequence_length, target_sequence_length,
                  max_target_sentence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network
    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param source_sequence_length: Sequence Lengths of source sequences in the batch
    :param target_sequence_length: Sequence Lengths of target sequences in the batch
    :param max_target_sentence_length: Maximum length of target sequences
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Doc fix: the original docstring swapped the enc/dec embedding-size
    # descriptions and omitted max_target_sentence_length.
    # Encode the source batch; only the final state conditions the decoder.
    _, encoder_state = encoding_layer(
        input_data, rnn_size, num_layers, keep_prob,
        source_sequence_length, source_vocab_size, enc_embedding_size)
    # Shift targets right and prepend <GO> for teacher forcing
    dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
    # Decode the encoded input for both training and inference
    return decoding_layer(
        dec_input, encoder_state, target_sequence_length,
        max_target_sentence_length, rnn_size, num_layers,
        target_vocab_to_int, target_vocab_size, batch_size,
        keep_prob, dec_embedding_size)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
Set display_step to state how many steps between each debug output statement
|
# Training hyperparameters for the seq2seq translation model
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 3
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.7
# Batches between debug/progress printouts
display_step = 50
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
|
def sentence_to_seq(sentence, vocab_to_int):
    """
    Convert a sentence to a sequence of ids
    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids
    """
    # Lowercase each whitespace-separated token, then map to its id,
    # falling back to the <UNK> id for out-of-vocabulary words.
    tokens = [token.lower() for token in sentence.split()]
    return [
        vocab_to_int[token] if token in vocab_to_int else vocab_to_int['<UNK>']
        for token in tokens
    ]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Translate
This will translate translate_sentence from English to French.
|
# translate_sentence = 'he saw a old yellow truck .'
translate_sentence = sentences[10]
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Convert the raw sentence into word ids (unknown words become <UNK>)
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
# NOTE(review): the session block's indentation was lost in extraction; the
# lines below through `keep_prob: 1.0})[0]` belong inside the `with` block.
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
# The single sentence is tiled to fill a whole batch; target length uses a
# 2x-source-length heuristic. [0] keeps only the first (identical) result.
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
|
language-translation/dlnd_language_translation.ipynb
|
Hyperparticle/deep-learning-foundation
|
mit
|
Randomly select 20% of the samples as test set.
|
# Hold out 20% of the samples as the test set (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
part-1.ipynb
|
JungeAlexander/dl
|
mit
|
Using cross-validation, try out $d=1,2,\ldots,20$.
Use accuracy to determine the train/test error.
|
# Cross-validated grid search over polynomial kernel degrees 1..20,
# scored by classification accuracy
parameters = {'degree':list(range(1, 21))}
svc = svm.SVC(kernel='poly')
clf = GridSearchCV(svc, parameters, scoring='accuracy')
clf.fit(X_train, y_train)
|
part-1.ipynb
|
JungeAlexander/dl
|
mit
|
The cross-validation results can be loaded into a pandas DataFrame. We see that the model starts overfitting for polynomial degrees $>3$.
|
# Inspect the per-degree cross-validation results as a table
pd.DataFrame(clf.cv_results_)
|
part-1.ipynb
|
JungeAlexander/dl
|
mit
|
Finally, train the model with lowest mean test error in cross-validation on all training data and determine the error on the test set.
|
# Bug fix: `clf.estimator` is the *unfitted* base SVC with default parameters
# (degree=3), so fitting it ignores the cross-validation results entirely.
# `clf.best_estimator_` is the estimator with the best CV parameters, already
# refit on the full training set by GridSearchCV (refit=True by default).
e = clf.best_estimator_
e
y_pred = e.predict(X_test)
accuracy_score(y_test, y_pred)
|
part-1.ipynb
|
JungeAlexander/dl
|
mit
|
We will create a simple velocity model here by hand for demonstration purposes. This model essentially consists of three layers, each with a different velocity: 1.5km/s in the top layer, 2.5km/s in the middle layer and 4.5 km/s in the bottom layer.
|
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Model, plot_velocity
shape = (301, 501) # Number of grid points (nx, nz) -- the model is 2-D
spacing = (10., 10) # Grid spacing in m. The domain size is now 3km by 5km
origin = (0., 0) # What is the location of the top left corner.
# Define a velocity profile. The velocity is in km/s
v = np.empty(shape, dtype=np.float32)
# Three horizontal layers: 1.5 km/s (top), 2.5 km/s (middle), 4.5 km/s (bottom)
v[:,:100] = 1.5
v[:,100:350] = 2.5
v[:,350:] = 4.5
# With the velocity and model size defined, we can create the seismic model that
# encapsulates these properties. We also define the size of the absorbing layer
# as 40 grid points (nbl=40).
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing, space_order=4, nbl=40, bcs="damp")
plot_velocity(model)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Next we define the positioning and the wave signal of our source, as well as the location of our receivers. To generate the wavelet for our source we require the discretized values of time that we are going to use to model a "shot", which depends on the grid spacing used in our model. We will use one source and 250 receivers. The source is located at (400, 20). The receivers start at the source x-position and are spread evenly toward the domain edge at a consistent depth of 20m.
|
from examples.seismic import TimeAxis
t0 = 0. # Simulation starts at t=0
tn = 2400. # Simulation lasts 2.4 seconds (2400 ms)
dt = model.critical_dt # Time step from model grid spacing
time_range = TimeAxis(start=t0, stop=tn, step=dt)
nrcv = 250 # Number of Receivers
#NBVAL_IGNORE_OUTPUT
from examples.seismic import RickerSource
f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)
src = RickerSource(name='src', grid=model.grid, f0=f0,
npoint=1, time_range=time_range)
# Define the wavefield with the size of the model and the time dimension
u = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=4)
# We can now write the PDE (acoustic wave equation with damping term)
pde = model.m * u.dt2 - u.laplace + model.damp * u.dt
stencil = Eq(u.forward, solve(pde, u.forward))
src.coordinates.data[:, 0] = 400 # Source coordinates
src.coordinates.data[:, -1] = 20. # Depth is 20m
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Receiver
rec = Receiver(name='rec', grid=model.grid, npoint=nrcv, time_range=time_range)
# Receivers spread evenly from the source x-position to the domain edge
rec.coordinates.data[:,0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)
rec.coordinates.data[:,-1] = 20. # Depth is 20m
# Finally we define the source injection and receiver read function to generate the corresponding code
src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u.forward)
op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)
op(time=time_range.num-1, dt=model.critical_dt)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Since we are modelling horizontal layers, we will group these traces and apply an NMO correction to this set of traces.
|
# Collect, per receiver, its horizontal offset from the source plus its trace.
offset = []
data = []
src_x = src.coordinates.data[0, 0]  # source x-position is loop-invariant
for idx, rcv_coord in enumerate(rec.coordinates.data):
    offset.append(src_x - rcv_coord[0])
    data.append(rec.data[:, idx])
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Auxiliary function for plotting traces:
|
#NBVAL_IGNORE_OUTPUT
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
mpl.rc('font', size=16)
mpl.rc('figure', figsize=(8, 6))
def plot_traces(rec, xb, xe, t0, tn, colorbar=True):
    """Display a gather of traces as a grayscale image.

    rec      : 2-D amplitude array (time samples x traces)
    xb, xe   : first and last receiver positions (km)
    t0, tn   : start time (s) and end time (ms; converted to s for the axis)
    colorbar : attach an aligned colorbar on the right when True
    """
    # Clip the color range to 1% of the peak so weak events remain visible.
    amp_clip = np.max(rec) / 100
    img = plt.imshow(rec, cmap=cm.gray, vmin=-amp_clip, vmax=amp_clip,
                     extent=[xb, xe, 1e-3 * tn, t0])
    plt.xlabel('X position (km)')
    plt.ylabel('Time (s)')
    if colorbar:
        # Colorbar axes carved off the right-hand side of the image axes.
        cax = make_axes_locatable(plt.gca()).append_axes("right",
                                                         size="5%", pad=0.05)
        plt.colorbar(img, cax=cax)
    plt.show()
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Common Midpoint Gather
At this point, we have a dataset composed of the receivers. "If our model wasn't purely horizontal, we would have to sort these traces by common midpoints prior to NMO correction."
|
# Plot the shot gather: traces as columns, receiver position (km) on the x axis.
plot_traces(np.transpose(data), rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
NMO Correction
We can correct the measured traveltime of a reflected wave $t$ at a given offset $x$ to obtain the traveltime at normal incidence $t_0$ by applying the following equation:
\begin{equation}
t = \sqrt{t_0^2 + \frac{x^2}{V_{nmo}^2}}
\end{equation}
in which $V_{nmo}$ is the NMO velocity. This equation results from the Pythagorean theorem, and is only valid for horizontal reflectors. There are variants of this equation with different degrees of accuracy, but we'll use this one for simplicity.
For the NMO Correction we use a grid of size samples x traces.
|
# Workspace grid for NMO: one row per time sample, one column per trace.
ns = time_range.num # Number of samples in each trace
grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
In this example we will use a constant velocity guide. The guide will be arranged in a SparseFunction with the number of points equal to number of samples in the traces.
|
# Constant NMO velocity guide, one value per time sample.
vnmo = 1500
vguide = SparseFunction(name='v', grid=grid, npoint=ns)
vguide.data[:] = vnmo
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
The computed offset for each trace will be arranged in another SparseFunction with the number of points equal to the number of traces.
|
# Source-receiver offsets computed earlier, one value per trace.
off = SparseFunction(name='off', grid=grid, npoint=nrcv)
off.data[:] = offset
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
The previous modelled traces will be arranged in a SparseFunction with the same dimensions as the grid.
|
# Recorded amplitudes arranged as a (samples x traces) matrix on the grid.
amps = SparseFunction(name='amps', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
amps.data[:] = np.transpose(data)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Now, we define SparseFunctions with the same dimensions as the grid, describing the NMO traveltime equation. The $t_0$ SparseFunction isn't offset dependent, so the number of points is equal to the number of samples.
|
sample, trace = grid.dimensions
# t0 (zero-offset time) depends only on the sample index, hence 1-D.
t_0 = SparseFunction(name='t0', grid=grid, npoint=ns, dimensions=[sample], shape=[grid.shape[0]])
# tt holds the NMO traveltime t(x) for every (sample, trace) pair.
tt = SparseFunction(name='tt', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
# snmo receives the NMO-corrected amplitudes.
snmo = SparseFunction(name='snmo', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
# s stores, as an integer, the sample index corresponding to each traveltime.
s = SparseFunction(name='s', grid=grid, dtype=np.intc, npoint=ns*nrcv, dimensions=grid.dimensions,
                   shape=grid.shape)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
The Equation relates traveltimes: the one we can measure ($t_0$) and the one we want to know (t). But the data in our CMP gather are actually a matrix of amplitudes measured as a function of time ($t_0$) and offset. Our NMO-corrected gather will also be a matrix of amplitudes as a function of time (t) and offset. So what we really have to do is transform one matrix of amplitudes into the other.
With Equations we describe the NMO traveltime equation, and use the Operator to compute the traveltime and the samples for each trace.
|
#NBVAL_IGNORE_OUTPUT
dtms = model.critical_dt/1000 # Time discretization in ms
# E1: zero-offset time of each sample. E2: NMO traveltime equation
# t = sqrt(t0^2 + x^2/v^2). E3: traveltime converted to a sample index.
E1 = Eq(t_0, sample*dtms)
E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2) ))
E3 = Eq(s, sp.floor(tt/dtms))
op1 = Operator([E1, E2, E3])
op1()
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
With the computed samples, we remove all that are out of the samples range, and shift the amplitude for the correct sample.
|
#NBVAL_IGNORE_OUTPUT
# Sample indices beyond the trace length are mapped to index 0 (discarded).
s.data[s.data >= time_range.num] = 0
# E4: move each amplitude to its NMO-corrected sample position.
E4 = Eq(snmo, amps[s[sample, trace], trace])
op2 = Operator([E4])
op2()
stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!
plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
|
examples/seismic/tutorials/10_nmo_correction.ipynb
|
opesci/devito
|
mit
|
Certainly seems so. That one is a little strange. If x and y are strings, x + y runs in O(len(y)).
Out of curiosity, let's check the bytes type.
|
counts = [int(x) for x in np.logspace(3, 5)]
@benchmark(counts)
def accumulate_bytes(n):
    # Intentionally builds the result with repeated += to probe whether
    # bytes concatenation is quadratic -- do not "optimise" this away.
    s = b''
    for i in range(n):
        s += b'a'
@benchmark(counts)
def join_bytes(n):
    # Linear-time alternative: collect chunks, join once at the end.
    l = []
    for i in range(n):
        l.append(b'a')
    b''.join(l)
plt.plot(counts, accumulate_bytes())
plt.plot(counts, join_bytes())
plt.legend(['accumulate', 'join'], loc='upper left')
counts = [int(x) for x in np.logspace(3, 6)]
@benchmark(counts)
def accumulate_1ref(n):
    # Single reference to s -- presumably meant to hit CPython's in-place
    # str += fast path; confirm against the interpreter version used.
    s = ''
    for i in range(n):
        s += 'a'
@benchmark(counts)
def accumulate_2ref(n):
    # Extra references to s -- presumably meant to defeat the in-place
    # += optimisation. NOTE(review): blah/q only alias the initial '' and
    # are never rebound, so whether they affect later iterations is unclear.
    s = ''
    blah = s
    q = s
    for i in range(n):
        s += 'a'
plt.plot(counts, accumulate_1ref())
plt.plot(counts, accumulate_2ref())
plt.legend(['1ref', '2ref'], loc='best')
|
ipy/str-join-benchmark.ipynb
|
noammor/noammor.github.io
|
apache-2.0
|
Visualize the process using plt.plot with t on the x-axis and W(t) on the y-axis. Label your x and y axes.
|
# Wiener process sample path: time on the x axis, W(t) on the y axis.
plt.plot(t, W)
plt.xlabel('time')
plt.ylabel('W(t)')
assert True # this is for grading
|
Numpy/NumpyEx03.ipynb
|
JAmarel/Phys202
|
mit
|
Use np.diff to compute the changes at each step of the motion, dW, and then compute the mean and standard deviation of those differences.
|
# Per-step increments of the Wiener process and their summary statistics.
# Fixed: the original used Python 2 print statements, which are syntax
# errors under Python 3; converted to print() calls with identical output.
dW = np.diff(W)
mean = dW.mean()
std = dW.std()
print("mean: " + str(mean))
print("std: " + str(std))
assert len(dW)==len(W)-1
assert dW.dtype==np.dtype(float)
|
Numpy/NumpyEx03.ipynb
|
JAmarel/Phys202
|
mit
|
Write a function that takes $W(t)$ and converts it to geometric Brownian motion using the equation:
$$
X(t) = X_0 e^{((\mu - \sigma^2/2)t + \sigma W(t))}
$$
Use Numpy ufuncs and no loops in your function.
|
def geo_brownian(t, W, X0, mu, sigma):
    """Return X(t) for geometric Brownian motion with drift mu and volatility sigma.

    Implements X(t) = X0 * exp((mu - sigma**2/2) * t + sigma * W(t))
    elementwise with NumPy ufuncs (no loops); t and W may be scalars or
    arrays of the same shape.
    """
    # Fixed: the original "docstring" opened with a single quote and closed
    # with a triple quote, parsing as two concatenated string literals.
    return X0*np.exp(((mu-.5*sigma**2)*t)+sigma*W)
assert True # leave this for grading
|
Numpy/NumpyEx03.ipynb
|
JAmarel/Phys202
|
mit
|
Use your function to simulate geometric brownian motion, $X(t)$ for $X_0=1.0$, $\mu=0.5$ and $\sigma=0.3$ with the Wiener process you computed above.
Visualize the process using plt.plot with t on the x-axis and X(t) on the y-axis. Label your x and y axes.
|
# Simulate geometric Brownian motion and plot it against time.
# Fixed: the original called geo_brownian twice, discarding the first
# result; compute once and reuse the value for the plot.
X = geo_brownian(t, W, 1, .5, .3)
plt.plot(t, X)
plt.xlabel('time')
plt.ylabel('X(t)')
assert True # leave this for grading
|
Numpy/NumpyEx03.ipynb
|
JAmarel/Phys202
|
mit
|
Creating ROOT file using root_numpy
|
import numpy
import root_numpy
# generating random data: 10000 rows of 2 independent standard normals
data = numpy.random.normal(size=[10000, 2])
# adding names of columns (view as a structured array with two float fields)
data = data.view([('first', float), ('second', float)])
# write the structured array to a ROOT file as a TTree named 'tree'
root_numpy.array2root(data, filename='./toy_datasets/random.root', treename='tree', mode='recreate')
!cd ./toy_datasets/ ; ls
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
Plot function using ROOT
|
import ROOT
from rep.plotting import default_canvas
canvas = default_canvas()
# 1-D function |sin(x)/x| on the interval [0, 10]
fun1 = ROOT.TF1( 'fun1', 'abs(sin(x)/x)', 0, 10)
canvas.SetGridx()
canvas.SetGridy()
fun1.Draw()
# Drawing output (last line is considered as output of cell)
canvas
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
Plot histogram using ROOT for branch in root file
|
# Open the ROOT file written above and draw the 'first' branch as a histogram.
File = ROOT.TFile("toy_datasets/random.root")
Tree = File.Get("tree")
Tree.Draw("first")
canvas
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
use histogram settings
|
# we need to keep histogram in any variable, otherwise it will be deleted automatically
h1 = ROOT.TH1F("h1","hist from tree",50, -0.25, 0.25)
# NOTE(review): 'min_DOCA' is not a branch of random.root (which holds
# 'first' and 'second'); this draw presumably targets a different file.
Tree.Draw("min_DOCA>>h1")
canvas
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
root_numpy way
There are two libraries to work with ROOT files
rootpy http://www.rootpy.org - direct wrapper to ROOT methods.
root_numpy http://rootpy.github.io/root_numpy/ - new-style, efficient and simple library to deal with ROOT files from python
Let's show how to use the second library.
|
# Read selected branches (the third is an expression computed on the fly)
# applying the selection 'first > 0'.
data = root_numpy.root2array("toy_datasets/random.root",
                             treename='tree',
                             branches=['first', 'second', 'sin(first) * exp(second)'],
                             selection='first > 0')
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
in example above we selected three branches (one of which is an expression and will be computed on-the-fly) and selections
|
# taking, i.e. the first 10 rows of the structured array:
data2 = data[:10]
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
convert to pandas
|
import pandas
# Structured numpy arrays convert directly into a DataFrame.
dataframe = pandas.DataFrame(data)
# looking at the first elements
dataframe.head()
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
Histograms in python
|
# Pylab-style plotting (figure/hist/xlabel live in the notebook namespace).
figure(figsize=(9, 7))
hist(data['first'], bins=50)
xlabel('first')
figure(figsize=(9, 7))
hist(data['second'], bins=50)
xlabel('second')
|
howto/00-intro-ROOT.ipynb
|
vkuznet/rep
|
apache-2.0
|
Clases
|
class Person:
    """Minimal example class holding a first and last name."""

    def __init__(self, first, last):
        # Store the name components as public fields.
        self.first = first
        self.last = last

    def greet(self, add_msg=""):
        # Print a greeting; add_msg is appended after the first name.
        print(f"hello {self.first} {add_msg}")
# Instantiate and exercise the class (notebook-style: bare expressions echo).
juan = Person('juan', 'dominguez')
type(juan)
juan.first
juan.last
pedro = Person('pedro', 'gonzalez')
pedro.first
pedro.last
pedro.greet('my friend')
|
CuadernosPython/.ipynb_checkpoints/2._estructuras-checkpoint.ipynb
|
mauriciogtec/PropedeuticoDataScience2017
|
mit
|
Define the anihilation operators for photons 1 and 2, and the quantum Stokes parameters:
|
# define max dimension, enlarge as needed
N = 7
# Annihilation operators for photon modes 1 and 2 on the two-mode Fock space.
a1 = tensor(destroy(N),identity(N))
a2 = tensor(identity(N),destroy(N))
# quantum Stokes, eqn 4.6:
Lt = 1/2*(a1.dag()*a1 + a2.dag()*a2)
Lx = 1/2*(a1.dag()*a2 + a2.dag()*a1)
Ly = 1j/2*(a2.dag()*a1 - a1.dag()*a2)
Lz = 1/2*(a1.dag()*a1 - a2.dag()*a2)
# the number operators, just to have:
n1 = a1.dag()*a1
n2 = a2.dag()*a2
# Note, can use this approach or form a tensor with n and identity.
def tp(n,m):
    """Create a two photon ket state |n,m>
    implemented using QuTiP tensor"""
    return tensor(fock(N,n),fock(N,m))
def Bmatrix(Φ,Θ,Ψ,Λ):
    """This is the classical matrix given in 4.4, mainly to confirm parameter choice"""
    a = exp(1j*Λ/2)
    b = array([[exp(1j*Ψ/2),0],[0,exp(-1j*Ψ/2)]])
    c = array([[cos(Θ/2),sin(Θ/2)],[-sin(Θ/2),cos(Θ/2)]])
    d = array([[exp(1j*Φ/2),0],[0,exp(-1j*Φ/2)]])
    return a * b @ c @ d
# Generate the perfect 50/50 BS as in 4.23
# to check the angles:
Bmatrix(0,pi/2,0,0)
def B(Φ,Θ,Ψ,Λ):
    """Create the B operator given in 4.12"""
    B = (-1j*Φ*Lz).expm() * (-1j*Θ*Ly).expm() * (-1j*Ψ*Lx).expm() * (-1j*Λ*Lt).expm()
    return B
# The B operator for a 50/50 BS
bs = B(0,pi/2,0,0)
# Apply it to a |1,1> input state, a la Hong, Ou, Mandel:
out = bs.dag() * tp(1,1)
# Compare to the example expression from Ulf in 4.24 line 1:
out1 = 1/2 * (a1.dag() - a2.dag()) * (a1.dag() + a2.dag()) * tp(0,0)
# the right answer in any case (4.24 line 2)
testout = (1/sqrt(2)*(tp(2,0) - tp(0,2)))
testout == out1 == out
|
Beam Splitter Leonhart.ipynb
|
DawesLab/LabNotebooks
|
mit
|
These all agree: so far so good.
Next, try a single photon input:
|
# Single-photon |1,0> input through the 50/50 beam splitter.
psi1 = bs.dag() * tp(1,0)
psi1 == (1/sqrt(2)*(tp(1,0) - tp(0,1))) ## sanity check
|
Beam Splitter Leonhart.ipynb
|
DawesLab/LabNotebooks
|
mit
|
Note: this is different from van Enk (NotesBS.pdf) by the sign and a factor of i. It agrees with several other papers in the reference folder. TODO: sort this out.
Now to let the outputs interfere again after a phase shift in path 2:
|
def phaseshifter(phi):
    """ I believe this shifts one arm by phi
    doesn't seem to be working though"""
    # NOTE(review): exp(1j*phi)*identity(N) applies only a global phase to
    # mode 2, not a photon-number-dependent phase shift -- possibly why this
    # "doesn't seem to be working"; confirm against (1j*phi*n2).expm().
    shifter = tensor(identity(N),exp(1j*phi)*identity(N))
    return shifter
def stateplot(state):
    # Plot the real (blue) and imaginary (red) parts of the state amplitudes.
    plt.plot(real(state.full()),"bo")
    plt.plot(imag(state.full()),"ro")
philist = linspace(0,2*pi,20)
# NOTE(review): phi from philist is unused here -- a fixed pi/8 shift is
# applied for every element; presumably phaseshifter(phi) was intended.
input2 = [phaseshifter(pi/8)*psi1 for phi in philist]
out2 = bs.dag() * input2
plt.plot(philist,expect(n1,out2),label="$n_1$")
plt.plot(philist,expect(n2,out2),label="$n_2$")
plt.legend()
# NOTE(review): test3 and psi3 are defined further down this cell; executed
# top-to-bottom these two stateplot calls raise NameError.
stateplot(test3[6])
expect(n2,out2)
expect(n1,out2)
stateplot(psi3[0])
# Explore the phase-shifted version of the 1 photon output by hard-coding the state
psi3 = [1/sqrt(2)*(exp(1j*phi)*tp(0,1) + tp(1,0)) for phi in philist]
test3 = bs.dag()*psi3
plt.plot(philist,expect(n1,test3),label="$n_1$")
plt.plot(philist,expect(n2,test3),label="$n_2$")
plt.legend()
(tp(1,0)).full()
|
Beam Splitter Leonhart.ipynb
|
DawesLab/LabNotebooks
|
mit
|
Read in the results of the above three scripts (scaProcessMSA, scaCore and scaSectorID), stored as three dictionaries in the database PF13354_full.db. To see what variables are stored in each dictionary, use:
>>> print dict.keys()
|
# Load the pickled results database; keys: 'sequence', 'sca', 'sector'.
db = pickle.load(open('Outputs/PF13354_full.db','rb'))
Dseq = db['sequence']
Dsca = db['sca']
Dsect = db['sector']
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
I. Statistical Structure of the Multiple Sequence Alignment (MSA)
Plot a histogram of all pairwise sequence identities (left panel) and a global view of the sequence similarity matrix (defined by $S\equiv \frac{1}{L}XX^\top$) (right panel). The data show that the alignment is described by a nearly bimodal distribution of sequence identities with peaks near 25% and 45%. From the matrix at right, it is clear that the alignment is composed of several distinct sequence families.
|
# List all elements above the diagonal (i<j):
listS = [Dsca['simMat'][i,j] for i in range(Dsca['simMat'].shape[0]) \
         for j in range(i+1, Dsca['simMat'].shape[1])]
#Cluster the sequence similarity matrix
Z = sch.linkage(Dsca['simMat'],method = 'complete', metric = 'cityblock')
R = sch.dendrogram(Z,no_plot = True)
# Leaf order of the dendrogram, used to reorder the similarity matrix below.
# NOTE(review): under Python 3, map() returns an iterator and would need
# list(...) before np.ix_; this notebook targets Python 2 (print statements).
ind = map(int, R['ivl'])
#Plotting: identity histogram (left), clustered similarity matrix (right)
plt.rcParams['figure.figsize'] = 9, 4
plt.subplot(121)
plt.hist(listS, Dseq['Npos']/2)
plt.xlabel('Pairwise sequence identities', fontsize=14)
plt.ylabel('Number', fontsize=14)
plt.subplot(122)
plt.imshow(Dsca['simMat'][np.ix_(ind,ind)], vmin=0, vmax=1); plt.colorbar();
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
To examine the role of sequence and position weighting on the structure of the sequence space, we compute correlation matrices between all pairs of sequences, either with or without sequence and position weights and project the corresponding sequence space (by eigenvalue decomposition) down to a small set of top modes that contain the statistically dominant relationships between sequences. Since eigenvalue decomposition does not necessarily provide the best representation of sequence groups (for reasons described in "xx"), we also apply independent components analysis (or ICA) to the top few eigenmodes; this manipulation provides a representation in which the top groupings of sequences in the alignment (if such exists) should separate along the so-called independent components (or ICs). Below we plot the following eigenmodes (top row) and independent components (bottom row):
$\bullet$ $U^{(0)}$ and $U'^{(0)}$, the top eigenmodes and ICs without any weights;
$\bullet$ $U^{(1)}$ and $U'^{(1)}$ the top eigenmodes and ICs with sequences weights;
$\bullet$ $U^{(2)}$ and $U'^{(2)}$ the top eigenmodes and ICs with both sequences and positional weights.
The sequences are colored by weight, with red indicating the most strongly downweighted sequences. In contrast to the g-protein example, we see that application of the sequence and position weights makes the sequence space apparently more uniform (removes some of the family or clade-like structure).
|
Useq = Dsca['Useq']
Uica = Dsca['Uica']
plt.rcParams['figure.figsize'] = 9, 8
# Prime marks for the ICA panels (bottom row) in the axis labels.
ica = ["","","","'","'","'"]
# Top row: eigenmodes; bottom row: ICs. Points colored by sequence weight.
for k,U in enumerate(Useq+Uica):
    plt.subplot(2,3,k+1)
    sca.figWeights(U[:,0], U[:,1], Dseq['seqw'][0])
    plt.xlabel(r'${U%s}^{(%i)}_1$'%(ica[k],k%3), fontsize=16)
    plt.ylabel(r'${U%s}^{(%i)}_2$'%(ica[k],k%3), fontsize=16)
plt.tight_layout()
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
To examine the relationship between divergence in sequence similarity and phylogeny in the sequence-weighted alignment, we plot the top independent components of the sequence correlation matrix (after sequence weighting), colored by phylogenetic group. We start by constructing a dictionary of phylogenetic annotations and checking the representation of sequences in the top taxonomic levels. The annotations are parsed from the sequence headers.
|
#construct a dictionary of phylogenetic groups, parsed from sequence headers
annot = dict()
for i, h in enumerate(Dseq['hd']):
    hs = h.split('|')
    annot[hs[0]] = sca.Annot(hs[1], hs[2], hs[3].replace('.',''))
# Most frequent taxonomic groups (levels 0-3, groups with >= 'atleast' members):
atleast = 10
for level in range(4):
    descr_list = [a.taxo.split(',')[level] for a in annot.values() \
                  if len(a.taxo.split(',')) > level]
    descr_dict = {k:descr_list.count(k) for k in descr_list \
                  if descr_list.count(k)>=atleast}
    print '\n Level %i:' % level
    print descr_dict
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
Based on this, we select taxonomic groups and colors for representation. Here, we just start by choosing the broadly well-represented groups. To see a complete color-coding legend, use:
>>> sca.figColors()
|
phylo = list();
# Broadly well-represented taxonomic groups and their HSV hue values.
fam_names = ['Firmicutes', 'Actinobacteria', 'Bacteroidetes', \
             'Cyanobacteria', 'Proteobacteria']
col = (0, 0.18, 0.38, 0.5, 0.6)
#Firmicutes = red, Actinobacteria = yellow, Bacteroidetes = cyan,
#Cyanobacteria = green, Proteobacteria = blue
for i,k in enumerate(fam_names):
    sf = sca.Unit()
    sf.name = fam_names[i].lower()
    sf.col = col[i]
    # Indices of sequences whose header mentions this group name.
    sf.items = [j for j,q in enumerate(Dseq['hd']) if sf.name in q.lower()]
    phylo.append(sf)
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
Plot the top six independent components of the sequence correlation matrix (with sequence weights); color-coded by phylogenetic annotation. The sequences clearly separate into groups related by phylogeny; the Proteobacteria (blue) separate out on $U_1$, the Firmicutes (red) separate out on $U_2$, the Cyanobacteria (green) separate out on $U_3$, and the Bacteroidetes (cyan) separate out on $U_5$.
|
plt.rcParams['figure.figsize'] = 9, 3.5
U = Dsca['Uica'][1]
# Consecutive IC index pairs: (0,1), (2,3), (4,5).
pairs = [[2*i,2*i+1] for i in range(3)]
print pairs
for k,[k1,k2] in enumerate(pairs):
    plt.subplot(1,3,k+1)
    # Scatter of the IC pair, colored by phylogenetic group.
    sca.figUnits(U[:,k1], U[:,k2], phylo)
    #sca.figUnits(U[:,k1], U[:,k2], subfam)
    plt.xlabel(r"${U'}^{(2)}_{%i}$"%(k1+1), fontsize=16)
    plt.ylabel(r"${U'}^{(2)}_{%i}$"%(k2+1), fontsize=16)
plt.tight_layout()
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
II. <u>SCA conservation and coevolution</u>
Plot the eigenspectrum of the SCA positional coevolution matrix ($\tilde{C_{ij}}$) (black bars) and 10 trials of matrix randomization for comparison (red line). This graph is used to choose the number of significant eigenmodes.
|
plt.rcParams['figure.figsize'] = 9, 3.5
# Histogram of randomized-matrix eigenvalues (null model) ...
hist0, bins = np.histogram(Dsca['Lrand'].flatten(), bins=Dseq['Npos'], \
                           range=(0,Dsect['Lsca'].max()))
# ... and of the actual SCA matrix eigenvalues, on the same bins.
hist1, bins = np.histogram(Dsect['Lsca'], bins=Dseq['Npos'], \
                           range=(0,Dsect['Lsca'].max()))
plt.bar(bins[:-1], hist1, np.diff(bins),color='k')
# Red curve: randomized spectrum averaged over Ntrials randomizations.
plt.plot(bins[:-1], hist0/Dsca['Ntrials'], 'r', linewidth=3)
plt.tick_params(labelsize=11)
plt.xlabel('Eigenvalues', fontsize=18); plt.ylabel('Numbers', fontsize=18);
print 'Number of eigenmodes to keep is %i' %(Dsect['kpos'])
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
To define the positions with significant contributions to each of the independent components (ICs), we make an empirical fit for each IC to the t-distribution and select positions with greater than a specified cutoff on the CDF. We choose $p=0.95$ as our cutoff. Note that since some positions might contribute significantly to more than one IC (an indication of non-independence of ICs), we apply a simple algorithm to assign such positions to one IC. Specifically, we assign each position to the IC with which it has the greatest degree of co-evolution.<br/>
The data indicate generally good fits for the top six ICs, and we return the positions contributing to each IC in a format suitable for cut and paste into PyMol.
|
plt.rcParams['figure.figsize'] = 10,5
Vpica = Dsect['Vpica']
for k in range(Dsect['kpos']):
    # Freedman-Diaconis rule for the histogram bin width.
    iqr = scoreatpercentile(Vpica[:,k],75) - scoreatpercentile(Vpica[:,k],25)
    binwidth=2*iqr*(len(Vpica)**(-0.33))
    nbins=round((max(Vpica[:,k])-min(Vpica[:,k]))/binwidth)
    # NOTE(review): subplot indices are 1-based; 'k' starts at 0 here and
    # presumably should be 'k+1' -- confirm against the matplotlib version.
    plt.subplot(1,Dsect['kpos'],k)
    h_params = plt.hist(Vpica[:,k], nbins)
    x_dist = np.linspace(min(h_params[1]), max(h_params[1]), num=100)
    # Overlay the scaled t-distribution fit for this IC.
    plt.plot(x_dist,Dsect['scaled_pd'][k],'r',linewidth = 2)
    plt.xlabel(r'$V^p_{%i}$'%(k+1), fontsize=14)
    plt.ylabel('Number', fontsize=14)
for n,ipos in enumerate(Dsect['ics']):
    sort_ipos = sorted(ipos.items)
    ats_ipos = ([Dseq['ats'][s] for s in sort_ipos])
    # '+'-joined residue numbers paste directly into a PyMOL selection.
    ic_pymol = ('+'.join(ats_ipos))
    print('IC %i is composed of %i positions:' % (n+1,len(ats_ipos)))
    print(ic_pymol + "\n")
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
To define protein sectors, we examine the structure of the SCA positional correlation matrix with positions contributing to the top independent components (ICs) ordered by weight (left panel). This provides a basis to determine/interpret which ICs are truly statistically independent (defining an independent sector) and which represent hierarchical breakdowns of one sector.
IC 2 appears more distinct and is considered an independent sector (sector 1). ICs 1,3,5,and 6 are strongly co-evolving, and should be combined into one sector. IC 4 also appears to be related to [1,3,5,6] and the combination of 1,3,4,5,6 makes up sector two. The sectors (2 in total) are defined accordingly, and in the right panel, these independent components have been re-ordered accordingly to visualize this decomposition.
|
#plot the SCA positional correlation matrix, ordered by contribution to the top ICs
plt.rcParams['figure.figsize'] = 10, 10
plt.subplot(121)
plt.imshow(Dsca['Csca'][np.ix_(Dsect['sortedpos'], Dsect['sortedpos'])], \
           vmin=0, vmax=2,interpolation='none',aspect='equal',\
           extent=[0,sum(Dsect['icsize']),0,sum(Dsect['icsize'])])
# White grid lines delimit the blocks belonging to each IC.
line_index=0
for i in range(Dsect['kpos']):
    plt.plot([line_index+Dsect['icsize'][i],line_index+Dsect['icsize'][i]],\
             [0,sum(Dsect['icsize'])],'w', linewidth = 2)
    plt.plot([0,sum(Dsect['icsize'])],[sum(Dsect['icsize'])-line_index,\
             sum(Dsect['icsize'])-line_index],'w', linewidth = 2)
    line_index += Dsect['icsize'][i]
#define the new sector groupings - 2 total
sec_groups = ([1],[0,2,4,5,3])
sectors = list()
for n,k in enumerate(sec_groups):
    s = sca.Unit()
    all_items = list()
    # Merge the positions of all ICs assigned to this sector.
    for i in k: all_items = all_items+Dsect['ics'][i].items
    s.items = all_items
    # NOTE(review): under Python 2 integer division, 1/len(sec_groups) is 0,
    # so every sector gets col == 0 -- confirm this is the intended coloring.
    s.col = (1/len(sec_groups))*n
    sectors.append(s)
#plot the re-ordered matrix, now grouped by sector
plt.subplot(122)
line_index=0
sortpos = list()
for s in sectors:
    sortpos.extend(s.items)
plt.imshow(Dsca['Csca'][np.ix_(sortpos, sortpos)], vmin=0, vmax=2,\
           interpolation='none',aspect='equal',\
           extent=[0,len(sortpos),0,len(sortpos)])
for s in sectors:
    plt.plot([line_index+len(s.items),line_index+len(s.items)],\
             [0,len(sortpos)],'w', linewidth = 2)
    plt.plot([0,sum(Dsect['icsize'])],[len(sortpos)-line_index, \
             len(sortpos)-line_index],'w', linewidth = 2)
    line_index += len(s.items)
plt.tight_layout()
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
Print the sector positions, in a format suitable for PyMOL, and create a PyMOL session with the sectors (and their decomposition into independent components) as separate objects. Structurally, sectors 1+3 form physically contiguous units, and 2 is less so... this is consistent with the idea that sector 2/IC4 might be associated with sector 1/ICs1+3+5+6
|
# Print each sector's residues in PyMOL selection syntax and write a .pml
# session with sectors and ICs as separate objects (structure 1FQG, chain A).
for i,k in enumerate(sectors):
    sort_ipos = sorted(k.items)
    ats_ipos = ([Dseq['ats'][s] for s in sort_ipos])
    ic_pymol = ('+'.join(ats_ipos))
    print('Sector %i is composed of %i positions:' % (i+1,len(ats_ipos)))
    print(ic_pymol + "\n")
sca.writePymol('1FQG', sectors, Dsect['ics'], Dseq['ats'], \
               'Outputs/PF13354.pml','A', '../Inputs/', 0)
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
III. The phylogenetic basis of the sector hierarchy
How does the clear phylogenetic heterogeneity in the MSA influence the sector definitions? To address this, we take advantage of mathematical methods for mapping between the space of positional and sequence correlations, as described in Rivoire et al. Using this mapping, we plot the top $k_{pos}$ ICs as 2-D scatter plots with the corresponding sequence space divergence. The colors for the sequence space are according to the phylogenetic classifications we chose above.
|
plt.rcParams['figure.figsize'] = 15,8
pairs= [[0,1],[2,3],[4,5]]
# Top row: positional ICs colored by sector; bottom row: the corresponding
# sequence-space components colored by phylogenetic group.
for n,[k1,k2] in enumerate(pairs):
    plt.subplot(2,3,n+1)
    sca.figUnits(Dsect['Vpica'][:,k1], Dsect['Vpica'][:,k2], sectors, dotsize = 6)
    plt.xlabel(r'$V^p_{%i}$' % (k1+1), fontsize=16)
    plt.ylabel(r'$V^p_{%i}$' % (k2+1), fontsize=16)
    plt.subplot(2,3,n+4)
    sca.figUnits(Dsect['Upica'][:,k1], Dsect['Upica'][:,k2], phylo, dotsize = 6)
    plt.xlabel(r'$U^p_{%i}$' % (k1+1), fontsize=16)
    plt.ylabel(r'$U^p_{%i}$' % (k2+1), fontsize=16)
plt.tight_layout()
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
The interpretation for the two sectors:
Sector 1 is defined along ($V_2^p$). The sequences along the corresponding component ($U_2^p$) are homogeneously distributed with respect to phylogeny, consistent with the notion that this sector is a property of the entire alignment. Notably, this sector forms the catalytic core of the Beta-lactamase.
Sector 2 is composed of ICs 1,3,4 and 5 - and each of these is associated with some phylogenetic divergence. $V_1^p$ splits the cyanobacteria (green) from the proteobacteria (blue), $V_3^p$ separates the proteobacteria (blue) from other sequence families, $V_5^p$ separates out a subset of the firmicutes (red), and $V_6^p$ is associated with a divergence in the bacteroidetes (cyan). Sector 2 forms a physically contiguous unit that resembles a shell around the active site. The decomposition described above suggests that some functional divergence in beta-lactamase dynamics or regulatory mechanism across phylogenetic lines may underlie the breakdown of this sector.
For clarity, we also plot the same data as a stacked bar chart below.
|
plt.rcParams['figure.figsize'] = 20, 5
# RGB colors for each phylogenetic group, derived from their HSV hue values.
col = list()
for k in phylo:
    col = col + [colorsys.hsv_to_rgb(k.col,1,1)]
# One stacked histogram per sequence IC, split by phylogenetic group.
for k in range(Dsect['kpos']):
    forhist = list()
    for group in phylo:
        forhist.append([Dsect['Upica'][i,k] for i in group.items])
    plt.subplot(2,Dsect['kpos'],k+1)
    plt.hist(forhist, histtype='barstacked',color=col)
|
SCA_betalactamase.ipynb
|
reynoldsk/pySCA
|
bsd-3-clause
|
Find and extract all target contigs
Once you got your reference fasta files ready you are good to start with extracting the contigs of interest. For this purpose we want to create an overview over which contigs represent which reference locus in each sample. At the same time we also have to be somewhat selective and discard potential duplicates that match several loci. Let's check the function that helps you do this:
|
%%bash
source activate secapr_env
# Show the help text for the find_target_contigs subcommand.
secapr find_target_contigs -h
|
docs/notebook/subdocs/extract_contigs.ipynb
|
AntonelliLab/seqcap_processor
|
mit
|
Note that in this step SECAPR will index all your locus names stored in the reference file, so in all downstream steps your loci will carry a numerical index. The translation table holding the information which index corresponds to which locus name is stored in a text file in the output folder (reference_fasta_header_info.txt).
The sensitivity of the blast algorithm (LASTZ) can be altered with the flags --min-coverage and --min-identity. High values mean conservative matching requirements, while low values will return more matches but also possibly non-orthologous sequences.
However, being too lenient with the --min-coverage and --min-identity flags can also lead to an increase of loci that are identified as paralogous, which happens when more than 1 contig matches the reference sequence.
You can choose to add the --keep-duplicates flag, in order to also keep contigs which span across multiple loci. These will be extracted independently for each locus they match and may hence be present in several copies in the FASTA file containing your extracted contigs. If this flag is used, a txt file with the duplicate information for each sample is printed into the sample's output folder (reference_fasta_header_info.txt). This file contains the contig ID in the first column, followed by the list of reference locus indices that the contig matched.
The --keep-paralogs flag should be used with caution. This will lead to SECAPR keeping loci for which indications of paralogy were found (multiple matching contigs). These loci should generally not be used for phylogenetic inferences (unless you are certain for other reasons that you are not concerned about paralogous loci in your dataset), but they may be of interest in some cases. If this flag is used, a txt file with the paralog information for each sample is being printed into the samples output folder (info_paralogous_loci.txt). This file contains the reference locus index in the first column, followed by a list of contig names that matched to the locus.
Sometimes it turns out that despite being flagged, loci are not truly paralogous, but that multiple non-homologous/non-paralogous contigs were matched to the reference for other reasons (repetitive regions or other repeated sequence patterns). SECAPR provides an extra function to inspect where the flagged contigs end up on the reference (see documentation of paralogs_to_ref function). In those cases the paralogous loci can be included in further steps, i.e. apply the --keep-paralogs flag, which will extract only the longest (and usually best) contig for these loci.
Now let's run the script.
secapr find_target_contigs --contigs ../../data/processed/contigs/ --reference ../../data/raw/palm_reference_sequences.fasta --output ../../data/processed/target_contigs --keep-duplicates
To get a first idea of the resulting matches, you can have a look at the file match_table.txt in the output folder.
|
import pandas as pd
# Tab-separated match table: rows are reference loci, columns are samples.
table = pd.read_csv('../../data/processed/target_contigs/match_table.txt', delimiter = '\t',index_col=0)
table.head()
|
docs/notebook/subdocs/extract_contigs.ipynb
|
AntonelliLab/seqcap_processor
|
mit
|
Those fields containing a '1' indicate that a unique match was extracted from the contig sequences for the respective exon and sample. If the output reveals a very low harvest of target sequences, you can try to reduce the values for the flags --min-coverage and --min-identity in order to be more generous in the matching step. If on the other hand your output turns out to capture a lot of non-homologous sequences between the different samples (can be identified after the alignment step), you may want to turn up the values for these flags in order to be more conservative in your search.
The script also prints out summary stats in a textfile in the output folder:
|
%%bash
# Print the per-sample summary statistics produced by find_target_contigs.
cat ../../data/processed/target_contigs/summary_stats.txt
|
docs/notebook/subdocs/extract_contigs.ipynb
|
AntonelliLab/seqcap_processor
|
mit
|
Plotting with SECAPR
SECAPR provides a plotting function that can be used to get a visual overview of the extracted target contigs. The function can be called as follows:
|
%%bash
source activate secapr_env
# Show the help text for the plot_sequence_yield subcommand.
secapr plot_sequence_yield -h
|
docs/notebook/subdocs/extract_contigs.ipynb
|
AntonelliLab/seqcap_processor
|
mit
|
For now we only want to print the contig data, so we can execute the function like this:
secapr plot_sequence_yield --extracted_contigs ../../data/processed/target_contigs --output ../../data/processed/plots
The resulting plot looks like this and shows which contigs could be extracted (blue) and which were not present (white) for each sample and locus:
|
from IPython.display import Image, display
# Embed the contig-yield overview generated by `secapr plot_sequence_yield`.
img1 = Image("../../data/processed/plots/contig_yield_overview.png",width=1000)
display(img1)
|
docs/notebook/subdocs/extract_contigs.ipynb
|
AntonelliLab/seqcap_processor
|
mit
|
Step 2: Read data file
|
# Read Table 1 of Hogg et al. (2010) with astropy's ASCII reader; `dat`
# behaves like a table with named columns (dat["x"], dat["y"], dat["ey"], ...).
dat = ascii.read("../data/hogg-et-al-2010-table1.csv")
print(dat)
|
notebooks/Fitting-a-staight-line-to-data.py.ipynb
|
davidjaimes/ncat
|
unlicense
|
Step 3: Form matrices
\begin{equation}
\textbf{Y} =
\begin{bmatrix}
y_1 \\
y_2 \\
\vdots \\
y_N \\
\end{bmatrix}
,
\textbf{A} =
\begin{bmatrix}
1 & x_1 \\
1 & x_2 \\
\vdots & \vdots \\
1 & x_N \\
\end{bmatrix}
,
\textbf{C} =
\begin{bmatrix}
\sigma_{y_1}^2 & 0 & \dots & 0 \\
0 & \sigma_{y_2}^2 & \dots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \dots & \sigma_{y_N}^2 \\
\end{bmatrix}
\end{equation}
|
# Design matrix A: a column of ones (intercept term) next to the x values.
A = np.column_stack((np.ones(len(dat["x"])), dat["x"]))
# Covariance matrix C: diagonal matrix of the squared y uncertainties.
C = np.diag(np.asarray(dat["ey"]) ** 2.0)
|
notebooks/Fitting-a-staight-line-to-data.py.ipynb
|
davidjaimes/ncat
|
unlicense
|
Step 4: Perform best-fit
\begin{equation}
\begin{bmatrix}
b \\
m \\
\end{bmatrix}
= \textbf{X} = [\textbf{A}^T \textbf{C}^{-1} \textbf{A}]^{-1} [\textbf{A}^T \textbf{C}^{-1} \textbf{Y}]
\end{equation}
|
# Weighted least-squares solution X = [A^T C^-1 A]^-1 [A^T C^-1 Y].
# Invert C once; term1 is also the parameter covariance matrix and is reused
# below to compute the standard errors, so its name must stay `term1`.
C_inv = np.linalg.inv(C)
term1 = np.linalg.inv(A.T @ C_inv @ A)
term2 = A.T @ C_inv @ dat["y"]
b, m = term1 @ term2
|
notebooks/Fitting-a-staight-line-to-data.py.ipynb
|
davidjaimes/ncat
|
unlicense
|
Step 5: Standard errors in b and m parameters
|
eb, em = np.sqrt(term1[0, 0]), np.sqrt(term1[1, 1])
|
notebooks/Fitting-a-staight-line-to-data.py.ipynb
|
davidjaimes/ncat
|
unlicense
|
Step 6: Plot results
|
# Plot the data with error bars and overlay the best-fit line y = m*x + b.
plt.errorbar(dat["x"], dat["y"], dat["ey"], marker=".", ls="none")
plt.plot(dat["x"], m * dat["x"] + b)
plt.xlabel("x-coordinate")
plt.ylabel("y-coordinate")
# Raw string: "\p" is not a valid escape sequence, so the template must be a
# raw string to avoid a DeprecationWarning (SyntaxWarning on recent Pythons).
# The rendered text is unchanged.
pf = r"y = ({:.2f}$\pm${:.2f})x + ({:.2f}$\pm${:.2f})".format(m, em, b, eb)
plt.text(s=pf, x=50, y=200, color="C1", size=16)
plt.show();
|
notebooks/Fitting-a-staight-line-to-data.py.ipynb
|
davidjaimes/ncat
|
unlicense
|
Computational considerations
Seismic inversion algorithms are generally very computationally demanding and require a large amount of memory to store the forward wavefield. In order to keep this tutorial as lightweight as possible we are using a very simple
velocity model that requires low temporal and spatial resolution. For a more realistic model, a second set of preset parameters for a reduced version of the 2D Marmousi data set [1] is provided below in comments. This can be run to create some more realistic subsurface images. However, this second preset is more computationally demanding and requires a slightly more powerful workstation.
|
# Configure model presets.
from examples.seismic import demo_model

# Choose the active preset here:
preset = 'layers-isotropic'       # a simple but cheap model (recommended)
# preset = 'marmousi2d-isotropic' # a larger, more realistic model

if preset == 'layers-isotropic':
    # Standard preset: a simple two-layer velocity model.
    def create_model(grid=None):
        return demo_model('layers-isotropic', origin=(0., 0.), shape=(101, 101),
                          spacing=(10., 10.), nbl=20, grid=grid, nlayers=2)
    filter_sigma = (1, 1)
    nshots = 21
    nreceivers = 101
    t0 = 0.
    tn = 1000.  # simulate 1 second (1000 ms)
    f0 = 0.010  # 10 Hz source peak frequency (0.010 kHz)

if preset == 'marmousi2d-isotropic':
    # More computationally demanding preset based on the 2D Marmousi model.
    def create_model(grid=None):
        return demo_model('marmousi2d-isotropic', data_path='../../../../data/',
                          grid=grid, nbl=20)
    filter_sigma = (6, 6)
    nshots = 301      # need good coverage in shots: one every two grid points
    nreceivers = 601  # one receiver per grid point
    t0 = 0.
    tn = 3500.  # simulate 3.5 seconds (3500 ms)
    f0 = 0.025  # 25 Hz source peak frequency (0.025 kHz)
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
True and smooth velocity models
First, we create the model data for the "true" model from a given demonstration preset. This model represents the subsurface topology for the purposes of this example and we will later use it to generate our synthetic data readings. We also generate a second model and apply a smoothing filter to it, which represents our initial model for the imaging algorithm. The perturbation between these two models can be thought of as the image we are trying to recover.
|
#NBVAL_IGNORE_OUTPUT
from examples.seismic import plot_velocity, plot_perturbation
from devito import gaussian_smooth
# Create true model from a preset
model = create_model()
# Create initial model and smooth the boundaries; sharing model.grid keeps
# both models on the identical discretisation.
model0 = create_model(grid=model.grid)
gaussian_smooth(model0.vp, sigma=filter_sigma)
# Plot the true and initial model and the perturbation between them
plot_velocity(model)
plot_velocity(model0)
plot_perturbation(model0, model)
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
Acquisition geometry
Next we define the positioning and the wave signal of our source, as well as the location of our receivers. To generate the wavelet for our source we require the discretized values of time that we are going to use to model a single "shot",
which again depends on the grid spacing used in our model. For consistency this initial setup will look exactly as in the previous modelling tutorial, although we will vary the position of our source later on during the actual imaging algorithm.
|
#NBVAL_IGNORE_OUTPUT
# Define acquisition geometry: source
from examples.seismic import AcquisitionGeometry
# First, position source centrally in all dimensions, then set depth
src_coordinates = np.empty((1, 2))
src_coordinates[0, :] = np.array(model.domain_size) * .5
src_coordinates[0, -1] = 20.  # Depth is 20m
# Define acquisition geometry: receivers
# Initialize receivers for synthetic and imaging data
rec_coordinates = np.empty((nreceivers, 2))
rec_coordinates[:, 0] = np.linspace(0, model.domain_size[0], num=nreceivers)
rec_coordinates[:, 1] = 30.  # Receiver depth is 30m
# Geometry -- BUG FIX: pass the preset's peak frequency `f0` instead of the
# hard-coded 0.010, so the Marmousi preset (f0 = 0.025 kHz) gets the correct
# Ricker wavelet. For the layered preset the value is unchanged.
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates, t0, tn, f0=f0, src_type='Ricker')
# We can plot the time signature to see the wavelet
geometry.src.show()
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
True and smooth data
We can now generate the shot record (receiver readings) corresponding to our true and initial models. The difference between these two records will be the basis of the imaging procedure.
For this purpose we will use the same forward modelling operator that was introduced in the previous tutorial, provided by the AcousticWaveSolver utility class. This object instantiates a set of pre-defined operators according to an initial definition of the acquisition geometry, consisting of source and receiver symbols. The solver object caches the individual operators and provides a slightly more high-level API that allows us to invoke the modelling operators from the initial tutorial in a single line. In the following cells we use this to generate shot data by only specifying the respective model symbol m to use, and the solver will create and return a new Receiver object that represents the readings at the previously defined receiver coordinates.
|
# Compute synthetic data with forward operator
from examples.seismic.acoustic import AcousticWaveSolver
# The solver caches the operators built for this model/geometry pair.
solver = AcousticWaveSolver(model, geometry, space_order=4)
# "Observed" data: forward modelling through the true velocity model.
true_d , _, _ = solver.forward(vp=model.vp)
# Compute initial data with forward operator (smooth model)
smooth_d, _, _ = solver.forward(vp=model0.vp)
#NBVAL_IGNORE_OUTPUT
# Plot shot record for true and smooth velocity model and the difference
from examples.seismic import plot_shotrecord
plot_shotrecord(true_d.data, model, t0, tn)
plot_shotrecord(smooth_d.data, model, t0, tn)
plot_shotrecord(smooth_d.data - true_d.data, model, t0, tn)
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
Imaging with back-propagation
As explained in the introduction of this tutorial, this method is based on back-propagation.
Adjoint wave equation
If we go back to the modelling part, we can rewrite the simulation as a linear system solve:
\begin{equation}
\mathbf{A}(\mathbf{m}) \mathbf{u} = \mathbf{q}
\end{equation}
where $\mathbf{m}$ is the discretized square slowness, $\mathbf{q}$ is the discretized source and $\mathbf{A}(\mathbf{m})$ is the discretized wave-equation. The matrix representation of the discretized wave-equation is a lower triangular matrix that can be solved with forward substitution. The pointwise writing of the forward substitution leads to the time-stepping stencil.
On a small problem one could form the matrix explicitly and transpose it to obtain the adjoint discrete wave-equation:
\begin{equation}
\mathbf{A}(\mathbf{m})^T \mathbf{v} = \delta \mathbf{d}
\end{equation}
where $\mathbf{v}$ is the discrete adjoint wavefield and $\delta \mathbf{d}$ is the data residual defined as the difference between the field/observed data and the synthetic data $\mathbf{d}_s = \mathbf{P}_r \mathbf{u}$. In our case we derive the discrete adjoint wave-equation from the discrete forward wave-equation to get its stencil.
Imaging
Wave-equation based imaging relies on one simple concept:
If the background velocity model is kinematically correct, the forward wavefield $\mathbf{u}$ and the adjoint wavefield $\mathbf{v}$ meet at the reflectors' position at zero time offset.
The sum over time of the zero time-offset correlation of these two fields then creates an image of the subsurface. Mathematically this leads to the simple imaging condition:
\begin{equation}
\text{Image} = \sum_{t=1}^{n_t} \mathbf{u}[t] \mathbf{v}[t]
\end{equation}
In the following tutorials we will describe a more advanced imaging condition that produces sharper and more accurate results.
Operator
We will now define the imaging operator that computes the adjoint wavefield $\mathbf{v}$ and correlates it with the forward wavefield $\mathbf{u}$. This operator essentially consists of three components:
* Stencil update of the adjoint wavefield v
* Injection of the data residual at the adjoint source (forward receiver) location
* Correlation of u and v to compute the image contribution at each timestep
|
# Define gradient operator for imaging
from devito import TimeFunction, Operator, Eq, solve
from examples.seismic import PointSource
def ImagingOperator(model, image):
    """Build the Devito operator that back-propagates a data residual and
    accumulates the zero-lag correlation of forward (u) and adjoint (v)
    wavefields into `image`.

    NOTE(review): reads the notebook-global `geometry` for nt, the time axis
    and the receiver positions -- confirm it matches the shot being imaged.
    """
    # Define the wavefield with the size of the model and the time dimension
    v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=4)
    # u is saved over all nt timesteps so it can be correlated with v below.
    u = TimeFunction(name='u', grid=model.grid, time_order=2, space_order=4,
                     save=geometry.nt)
    # Define the wave equation, but with a negated damping term
    eqn = model.m * v.dt2 - v.laplace + model.damp * v.dt.T
    # Use `solve` to rearrange the equation into a stencil expression
    # (time-reversed update: v.backward)
    stencil = Eq(v.backward, solve(eqn, v.backward))
    # Define residual injection at the location of the forward receivers
    dt = model.critical_dt
    residual = PointSource(name='residual', grid=model.grid,
                           time_range=geometry.time_axis,
                           coordinates=geometry.rec_positions)
    res_term = residual.inject(field=v.backward, expr=residual * dt**2 / model.m)
    # Correlate u and v for the current time step and add it to the image
    image_update = Eq(image, image - u * v)
    # spacing_map substitutes the symbolic grid spacings with their values.
    return Operator([stencil] + res_term + [image_update],
                    subs=model.spacing_map)
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
Implementation of the imaging loop
As just explained, the forward wave-equation is solved forward in time while the adjoint wave-equation is solved in reversed time order. Therefore, correlating these two fields over time requires storing one of them. The computational procedure for imaging follows:
Simulate the forward wave-equation with the background velocity model to get the synthetic data and save the full wavefield $\mathbf{u}$
Compute the data residual
Back-propagate the data residual and compute on the fly the image contribution at each time step.
This procedure is applied to multiple source positions (shots) and summed to obtain the full image of the subsurface. We can first visualize the varying locations of the sources that we will use.
|
#NBVAL_IGNORE_OUTPUT
# Prepare the varying source locations: nshots sources spread along x at 30m depth.
source_locations = np.empty((nshots, 2), dtype=np.float32)
source_locations[:, 0] = np.linspace(0., 1000, num=nshots)
source_locations[:, 1] = 30.
plot_velocity(model, source=source_locations)
# Run imaging loop over shots
from devito import Function
# Create image symbol and instantiate the previously defined imaging operator.
# `image` accumulates the contribution of every shot.
image = Function(name='image', grid=model.grid)
op_imaging = ImagingOperator(model, image)
for i in range(nshots):
    print('Imaging source %d out of %d' % (i+1, nshots))
    # Update source location
    geometry.src_positions[0, :] = source_locations[i, :]
    # Generate synthetic data from true model
    true_d, _, _ = solver.forward(vp=model.vp)
    # Compute smooth data and full forward wavefield u0 (saved for correlation)
    smooth_d, u0, _ = solver.forward(vp=model0.vp, save=True)
    # Compute gradient from the data residual (fresh adjoint wavefield per shot)
    v = TimeFunction(name='v', grid=model.grid, time_order=2, space_order=4)
    residual = smooth_d.data - true_d.data
    op_imaging(u=u0, v=v, vp=model0.vp, dt=model0.critical_dt,
               residual=residual)
#NBVAL_IGNORE_OUTPUT
from examples.seismic import plot_image
# Plot the inverted image (spatial derivative sharpens the reflectors)
plot_image(np.diff(image.data, axis=1))
from devito import norm
# Regression guard for the notebook test-suite (order-of-magnitude check).
assert np.isclose(norm(image), 1e7, rtol=1e1)
|
examples/seismic/tutorials/02_rtm.ipynb
|
opesci/devito
|
mit
|
Load model
Loading Elowitz2000 - Repressilator in tellurium & roadrunner. A synthetic oscillatory network of transcriptional regulators.
The model is available from Biomodels as BIOMD0000000012.
|
import tellurium as te
# BioModels identifier of the Elowitz (2000) repressilator model.
biomodel = "BIOMD0000000012"
# Load the local SBML file into a roadrunner simulation instance.
r = te.loads("{}.xml".format(biomodel))
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Simulate model
Simple timecourse simulation which we want to visualize with cy3sbml.
|
# Reset to initial conditions, then integrate from t=0 to t=600 in 300 steps.
r.reset()
data = r.simulate(start=0, end=600, steps=300)
r.plot();
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Convert to dataframe
We store the resulting data in a pandas dataframe. roadrunner returns a NamedArray, we import this into pandas.
|
import pandas as pd
from matplotlib import pylab as plt
# sbml ids from timeCourseSelections
print(r.timeCourseSelections)
# Strip the concentration brackets, e.g. "[PX]" -> "PX", to get plain SBML ids.
sbml_ids = [name.replace("[", "").replace("]", "")
            for name in r.timeCourseSelections]
print(sbml_ids)
# create dataframe: column 0 is time (becomes the index), the rest are species.
df = pd.DataFrame(data[:, 1:], index=data[:,0], columns=sbml_ids[1:])
df.head(10)
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Connect to Cytoscape
We use py2cytoscape, which has wrapper modules for the cyREST RESTful API. This lets us access Cytoscape features in a more Pythonic way instead of calling the raw REST API via HTTP.
http://idekerlab.github.io/cyREST/#-1099067042
|
# !!! Start Cytoscape 3 with cyREST App
# start client
import json
import py2cytoscape
from py2cytoscape.data.cyrest_client import CyRestClient
from py2cytoscape.data.cynetwork import CyNetwork
from py2cytoscape.data.style import StyleUtil
# Connect to the cyREST endpoint of the locally running Cytoscape instance.
cy = CyRestClient()
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Load SBML in cy3sbml
All the interaction with cytoscape from now on is via simple REST API!
py2cytoscape is only a simple wrapper to hide some of the REST calls.
The same can be done from anywhere (different programming language, ...).
|
# Reset: delete the current Cytoscape session.
cy.session.delete()
# Step 2: Load SBML network
# cy3sbml's reader handles the .xml SBML file; create_from() returns the
# list of networks it generated from the model.
networks = cy.network.create_from('BIOMD0000000012.xml')
print(networks)
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Information about SBML networks
|
# Inspect the networks cy3sbml created from the SBML file and remember the
# "Base" network (the reaction-species graph) for later use.
base_net = None
for network in networks:
    # unique id of cytoscape objects
    suid = network.get_id()
    name = network.get_network_value(column='name')
    print(network.get_network_value(column='name'))
    if name.startswith('Base'):
        base_net = network
# Show the selected Base network
separator = '-' * 60
print(separator)
print(base_net)
print(separator)
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Information about nodes
|
# Count nodes and edges, then inspect the attribute table of a single node.
node_ids = base_net.get_nodes()
edge_ids = base_net.get_edges()
print('* This network has ' + str(len(node_ids)) + ' nodes and ' + str(len(edge_ids)) + ' edges\n')
# Fetch one node's table row as a pandas Series object
first_node = node_ids[0]
node_row = base_net.get_node_value(id=first_node)
print(node_row)
# ...or pick just a single cell of that row
node_name = base_net.get_node_value(id=first_node, column='name')
print('\nThis node has name: ' + str(node_name))
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Node table
All table attributes are available and can be manipulated.
|
# cell = base_net.get_node_value(id=node0, column='name')
# Fetch the complete node attribute table as a pandas DataFrame.
node_table = base_net.get_node_table()
node_table.head()
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Find nodes for sbml ids
We now find the nodes which correspond to the species we simulated in the model.
The suid (unique node identifiers) are retrieved and stored for lookup.
|
# Map each SBML species id to the SUID of its Cytoscape node by filtering
# the node table on the "sbml id" column (boolean/logical indexing).
node_suids = []
for sid in sbml_ids[1:]:
    suid_index = node_table[node_table["sbml id"]==sid].index
    # Index.get_values() was deprecated in pandas 0.25 and removed in 1.0;
    # positional indexing returns the same first matching SUID.
    node_suids.append(suid_index[0])
suid2sid = dict(zip(node_suids, sbml_ids[1:]))
suid2sid
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Get CyNetworkView
CyNetworkView is a reference to a network view in your current Cytoscape session. This means CyNetworkView objects do not include actual data, such as node size or edge stroke colors. Instead, they hold a location of the data and create actual REST calls when you execute get/update methods for nodes and edges.
|
# Get views for a network: Cytoscape "may" have multiple views, and that's why it returns list instead of an object.
view_ids = base_net.get_views()
print(view_ids)
# format option specifies the return type
view1 = base_net.get_view(view_ids[0], format='view')
# This is a CyNetworkView object, not dictionary
print(view1)
from py2cytoscape.data.util_network import NetworkUtil as util
import time
# DataFrame for node views
df_vs_node = pd.DataFrame(index=node_suids,
                          columns=['NODE_WIDTH', 'NODE_HEIGHT'])
# The per-species maximum is invariant over the animation, so compute it once
# instead of once per node per timepoint.
max_value = {s: max(df[s]) for s in df.columns}
# Animate: for every timepoint scale each node's size by the species amount,
# normalised with its maximum (largest size = 80 px).
# (The original code first assigned value/15 and immediately overwrote it
# with the normalised value; the dead assignments have been removed.)
for timepoint in df.index:
    for index, row in df_vs_node.iterrows():
        sbml_id = suid2sid.get(index)
        row['NODE_WIDTH'] = 80 * df.loc[timepoint, sbml_id]/max_value[sbml_id]
        row['NODE_HEIGHT'] = 80 * df.loc[timepoint, sbml_id]/max_value[sbml_id]
    # Apply for timepoint
    view1.batch_update_node_views(df_vs_node)
    # wait between animation frames
    time.sleep(0.03)
# reset to original size
for index, row in df_vs_node.iterrows():
    sbml_id = suid2sid.get(index)
    # row['NODE_FILL_COLOR'] = '#FF0000'
    row['NODE_WIDTH'] = 40
    row['NODE_HEIGHT'] = 40
view1.batch_update_node_views(df_vs_node)
r.plot()
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
Set node images
Create images for species
In a first step we want to display images in the network.
For this the individual figures are created and store in the results folder
|
# create results folder
import os
results_dir = "./results/{}".format(biomodel)
if not os.path.exists(results_dir):
    # BUG FIX: os.mkdir() was called without the path argument (TypeError);
    # makedirs also creates the parent "./results" directory if missing.
    os.makedirs(results_dir)
# Create one timecourse image per species.
# NOTE: the original bound the index to a name `time`, shadowing the `time`
# module imported in the previous cell; renamed to avoid the clash.
timepoints = df.index
for sbml_id in df.columns:
    # Create plot
    plt.figure(figsize=[2,2])
    plt.plot(timepoints, df[sbml_id], linewidth=2, color="black")
    plt.xlabel('time')
    plt.ylabel(sbml_id)
    plt.savefig('{}/{}.png'.format(results_dir, sbml_id))
## Serve images
# (the following lines were stray markdown inside the code cell -- kept as
# comments so the cell is valid Python)
# We run a simple file server to serve the images via URL:
# !!! serve the images on webserver
#     python -m SimpleHTTPServer
# The images are then reachable under
#     http://localhost:8000/results/BIOMD0000000012/
# DataFrame for node views
df_vs_node = pd.DataFrame(index=node_suids,
                          columns=['NODE_CUSTOM_PAINT_1'])
# Point each node's custom graphics at its served timecourse image
for index, row in df_vs_node.iterrows():
    sbml_id = suid2sid.get(index)
    # row['NODE_FILL_COLOR'] = '#FF0000'
    row['NODE_CUSTOM_PAINT_1'] = 'http://localhost:8000/results/BIOMD0000000012/{}.png'.format(sbml_id)
# Apply for timepoint
view1.batch_update_node_views(df_vs_node)
# apply styles
# cy.style.apply(style=minimal_style, network=base_net)
# apply layout
# cy.layout.apply(name='force-directed', network=base_net)
|
docs/presentation/2016-09-20_COMBINE2016/examples/COMBINE2016_cy3sbml_demo.ipynb
|
matthiaskoenig/cy3sbml
|
lgpl-3.0
|
The for loop
The simplest, but slowest, method is obviously the good old for loop. Let's test this first
|
def ForLoop(f):
moving_val = []
moving_time = []
for i in xrange(0, N-window_length, window_shift):
moving_val.append(f(x[i:i+window_length]))
moving_time.append(np.average(t[i:i+window_length]))
return moving_time, moving_val
plt.plot(*ForLoop(mad))
plt.show()
%timeit ForLoop(mad)
|
_notebooks/2015-01-30-Applying-python-functions-in-moving-windows.ipynb
|
ga7g08/ga7g08.github.io
|
mit
|
Pandas rolling_apply
Next up is the inspiration for all this, pandas.rolling_apply. This is by far the easiest
method since it can be implemented in one line. However, as far as I can see there is no way
to set the window_shift
|
# NOTE(review): pd.rolling_apply was deprecated in pandas 0.18 and removed in
# 0.23; on modern pandas the equivalent is
#   pd.Series(x).rolling(window_length, center=True).apply(mad)
# center=True aligns each result with the middle of its window.
out = pd.rolling_apply(x, window_length, mad, center=True)
plt.plot(t, out)
plt.show()
%timeit out = pd.rolling_apply(x, window_length, mad)
|
_notebooks/2015-01-30-Applying-python-functions-in-moving-windows.ipynb
|
ga7g08/ga7g08.github.io
|
mit
|
Numpy broadcast to array
Next we will broadcast the 1D array to a 2D array, compute the function along
the new axis. This will require some effort to rewrite the function so it
handles the shapes correctly. For help in understanding how this is done I
really recommend taking a look at this scipy page
|
def NumpyArray():
mad_array = lambda x: np.fabs(x.T - x.mean(axis=1)).mean(axis=0)
vert_idx_list = np.arange(0, N - window_length, window_shift)
hori_idx_list = np.arange(window_length)
A, B = np.meshgrid(hori_idx_list, vert_idx_list)
idx_array = A + B
x_array = x[idx_array]
return t[vert_idx_list+int(window_length/2.)], mad_array(x_array)
plt.plot(*NumpyArray())
plt.show()
%timeit NumpyArray()
|
_notebooks/2015-01-30-Applying-python-functions-in-moving-windows.ipynb
|
ga7g08/ga7g08.github.io
|
mit
|
Alright now we’re going to be working with the running data set. so let’s go ahead and import it. We’ll see again that it’s not converting the dates so we’ve got to do that manually. However it doesn’t work exactly like it worked last time.
|
# Read the running data; note the date column is NOT parsed yet (see below).
df = pd.read_csv('../data/date_fixed_running_data.csv')
df.head()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
This is because when you save a data frame to a csv it doesn’t label the index column. So now our column is actually the ‘zero’ column. When you use parse_dates you can use either the column name (if available) or the 0-based column index number. This happened because the index didn’t have a name when we saved the last csv.
|
# The former index was saved without a name, so it shows up as 'Unnamed: 0'.
df['Unnamed: 0'].head()
# Re-read, asking pandas to parse the 'Date' column as datetimes by name.
df = pd.read_csv('../data/date_fixed_running_data.csv', parse_dates=['Date'])
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
We can import the date correctly by specifying the zero based look up or by specifying the name like we did in the last video.
|
# parse_dates also accepts a 0-based column index -- handy when the column
# header is missing or unreliable.
df = pd.read_csv('../data/date_fixed_running_data.csv', parse_dates=[0])
df.head()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Let’s go ahead and rename the columns and set our index to the dates again.
|
# Rename all three columns at once by assigning to df.columns.
cols = ['Date', 'Miles', 'Time']
df.columns = cols
df.head()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
We can also rename the column using the rename method.
|
# Targeted rename of just the first column via rename() (a no-op here, it is
# already 'Date'); inplace=True mutates df instead of returning a copy.
df.rename(columns={df.columns[0]:'Date'}, inplace=True)
df.head()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Now we can plot it again once we set the index.
|
# Use the dates as the index so plotting puts time on the x-axis.
df.set_index('Date', inplace=True)
df.plot()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Now one thing we’re going to want to do is get a breakdown of times for each run. Stats like minutes per mile and that sort of thing.
To do that we’re going to use map. In order to convert to seconds we’re going to need to convert our times represented as hour/min/seconds into just pure seconds. From that we can do minutes etc.
Let’s go ahead and write our map function. Now there is an edge case that we need to handle. Remember we don’t have time data for every single run so we’ve got to handle the NaN values correctly.
we'll do this by just keeping them in there.
|
raw_time_fmt = df.Time[0]
def get_total_seconds(raw_time):
    """Convert an "H:MM:SS" time string to total seconds.

    Parameters
    ----------
    raw_time : str or float
        Time as "hours:minutes:seconds"; runs without a recorded time come
        through as NaN.

    Returns
    -------
    int or float
        Total seconds, or NaN if the input was missing.
    """
    # ROBUSTNESS FIX: `raw_time is np.nan` only matches the np.nan singleton;
    # pd.isna() also catches float('nan') and None, which CSV parsing can
    # legitimately produce.
    if pd.isna(raw_time):
        return np.nan # if it's blank, keep it blank
    hrs, mins, seconds = str(raw_time).split(':')
    seconds = int(seconds) + 60 * int(mins) + 60 * 60 * int(hrs)
    return seconds
# Sanity-check on the first recorded time, then map over the whole column.
print(get_total_seconds(raw_time_fmt))
df['Seconds'] = df.Time.map(get_total_seconds)
df.head(10)
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Now we can describe and see I recorded times for 52 of the runs.
|
df.describe()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
This is a great example of what np.NaN is so great in pandas. When we do this describe we get answers unlike in numpy and it just disregards the empty values. If we were to fill it in with zeros that would drag down our statistics.
We can see that here. See how much it changes the data when it is filled with 0 which would be incorrect analysis.
|
df.fillna(0).describe()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Now that we have seconds it would be useful to see minutes too. I find it hard to think in seconds.
So let’s add a new column.
|
# Vectorised division replaces the per-element map/lambda: NaN values
# propagate identically and the whole column converts in one C-level pass.
df['Minutes'] = df['Seconds'] / 60
df.describe()
|
4 - pandas Basics/4-6 pandas DataFrame Renaming Cols, Handling NaN Values, Maps, Intermediate Plotting, + Rolling Values, + Basic Date Indexing.ipynb
|
mitchshack/data_analysis_with_python_and_pandas
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.