markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
The types in the first text are:
|
print(types1)
|
notebooks/Python for Text Similarities.ipynb
|
dcavar/python-tutorial-for-ipython
|
apache-2.0
|
We can generate the intersection from the two sets of types in the following way:
|
print(set.intersection(types1, types2))
|
notebooks/Python for Text Similarities.ipynb
|
dcavar/python-tutorial-for-ipython
|
apache-2.0
|
To calculate the Jaccard coefficient we divide the length of the intersection of the sets of types by the length of the union of these sets:
|
lenIntersect = len(set.intersection(types1, types2))
lenUnion = len(set.union(types1, types2))
print(lenIntersect / lenUnion)
|
notebooks/Python for Text Similarities.ipynb
|
dcavar/python-tutorial-for-ipython
|
apache-2.0
|
onset_detect
|
def test_onset_detect(y, hop_length):
    """Detect pulse onsets in a noise-gated copy of the signal.

    Returns the sample indices corresponding to detected onset frames.
    """
    # Silence every sample whose magnitude falls below 1.5x the overall RMS,
    # then run librosa's onset detector on the gated signal.
    gated = y.copy()
    overall_rms = np.sqrt(np.mean(y ** 2))
    gated[np.abs(y) < overall_rms * 1.5] = 0.0
    # NOTE(review): `sr` is read from the enclosing scope, not passed in —
    # the function only works after a librosa.load() has set it.
    onset_frames = librosa.onset.onset_detect(y=gated, sr=sr, hop_length=hop_length)
    return librosa.frames_to_samples(onset_frames, hop_length=hop_length)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_onset_detect(y, 384)
print(len(index_list))
index_list
%%timeit
index_list = test_onset_detect(y, 384)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_onset_detect(y, 384)#Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Ppip_TE384.wav', sr=None)
index_list = test_onset_detect(y, 384)#Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Myotis-Plecotus-Eptesicus_TE384.wav', sr=None)
index_list = test_onset_detect(y, 384)#Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
|
notebooks/experimental/librosa_detect_bat_pulses_in_time_domain.ipynb
|
cloudedbats/cloudedbats_dsp
|
mit
|
rmse + localmax
|
def test_rmse_localmax(y, hop_length):
    """Detect pulses as local maxima of the RMS envelope of a noise-gated signal.

    Parameters: y (audio samples), hop_length (hop size in samples).
    Returns the sample indices of frames where the RMS envelope has a
    local maximum.
    """
    gated = y.copy()
    rms_tot = np.sqrt(np.mean(y ** 2))
    # Noise gate: zero out everything below 1.5x the overall RMS.
    gated[np.abs(y) < rms_tot * 1.5] = 0.0
    # Bug fix: the original hard-coded hop_length=384 here, silently
    # ignoring the function's hop_length parameter.
    rmse = librosa.feature.rms(y=gated, hop_length=hop_length,
                               frame_length=1024, center=True)
    locmax = librosa.util.localmax(rmse.T)
    # Frame indices where a local maximum was flagged (replaces the
    # manual enumerate/append loop).
    max_frames = np.flatnonzero(locmax)
    return librosa.frames_to_samples(max_frames, hop_length=hop_length)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_rmse_localmax(y, 384)
print(len(index_list))
index_list
%%timeit
test_rmse_localmax(y, 384)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_rmse_localmax(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Ppip_TE384.wav', sr=None)
index_list = test_rmse_localmax(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Myotis-Plecotus-Eptesicus_TE384.wav', sr=None)
index_list = test_rmse_localmax(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x-200:x+200].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
|
notebooks/experimental/librosa_detect_bat_pulses_in_time_domain.ipynb
|
cloudedbats/cloudedbats_dsp
|
mit
|
onset_strength and peak_pick
|
def test_onset_strength_and_peak_pick(y, hop_length):
    """Detect pulses via an onset-strength envelope plus librosa's peak picker.

    Parameters: y (audio samples), hop_length (hop size in samples).
    Returns the sample indices of the picked peak frames.
    """
    gated = y.copy()
    rms_tot = np.sqrt(np.mean(y ** 2))
    # Noise gate: zero out everything below 1.5x the overall RMS.
    gated[np.abs(y) < rms_tot * 1.5] = 0.0
    # NOTE(review): `sr` is read from the enclosing scope, not a parameter.
    # Bug fix: the original hard-coded hop_length=384 here, silently
    # ignoring the function's hop_length parameter.
    onset_env = librosa.onset.onset_strength(y=gated, sr=sr,
                                             hop_length=hop_length,
                                             aggregate=np.median)
    # peak_pick args: pre/post max window, pre/post mean window,
    # delta threshold, minimum inter-peak wait (frames).
    peak_index_list = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
    return librosa.frames_to_samples(peak_index_list, hop_length=hop_length)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_onset_strength_and_peak_pick(y, 384)
print(len(index_list))
index_list
%%timeit
test_onset_strength_and_peak_pick(y, 384)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_onset_strength_and_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Ppip_TE384.wav', sr=None)
index_list = test_onset_strength_and_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Myotis-Plecotus-Eptesicus_TE384.wav', sr=None)
index_list = test_onset_strength_and_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
|
notebooks/experimental/librosa_detect_bat_pulses_in_time_domain.ipynb
|
cloudedbats/cloudedbats_dsp
|
mit
|
peak_pick
|
def test_peak_pick(y, hop_length):
    """Detect peaks directly in the time domain of a noise-gated signal.

    Parameters: y (audio samples), hop_length (samples per hop; also used
    here as the samples-per-millisecond count for a 384 kHz TE recording —
    NOTE(review): confirm that interpretation with the callers).
    Returns the sample indices returned by librosa.util.peak_pick.
    """
    gated = y.copy()
    rms_tot = np.sqrt(np.mean(y ** 2))
    # Noise gate: zero out everything below 1.5x the overall RMS.
    gated[np.abs(y) < rms_tot * 1.5] = 0.0
    samples_per_ms = hop_length
    # Bug fix: peak_pick requires integer window sizes; the original's
    # true division produced floats (96.0, 48.0). Floor division gives
    # the same values as ints (384 is divisible by 4 and 8).
    minmax_window = samples_per_ms // 4
    mean_window = samples_per_ms // 8
    sensitivity = rms_tot * 1.5  # delta threshold above the local mean
    skip_ms = 1  # minimum spacing between accepted peaks, in ms
    return librosa.util.peak_pick(gated,
                                  minmax_window, minmax_window,
                                  mean_window, mean_window,
                                  sensitivity,
                                  samples_per_ms * skip_ms)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_peak_pick(y, 384)
print(len(index_list))
index_list
%%timeit
test_peak_pick(y, 384)
y, sr = librosa.load('../data_in/Mdau_TE384.wav', sr=None)
index_list = test_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Ppip_TE384.wav', sr=None)
index_list = test_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
y, sr = librosa.load('../data_in/Myotis-Plecotus-Eptesicus_TE384.wav', sr=None)
index_list = test_peak_pick(y, 384) # Compare to original signal. Red dots are peaks.
plt.plot(y)
plt.scatter(index_list, [y[x:x+400].max() for x in index_list], color='r')
print(len(index_list))
plt.show()
|
notebooks/experimental/librosa_detect_bat_pulses_in_time_domain.ipynb
|
cloudedbats/cloudedbats_dsp
|
mit
|
In the code below, resize the image to the target resolution
|
import numpy as np
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import tensorflow as tf
raw_image = imread('model/datasets/nudity_dataset/3.jpg')
image = tf.placeholder("uint8", [None, None, 3])
image1 = tf.image.convert_image_dtype(image, dtype = tf.float32)
image1_t = tf.expand_dims(image1, 0)
image2 = tf.image.resize_bilinear(image1_t, [270, 270], align_corners=False)
image2 = tf.squeeze(image2, [0])
image3 = tf.sub(image2, 0.5)
image3 = tf.mul(image2, 2.0)
model = tf.initialize_all_variables()
with tf.Session() as session:
session.run(model)
result = session.run(image3, feed_dict={image:raw_image})
## Draw image
fig = plt.figure()
a = fig.add_subplot(1,2,1)
plt.imshow(raw_image)
a = fig.add_subplot(1,2,2)
plt.imshow(result)
plt.show()
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
1.1 Create a standard training dataset
|
%matplotlib inline
%load_ext autoreload
%autoreload 2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
from model.datasets.data import generate_standard_dataset
# Load Normal and Nude images into the train dataset
image_normal_ls, file_name_normal = generate_standard_dataset('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/train/normal')
nudity_ls, file_name_nudity = generate_standard_dataset('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/train/nude')
init_op = tf.initialize_all_variables()
labels = np.zeros(3000, dtype = np.uint)
database = []
with tf.Session() as session:
session.run(init_op)
# Start populating the filename queue
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
for i in range(3000):
#print i
if i % 2 == 0:
image = image_normal_ls.eval()
else:
image = nudity_ls.eval()
labels[i] = 1
database.append(image)
coord.request_stop()
database = np.array(database)
from Dataset.data import generate_standard_dataset
import numpy as np
import tensorflow as tf
img_nudity, _ = generate_standard_dataset('/media/taivu/Data/Project/Nudity_Detection/src/model/datasets/AdditionalDataset/vng/sex')
labels = np.ones(100, dtype = np.uint)
dataset = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
for i in range(100):
image = img_nudity.eval()
dataset.append(image)
coord.request_stop()
database = np.array(dataset)
print file_name_normal[1123]
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Generate tfrecords
|
import os
import tensorflow as tf
def _int64_feature(value):
    """Wrap a single integer as a tf.train.Feature holding an Int64List."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a single bytes value as a tf.train.Feature holding a BytesList."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def convert_to(data_dir, dataset, labels, name):
    """Serialize a dataset of images and labels to a .tfrecords file.

    data_dir : directory in which to write the output file.
    dataset  : array of images, shape (num_examples, rows, cols, depth).
    labels   : per-example integer labels aligned with dataset.
    name     : output file is written to <data_dir>/<name>.tfrecords.
    """
    num_examples = dataset.shape[0]
    # All images are assumed to share the shape of the first one.
    rows, cols, depth = dataset[0].shape
    filename = os.path.join(data_dir, name + '.tfrecords')
    # Context manager guarantees the writer is closed even if an example
    # fails to serialize (the original leaked the writer on error). It also
    # removes the original's no-op self-assignments (images = dataset, etc.).
    with tf.python_io.TFRecordWriter(filename) as writer:
        for idx in range(num_examples):
            image_raw = dataset[idx].tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'height': _int64_feature(rows),
                'width': _int64_feature(cols),
                'depth': _int64_feature(depth),
                'label': _int64_feature(int(labels[idx])),
                'image_raw': _bytes_feature(image_raw)
            }))
            writer.write(example.SerializeToString())
convert_to('/home/taivu/workspace/NudityDetection/Dataset',
database, labels, 'nudity_test_set')
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Read a batch images
|
import tensorflow as tf
import matplotlib.pyplot as plt
def read_and_decode(filename_queue):
    """Read one serialized tf.train.Example from the queue and decode it.

    Returns a tuple of tensors (image, label, height, width, depth); the
    image is reshaped to a fixed 34x34x3 float32 tensor.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'height': tf.FixedLenFeature([], tf.int64)
        })
    # Pixels were serialized with tostring() on float32 data, so decode as float32.
    image = tf.decode_raw(features['image_raw'], tf.float32)
    # NOTE(review): the shape is hard-coded; the stored height/width/depth
    # features are decoded below but not used for this reshape — confirm
    # every record really is 34x34x3.
    image = tf.reshape(image,[34,34,3])
    label = tf.cast(features['label'], tf.int32)
    height = tf.cast(features['height'], tf.int32)
    width = tf.cast(features['width'], tf.int32)
    depth = tf.cast(features['depth'], tf.int32)
    return image, label, height, width, depth
def data_input(data_dir, batch_size):
    """Build a shuffled (image, label) batch pipeline from a tfrecords file.

    data_dir   : path to a single .tfrecords file (wrapped in a producer queue).
    batch_size : number of examples per batch.
    Returns (images_batch, labels_batch) tensors.
    """
    filename_queue = tf.train.string_input_producer([data_dir], num_epochs = None)
    image, label, height, width, depth = read_and_decode(filename_queue)
    # shuffle_batch buffers up to `capacity` examples and starts drawing
    # batches once at least `min_after_dequeue` are queued.
    images_batch, labels_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size = batch_size,
        capacity = 2000,
        min_after_dequeue = 80
    )
    return images_batch, labels_batch
#filename_queue = tf.train.string_input_producer(['/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/vng_dataset.tfrecords'], num_epochs = None)
#image, label, height,_,depth = read_and_decode(filename_queue)
img_batch, lb_batch = data_input('/home/cpu11757/workspace/Nudity_Detection/src/model/datasets/vng_dataset.tfrecords',500)
init_op = tf.initialize_all_variables()
fig = plt.figure()
with tf.Session() as session:
session.run(init_op)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord)
images, labels = session.run([img_batch, lb_batch])
coord.request_stop()
import matplotlib.pyplot as plt
fig = plt.figure()
plt.imshow(images[1])
print labels[0]
plt.show()
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Example shuffle dataset
|
import tensorflow as tf
f = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"]
l = ["l1", "l2", "l3", "l4", "l5", "l6", "l7", "l8"]
fv = tf.constant(f)
lv = tf.constant(l)
rsq = tf.RandomShuffleQueue(10, 0, [tf.string, tf.string], shapes=[[],[]])
do_enqueues = rsq.enqueue_many([fv, lv])
gotf, gotl = rsq.dequeue()
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess,coord = coord)
sess.run(do_enqueues)
for i in xrange(2):
one_f, one_l = sess.run([gotf, gotl])
print "F: ", one_f, "L: ", one_l
coord.request_stop()
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Example cPickle
|
import cPickle as pickle
dict1 = {'name':[],'id':[]}
dict2 = {'local':[], 'paza':[]}
#with open('test.p', 'wb') as fp:
# pickle.dump(dict1,fp)
# pickle.dump(dict2,fp)
with open('test.p', 'rb') as fp:
d1 = pickle.load(fp)
d2 = pickle.load(fp)
print len(d1)
print len(d2)
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Example reshape
|
import tensorflow as tf
import numpy as np
a = tf.constant(np.array([[.1]]))
init = tf.initialize_all_variables()
with tf.Session() as session:
session.run(init)
b = session.run(tf.nn.softmax(a))
c = session.run(tf.nn.softmax_cross_entropy_with_logits([0.6, 0.4],[0,1]))
#print b
#print c
label = np.array([[0], [1], [1]])
idx = np.arange(3) * 2
print ('IDX')
print idx
labels_one_hot = np.zeros((3,2))
print ('labels_one_hot')
print labels_one_hot
labels_one_hot.flat[idx + label.ravel()] = 1
print ('IDX + label.ravel()')
print idx + label.ravel()
import tensorflow as tf
import matplotlib.pyplot as plt
from Dataset.data import preprocess_image
import numpy as np
filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(
'/home/taivu/workspace/NudityDetection/Dataset/train/normal/*.jpg'))
img_reader = tf.WholeFileReader()
_, img_file = img_reader.read(filename_queue)
image = tf.image.decode_jpeg(img_file, 3)
image = preprocess_image(image, 34, 34)
images = tf.train.batch([image],
batch_size = 10,
capacity = 50,
name = 'input')
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([images])
result_img = np.array(result_img)
coord.request_stop()
coord.join(threads)
fig = plt.figure()
plt.imshow(result_img[0][1])
plt.show()
import tensorflow as tf
import numpy as np
from execute_model import evaluate
from Dataset.data import data_input
import matplotlib.pyplot as plt
dt, _ = data_input('/home/taivu/workspace/NudityDetection/Dataset/vng_dataset_validation.tfrecords', 10, False)
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([dt])
coord.request_stop()
coord.join(threads)
#fig = plt.figure()
result_img = np.array(result_img)
print result_img.shape
print result_img.dtype
#plt.show()
import tensorflow as tf
import numpy as np
from execute_model import evaluate
from Dataset.data import data_input
import matplotlib.pyplot as plt
dt = data_input('/home/taivu/workspace/NudityDetection/Dataset/vng_dataset_validation.tfrecords', 10, False, False)
coord = tf.train.Coordinator()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
threads = tf.train.start_queue_runners(coord=coord)
result_img = sess.run([dt])
coord.request_stop()
coord.join(threads)
#fig = plt.figure()
result_img = np.array(result_img)
print result_img.shape
print result_img.dtype
#plt.show()
import tensorflow as tf
import os
import glob
from Dataset.data import preprocess_image
import matplotlib.pyplot as plt
data_dir = '/home/taivu/workspace/AddPic'
filenames = []
for pathAndFilename in glob.iglob(os.path.join(data_dir, '*.jpg')):
filenames.append(pathAndFilename)
filename_queue = tf.train.string_input_producer(filenames, shuffle = None)
filename = filename_queue.dequeue()
# img_reader = tf.WholeFileReader()
img_file = tf.read_file(filename)
#_, img_file = img_reader.read(filename)
img = tf.image.decode_jpeg(img_file, 3)
img = preprocess_image(img, 34, 34)
filename_batch, img_batch = tf.train.batch([filename, img], batch_size = 3, capacity=200, name = 'input')
init = tf.global_variables_initializer()
coord =tf.train.Coordinator()
with tf.Session() as sess:
sess.run(init)
tf.train.start_queue_runners(sess, coord)
ls_img, ls_nf = sess.run([img_batch, filename_batch])
#fig = plt.figure()
print ls_nf
for i in range(3):
a = fig.add_subplot(1,3, i)
a.set_title('%d'%i)
plt.imshow(ls_img[i])
plt.show()
coord.request_stop()
print ls_nf[0]
import tensorflow as tf
import numpy as np
a = [[1,2,3]]
b = [[4,5,6]]
np.column_stack((a,b))
import math
print int(math.ceil(float(5)/3))
|
VNG_MODEL_EXPERIMENT.ipynb
|
taiducvu/NudityDetection
|
apache-2.0
|
Character counting and entropy
Write a function char_probs that takes a string and computes the probabilities of each character in the string:
First do a character count and store the result in a dictionary.
Then divide each character counts by the total number of character to compute the normalized probabilties.
Return the dictionary of characters (keys) and probabilities (values).
|
def char_probs(s):
    """Find the probabilities of the unique characters in the string s.

    Parameters
    ----------
    s : str
        A string of characters.

    Returns
    -------
    probs : dict
        A dictionary whose keys are the unique characters in s and whose
        values are the probabilities of those characters.
    """
    from collections import Counter
    total = len(s)
    # One O(len(s)) counting pass; the original called s.count(n) once per
    # character, rescanning the whole string each time (quadratic work).
    # An empty string yields an empty dict, as before (no division occurs).
    return {char: count / total for char, count in Counter(s).items()}
test1 = char_probs('aaaa')
assert np.allclose(test1['a'], 1.0)
test2 = char_probs('aabb')
assert np.allclose(test2['a'], 0.5)
assert np.allclose(test2['b'], 0.5)
test3 = char_probs('abcd')
assert np.allclose(test3['a'], 0.25)
assert np.allclose(test3['b'], 0.25)
assert np.allclose(test3['c'], 0.25)
assert np.allclose(test3['d'], 0.25)
|
midterm/AlgorithmsEx03.ipynb
|
edwardd1/phys202-2015-work
|
mit
|
The entropy is a quantitative measure of the disorder of a probability distribution. It is used extensively in Physics, Statistics, Machine Learning, Computer Science and Information Science. Given a set of probabilities $P_i$, the entropy is defined as:
$$H = - \Sigma_i P_i \log_2(P_i)$$
In this expression $\log_2$ is the base 2 log (np.log2), which is commonly used in information science. In Physics the natural log is often used in the definition of entropy.
Write a function entropy that computes the entropy of a probability distribution. The probability distribution will be passed as a Python dict: the values in the dict will be the probabilities.
To compute the entropy, you should:
First convert the values (probabilities) of the dict to a Numpy array of probabilities.
Then use other Numpy functions (np.log2, etc.) to compute the entropy.
Don't use any for or while loops in your code.
|
def entropy(d):
    """Compute the Shannon entropy (base 2) of a dict d whose values are
    probabilities: H = -sum_i P_i * log2(P_i).
    """
    # Fixes: removed a leftover docstring line copy-pasted from a different
    # exercise ("Return a list of 2-tuples..."), and removed the dead
    # sort (summation order does not affect the result). The explicit loop
    # is replaced with vectorized numpy, as the exercise itself requires.
    p = np.array(list(d.values()), dtype=float)
    return float(-np.sum(p * np.log2(p)))
entropy({'a': 0.5, 'b': 0.5})
assert np.allclose(entropy({'a': 0.5, 'b': 0.5}), 1.0)
assert np.allclose(entropy({'a': 1.0}), 0.0)
|
midterm/AlgorithmsEx03.ipynb
|
edwardd1/phys202-2015-work
|
mit
|
Use IPython's interact function to create a user interface that allows you to type a string into a text box and see the entropy of the character probabilities of the string.
|
def z(x):
    """Print and return the entropy of the character distribution of string x."""
    # Compute once; the original evaluated entropy(char_probs(x)) twice
    # (once for the print, once for the return).
    result = entropy(char_probs(x))
    print(result)
    return result
interact(z, x='string');
assert True # use this for grading the pi digits histogram
|
midterm/AlgorithmsEx03.ipynb
|
edwardd1/phys202-2015-work
|
mit
|
Print the variable a in all uppercase
Print the variable a with every other letter in uppercase
Print the variable a in reverse, i.e. god yzal ...
Print the variable a with the words reversed, i.e. ehT kciuq ...
Print the variable b in scientific notation with 4 decimal places
|
people = [{'name': 'Charlie', 'age': 35},
{'name': 'Alice', 'age': 30},
{'name': 'Eve', 'age': 20},
{'name': 'Gail', 'age': 30},
{'name': 'Dennis', 'age': 25},
{'name': 'Bob', 'age': 35},
{'name': 'Fred', 'age': 25},]
|
Wk01-Overview.ipynb
|
streety/biof509
|
mit
|
Print the items in people as comma-separated values
Sort people so that they are ordered by age, and print
Sort people so that they are ordered by age first, and then their names, i.e. Bob and Charlie should be next to each other due to their ages with Bob first due to his name.
|
coords = [(0,0), (10,5), (10,10), (5,10), (3,3), (3,7), (12,3), (10,11)]
|
Wk01-Overview.ipynb
|
streety/biof509
|
mit
|
Write a function that returns the first n prime numbers
Given a list of coordinates calculate the distance covered travelling between all the points in order given using the Euclidean distance
Given a list of coordinates arrange them in such a way that the distance traveled is minimized (the itertools module may be useful).
|
np.random.seed(0)
a = np.random.randint(0, 100, size=(10,20))
|
Wk01-Overview.ipynb
|
streety/biof509
|
mit
|
VXLAN and EVPN
This category of questions allows you to query aspects of VXLAN and EVPN
configuration and behavior.
VXLAN VNI Properties
VXLAN Edges
L3 EVPN VNIs
|
bf.set_network('generate_questions')
bf.set_snapshot('aristaevpn')
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
VXLAN VNI Properties
Returns configuration settings of VXLANs.
Lists VNI-level network segment settings configured for VXLANs.
Inputs
Name | Description | Type | Optional | Default Value
--- | --- | --- | --- | ---
nodes | Include nodes matching this specifier. | NodeSpec | True |
properties | Include properties matching this specifier. | VxlanVniPropertySpec | True |
Invocation
|
result = bf.q.vxlanVniProperties().answer().frame()
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
Return Value
Name | Description | Type
--- | --- | ---
Node | Node | str
VRF | VRF | str
VNI | VXLAN Segment ID | int
Local_VTEP_IP | IPv4 address of the local VTEP | str
Multicast_Group | IPv4 address of the multicast group | str
VLAN | VLAN number for the VNI | int
VTEP_Flood_List | All IPv4 addresses in the VTEP flood list | List of str
VXLAN_Port | Destination port number for the VXLAN tunnel | int
Print the first 5 rows of the returned Dataframe
|
result.head(5)
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
VXLAN Edges
Returns VXLAN edges.
Lists all VXLAN edges in the network.
Inputs
Name | Description | Type | Optional | Default Value
--- | --- | --- | --- | ---
nodes | Include edges whose first node matches this name or regex. | NodeSpec | True | .
remoteNodes | Include edges whose second node matches this name or regex. | NodeSpec | True | .
Invocation
|
result = bf.q.vxlanEdges().answer().frame()
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
Return Value
Name | Description | Type
--- | --- | ---
VNI | VNI of the VXLAN tunnel edge | int
Node | Node from which the edge originates | str
Remote_Node | Node at which the edge terminates | str
VTEP_Address | VTEP IP of node from which the edge originates | str
Remote_VTEP_Address | VTEP IP of node at which the edge terminates | str
VLAN | VLAN associated with VNI on node from which the edge originates | int
Remote_VLAN | VLAN associated with VNI on node at which the edge terminates | int
UDP_Port | UDP port of the VXLAN tunnel transport | int
Multicast_Group | Multicast group of the VXLAN tunnel transport | str
Print the first 5 rows of the returned Dataframe
|
result.head(5)
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
L3 EVPN VNIs
Returns configuration settings of VXLANs.
Lists VNI-level network segment settings configured for VXLANs.
Inputs
Name | Description | Type | Optional | Default Value
--- | --- | --- | --- | ---
nodes | Include nodes matching this specifier. | NodeSpec | True |
Invocation
|
result = bf.q.evpnL3VniProperties().answer().frame()
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
Return Value
Name | Description | Type
--- | --- | ---
Node | Node | str
VRF | VRF | str
VNI | VXLAN Segment ID | int
Route_Distinguisher | Route distinguisher | str
Import_Route_Target | Import route target | str
Export_Route_Target | Export route target | str
Print the first 5 rows of the returned Dataframe
|
result.head(5)
|
docs/source/notebooks/vxlan_evpn.ipynb
|
batfish/pybatfish
|
apache-2.0
|
Time frequency with Stockwell transform in sensor space
This script shows how to compute induced power and intertrial coherence
using the Stockwell transform, a.k.a. S-Transform.
|
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.time_frequency import tfr_stockwell
from mne.datasets import somato
print(__doc__)
|
0.14/_downloads/plot_stockwell.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Set parameters
|
data_path = somato.data_path()
raw_fname = data_path + '/MEG/somato/sef_raw_sss.fif'
event_id, tmin, tmax = 1, -1., 3.
# Setup for reading the raw data
raw = io.Raw(raw_fname)
baseline = (None, 0)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
|
0.14/_downloads/plot_stockwell.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Calculate power and intertrial coherence
|
epochs = epochs.pick_channels([epochs.ch_names[82]]) # reduce computation
power, itc = tfr_stockwell(epochs, fmin=6., fmax=30., decim=4, n_jobs=1,
width=.3, return_itc=True)
power.plot([0], baseline=(-0.5, 0), mode=None, title='S-transform (power)')
itc.plot([0], baseline=None, mode=None, title='S-transform (ITC)')
|
0.14/_downloads/plot_stockwell.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
The effective relative permittivity of the geometry shows a dispersion effect at low frequency which can be modelled by a wideband Debye model such as Djordjevic/Svensson implementation of skrf microstripline media. The value then increase slowly with frequency which correspond roughly to the Kirschning and Jansen dispersion model.
The Insertion Loss seems proportional to frequency, which indicate a predominance of the dielectric losses. Conductor losses are related to the square-root of frequency. Radiation losses are neglected.
Fit microstripline model to the computed parameters by optimization
Effective relative permittivity
Microstrip media model with the physical dimensions of the measured microstriplines is fitted to the computed $\epsilon_{r,eff}$ by optimization of $\epsilon_r$ and tand of the substrate at 1GHz. The dispersion model used to account for frequency variation of the parameters are Djordjevic/Svensson and Kirschning and Jansen.
|
from skrf.media import MLine

# Physical dimensions of the measured microstripline (SI units).
W = 3.00e-3   # trace width, m
H = 1.51e-3   # substrate height, m
T = 50e-6     # conductor thickness, m
L = 0.1       # line length, m
# Starting guesses for the substrate parameters at the reference frequency.
Er0 = 4.5           # initial relative permittivity
tand0 = 0.02        # initial loss tangent
f_epr_tand = 1e9    # frequency at which ep_r and tand are specified, Hz
x0 = [Er0, tand0]   # optimizer starting point
def model(x, freq, Er_eff, L, W, H, T, f_epr_tand, Loss_mea):
    """Objective function for the permittivity/loss-tangent fit.

    x = [ep_r, tand] at f_epr_tand.  Builds an MLine media model with
    Djordjevic/Svensson dielectric dispersion and Kirschning/Jansen
    microstrip dispersion, then returns the summed squared error between
    the model's effective permittivity and the measured Er_eff, plus a
    0.01-weighted penalty on the insertion-loss mismatch.
    """
    ep_r = x[0]
    tand = x[1]
    m = MLine(frequency=freq, z0=50, w=W, h=H, t=T,
        ep_r=ep_r, mu_r=1, rho=1.712e-8, tand=tand, rough=0.15e-6,
        f_low=1e3, f_high=1e12, f_epr_tand=f_epr_tand,
        diel='djordjevicsvensson', disp='kirschningjansen')
    DUT = m.line(L, 'm', embed=True, z0=m.Z0_f)
    # Insertion loss of the modelled line in dB (S21 magnitude).
    # NOTE(review): log10/absolute/real/sum appear to come from a numpy
    # star-import in an earlier cell — confirm.
    Loss_mod = 20 * log10(absolute(DUT.s[:,1,0]))
    return sum((real(m.ep_reff_f) - Er_eff)**2) + 0.01*sum((Loss_mod - Loss_mea)**2)
# Bounded optimization of [ep_r, tand] against the measured data.
res = minimize(model, x0, args=(MSL100.frequency, Er_eff, L, W, H, T, f_epr_tand, Loss_mea),
               bounds=[(4.2, 4.7), (0.001, 0.1)])
Er = res.x[0]
tand = res.x[1]
print('Er={:.3f}, tand={:.4f} at {:.1f} GHz.'.format(Er, tand, f_epr_tand * 1e-9))
|
doc/source/examples/networktheory/Correlating microstripline model to measurement.ipynb
|
temmeand/scikit-rf
|
bsd-3-clause
|
As a sanity check, the model data are compared with the computed parameters
|
# Sanity check: rebuild the media model with the fitted Er/tand and
# compare its effective permittivity and insertion loss against the
# measured values.
m = MLine(frequency=MSL100.frequency, z0=50, w=W, h=H, t=T,
        ep_r=Er, mu_r=1, rho=1.712e-8, tand=tand, rough=0.15e-6,
        f_low=1e3, f_high=1e12, f_epr_tand=f_epr_tand,
        diel='djordjevicsvensson', disp='kirschningjansen')
DUT = m.line(L, 'm', embed=True, z0=m.Z0_f)
DUT.name = 'DUT'
Loss_mod = 20 * log10(absolute(DUT.s[:,1,0]))
plt.figure()
plt.suptitle('Measurement vs Model')
# Top panel: effective relative permittivity.
plt.subplot(2,1,1)
plt.plot(f * 1e-9, Er_eff, label='Measured')
plt.plot(f * 1e-9, real(m.ep_reff_f), label='Model')
plt.ylabel('$\epsilon_{r,eff}$')
plt.legend()
# Bottom panel: insertion loss.
plt.subplot(2,1,2)
plt.plot(f * 1e-9, Loss_mea, label='Measured')
plt.plot(f * 1e-9, Loss_mod, label='Model')
plt.xlabel('Frequency (GHz)')
plt.ylabel('Insertion Loss (dB)')
plt.legend()
plt.show()
|
doc/source/examples/networktheory/Correlating microstripline model to measurement.ipynb
|
temmeand/scikit-rf
|
bsd-3-clause
|
The phase of the model shows a good agreement, while the Insertion Loss seems to have a reasonable agreement and is small whatsoever.
Connector impedance adjustment by time-domain reflectometry
Time-domain step responses of measurement and model are used to adjust the connector model characteristic impedance.
The plots show the connector having an inductive behaviour (positive peak) and the microstripline being a bit too capacitive (negative plateau).
Characteristic impedance of the connector is tuned by trial-and-error until a reasonable agreement is achieved. Optimization could have been used instead.
|
# Cascade the connector models around the fitted line and compare
# time-domain step responses (TDR) from both ports against measurement.
# NOTE(review): `left` and `right` (connector networks) are defined in a
# cell not shown here.
mod = left ** DUT ** right
# Extrapolate both networks to DC so a step response can be computed.
MSL100_dc = MSL100.extrapolate_to_dc(kind='linear')
DUT_dc = mod.extrapolate_to_dc(kind='linear')
plt.figure()
plt.suptitle('Left-right and right-left TDR')
plt.subplot(2,1,1)
MSL100_dc.s11.plot_s_time_step(pad=2000, window='hamming', label='Measured L-R')
DUT_dc.s11.plot_s_time_step(pad=2000, window='hamming', label='Model L-R')
plt.xlim(-2, 4)
plt.subplot(2,1,2)
MSL100_dc.s22.plot_s_time_step(pad=2000, window='hamming', label='Measured R-L')
DUT_dc.s22.plot_s_time_step(pad=2000, window='hamming', label='Model R-L')
plt.xlim(-2, 4)
plt.tight_layout()
plt.show()
|
doc/source/examples/networktheory/Correlating microstripline model to measurement.ipynb
|
temmeand/scikit-rf
|
bsd-3-clause
|
Data Wrangling
data extraction
|
# Load the raw Google Takeout location history and normalise it into a
# tidy DataFrame: decimal-degree coordinates, POSIX-second timestamps
# and Python datetimes.
with open('LocationHistory.json', 'r') as fh:
    raw = json.load(fh)

# use location_data as an abbreviation for location data
location_data = pd.DataFrame(raw['locations'])
del raw  # free up some memory

# Convert to typical units: coordinates arrive as integers scaled by 1e7,
# timestamps as millisecond strings.
location_data['latitudeE7'] = location_data['latitudeE7'] / 1e7
location_data['longitudeE7'] = location_data['longitudeE7'] / 1e7
location_data['timestampMs'] = location_data['timestampMs'].map(lambda ms: float(ms) / 1000)  # to seconds
location_data['datetime'] = location_data.timestampMs.map(datetime.datetime.fromtimestamp)

# Rename fields to reflect the conversions we just did
location_data.rename(columns={'latitudeE7': 'latitude',
                              'longitudeE7': 'longitude',
                              'timestampMs': 'timestamp'}, inplace=True)

# Ignore locations with accuracy estimates over 1000m
location_data = location_data[location_data.accuracy < 1000]
location_data.reset_index(drop=True, inplace=True)
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
Explore Data
view data and datatypes
|
# Quick sanity checks: first rows, column dtypes, summary statistics.
# NOTE: in a notebook only the value of the *last* expression is
# rendered, so only describe() actually displays here.
location_data.head()
location_data.dtypes
location_data.describe()
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
data manipulation
Degrees and Radians
We're going to convert the degree-based geo data to radians to calculate distance traveled. I'm going to paraphrase an explanation (source below) about why the degree-to-radians conversion is necessary
Degrees are arbitrary because they’re based on the sun and backwards because they are from the observer’s perspective.
Radians are in terms of the mover allowing equations to “click into place”. Converting rotational to linear speed is easy, and ideas like sin(x)/x make sense.
Consult this post for more info about degrees and radians in distance calculation.
convert degrees to radians
|
degrees_to_radians = np.pi/180.0
# phi = colatitude (angle from the north pole), theta = longitude, both
# in radians — the standard spherical coordinates for great-circle math.
location_data['phi'] = (90.0 - location_data.latitude) * degrees_to_radians
location_data['theta'] = location_data.longitude * degrees_to_radians
# Compute distance between two GPS points on a unit sphere
# (spherical law of cosines); shift(-1) pairs each fix with the *next*
# row, so the final row's distance is NaN.
location_data['distance'] = np.arccos(
    np.sin(location_data.phi)*np.sin(location_data.phi.shift(-1)) * np.cos(location_data.theta - location_data.theta.shift(-1)) +
    np.cos(location_data.phi)*np.cos(location_data.phi.shift(-1))) * 6378.100 # radius of earth in km
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
calculate speed during trips (in km/hr)
|
# Speed between consecutive fixes: distance (km) over the time delta
# (seconds), *3600 to get km/hr.
# NOTE(review): timestamp - timestamp.shift(-1) is negative if rows are
# sorted oldest-first; this assumes the export is newest-first — confirm
# against the data ordering.
location_data['speed'] = location_data.distance/(location_data.timestamp - location_data.timestamp.shift(-1))*3600 #km/hr
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
Flight algorithm
filter flights
remove flights using conservative selection criteria
|
# Conservative first pass: keep only rows fast (>40 km/h) and long
# (>80 km) enough to plausibly be flight segments.
# NOTE(review): flight_data and distance_on_unit_sphere are defined in
# earlier cells not shown here.
flights = flight_data[(flight_data.speed > 40) & (flight_data.distance > 80)].reset_index()
# Combine instances of flight that are directly adjacent
# Find the indices of flights that are directly adjacent
_f = flights[flights['index'].diff() == 1]
adjacent_flight_groups = np.split(_f, (_f['index'].diff() > 1).nonzero()[0])
# Now iterate through the groups of adjacent flights and merge their data into
# one flight entry
for flight_group in adjacent_flight_groups:
    idx = flight_group.index[0] - 1 #the index of flight termination
    # The merged flight keeps the *earliest* start (last row of the group,
    # since the data is ordered newest-first) and the terminating row's end.
    flights.loc[idx, ['start_lat', 'start_lon', 'start_datetime']] = [flight_group.iloc[-1].start_lat,
                                                                      flight_group.iloc[-1].start_lon,
                                                                      flight_group.iloc[-1].start_datetime]
    # Recompute total distance of flight
    flights.loc[idx, 'distance'] = distance_on_unit_sphere(flights.loc[idx].start_lat,
                                                           flights.loc[idx].start_lon,
                                                           flights.loc[idx].end_lat,
                                                           flights.loc[idx].end_lon)*6378.1
# Now remove the "flight" entries we don't need anymore.
flights = flights.drop(_f.index).reset_index(drop=True)
# Finally, we can be confident that we've removed instances of flights broken up by
# GPS data points during flight. We can now be more liberal in our constraints for what
# constitutes flight. Let's remove any instances below 200km as a final measure.
flights = flights[flights.distance > 200].reset_index(drop=True)
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
This algorithm worked 100% of the time for me - no false positives or negatives. But the adjacency-criteria of the algorithm is fairly brittle. The core of it centers around the assumption that inter-flight GPS data will be directly adjacent to one another. That's why the initial screening on line 1 of the previous cell had to be so liberal.
Now, the flights DataFrame contains only instances of true flights which facilitates plotting with Matplotlib's Basemap. If we plot on a flat projection like tmerc, the drawgreatcircle function will produce a true path arc just like we see in the in-flight magazines.
Visualize Flights
|
fig = plt.figure(figsize=(18,12))
# Plotting across the international dateline is tough. One option is to break up flights
# by hemisphere. Otherwise, you'd need to plot using a different projection like 'robin'
# and potentially center on the Int'l Dateline (lon_0=-180)
# flights = flights[(flights.start_lon < 0) & (flights.end_lon < 0)]# Western Hemisphere Flights
# flights = flights[(flights.start_lon > 0) & (flights.end_lon > 0)] # Eastern Hemisphere Flights
# Fractional padding added around the bounding box of all flight endpoints.
xbuf = 0.2
ybuf = 0.35
min_lat = np.min([flights.end_lat.min(), flights.start_lat.min()])
min_lon = np.min([flights.end_lon.min(), flights.start_lon.min()])
max_lat = np.max([flights.end_lat.max(), flights.start_lat.max()])
max_lon = np.max([flights.end_lon.max(), flights.start_lon.max()])
width = max_lon - min_lon
height = max_lat - min_lat
# Mercator map sized to the padded bounding box, centred on its midpoint.
m = Basemap(llcrnrlon=min_lon - width* xbuf,
            llcrnrlat=min_lat - height*ybuf,
            urcrnrlon=max_lon + width* xbuf,
            urcrnrlat=max_lat + height*ybuf,
            projection='merc',
            resolution='l',
            lat_0=min_lat + height/2,
            lon_0=min_lon + width/2,)
m.drawmapboundary(fill_color='#EBF4FA')
m.drawcoastlines()
m.drawstates()
m.drawcountries()
m.fillcontinents()
current_date = time.strftime("printed: %a, %d %b %Y", time.localtime())
# One great-circle arc per flight; green marker = origin, red = destination.
for idx, f in flights.iterrows():
    m.drawgreatcircle(f.start_lon, f.start_lat, f.end_lon, f.end_lat, linewidth=3, alpha=0.4, color='b' )
    m.plot(*m(f.start_lon, f.start_lat), color='g', alpha=0.8, marker='o')
    m.plot(*m(f.end_lon, f.end_lat), color='r', alpha=0.5, marker='o' )
fig.text(0.125, 0.18, "Data collected from 2013-2017 on Android \nPlotted using Python, Basemap \n%s" % (current_date),
        ha='left', color='#555555', style='italic')
fig.text(0.125, 0.15, "kivanpolimis.com", color='#555555', fontsize=16, ha='left')
plt.savefig('flights.png', dpi=150, frameon=False, transparent=False, bbox_inches='tight', pad_inches=0.2)
Image(filename='flights.png')
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
You can draw entertaining conclusions from the flight visualization. For instance, you can see some popular layover locations, all those lines in/out of Seattle, plus a recent trip to Germany. And Basemap has made it so simple for us - no Shapefiles to import because all map information is included in the Basemap module.
Calculate all the miles you have traveled in the years observed with a single line of code:
|
flights_in_miles = round(flights.distance.sum()*.621371) # distance column is in km, convert to miles
flights_in_miles
# NOTE(review): earliest_obs and latest_obs are defined in a cell not
# shown here.
print("{0} miles traveled from {1} to {2}".format(flights_in_miles, earliest_obs, latest_obs))
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
Conclusion
You've now got the code to go ahead and reproduce these maps.
I'm working on creating functions to automate these visualizations
Potential future directions
Figure out where you usually go on the weekends
Calculate your fastest commute route
measure the amount of time you spend driving vs. walking.
Download this notebook, or see a static view here
|
import time

# Stamp the notebook with the time at which it was last executed.
timestamp = time.strftime("%a, %d %b %Y %H:%M", time.localtime())
print("last updated: " + timestamp)
|
output/downloads/notebooks/map_of_flights.ipynb
|
kpolimis/kpolimis.github.io-src
|
gpl-3.0
|
Demo 2: Plotting a candlestick chart for any stock in 11 lines of code
|
# Choose a start and end date in a slightly different format to before (YYYY/MM/DD)
# Choose a start and end date in a slightly different format to before (YYYY/MM/DD)
start = (2015, 10, 2)
end = (2016, 4,2)
company = "S&P 500"
ticker = "^GSPC"
# Fetch daily open/high/low/close quotes.
# NOTE(review): matplotlib.finance was removed in matplotlib 2.2 and the
# Yahoo quote endpoint it used has been retired — this cell only runs on
# the old library versions it was written for.
quotes = mpf.quotes_historical_yahoo_ohlc(ticker, start, end)
print(quotes[:2])
# We use Matplotlib to generate plots
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(bottom=0.2)
mpf.candlestick_ohlc(ax, quotes, width=0.6, colorup='b', colordown='r')
# Running this block produces an ugly output
# We can try again with some fancier formatting tricks
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(bottom=0.2)
mpf.candlestick_ohlc(ax, quotes, width=0.6, colorup='b', colordown='r')
# Adding some formatting sugar
plt.title("Candlestick Chart for "+company+" ("+ticker+")"+" "+str(start)+" to "+str(end))
plt.grid(True) # Set a title
ax.xaxis_date() # dates on the x-axis
ax.autoscale_view() #Scale the image
plt.setp(plt.gca().get_xticklabels(), rotation=30) # Format labels
|
giag.ipynb
|
trsherborne/learn-python
|
mit
|
Complete Code for Demo 1
|
# -*- coding: utf-8 -*-
%matplotlib inline
import numpy as np
import pandas as pd
from pandas_datareader import data as web
# Choose a stock
ticker = 'GOOG'
# Choose a start date in US format MM/DD/YYYY
stock_start = '10/2/2015'
# Choose an end date in US format MM/DD/YYYY
stock_end = '10/2/2016'
# Retrieve the Data from Google's Finance Database
stock = web.DataReader(ticker,data_source='google',
start = stock_start,end=stock_end)
# Generate the logarithm of the ratio between each days closing price
stock['Log_Ret'] = np.log(stock['Close']/stock['Close'].shift(1))
# Generate the rolling standard deviation across the time series data
stock['Volatility'] = pd.rolling_std(stock['Log_Ret'],window=252)*np.sqrt(252)
google[['Close','Volatility']].plot(subplots=True,color='b',figsize=(8,6))
|
giag.ipynb
|
trsherborne/learn-python
|
mit
|
Complete Code for Demo 2
|
# -*- coding: utf-8 -*-
# Complete candlestick-chart demo: fetch OHLC quotes and plot them.
# NOTE(review): matplotlib.finance was removed in matplotlib 2.2 and the
# Yahoo endpoint is retired; this runs only on old library versions.
%matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.finance as mpf
start = (2015, 10, 2)
end = (2016, 4,2)
company = "S&P 500"
ticker = "^GSPC"
quotes = mpf.quotes_historical_yahoo_ohlc(ticker, start, end)
print(quotes[:2])
fig, ax = plt.subplots(figsize=(8, 5))
fig.subplots_adjust(bottom=0.2)
mpf.candlestick_ohlc(ax, quotes, width=0.6, colorup='b', colordown='r')
plt.title("Candlestick Chart for "+company+" ("+ticker+")"+" "+str(start)+" to "+str(end))
plt.grid(True) # Set a title
ax.xaxis_date() # dates on the x-axis
ax.autoscale_view() #Scale the image
plt.setp(plt.gca().get_xticklabels(), rotation=30) # Format labels
|
giag.ipynb
|
trsherborne/learn-python
|
mit
|
Now turn on infos just for OPF module.
|
# Raise the OPF module's logger to INFO so lopf() progress is printed.
pypsa.opf.logger.setLevel(logging.INFO)
out = network.lopf()
|
examples/notebooks/logging-demo.ipynb
|
PyPSA/PyPSA
|
mit
|
Now turn on warnings just for OPF module
|
# Restrict the OPF module's logger to WARNING: only problems are shown.
pypsa.opf.logger.setLevel(logging.WARNING)
out = network.lopf()
|
examples/notebooks/logging-demo.ipynb
|
PyPSA/PyPSA
|
mit
|
Now turn on all messages for the PF module
|
# Enable all messages (DEBUG and up) for the power-flow module.
pypsa.pf.logger.setLevel(logging.DEBUG)
out = network.lpf()
|
examples/notebooks/logging-demo.ipynb
|
PyPSA/PyPSA
|
mit
|
Now turn off all messages for the PF module again
|
# Silence the power-flow module again: only ERROR messages pass through.
pypsa.pf.logger.setLevel(logging.ERROR)
out = network.lpf()
|
examples/notebooks/logging-demo.ipynb
|
PyPSA/PyPSA
|
mit
|
Google
|
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2016, 7, 15)
# NOTE(review): 'google' here is the *data source*; the ticker "F" is
# Ford, not Google — confirm the intended symbol.
google_df = data.DataReader("F", 'google', start, end)
google_df.plot()
|
python-scripts/data_analytics_learn/link_pandas/Ex_Files_Pandas_Data/Exercise Files/02_08/Begin/Remote Data.ipynb
|
adityaka/misc_scripts
|
bsd-3-clause
|
Pentru a calcula radacinile polinomului caracteristic al matricii $A$ si vectorii proprii corespunzatori, apelam functia np.linalg.eig(A) care returneaza array-ul 1D, Lamb, ce contine radacinile polinomului caracteristic si array-ul 2D, V,
care are pe o coloana j coordonatele unui vector propriu corespunzator valorii proprii Lamb[j].
|
# np.linalg.eig returns the characteristic roots (eigenvalues) in Lamb
# and, column-wise, the matching eigenvectors in V: V[:, j] belongs to
# Lamb[j].  (Python 2 print statements.)
Lamb, V=np.linalg.eig(A)
print 'Radacinile polinomului caracteristic sunt\n', Lamb
print'\n iar vectorii proprii corespunzatori: \n', V.round(2)
print 'Vectorul propriu corespunzator valorii', Lamb[3], 'este:\n', V[:,3].round(2)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Matricea data este o matrice binara, deci poate fi interpretata ca matricea de adiacenta a unui graf.
Fiind o matrice nenegativa asociata unui graf conex, i se poate aplica Teorema Perron-Frobenius.
Sa determinam valoarea proprie dominanta, adica valoarea proprie reala, strict pozitiva $\lambda_d$
cu proprietatea ca $|\lambda_j|\leq \lambda_d$, $\forall\: j=\overline{0,4}$ si vectorul propriu corespunzator:
Teoretic ar trebui sa calculam mai intai array-ul valorilor absolute ale elementelor din Lamb si
apoi elementul maxim:
|
# Absolute values of all characteristic roots (Python 2 print).
print np.fabs(Lamb)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Concentrat putem scrie:
|
print np.amax(np.fabs(Lamb))# np.amax(array) returns the maximum element of a 1D array
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Deci valoarea proprie dominanta este:
|
lambD=np.amax(np.fabs(Lamb))# the computed dominant eigenvalue
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
iar pozitia ei in array-ul Lamb este returnata de np.argmax(np.fabs(Lamb)):
|
# Index of the dominant eigenvalue inside Lamb.
j=np.argmax(np.fabs(Lamb))
print 'Valoarea proprie dominanta este plasata in pozitia:', j
# the corresponding eigenvector:
x=V[:,j]
print 'Valoarea proprie dominanta este:', lambD, \
'\n\n iar vectorul propriu dominant este\n', V[:,j].round(2)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Observam ca acest vector are toate coordonatele negative, deci -x este vectorul propriu cu
toate coordonatele pozitive, conform teoremei
Perron-Frobenius.
Vectorul $x$ normalizat este $r=x/\sum_{i=0}^{n-1}x[i]$ si reprezinta vectorul rating, avand drept coordonate coeficientii de popularitate/importanta a nodurilor retelei. Adica $r[j]$ este coeficientul de popularitate al nodului $j$ din retea:
|
# Normalise the dominant eigenvector so its entries sum to 1; r[j] is
# then the popularity coefficient of node j (Perron-Frobenius rating).
r=x/np.sum(x)
print 'Coeficientii de popularitate a nodurilor retelei de matricede conectivitate'+\
'$A$ sunt\n', r.round(2)# + between two strings concatenates them
# \ continues the statement on the next line
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Sa realizam acum ranking-ul nodurilor, sortand elementele vectorului $r$, descrescator si retinand
indicii ce dau pozitia initiala in r a elementelor sortate.
|
ranking=np.argsort(r)[::-1] # np.argsort sorts the 1D rating array ascending
                            # and returns the indices of the sorted elements;
                            # to get the descending order we reverse the
                            # returned array with the usual reversing
                            # notation, [::-1]
print ranking
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Deci nodul retelei cu cea mai mare popularitate este nodul 4, urmat de 0, 3,2,1.
Sa aplicam acum aceasta procedura pentru retele neorientate si apoi retele orientate, folosind
pachetul networkx:
Definirea unui graf in networkx
Importam modulul networkx astfel:
|
import networkx as nx
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Linia urmatoare defineste un graf vid, G, neorientat (G este un obiect din clasa Graph):
|
# Create an empty undirected graph (a networkx Graph object).
G=nx.Graph()
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
1. Constructia grafului pornind de la lista nodurilor si lista arcelor
Se defineste lista nodurilor, V, si lista arcelor, E, si apoi se apeleaza pentru graful G metoda
add_nodes_from(V), respectiv add_edges_from(E).
Se pot adauga noduri/arce individuale
apeland metoda add_node()/add_edge():
|
# Populate the (initially empty) undirected graph G: nine nodes
# labelled 0..8 plus an explicit edge list, and one extra edge (6, 8)
# added individually.
n = 9
V = list(range(n))
G.add_nodes_from(V)
E = [(0, 1), (0, 2), (1, 3), (1, 4), (1, 7), (2, 5), (2, 8),
     (3, 4), (3, 5), (4, 6), (4, 7), (4, 8), (5, 7)]
G.add_edges_from(E)
G.add_edge(6, 8)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Dupa ce elementele definitorii au fost setate, urmeaza generarea/trasarea grafului, folosind functia nx.draw care se bazeaza pe functii din biblioteca grafica matplotlib.
|
%matplotlib inline
# the "%matplotlib inline" command embeds generated figures inline in the notebook
import matplotlib.pyplot as plt # import the plotting library
nx.draw(G, node_color='c',edge_color='b', with_labels=True)# by default the graph is drawn
#without showing the node labels;
# with_labels=True makes them visible
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Pozitionarea relativa a nodurilor este realizata conform algoritmului numit spring layout algorithm.
Exista mai multe modalitati de amplasare a nodurilor in spatiu, dar aceasta este cea mai convenabila pentru prezentarea noastra.
Extragem matricea de adiacenta a grafului:
|
A=nx.adjacency_matrix(G)# A is an object of a special networkx class
#A.todense() yields the adjacency matrix as an object of a numpy class,
#but NOT the `numpy.array` class
print A.todense()
print type(A.todense())
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Pentru a lucra doar cu numpy.array, convertim A.todense() (se pot determina valorile si vectorii proprii
ai lui A.todense(), dar e putin diferit de modul de lucru cu numpy.array):
|
A=np.array(A.todense())# think of this line as a cast to numpy.array
print type(A)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Sa determinam coeficientul de popularitate a nodurilor acestei retele. Cum graful asociat este neorientat
matricea de adiacenta este simetrica si deci are sigur toate radacinile polinomului caracteristic, reale (Cursul 12).
|
# The graph is undirected, so A is symmetric and all eigenvalues are
# real; the dominant one is simply the maximum.
Lamb,V=np.linalg.eig(A)
lamb=np.amax(Lamb)# roots being real, the dominant value is the maximum eigenvalue
j=np.argmax(Lamb)# position of the maximum value within Lamb
print j
x=V[:,j]
print 'Valoarea proprie dominanta este:', lamb
print 'Vectorul propriu corespunzator:\n', x.round(3)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Sa determinam vectorul rating asociat nodurilor retelei:
|
s=np.sum(x)
rating=x/s # the dominant eigenvector, normalised
print 'Vectorul rating al nodurilor\n', rating.round(3)
# Node indices sorted by descending rating.
ranking=np.argsort(rating)[::-1]
print ranking
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Rezulta astfel ca nodul cu cea mai mare popularitate este nodul 4.
Coeficientul de popularitate este:
|
# Popularity coefficient of the highest-ranked node (Python 2 print).
print rating [ranking[0]]
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Fiecarui nod dintr-o retea i se asociaza gradul, ca fiind numarul de noduri cu care este conectat
printr-un arc (drum de lungime 1).
Functia grad=nx.degree(nod) returneaza gradul unui nod, iar grad=nx.degree(G), gradele tuturor
nodurilor retelei. In acest al doilea caz, grad este un dictionar, adica o structura de date in Python
ce consta dintr-o multime ordonata de perechi cheie:valoare, inserate intre acolade:
|
# Small demo of a Python dict (ordered key:value pairs in braces),
# then nx.degree(G), which returns the node->degree mapping as a dict.
dictionar={'grupa1':35, 'grupa2':40, 'grupa3': 43, 'grupa4':45}
print dictionar
print dictionar.keys()
print 'In grupa 2 sunt', dictionar['grupa2'], 'studenti'
grad=nx.degree(G)
print 'Dictionarul gradelor nodurilor:', grad
print 'Gradul nodului 4, ce are ceam mai mare popularitate este:', grad[4]
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Remarcam ca nodul 4 care are cel mai mare coeficient de popularitate are si cel mai mare
grad (este "cel mai conectat" nod din retea).
2. Constructia grafului neorientat pornind de la matricea sa de adiacenta.
Daca se da matricea de adiacenta, $A$, a unui graf atunci graful este creat de functia:
G= nx.from_numpy_matrix(A):
|
# Build an undirected graph directly from its (symmetric, binary)
# adjacency matrix with nx.from_numpy_matrix.
Ad=np.array([[0,1,1,1,0,0,0,1],
[1,0,1,0,1,1,1,0],
[1,1,0,0,0,0,1,1],
[1,0,0,0,1,1,1,1],
[0,1,0,1,0,1,1,0],
[0,1,0,1,1,0,1,0],
[0,1,1,1,1,1,0,1],
[1,0,1,1,0,0,1,0]], float)
Gr=nx.from_numpy_matrix(Ad)
print 'Nodurile grafului sunt:\n', Gr.nodes()
print 'Lista arcelor:\n', Gr.edges()
nx.draw(Gr, node_color='g', with_labels=True, alpha=0.5)
# alpha is the transparency parameter for the node colour
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Popularitatea nodurilor unei retele orientate
Constructia unei retele (graf) orientat se realizeaza la fel ca in cazul celor neorientate,
doar ca obiectul nu mai este declarat de tip Graph, ci DiGraph.
|
# Build a small directed network: same construction as the undirected
# case, except the container is a DiGraph instead of a Graph.
H = nx.DiGraph()
n = 5
Noduri = list(range(n))
Arce = [(0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (4, 1), (4, 3)]
H.add_nodes_from(Noduri)
H.add_edges_from(Arce)
nx.draw(H, node_color='r', with_labels=True, alpha=0.5)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Sa construim o retea orientata din matricea sa adiacenta si sa determinam popularitatea nodurilor:
|
plt.rcParams['figure.figsize'] = 8, 8 # set the figure size
# Directed network built from its adjacency matrix; create_using selects
# the DiGraph class so edge direction is preserved.
W=np.array([[0,1,1,1,0,0,0,0],[0,0,1,0,1,1,1,0],[0,0,0,0,0,0,0,1],[0,0,0,0,1,1,0,0],
[0,0,0,0,0,0,1,0], [0,0,0,0,1,0,1,0],[0,1,1,1,0,0,0,1], [1,0,0,1,0,0,0,0]], float)
GW=nx.from_numpy_matrix(W, create_using=nx.DiGraph())
print 'Nodurile grafului sunt:\n', GW.nodes()
print 'Lista arcelor:\n', GW.edges()
nx.draw(GW, node_color='g', with_labels=True, alpha=0.5)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Conform teoriei din cursul 11, vectorul rating asociat unei retele orientate este vectorul propriu
al valorii proprii dominante a matricii de conectivitate, transpusa:
|
# For a directed network the rating vector is the dominant eigenvector
# of the *transposed* connectivity matrix.
Lamb, V=np.linalg.eig(W.transpose()) # roots of the characteristic polynomial of W^T
print Lamb.round(3)
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Matricea de adiacenta nefiind simetrica, polinomul sau caracteristic poate avea si radacini complex conjugate. Radacinile reale sunt afisate
si ele in forma complexa, $a=a+ 0.j$ (in Python numarul complex $i=\sqrt{-1}$ este notat $j$, ca in electronica).
Determinam acum valoarea proprie dominanta, adica radacina reala, pozitiva, care domina valorile absolute ale celorlalte:
|
# Find the dominant eigenvalue of W^T (the real positive root whose
# modulus dominates the others), then normalise its eigenvector into
# the rating vector and rank the nodes by it.
absLamb=np.abs(Lamb)
j=np.argmax(absLamb)
if not np.isreal(Lamb[j]):# if the maximum-modulus eigenvalue is not real
    raise ValueError("matricea A nu indeplineste conditiile T Perron-Frobenius sau alta cauza")
else:
    lamD=np.real(Lamb[j])# display the real number without the 0*j part
    print 'valoarea proprie dominanta este:', lamD
    print 'valorile absolute ale radacinilor sunt:\n', absLamb.round(3)
x=V[:,j]
s=np.sum(x)
rating=x/s
print 'Vectorul rating:\n', np.real(rating.round(3))# force display of the coordinates without 0.j
ranking=np.argsort(rating)[::-1]
print 'Nodurile in ordinea descrescatoare a popularitatii lor:\n', ranking
print 'Nodul cel mai important este:', ranking[0]
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Proiect: Determinarea popularitatii jucatorilor unei echipe de fotbal la Campionatul Mondial, Brazilia 2014
Sa se determine popularitatea jucatorilor unei echipe de fotbal intr-unul din meciurile jucate la campionatul Mondial de Fotbal, Brazilia 2014.
Reteaua asociata echipei implicata intr-un joc are ca noduri jucatorii (fara rezervele ce nu au intrat in jocul respectiv).
Exista arc orientat de la jucatorul i la jucatorul j, daca in cursul meciului numarul de pase de la i la j este nenul.
Notam cu $W$ matricea ponderare:
$$W_{ij}=\mbox{numarul de pase de la i la j}$$
Evident $W_{ij}=0$, daca jucatorul i nu a avut nicio pasa spre j.
Prin urmare matricea de conectivitate nu este o matrice binara.
Datele pentru acest proiect le descarcati de la FIFA.
La adresa URL http://www.fifa.com/worldcup/statistics/matches/passes.html
dati click pe un meci, de exemplu Germania-Argentina si se deschide pagina:
http://www.fifa.com/worldcup/matches/round=255959/match=300186501/index.html#games
De pe aceasta pagina descarcam fisierul Passing Distribution.pdf
Copiati intr-un fisier PaseNumeEchipa.txt matricea paselor din tabelul cel mai din stanga. Evident, nu includeti ca nod, jucatorii de rezerva, neinclusi in meciul respectiv.
De exemplu, jucatorul nr 17, Per MERTESACKER, din echipa Germaniei se vede ca n-a jucat in meciul cu Argentina.
Apoi o cititi astfel:
W=np.loadtxt('PaseNumeEchipa.txt', dtype=float)
Generati reteaua paselor setand in prealabil o figura de dimensiuni mai mari, ca sa fie vizualizate arcele cat mai bine. Aceasta setare se realizeaza inainte de a desena reteaua prin linia:
plt.rcParams['figure.figsize'] = 10, 10
Cu aceasta setare figura va fi de 10 pe 10. Puteti creste la 12 pe 12.
Creati apoi dictionarul jucatorilor. De exemplu in meciul Germania-Argentina, pe linia $i$ a matricii paselor jucatorilor germani, figureaza jucatorul 'Prenume Nume'. Astfel dictionarul Jucatori s-ar defini pentru Germania, astfel:
|
# Map each player's row index in the pass matrix to his name
# (extend with the remaining starters as needed).
_player_names = ['Manuel NEUER', 'Benedikt HOEWEDES', 'Mats HUMMELS']  # etc
Jucatori = dict(enumerate(_player_names))
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Avand acest dictionar atunci cand am calculat vectorul ranking, printam informatia in felul urmator:
Cel mai popular jucator (cel care a primit cele mai multe pase in timpul meciului) este
jucatorul Jucatori[ranking[0]].
i=ranking[0]este codul numeric al jucatorului,
$i \in{0,1, \ldots, n-1}$, cel mai bun, iar Jucatori[i] este numele acestuia extras din dictionar.
Alegeti meciuri diferite si echipe diferite, nu analizati toti echipa Germaniei.
O analiza mai detaliata a performantelor jucatorilor o vom putea efectua in semestrul II, dupa ce studiem
Lanturi Markov la Probabilitati.
This notebook was created early in december 2014 (hence it is history). Meanwhile networkx evolved and some cells could display errors after running.
|
from IPython.core.display import HTML

def css_styling():
    """Apply the notebook's custom stylesheet (./custom.css).

    Returns an HTML display object so the stylesheet is injected when
    the cell output is rendered.
    """
    # Use a context manager so the file handle is closed promptly
    # (the original left the handle open until garbage collection).
    with open("./custom.css", "r") as fh:
        styles = fh.read()
    return HTML(styles)

css_styling()
|
Networks.ipynb
|
empet/LinAlgCS
|
bsd-3-clause
|
Now that we have the TensorFlow code working on a subset of the data, we can package the TensorFlow code up as a Python module and train it on Cloud AI Platform.
<p>
<h2> Train on Cloud AI Platform</h2>
<p>
Training on Cloud AI Platform requires:
<ol>
<li> Making the code a Python package
<li> Using gcloud to submit the training code to Cloud AI Platform
</ol>
Ensure that the AI Platform API is enabled by going to this [link](https://console.developers.google.com/apis/library/ml.googleapis.com).
## Lab Task 1
The following code edits babyweight/trainer/task.py. You should add the hyperparameters needed by your model as command-line arguments using the `parser` module. Look at how `batch_size` is passed to the model in the code below. Do this for the following hyperparameters (defaults in parentheses): `train_examples` (5000), `eval_steps` (None), `pattern` (of).
|
%%writefile babyweight/trainer/task.py
import argparse
import json
import os
from . import model
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--bucket',
help = 'GCS path to data. We assume that data is in gs://BUCKET/babyweight/preproc/',
required = True
)
parser.add_argument(
'--output_dir',
help = 'GCS location to write checkpoints and export models',
required = True
)
parser.add_argument(
'--batch_size',
help = 'Number of examples to compute gradient over.',
type = int,
default = 512
)
parser.add_argument(
'--job-dir',
help = 'this model ignores this field, but it is required by gcloud',
default = 'junk'
)
parser.add_argument(
'--nnsize',
help = 'Hidden layer sizes to use for DNN feature columns -- provide space-separated layers',
nargs = '+',
type = int,
default=[128, 32, 4]
)
parser.add_argument(
'--nembeds',
help = 'Embedding size of a cross of n key real-valued parameters',
type = int,
default = 3
)
## TODOs after this line
################################################################################
## TODO 1: add the new arguments here
## parse all arguments
args = parser.parse_args()
arguments = args.__dict__
# unused args provided by service
arguments.pop('job_dir', None)
arguments.pop('job-dir', None)
## assign the arguments to the model variables
output_dir = arguments.pop('output_dir')
model.BUCKET = arguments.pop('bucket')
model.BATCH_SIZE = arguments.pop('batch_size')
model.TRAIN_STEPS = (arguments.pop('train_examples') * 100) / model.BATCH_SIZE
model.EVAL_STEPS = arguments.pop('eval_steps')
print ("Will train for {} steps using batch_size={}".format(model.TRAIN_STEPS, model.BATCH_SIZE))
model.PATTERN = arguments.pop('pattern')
model.NEMBEDS= arguments.pop('nembeds')
model.NNSIZE = arguments.pop('nnsize')
print ("Will use DNN size of {}".format(model.NNSIZE))
# Append trial_id to path if we are doing hptuning
# This code can be removed if you are not using hyperparameter tuning
output_dir = os.path.join(
output_dir,
json.loads(
os.environ.get('TF_CONFIG', '{}')
).get('task', {}).get('trial', '')
)
# Run the training job
model.train_and_evaluate(output_dir)
|
courses/machine_learning/deepdive/06_structured/labs/5_train.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Lab Task 2
Address all the TODOs in the following code in babyweight/trainer/model.py with the cell below. This code is similar to the model training code we wrote in Lab 3.
After addressing all TODOs, run the cell to write the code to the model.py file.
|
%%writefile babyweight/trainer/model.py
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
# Globals below are overwritten by task.py from command-line flags before training.
BUCKET = None  # set from task.py
PATTERN = 'of' # gets all files
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column (also encodes each column's dtype)
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
# Define some hyperparameters
TRAIN_STEPS = 10000
EVAL_STEPS = None  # None => evaluate until the eval input is exhausted
BATCH_SIZE = 512
NEMBEDS = 3  # embedding size for the crossed wide columns
NNSIZE = [64, 16, 4]  # DNN hidden-layer sizes
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(prefix, mode, batch_size):
    """Build an Estimator input_fn streaming CSV shards from GCS.

    Args:
        prefix: file-name prefix ('train' or 'eval') used to glob shards.
        mode: tf.estimator.ModeKeys value; TRAIN shuffles and repeats forever.
        batch_size: number of examples per batch.

    Returns:
        A zero-argument input_fn yielding (features dict, label) tensors.
    """
    def _input_fn():
        def _parse_line(line):
            # Split one CSV line into per-column tensors, peel off the label.
            cells = tf.decode_csv(line, record_defaults=DEFAULTS)
            feats = dict(zip(CSV_COLUMNS, cells))
            target = feats.pop(LABEL_COLUMN)
            return feats, target

        # Glob all shards matching gs://<BUCKET>/babyweight/preproc/<prefix>*<PATTERN>*
        glob_pattern = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, prefix, PATTERN)
        shard_files = tf.gfile.Glob(glob_pattern)

        ds = tf.data.TextLineDataset(shard_files).map(_parse_line)
        if mode == tf.estimator.ModeKeys.TRAIN:
            epochs = None  # repeat indefinitely while training
            ds = ds.shuffle(buffer_size=10 * batch_size)
        else:
            epochs = 1  # single pass for evaluation
        ds = ds.repeat(epochs).batch(batch_size)
        return ds.make_one_shot_iterator().get_next()

    return _input_fn
# Define feature columns
def get_wide_deep():
    """Define feature columns, split into wide (linear) and deep (DNN) groups.

    Returns:
        (wide, deep): lists of feature columns for the linear and DNN parts
        of a DNNLinearCombinedRegressor.
    """
    # Raw column definitions.
    sex_col = tf.feature_column.categorical_column_with_vocabulary_list(
        'is_male', ['True', 'False', 'Unknown'])
    age_col = tf.feature_column.numeric_column('mother_age')
    plurality_col = tf.feature_column.categorical_column_with_vocabulary_list(
        'plurality',
        ['Single(1)', 'Twins(2)', 'Triplets(3)',
         'Quadruplets(4)', 'Quintuplets(5)', 'Multiple(2+)'])
    weeks_col = tf.feature_column.numeric_column('gestation_weeks')

    # Bucketize the continuous columns so they can join the feature cross.
    age_buckets = tf.feature_column.bucketized_column(
        age_col, boundaries=np.arange(15, 45, 1).tolist())
    weeks_buckets = tf.feature_column.bucketized_column(
        weeks_col, boundaries=np.arange(17, 47, 1).tolist())

    # Wide: sparse columns with a roughly linear relationship to the label.
    wide = [sex_col, plurality_col, age_buckets, weeks_buckets]

    # Cross every wide column together and embed into NEMBEDS dimensions.
    crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)
    embedded = tf.feature_column.embedding_column(crossed, NEMBEDS)

    # Deep: continuous inputs with complex interactions, plus the embedding.
    deep = [age_col, weeks_col, embedded]
    return wide, deep
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
    """Serving input fn: accept rank-1 feature tensors, reshape to [batch, 1]."""
    placeholders = {
        'is_male': tf.placeholder(tf.string, [None]),
        'mother_age': tf.placeholder(tf.float32, [None]),
        'plurality': tf.placeholder(tf.string, [None]),
        'gestation_weeks': tf.placeholder(tf.float32, [None]),
        # Key defaults to 'nokey' so online prediction need not supply it.
        KEY_COLUMN: tf.placeholder_with_default(tf.constant(['nokey']), [None]),
    }
    # Add a trailing dimension so each feature matches training-time shape.
    features = {}
    for name, tensor in placeholders.items():
        features[name] = tf.expand_dims(tensor, -1)
    return tf.estimator.export.ServingInputReceiver(features, placeholders)
# create metric for hyperparameter tuning
def my_rmse(labels, predictions):
    """Extra eval metric: RMSE between labels and model predictions.

    Used for hyperparameter tuning, which optimizes on this metric name.
    """
    preds = predictions['predictions']
    return dict(rmse=tf.metrics.root_mean_squared_error(labels, preds))
def forward_features(estimator, key):
    """Wrap an Estimator so its predictions echo back the `key` input feature.

    Batch prediction needs a key in each output row to join results back to
    the corresponding input instance.
    """
    def _wrapped_model_fn(features, labels, mode, config):
        # Delegate to the wrapped model, then copy the key into predictions.
        spec = estimator.model_fn(features, labels, mode, config)
        preds = spec.predictions
        preds[key] = features[key]
        return spec._replace(predictions=preds)

    return tf.estimator.Estimator(
        model_fn=_wrapped_model_fn,
        model_dir=estimator.model_dir,
        config=estimator.config)
## TODOs after this line
################################################################################
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
    """Train the wide-and-deep model, evaluating and exporting periodically.

    Args:
        output_dir: GCS or local directory for checkpoints and exported models.
    """
    tf.summary.FileWriterCache.clear()  # ensure filewriter cache is clear for TensorBoard events file
    wide, deep = get_wide_deep()
    EVAL_INTERVAL = 300  # seconds between checkpoints (and thus evaluations)
    # Checkpoint every EVAL_INTERVAL seconds; keep only the 3 newest.
    run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
                                        keep_checkpoint_max = 3)
    # Hidden-layer sizes come from the NNSIZE hyperparameter (set by task.py).
    estimator = tf.estimator.DNNLinearCombinedRegressor(
        model_dir = output_dir,
        linear_feature_columns = wide,
        dnn_feature_columns = deep,
        dnn_hidden_units = NNSIZE,
        config = run_config)
    # illustrates how to add an extra metric
    estimator = tf.estimator.add_metrics(estimator, my_rmse)
    # for batch prediction, you need a key associated with each instance
    estimator = forward_features(estimator, KEY_COLUMN)
    # Train for TRAIN_STEPS batches of BATCH_SIZE examples each.
    train_spec = tf.estimator.TrainSpec(
        input_fn = read_dataset('train', tf.estimator.ModeKeys.TRAIN, BATCH_SIZE),
        max_steps = TRAIN_STEPS)
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn, exports_to_keep=None)
    # EVAL_STEPS of None means evaluate until the eval input is exhausted.
    eval_spec = tf.estimator.EvalSpec(
        input_fn = read_dataset('eval', tf.estimator.ModeKeys.EVAL, 2**15),  # no need to batch in eval
        steps = EVAL_STEPS,
        start_delay_secs = 60,  # start evaluating after N seconds
        throttle_secs = EVAL_INTERVAL,  # evaluate every N seconds
        exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
courses/machine_learning/deepdive/06_structured/labs/5_train.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Lab Task 5
Once the code works in standalone mode, you can run it on Cloud AI Platform.
Change the parameters to the model (-train_examples for example may not be part of your model) appropriately.
Because this is on the entire dataset, it will take a while. The training run took about <b> 2 hours </b> for me. You can monitor the job from the GCP console in the Cloud AI Platform section.
|
%%bash
# Submit a full-dataset training job to Cloud AI Platform (~2 hours).
# Requires the BUCKET and REGION environment variables to be set.
OUTDIR=gs://${BUCKET}/babyweight/trained_model
# Unique job name derived from the current UTC timestamp.
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
# Remove any previous output so training starts from scratch.
gsutil -m rm -rf $OUTDIR
# Flags after the bare "--" are passed through to trainer/task.py.
gcloud ai-platform jobs submit training $JOBNAME \
  --region=$REGION \
  --module-name=trainer.task \
  --package-path=$(pwd)/babyweight/trainer \
  --job-dir=$OUTDIR \
  --staging-bucket=gs://$BUCKET \
  --scale-tier=STANDARD_1 \
  --runtime-version=2.1 \
  --python-version=3.7 \
  -- \
  --bucket=${BUCKET} \
  --output_dir=${OUTDIR} \
  --train_examples=20000
|
courses/machine_learning/deepdive/06_structured/labs/5_train.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
When I ran it, I used train_examples=20000. When training finished, I filtered in the Stackdriver log on the word "dict" and saw that the last line was:
<pre>
Saving dict for global step 5714290: average_loss = 1.06473, global_step = 5714290, loss = 34882.4, rmse = 1.03186
</pre>
The final RMSE was 1.03 pounds.
<h2> Repeat training </h2>
<p>
This time with tuned parameters (note last line)
|
%%bash
# Re-submit training, this time with the tuned hyperparameters
# (batch_size, nembeds, nnsize on the last line).
OUTDIR=gs://${BUCKET}/babyweight/trained_model_tuned
# Unique job name derived from the current UTC timestamp.
JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
# Remove any previous output so training starts from scratch.
gsutil -m rm -rf $OUTDIR
# Flags after the bare "--" are passed through to trainer/task.py.
gcloud ai-platform jobs submit training $JOBNAME \
  --region=$REGION \
  --module-name=trainer.task \
  --package-path=$(pwd)/babyweight/trainer \
  --job-dir=$OUTDIR \
  --staging-bucket=gs://$BUCKET \
  --scale-tier=STANDARD_1 \
  --runtime-version=2.1 \
  --python-version=3.7 \
  -- \
  --bucket=${BUCKET} \
  --output_dir=${OUTDIR} \
  --train_examples=2000 --batch_size=35 --nembeds=16 --nnsize=281
|
courses/machine_learning/deepdive/06_structured/labs/5_train.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Compute EBTEL Results
Run the single- and two-fluid EBTEL models for a variety of inputs. This will be the basis for the rest of our analysis.
First, import any needed modules.
|
import sys
import os
import subprocess
import pickle
import numpy as np
sys.path.append(os.path.join(os.environ['EXP_DIR'],'ebtelPlusPlus/rsp_toolkit/python'))
from xml_io import InputHandler,OutputHandler
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Setup the base dictionary for all of the runs. We'll read in the base dictionary from the ebtel++ example configuration file.
|
# Start from the shipped example ebtel++ configuration, then override the
# options shared by every run in this study.
ih = InputHandler(os.path.join(os.environ['EXP_DIR'],'ebtelPlusPlus','config','ebtel.example.cfg.xml'))
config_dict = ih.lookup_vars()
config_dict['use_adaptive_solver'] = False  # fixed-step solver
config_dict['loop_length'] = 40.0e+8  # loop length in cm (4e9 cm = 40 Mm)
config_dict['adaptive_solver_error'] = 1e-8  # unused when the adaptive solver is off
config_dict['calculate_dem'] = False  # skip the DEM calculation
config_dict['total_time'] = 5000.0  # total simulation time (s)
config_dict['tau'] = 0.1  # presumably the fixed solver time step (s) -- confirm against ebtel++ docs
config_dict['use_c1_grav_correction'] = True
config_dict['use_c1_loss_correction'] = True
config_dict['c1_cond0'] = 6.0
config_dict['c1_rad0'] = 0.6
config_dict['heating']['background'] = 3.5e-5  # steady background heating rate
config_dict['output_filename'] = '../results/_tmp_'  # scratch output, overwritten each run
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Next, construct a function that will make it easy to run all of the different EBTEL configurations.
|
def run_and_print(tau, h0, f, flux_opt, oh_inst):
    """Run ebtel++ for one heating event in single- and two-fluid modes.

    Runs the same pulse three times: single-fluid, two-fluid with all
    heating to electrons, and two-fluid with all heating to ions.

    Args:
        tau: duration (s) of the triangular heating pulse.
        h0: peak heating rate of the pulse.
        f: heat-flux saturation limit.
        flux_opt: whether to enable flux limiting.
        oh_inst: XML OutputHandler used to print each run's configuration.

    Returns:
        dict of time/temperature/density arrays -- single-fluid (t, T, n),
        electron heating (te, Tee, Tei, ne), ion heating (ti, Tie, Tii, ni) --
        plus the flux-limiter option used.
    """
    def _run_case(single_fluid, partition):
        # Print the config, run the ebtel++ binary, and load its output table.
        oh_inst.output_dict['force_single_fluid'] = single_fluid
        oh_inst.output_dict['heating']['partition'] = partition
        oh_inst.print_to_xml()
        subprocess.call([os.path.join(os.environ['EXP_DIR'], 'ebtelPlusPlus', 'bin', 'ebtel++.run'),
                         '-c', oh_inst.output_filename])
        return np.loadtxt(oh_inst.output_dict['output_filename'])

    # Triangular heating pulse: linear rise over tau/2, linear decay over tau/2.
    oh_inst.output_dict['heating']['events'] = [
        {'event': {'magnitude': h0, 'rise_start': 0.0, 'rise_end': tau/2.0,
                   'decay_start': tau/2.0, 'decay_end': tau}}
    ]
    # Heat-flux options shared by all three runs.
    oh_inst.output_dict['saturation_limit'] = f
    oh_inst.output_dict['use_flux_limiting'] = flux_opt

    # Single-fluid run (equal electron/ion energy partition).
    data = _run_case(True, 0.5)
    t, T, n = data[:, 0], data[:, 1], data[:, 3]
    # Two-fluid run, all heating deposited in the electrons.
    data = _run_case(False, 1.0)
    te, Tee, Tei, ne = data[:, 0], data[:, 1], data[:, 2], data[:, 3]
    # Two-fluid run, all heating deposited in the ions.
    data = _run_case(False, 0.0)
    ti, Tie, Tii, ni = data[:, 0], data[:, 1], data[:, 2], data[:, 3]

    return {'t': t, 'te': te, 'ti': ti, 'T': T, 'Tee': Tee, 'Tei': Tei,
            'Tie': Tie, 'Tii': Tii, 'n': n, 'ne': ne, 'ni': ni,
            'heat_flux_option': flux_opt}
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Configure instances of the XML output handler for printing files.
|
oh = OutputHandler(config_dict['output_filename']+'.xml',config_dict)
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Finally, run the model for varying pulse duration.
|
# Scan over pulse durations (s); the amplitude scales as 20/tau so every
# pulse deposits the same total energy.
tau_h = [20,40,200,500]
tau_h_results = []
for t in tau_h:
    results = run_and_print(t,20.0/t,1.0,True,oh)
    # Record the loop length alongside each run's time series.
    results['loop_length'] = config_dict['loop_length']
    tau_h_results.append(results)
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
And then run the models for varying flux-limiter, $f$.
|
# Scan over flux-limiter values f at fixed pulse (tau=200 s, h0=0.1);
# opt=False disables flux limiting entirely for the last case.
flux_lim = [{'f':1.0,'opt':True},{'f':0.53,'opt':True},{'f':1.0/6.0,'opt':True},{'f':0.1,'opt':True},
            {'f':1.0/30.0,'opt':True},{'f':1.0,'opt':False}]
flux_lim_results = []
for i in range(len(flux_lim)):
    results = run_and_print(200.0,0.1,flux_lim[i]['f'],flux_lim[i]['opt'],oh)
    # Record the loop length alongside each run's time series.
    results['loop_length'] = config_dict['loop_length']
    flux_lim_results.append(results)
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Save both data structures to serialized files.
|
# Serialize both result lists; the __dest__ target paths are injected by
# the build tool that executes this notebook.
with open(__dest__[0],'wb') as f:
    pickle.dump(tau_h_results,f)
with open(__dest__[1],'wb') as f:
    pickle.dump(flux_lim_results,f)
|
notebooks/compute_ebtel_results.ipynb
|
rice-solar-physics/hot_plasma_single_nanoflares
|
bsd-2-clause
|
Run training
To help ensure this example runs quickly, we train for only 10,000 steps, even though in our paper we used 40,000 steps.
|
! gsutil cp gs://data-driven-discretization-public/training-data/burgers.h5 .
%%time
! python data-driven-discretization-1d/pde_superresolution/scripts/run_training.py \
--checkpoint_dir burgers-checkpoints \
--equation burgers \
--hparams resample_factor=16,learning_stops=[5000,10000] \
--input_path burgers.h5
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
Run evaluation
One key parameter here is the "warmup" time cutoff, which we use to ensure that we are only asking the neural network to make predictions on fully developed solutions, after all transients are removed. We used warmup=10 for Burgers, warmup=100 for KS and warmup=50 for KdV.
|
# Use pre-computed "exact" solution from WENO.
# You could also run this yourself using scripts/create_exact_data.py
! gsutil cp gs://data-driven-discretization-public/time-evolution/exact/burgers_weno.nc .
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
See also ks_spectral.nc and kdv_spectral.nc in the same directory for reference simulations with KS and KdV equations.
|
import xarray
# remove extra samples, so evaluation runs faster
reference = xarray.open_dataset('burgers_weno.nc').isel(sample=slice(10)).load()
reference.to_netcdf('burgers_weno_10samples.nc')
%%time
! python data-driven-discretization-1d/pde_superresolution/scripts/run_evaluation.py \
--checkpoint_dir burgers-checkpoints \
--exact_solution_path burgers_weno_10samples.nc \
--equation_name burgers \
--stop_times "[10]" \
--num_samples 10 \
--warmup 10 \
--time_delta 0.1 \
--time_max 50 \
--logtostderr
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
Very simple evaluation
Evaluations have been saved to burgers-checkpoints/results.nc, but we'll download them from cloud storage instead:
|
! gsutil cp gs://data-driven-discretization-public/time-evolution/model/burgers_16x_samples.nc .
import xarray
results = xarray.open_dataset('burgers_16x_samples.nc').load()
results
reference
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
An example solution from our reference model, at high resolution:
|
reference.y[0].sel(time=slice(10, 60)).plot.imshow()
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
Coarse-grained simulation with our neural network:
|
results.y[0].plot.imshow()
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
Difference between the neural network results and coarse-grained reference results:
|
(results.y.sel(sample=0)
- reference.y.sel(sample=0, time=slice(10, 60)).coarsen(x=16).mean()
.assign_coords(x=results.x)).plot.imshow()
|
notebooks/time-integration.ipynb
|
google/data-driven-discretization-1d
|
apache-2.0
|
We'll just check that the pulse area is what we want.
|
print('The input pulse area is {0:.3f}.'.format(
np.trapz(mbs.Omegas_zt[0,0,:].real, mbs.tlist)/np.pi))
|
docs/examples/mbs-two-sech-6pi.ipynb
|
tommyogden/maxwellbloch
|
mit
|
Solve the Problem
|
Omegas_zt, states_zt = mbs.mbsolve(recalc=False)
|
docs/examples/mbs-two-sech-6pi.ipynb
|
tommyogden/maxwellbloch
|
mit
|
Plot Output
|
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
sns.set_style('darkgrid')
# Contour plot of the pulse Rabi frequency |Omega(z, t)| as it propagates.
fig = plt.figure(1, figsize=(16, 6))
ax = fig.add_subplot(111)
cmap_range = np.linspace(0.0, 4.0, 11)
cf = ax.contourf(mbs.tlist, mbs.zlist,
                 np.abs(mbs.Omegas_zt[0]/(2*np.pi)),
                 cmap_range, cmap=plt.cm.Blues)
# Raw strings so the TeX escapes (\Gamma, \pi) are not interpreted as
# (invalid) Python escape sequences; rendered text is unchanged.
ax.set_title(r'Rabi Frequency ($\Gamma / 2\pi $)')
ax.set_xlabel(r'Time ($1/\Gamma$)')
ax.set_ylabel(r'Distance ($L$)')
# Mark the entrance (z=0) and exit (z=L) faces of the medium.
for y in [0.0, 1.0]:
    ax.axhline(y, c='grey', lw=1.0, ls='dotted')
plt.colorbar(cf);
# Pulse area vs distance: the 6*pi input splits into three 2*pi pulses.
fig, ax = plt.subplots(figsize=(16, 5))
ax.plot(mbs.zlist, mbs.fields_area()[0]/np.pi, clip_on=False)
ax.set_ylim([0.0, 8.0])
ax.set_xlabel(r'Distance ($L$)')
ax.set_ylabel(r'Pulse Area ($\pi$)');
|
docs/examples/mbs-two-sech-6pi.ipynb
|
tommyogden/maxwellbloch
|
mit
|
Analysis
The $6 \pi$ sech pulse breaks up into three $2 \pi$ pulses, which travel at a speed according to their width.
Movie
|
# C = 0.1 # speed of light
# Y_MIN = 0.0 # Y-axis min
# Y_MAX = 4.0 # y-axis max
# ZOOM = 2 # level of linear interpolation
# FPS = 60 # frames per second
# ATOMS_ALPHA = 0.2 # Atom indicator transparency
# FNAME = "images/mb-solve-two-sech-6pi"
# FNAME_JSON = FNAME + '.json'
# with open(FNAME_JSON, "w") as f:
# f.write(mb_solve_json)
# !make-mp4-fixed-frame.py -f $FNAME_JSON -c $C --fps $FPS --y-min $Y_MIN --y-max $Y_MAX \
# --zoom $ZOOM --atoms-alpha $ATOMS_ALPHA #--peak-line --c-line
# FNAME_MP4 = FNAME + '.mp4'
# !make-gif-ffmpeg.sh -f $FNAME_MP4 --in-fps $FPS
# from IPython.display import Image
# Image(url=FNAME_MP4 +'.gif', format='gif')
|
docs/examples/mbs-two-sech-6pi.ipynb
|
tommyogden/maxwellbloch
|
mit
|
Import the Cem class, and instantiate it. In Python, a model with a BMI will have no arguments for its constructor. Note that although the class has been instantiated, it's not yet ready to be run. We'll get to that later!
|
import pymt.models
cem = pymt.models.Cem()
|
docs/demos/cem.ipynb
|
csdms/coupling
|
mit
|
Even though we can't run our waves model yet, we can still get some information about it. Just don't try to run it. Some things we can do with our model are get the names of the input variables.
|
cem.output_var_names
cem.input_var_names
|
docs/demos/cem.ipynb
|
csdms/coupling
|
mit
|
OK. We're finally ready to run the model. Well, not quite. First we initialize the model with the BMI initialize method. Normally we would pass it a string that represents the name of an input file. For this example we'll pass None, which tells Cem to use some defaults.
|
args = cem.setup(number_of_rows=100, number_of_cols=200, grid_spacing=200.)
cem.initialize(*args)
|
docs/demos/cem.ipynb
|
csdms/coupling
|
mit
|
With the grid_id, we can now get information about the grid. For instance, the number of dimension and the type of grid (structured, unstructured, etc.). This grid happens to be uniform rectilinear. If you were to look at the "grid" types for wave height and period, you would see that they aren't on grids at all but instead are scalars.
|
grid_type = cem.get_grid_type(grid_id)
grid_rank = cem.get_grid_ndim(grid_id)
print('Type of grid: %s (%dD)' % (grid_type, grid_rank))
|
docs/demos/cem.ipynb
|
csdms/coupling
|
mit
|
Because this grid is uniform rectilinear, it is described by a set of BMI methods that are only available for grids of this type. These methods include:
* get_grid_shape
* get_grid_spacing
* get_grid_origin
|
# Query the uniform rectilinear grid geometry via the BMI grid methods.
spacing = np.empty((grid_rank, ), dtype=float)  # filled in place by get_grid_spacing
shape = cem.get_grid_shape(grid_id)
cem.get_grid_spacing(grid_id, out=spacing)
print('The grid has %d rows and %d columns' % (shape[0], shape[1]))
print('The spacing between rows is %f and between columns is %f' % (spacing[0], spacing[1]))
|
docs/demos/cem.ipynb
|
csdms/coupling
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.