markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
The one below should fail, because it's outside the prior:
|
print(lpost([-0.1, 0.1, 0.1]))
print(lpost([0.1, -0.1, 0.1]))
print(lpost([0.1, 0.1, -0.1]))
|
notebooks/SherpaResponses.ipynb
|
eblur/clarsach
|
gpl-3.0
|
Okay, cool! This works.
Now we can run MCMC!
|
import emcee
start_pars = np.array([0.313999, 1.14635, 0.0780871])
start_cov = np.diag(start_pars/100.0)
nwalkers = 100
niter = 200
ndim = len(start_pars)
burnin = 50
p0 = np.array([np.random.multivariate_normal(start_pars, start_cov) for
i in range(nwalkers)])
# initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lpost, args=[False], threads=4)
pos, prob, state = sampler.run_mcmc(p0, burnin)
_, _, _ = sampler.run_mcmc(pos, niter, rstate0=state)
plt.figure()
plt.plot(sampler.flatchain[:,0])
plt.figure()
plt.plot(sampler.flatchain[:,1])
plt.figure()
plt.plot(sampler.flatchain[:,2])
import corner
%matplotlib inline
corner.corner(sampler.flatchain,
quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_args={"fontsize": 12});
|
notebooks/SherpaResponses.ipynb
|
eblur/clarsach
|
gpl-3.0
|
Configurations
|
vocab = (" $%'()+,-./0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"\\^_abcdefghijklmnopqrstuvwxyz{|}\n")
graph_path = r"./graphs"
test_text_path = os.path.normpath(r"../Dataset/arvix_abstracts.txt")
batch_size=50
model_param_path=os.path.normpath(r"./model_checkpoints")
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Data encoding
Basic Assumption
A full string sequence consists of $START$ & $STOP$ signals with characters in the middle.
Encoding policy
A set $\mathcal{S}$ that consists of many characters is utilized to encode the characters.
The $1^{st}$ entry of the vector corresponds to $UNKNOWN$ characters (i.e. characters that are beyond $\mathcal{S}$).
The last entry of the vector corresponds to $STOP$ signal of the sequence.
The entries in the middle correspond to the indices of the characters within $\mathcal{S}$.
The $START$ signal is represented as a zero vector.
Implementation & Test
Declaration
|
class TextCodec:
    """Encode/decode strings as one-hot vectors with START/STOP/UNKNOWN markers.

    Vector layout (dimension = len(vocab) + 2):
      * entry 0               -> UNKNOWN character (outside the vocabulary)
      * entries 1..len(vocab) -> index of the character within the vocabulary
      * last entry            -> STOP signal
      * an all-zero row       -> START signal
    """

    def __init__(self, vocab):
        # Character set used for encoding; characters outside it map to UNKNOWN.
        self._vocab = vocab
        self._dim = len(vocab) + 2

    def encode(self, string, sess=None, start=True, stop=True):
        """
        Encode string.
        Each character is represented as an N-dimension one-hot vector,
        N = len(self._vocab) + 2.
        Note:
        The first entry of the vector corresponds to an unknown character.
        The last entry of the vector corresponds to the STOP signal of the sequence.
        The entries in the middle correspond to the index of the character.
        The START signal is represented as a zero vector.
        Returns a 2-D numpy array of shape [sequence_length, self._dim].
        """
        # Bug fix: use self._vocab (instance state) rather than the
        # module-level `vocab` global, so the codec honours the vocabulary
        # it was constructed with.
        tensor = [self._vocab.find(ch) + 1 for ch in string]
        if stop:
            tensor.append(len(self._vocab) + 1)  # String + STOP
        tensor = tf.one_hot(tensor, depth=self._dim, on_value=1.0, off_value=0.0, axis=-1, dtype=tf.float32)
        if start:
            # Prepend the all-zero START row.
            tensor = tf.concat([tf.zeros([1, self._dim], dtype=tf.float32), tensor], axis=0)
        if sess is None:
            # No session supplied: evaluate in a throwaway session.
            with tf.Session() as sess:
                nparray = tensor.eval()
        elif isinstance(sess, tf.Session):
            nparray = tensor.eval(session=sess)
        else:
            raise TypeError('"sess" must be {}, got {}'.format(tf.Session, type(sess)))
        return nparray

    def decode(self, nparray, default="[UNKNOWN]", start="[START]", stop="[STOP]", strip=False):
        """Decode a 2-D array of one-hot rows back into text.

        START/STOP rows become the `start`/`stop` markers ("" when
        strip=True); unknown characters become `default`.
        """
        text_list = []
        indices = np.argmax(nparray, axis=1)
        for v, ch_i in zip(nparray, indices):
            if np.all(v == 0):
                text_list.append(start if not strip else "")
            elif ch_i == 0:
                text_list.append(default)
            elif ch_i == len(self._vocab) + 1:
                text_list.append(stop if not strip else "")
            else:
                # Bug fix: index into self._vocab, not the global `vocab`.
                text_list.append(self._vocab[ch_i - 1])
        return "".join(text_list)

    @property
    def dim(self):
        # Total one-hot dimension: vocabulary size + UNKNOWN + STOP.
        return self._dim
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Test
See how encoding and decoding work.
|
test_codec=TextCodec(vocab)
test_text_encoded=test_codec.encode("Hello world!")
print("Encoded text looks like:\n{}".format(test_text_encoded))
test_text_decoded=test_codec.decode(nparray=test_text_encoded,strip=False)
print("Decoded text looks like:\n{}".format(test_text_decoded))
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Load data set
|
with open(test_text_path, "r") as f:
raw_text_list = "".join(f.readlines()).split("\n")
print("Loaded abstract from a total of {} theses.".format(len(raw_text_list)))
# See what we have loaded
sample_text_no = random.randint(0, len(raw_text_list)-1)
sample_text_raw = raw_text_list[sample_text_no]
print("A sample text in the data set:\n{}".format(sample_text_raw))
sample_text_encoded=test_codec.encode(sample_text_raw)
print("Encoded text:\n{}".format(sample_text_encoded))
print("Decoded text:\n{}".format(test_codec.decode(sample_text_encoded)))
encoded_data = test_codec.encode("\n".join(raw_text_list), start=False, stop=False)
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Define Batch Generator
|
def batch_generator(data, codec, batch_size, seq_length, reset_every):
    """Yield (input, target) minibatches for truncated-BPTT training.

    data: encoded one-hot array (a raw string is encoded here first).
    codec: object providing encode() (e.g. TextCodec).
    Each collected sequence is seq_length * reset_every + 1 rows long
    (START + characters + STOP); a full batch is sliced into `reset_every`
    consecutive windows where the target is the input shifted one step
    to the right.
    """
    if type(data) == str:
        # Accept raw text as well as pre-encoded arrays.
        data = codec.encode(data, start=False, stop=False)
    head = 0  # read cursor into `data`
    reset_index = 0  # NOTE(review): assigned but never used
    batch = []
    seq = []
    increment = seq_length * reset_every - 1  # rows consumed per sequence
    # Encode an empty string to obtain the START and STOP one-hot rows.
    extras = codec.encode("", start=True, stop=True)
    v_start, v_stop = extras[0: 1, :], extras[1: 2, :]
    while head < np.shape(data)[0] or len(batch) == batch_size:
        if len(batch) == batch_size:
            # A full batch is ready: emit `reset_every` shifted windows
            # (x = window, y = same window advanced by one time step).
            batch = np.array(batch)
            for offset in range(reset_every):
                yield (batch[:, offset * seq_length: (offset + 1) * seq_length, :],
                       batch[:, offset * seq_length + 1: (offset + 1) * seq_length + 1, :])
            batch = []
        else:
            # Wrap the next slice of data in START/STOP rows; a short
            # (end-of-data) slice fails the length check and is dropped.
            seq = np.concatenate([v_start, data[head: head + increment, :], v_stop], axis=0)
            if np.shape(seq)[0] == (increment + 2):
                batch.append(seq)
            head += increment
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Check the generator
|
seq_length = 100
reset_every = 2
batch_size = 2
batches = batch_generator(data=encoded_data,
codec=test_codec,
batch_size=batch_size,
seq_length=seq_length,
reset_every=reset_every)
for (x, y), i in zip(batches, range(reset_every * 2)):
print("Batch {}".format(i))
if (i % reset_every) == 0:
print("Reset")
for j in range(batch_size):
decoded_x, decoded_y = test_codec.decode(x[j], strip=False), test_codec.decode(y[j], strip=False)
print("Index of sub-sequence:\n{}\nSequence input:\n{}:\nSequence output:\n{}".format(j,
decoded_x,
decoded_y))
del seq_length, reset_every, batch_size, batches
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Define model class
|
class DRNN(tf.nn.rnn_cell.RNNCell):
    """Deep residual RNN cell: stacked GRU layers behind a shared input
    projection. The projected input is re-added to every layer's output
    (residual connection), and each layer contributes a projection that is
    summed into the final output: output = tanh(sum_i h_i @ W_h{i}o + b_ho).
    """
    def __init__(self, input_dim, hidden_dim, output_dim, num_hidden_layer, dtype=tf.float32):
        # NOTE(review): super(tf.nn.rnn_cell.RNNCell, self) skips
        # RNNCell.__init__ and initializes the next class in the MRO;
        # super(DRNN, self).__init__(dtype=dtype) looks like the intended
        # call — confirm against the TensorFlow version in use.
        super(tf.nn.rnn_cell.RNNCell, self).__init__(dtype=dtype)
        assert type(input_dim) == int and input_dim > 0, "Invalid input dimension. "
        self._input_dim = input_dim
        assert type(num_hidden_layer) == int and num_hidden_layer > 0, "Invalid number of hidden layer. "
        self._num_hidden_layer = num_hidden_layer
        assert type(hidden_dim) == int and hidden_dim > 0, "Invalid dimension of hidden states. "
        self._hidden_dim = hidden_dim
        assert type(output_dim) == int and output_dim > 0, "Invalid dimension of output dimension. "
        self._output_dim = output_dim
        # States are always handled as a tuple, one entry per GRU layer.
        self._state_is_tuple = True
        with tf.variable_scope("input_layer"):
            # Shared projection from input space into hidden space.
            self._W_xh = tf.get_variable("W_xh", shape=[self._input_dim, self._hidden_dim])
            self._b_xh = tf.get_variable("b_xh", shape=[self._hidden_dim])
        with tf.variable_scope("rnn_layers"):
            self._cells = [tf.nn.rnn_cell.GRUCell(self._hidden_dim) for _ in range(num_hidden_layer)]
        with tf.variable_scope("output_layer"):
            # One output projection per layer; their contributions are summed.
            self._W_ho_list = [tf.get_variable("W_h{}o".format(i), shape=[self._hidden_dim, self._output_dim])
                               for i in range(num_hidden_layer)]
            self._b_ho = tf.get_variable("b_ho", shape=[self._output_dim])
    @property
    def output_size(self):
        # Dimension of the cell's per-step output.
        return self._output_dim
    @property
    def state_size(self):
        # One hidden-state vector of size hidden_dim per GRU layer.
        return (self._hidden_dim,) * self._num_hidden_layer
    def zero_state(self, batch_size, dtype):
        """Return a tuple of all-zero initial states, one per GRU layer."""
        if self._state_is_tuple:
            return tuple(cell.zero_state(batch_size, dtype)for cell in self._cells)
        else:
            raise NotImplementedError("Not implemented yet.")
    def __call__(self, _input, state, scope=None):
        """Run one time step.

        _input: [batch, input_dim] tensor.
        state: tuple of per-layer states (length num_hidden_layer).
        Returns (output, new_state_tuple).
        """
        assert type(state) == tuple and len(state) == self._num_hidden_layer, "state must be a tuple of size {}".format(
            self._num_hidden_layer)
        # Project the input once; the projection is shared by every layer.
        hidden_layer_input = tf.matmul(_input, self._W_xh) + self._b_xh
        prev_output = hidden_layer_input
        final_state = []
        output = None
        for hidden_layer_index, hidden_cell in enumerate(self._cells):
            with tf.variable_scope("cell_{}".format(hidden_layer_index)):
                new_output, new_state = hidden_cell(prev_output, state[hidden_layer_index])
            # Residual connection: the next layer sees this layer's output
            # plus the projected input.
            prev_output = new_output + hidden_layer_input # Should be included in variable scope of this layer or?
            final_state.append(new_state)
            _W_ho = self._W_ho_list[hidden_layer_index]
            # Accumulate each layer's projected contribution to the output.
            if output is None:
                output = tf.matmul(new_output, _W_ho)
            else:
                output = output + tf.matmul(new_output, _W_ho)
        output = tf.tanh(output + self._b_ho)
        # output = tf.nn.relu(output)
        final_state = tuple(final_state)
        return output, final_state
    def inspect_weights(self, sess):
        """Print every weight/bias tensor and its Frobenius norm.

        Relies on a global `norm` defined elsewhere in the notebook —
        presumably numpy.linalg.norm; confirm before reuse.
        """
        val = self._W_xh.eval(sess)
        print("W_xh:\n{}\nF-norm:\n{}".format(val, norm(val)))
        val = self._b_xh.eval(sess)
        print("b_xh:\n{}\nF-norm:\n{}".format(val, norm(val)))
        for hidden_layer_index in range(self._num_hidden_layer):
            val = self._W_ho_list[hidden_layer_index].eval(sess)
            print("W_h{}o:\n{}\nF-norm:\n{}".format(hidden_layer_index, val, norm(val)))
        val = self._b_ho.eval(sess)
        print("b_ho:\n{}\nF-norm:\n{}".format(val, norm(val)))
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Make an instance of the model and define the rest of the graph
Thoughts
If GRU is used, then the outputs of GRU shall not be directly used as the desired output without further transforms. (e.g. A cell accepts 2 inputs — a state from the previous cell and the input of this cell (which is approximated by the state input) — then the RNN cell can be treated as a normal feed-forward network.)
The proposal above is to be tested again due to the previous bug in training (failed to feed the initial state given by the RNN output from the last sequence).
|
tf.reset_default_graph()
input_dim = output_dim = test_codec.dim
hidden_dim = 700
num_hidden_layer = 3
rnn_cell = DRNN(input_dim=input_dim, output_dim=output_dim, num_hidden_layer=num_hidden_layer, hidden_dim=hidden_dim)
batch_size = 50
init_state = tuple(tf.placeholder_with_default(input=tensor,
shape=[None, hidden_dim]) for tensor in rnn_cell.zero_state(
batch_size=batch_size, dtype=tf.float32))
seq_input = tf.placeholder(name="batch_input", shape=[None, None, input_dim], dtype=tf.float32)
target_seq_output = tf.placeholder(name="target_batch_output", shape=[None, None, output_dim], dtype=tf.float32)
seq_output, final_states = tf.nn.dynamic_rnn(cell=rnn_cell,inputs=seq_input,
initial_state=init_state, dtype=tf.float32)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=target_seq_output, logits=seq_output))
summary_op = tf.summary.scalar(tensor=loss, name="loss")
global_step = tf.get_variable(name="global_step", initializer=0, trainable=False)
lr = tf.get_variable(name="learning_rate", initializer=1.0, trainable=False)
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Training
|
n_epoch=50
learning_rate=1e-3
train_op=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
print_every = 50
save_every = 1000
partition_size = 100
logdir = os.path.normpath("./graphs")
seq_length = 100
reset_every = 100
visualize_every = 100
learning_rate_decay = 0.9
# batch_size has been specified when configuring the the tensors for initial states
keep_checkpoint_every_n_hours = 0.5
model_checkpoint_dir = os.path.normpath("./model_checkpoints")
model_checkpoint_path = os.path.join(model_checkpoint_dir, "DRNN")
saver = tf.train.Saver(keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
batches = list(batch_generator(data=encoded_data,
codec=test_codec,
batch_size=batch_size,
seq_length=seq_length,
reset_every=reset_every))
with tf.Session() as sess, tf.summary.FileWriter(logdir=logdir) as writer:
sess.run(tf.global_variables_initializer())
feed_dict = dict()
states = None
sess.run(tf.assign(lr, learning_rate))
zero_states = sess.run(rnn_cell.zero_state(batch_size=1, dtype=tf.float32))
for epoch in range(n_epoch):
assert lr.eval(sess) > 0, "learning_rate must be positive."
for i, (x, y) in enumerate(batches):
feed_dict = {seq_input: x, target_seq_output: y}
if (i % reset_every) != 0 and states is not None:
for j in range(len(init_state)):
feed_dict[init_state[j]] = states[j]
_, summary, states, step = sess.run(fetches=[train_op, summary_op, final_states, global_step],
feed_dict=feed_dict)
writer.add_summary(summary=summary, global_step=step)
if ((step + 1) % save_every) == 0:
saver.save(sess=sess, save_path=model_checkpoint_path, global_step=step)
if (step % visualize_every) == 0:
feed_dict = {seq_input: x[:1, : , :]}
for key, value in zip(init_state, zero_states):
feed_dict[key] = value
sample_output = sess.run(seq_output, feed_dict=feed_dict)
print(test_codec.decode(sample_output[0], strip=False))
sess.run(tf.assign(lr, lr.eval(sess) * learning_rate_decay))
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Test online inference
|
def online_inference(cell, prime, sess, codec,
                     input_tensor,
                     init_state_tensor_tuple,
                     output_tensor,
                     final_state_tensor_tuple,
                     length):
    """Generate text one character at a time from a primed RNN.

    First feeds `prime` through the network (with a leading START vector),
    then repeatedly feeds back the last decoded character together with the
    recurrent state until `length` characters (counting the prime) have
    been produced. Returns the concatenated generated string.
    """
    def _step(encoded_chars, state_values):
        # One forward pass: batch dimension added up front, recurrent
        # state threaded in explicitly through the placeholders.
        feeds = {input_tensor: encoded_chars[np.newaxis, :, :]}
        feeds.update(zip(init_state_tensor_tuple, state_values))
        return sess.run([output_tensor, final_state_tensor_tuple], feed_dict=feeds)

    pieces = [prime]
    state = sess.run(cell.zero_state(batch_size=1, dtype=tf.float32))
    # Prime the network: START vector plus the prime string, no STOP.
    logits, state = _step(codec.encode(prime, start=True, stop=False), state)
    pieces.append(codec.decode(logits[0, -1:, :], strip=False))
    for _ in range(length - len(prime)):
        # Feed back only the most recently generated character.
        logits, state = _step(codec.encode(pieces[-1], start=False, stop=False), state)
        pieces.append(codec.decode(logits[0], strip=False))
    return "".join(pieces)
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = saver.last_checkpoints
print(ckpt)
print(online_inference(rnn_cell, "We propose",
sess, test_codec, seq_input, init_state, seq_output, final_states, 200))
|
RNN101/Text Generator.ipynb
|
BorisPolonsky/LearningTensorFlow
|
mit
|
Let's evaluate how much the membrane potential depends on Input resistance and
membrane time constant and the sag ratio. We will create the following multivariate function:
$f(k;x) = k_0 + k_1x_1 + k_2x_2 + k_3x_3$
where $k$ is a vector of parameters (constants) and $x$ is a vector of independent variables (i.e. $x_1$ is the input resistance, $x_2$ is the membrane time constant and $x_3$ the sag ratio)
|
x = df[['InputR', 'Sag','Tau_mb']]
y = df[['Vrest']]
# import standard regression models (sm)
import statsmodels.api as sm
K = sm.add_constant(x) # k0, k1, k2 and k3...
# get estimation
est = sm.OLS(y, K).fit() # ordinary least square regression
est.summary() # need more data for kurtosis :)
|
Optimization/Multivariate regression.ipynb
|
JoseGuzman/myIPythonNotebooks
|
gpl-2.0
|
Generate Mock Observed Luminosity
|
import numpy as np
# original values
# sigma_L = 1
# a1 = 12
# a2 = 1.4
# a3 = fov['mass_h'].min()
# a4 = 10
# sigma_obs = 2
S = 0.155
a1 = 10.709
a2 = 0.359
a3 = 2.35e14
a4 = 1.10
sigma_obs = 0.01
mean_L = a1 + a2*np.log(fov['mass_h'] / a3) + a4 * np.log(1 + fov['z'])
fov['lum'] = np.random.lognormal(mean_L, S, len(fov))
fov['lum_obs'] = np.random.lognormal(np.log(fov['lum']), sigma_obs, len(fov))
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(fov[:1000]['mass_h'], fov[:1000]['lum_obs'], alpha=0.2)
plt.gca().set_xscale("log", nonposx='clip')
plt.gca().set_yscale("log", nonposx='clip')
plt.title('Mass Luminosity Distribution')
plt.xlabel('Mass[$M_{\odot}$]')
plt.ylabel('Luminosity[$L_{\odot}h^{-2}$]')
|
MassLuminosityProject/GenerateMockDataAndImportanceSample_2017_01_22.ipynb
|
davidthomas5412/PanglossNotebooks
|
mit
|
Generate Q
|
fov['q_lum'] = np.random.lognormal(np.log(fov['lum_obs']), sigma_obs, len(fov))
fov['q_mass_h_mean'] = a3 * (fov['q_lum'] / (np.exp(a1) * (1+fov['z']) ** a4 )) ** (1 / a2)
fov['q_mass_h'] = np.random.lognormal(np.log(fov['q_mass_h_mean']), 5*S, len(fov))
plt.scatter(fov[:500]['mass_h'], fov[:500]['lum_obs'], alpha=0.4, color='blue', label='P')
plt.scatter(fov[:500]['q_mass_h'], fov[:500]['q_lum'], alpha=0.4, color='gold', label='Q')
plt.gca().set_xscale("log", nonposx='clip')
plt.gca().set_yscale("log", nonposx='clip')
plt.title('Mass Luminosity Distribution')
plt.xlabel('Mass[$M_{\odot}$]')
plt.ylabel('Luminosity[$L_{\odot}h^{-2}$]')
plt.legend()
|
MassLuminosityProject/GenerateMockDataAndImportanceSample_2017_01_22.ipynb
|
davidthomas5412/PanglossNotebooks
|
mit
|
Multiple Conditions
So now we can deal with a scenario where there are two possible decisions to be made. What about more than two decisions?
Say hello to "elif"!
|
collection = [1,2,3,4,5]
if collection[0] == 0:
print ("Zero!")
elif collection[0] == 100:
print ("Hundred!")
else:
print("Not Zero or Hundred")
x = ["George", "Barack", "Donald"]
test = "Richard"
if test in x:
print(test, "has been found.")
else:
print(test, "was not found. Let me add him to the list." )
x.append(test)
print(x)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Exercise
Write some code to check if you are old enough to buy a bottle of wine. You need to be 18 or over, but if your State is Texas, you need to be 25 or over.
|
# Your code here
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Adding your own input
How about adding your own input and checking against that? This doesn't come in too handy in a data science environment since you typically have a well defined dataset already. Nevertheless, this is important to know.
|
age = int(input("Please enter your age:"))
if age < 18:
print("You cannot vote or buy alcohol.")
elif age < 21:
print("You can vote, but can't buy alcohol.")
else:
print("You can vote to buy alcohol. ;) ")
mr_prez = ["Bill", "George", "Barack", "Donald"]
name = input("Enter your name:") # Don't need to specify str
type(name)
if name in mr_prez:
print("You share your name with a President.")
else:
print("You too can be president some day.")
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Loops
Time to supercharge our Python usage. Loops are in some ways, the basis for automation. Check if a condition is true, then execute a step, and keep executing it till the condition is no longer true.
|
numbers = [1,2,3,4,5,6,7,8,9,10]
for number in numbers:
if number % 2 == 0:
print("Divisible by 2.")
else:
print("Not divisible by 2.")
numbers = {1,2,3,4,5,6,7,8,9,10}
for num in numbers:
if num%3 == 0:
print("Divisible by 3.")
else:
print("Not divisible by 3.")
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
When using dictionaries, you can iterate through keys, values or both.
|
groceries = {"Milk":2.5, "Tea": 4, "Biscuits": 3.5, "Sugar":1}
print(groceries.keys())
print(groceries.values())
# item here refers to the the key in set name groceries
for a in groceries.keys():
print(a)
for price in groceries.values():
print(price)
for (key, val) in groceries.items():
print(key,val)
groceries.items()
groceries.keys()
groceries.values()
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Exercise
Print the names of the people in the dictionary 'data'
Print the name of the people who have 'incubees'
Print the name, and net worth of people with a net worth higher than 500,000
Print the names of people without a board seat
Enter your responses in the fields below. This is solved for you if you scroll down, but you can't cheat yourself!
|
data = {
"Richard": {
"Title": "CEO",
"Employees": ["Dinesh", "Gilfoyle", "Jared"],
"Awards": ["Techcrunch Disrupt"],
"Previous Firm": "Hooli",
"Board Seat":1,
"Net Worth": 100000
},
"Jared": {
"Real_Name": "Donald",
"Title": "CFO",
"Previous Firm": "Hooli",
"Board Seat":1,
"Net Worth": 500
},
"Erlich": {
"Title": "Visionary",
"Previous Firm": "Aviato",
"Current Firm": "Bachmannity",
"Incubees": ["Richard", "Dinesh", "Gilfoyle", "Nelson", "Jian Yang"],
"Board Seat": 1,
"Net Worth": 5000000
},
"Nelson": {
"Title": "Co-Founder",
"Current Firm": "Bachmannity",
"Previous Firm": "Hooli",
"Board Seat": 0,
"Net Worth": 10000000
},
}
# Name of people in the dictionary
data.keys()
# Alternate way to get the name of the people in the dictionary
for name in data.keys():
print(name)
# Name of people who have incubees
for name in data.items():
if "Incubees" in name[1]:
print (name[0])
# Name and networth of people with a networth greater 500000
for name in data.items():
if "Net Worth" in name[1] and name[1]["Net Worth"]>500000:
print (name[0], name[1]["Net Worth"])
# Name of people who don't have a board seat
for name in data.items():
if "Board Seat" in name[1] and name[1]["Board Seat"] == 0:
print (name[0])
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Range of Values
We often need to define a range of values for our program to iterate over.
|
# Generate a list on the fly
nums = list(range(10))
print(nums)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
In a defined range, the lower number is inclusive, and upper number is exclusive. So 0 to 10 would include 0 but exclude 10. So if we need a specific range, we can use this knowledge to our advantage.
|
nums = list(range(1,11))
print(nums)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
We can also specify a range without explicitly defining an upper or lower range, in which case Python does its magic: the range will be 0 to one less than the number specified.
|
nums = list(range(10))
print(nums)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
We can also use the range function to perform mathematical tricks.
|
for i in range(1,6):
print("The square of",i,"is:",i**2)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Or to check for certain other conditions or properties, or to define how many times an activity will be performed.
|
for i in range(1,10):
print("*"*i)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Exercise
Print all numbers from 1 to 20
|
# Your Code Here
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Exercise
Print the square of the first 10 natural numbers.
|
# Your Code Here
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Become a Control Freak
And now, it's time to become a master of control! A data scientist needs absolute control over loops, stopping when defined conditions are met, or carrying on till a solution if found.
<img src="images/break.jpg">
Break
|
for i in range(1,100):
print("The square of",i,"is:",i**2)
if i >= 5:
break
print("Broken")
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Continue
Break's cousin is called Continue.
If a certain condition is met, carry on.
|
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
for letter in letters:
print("Currently testing letter", letter)
if letter == "e":
print("I plead the 5th!")
continue
print( letter)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
List Comprehension
Remember lists? Now here's a way to power through a large list in one line!
As a Data Scientist, you will need to write a lot of code very efficiently, especially in the data exploration stage. The more experiments you can run to understand your data, the better it is. This is also a very useful tool in transforming one list (or dictionary) into another list.
Let's begin by some simple examples
First, we will write a program to generate the squares of the first 10 natural numbers, using a standard for loop.
Next, we will contrast that with the List Comprehension approach.
|
# Here is a standard for loop
numList = []
for num in range(1,11):
numList.append(num**2)
print (numList)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
So far, so good!
|
# Now for List Comprehension
sqList = [num**2 for num in range(1,11)]
print(sqList)
[num**2 for num in range(1,11)]
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
How's that for speed?!
Here's the format for List Comprehensions, in English.
ListName = [Expected_Result_or_Operation for Item in a given range]<br>
print the ListName
|
cubeList = [num**3 for num in range(6)]
print(cubeList)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
List comprehensions are very useful when dealing with an existing list. Let's see some examples.
|
nums = [1,2,3,4,5,6,7,8,9,10]
# For every n in the list named nums, I want an n
my_list1 = [n for n in nums]
print(my_list1)
# For every n in the list named nums, I want n to be squared
my_list2 = [n**2 for n in nums]
print(my_list2)
# For every n in the list named nums, I want n, only if it is even
my_list3 = [n for n in nums if n%2 == 0]
print(my_list3)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
How about calculating the areas of circles, given a list of radii? That too in just one line.
|
radius = [1.0, 2.0, 3.0, 4.0, 5.0]
import math
# Area of Circle = Pi * (radius**2)
area = [round((r**2)*math.pi,2) for r in radius]
print(area)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Dictionary Comprehension
Let's get back to our dictionary named Data. Dictionary Comprehension can be a very efficient way to extract information out of them. Especially when you have thousands or millions of records.
|
data = {
"Richard": {
"Title": "CEO",
"Employees": ["Dinesh", "Gilfoyle", "Jared"],
"Awards": ["Techcrunch Disrupt"],
"Previous Firm": "Hooli",
"Board Seat":1,
"Net Worth": 100000
},
"Jared": {
"Real_Name": "Donald",
"Title": "CFO",
"Previous Firm": "Hooli",
"Board Seat":1,
"Net Worth": 500
},
"Erlich": {
"Title": "Visionary",
"Previous Firm": "Aviato",
"Current Firm": "Bachmannity",
"Incubees": ["Richard", "Dinesh", "Gilfoyle", "Nelson", "Jian Yang"],
"Board Seat": 1,
"Net Worth": 5000000
},
"Nelson": {
"Title": "Co-Founder",
"Current Firm": "Bachmannity",
"Previous Firm": "Hooli",
"Board Seat": 0,
"Net Worth": 10000000
},
}
# Print all details for people who have incubees
[(k,v) for k, v in data.items() if "Incubees" in v ]
for name in data.items():
if "Net Worth" in name[1] and name[1]["Net Worth"]>500000:
print (name[0], name[1]["Net Worth"])
high_nw = [(name[0], name[1]["Net Worth"]) for name in data.items() if "Net Worth" in name[1] and name[1]["Net Worth"]>500000]
print(high_nw)
type(high_nw)
type(high_nw[0])
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
We can also use dictionary comprehension to create new dictionaries
|
name = ['George HW', 'Bill', 'George', 'Barack', 'Donald', 'Bugs']
surname = ['Bush', 'Clinton', 'Bush Jr', 'Obama', 'Trump', 'Bunny']
full_names = {n:s for n,s in zip(name,surname)}
full_names
# What if we want to exclude certain values?
full_names = {n:s for n,s in zip(name, surname) if n!='Bugs'}
print(full_names)
|
07.Loop_it_up.ipynb
|
prasants/pyds
|
mit
|
Problem Statement
There are influenza viruses that are collected from the "environment", or have an "unknown" host. How do we infer which hosts it came from? Well, that sounds like a Classification problem.
|
# Load the sequences into memory
sequences = [s for s in SeqIO.parse('data/20160127_HA_prediction.fasta', 'fasta') if len(s.seq) == 566] # we are cheating and not bothering with an alignment.
len(sequences)
# Load the sequence IDs into memory
seqids = [s.id for s in SeqIO.parse('data/20160127_HA_prediction.fasta', 'fasta') if len(s.seq) == 566]
len(seqids)
# Cast the sequences as a MultipleSeqAlignment object, and then turn that into a pandas DataFrame.
# Note: this cell takes a while.
seq_aln = MultipleSeqAlignment(sequences)
seq_df = pd.DataFrame(np.array(seq_aln))
seq_df.head()
# Transform the df into isoelectric point features.
seq_feats = seq_df.replace(isoelectric_points.keys(), isoelectric_points.values())
seq_feats.index = seqids
seq_feats.head()
# Quick check to make sure that we have no strings:
for c in seq_feats.columns:
letters = set(seq_feats[c])
for item in letters:
assert not isinstance(item, str)
# Let us now load our labels.
labels = pd.read_csv('data/20160127_HA_prediction.csv', parse_dates=['Collection Date'])
labels['Host Species'] = labels['Host Species'].str.replace('IRD:', '').str.replace('/Avian', '')
labels['Sequence Accession'] = labels['Sequence Accession'].str.replace('*', '')
labels.set_index('Sequence Accession', inplace=True)
labels.head()
# Let's join in the labels so that we have everything in one big massive table.
data_matrix = seq_feats.join(labels['Host Species'], how='inner')
data_matrix.head()
# Quickly inspect the different labels under "host species"
# set(data_matrix['Host Species'])
# We will want to predict the labels for: "Avian", "Bird", "Environment", "Unknown", "null"
unknown_labels = ['Avian', 'Bird', 'Environment', 'Unknown', 'null']
known_labels = set(data_matrix['Host Species']) - set(unknown_labels)
# Let's further split the data into the "unknowns" and the "knowns"
unknowns = data_matrix[data_matrix['Host Species'].isin(unknown_labels)]
knowns = data_matrix[data_matrix['Host Species'].isin(known_labels)]
# Finally, we want to convert the known host species into a matrix of 1s and 0s, so that we can use them as inputs
# to the training algorithm.
lb = LabelBinarizer()
lb.fit([s for s in known_labels])
lb.transform(knowns['Host Species']) # note: this has not done anything to the original data.
|
03 Classification.ipynb
|
ericmjl/scikit-learn-tutorial
|
mit
|
Train/Test Split
We're almost ready for training a machine learning model to classify the unknown hosts based on their sequence.
Here's the proper procedure.
Split the labelled data into a training and testing set. (~70 train/30 test to 80 train/20 test)
Train and evaluate a model on the training set.
Make predictions on the testing set, evaluate the model on testing set accuracy.
This procedure is known as cross-validation, and is a powerful, yet cheap & easy method for evaluating how good a particular supervised learning model works.
|
# Split the data into a training and testing set.
X_cols = [i for i in range(0,566)]
X = knowns[X_cols]
Y = lb.transform(knowns['Host Species'])
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# Train a Random Forest Classifier.
# Note: This cell takes a while; any questions?
# Initialize the classifier object.
clf = RandomForestClassifier()
# Train (i.e. "fit") the classifier to the training Xs and Ys
clf.fit(X_train, Y_train)
# Make predictions on the test X
preds = clf.predict(X_test)
preds
lb.inverse_transform(preds)
|
03 Classification.ipynb
|
ericmjl/scikit-learn-tutorial
|
mit
|
How do we evaluate how good the classification task performed?
For binary classification, the Receiver-Operator Characteristic curve is a great way to evaluate a classification task.
For multi-label classification, which is the case we have here, accuracy score is a great starting place.
|
# Let's first take a look at the accuracy score: the fraction that were classified correctly.
accuracy_score(lb.inverse_transform(Y_test), lb.inverse_transform(preds))
|
03 Classification.ipynb
|
ericmjl/scikit-learn-tutorial
|
mit
|
What about those sequences for which the hosts were unknown?
We can run the predict(unknown_Xs) to predict what their hosts were likely to be, given their sequence.
|
unknown_preds = clf.predict(unknowns[X_cols]) # make predictions; note: these are still dummy-encoded.
unknown_preds = lb.inverse_transform(unknown_preds) # convert dummy-encodings back to string labels.
unknown_preds
|
03 Classification.ipynb
|
ericmjl/scikit-learn-tutorial
|
mit
|
What this gives us is the class label with the highest probability of being the correct one.
While we will not do this here, at this point, it would be a good idea to double-check your work with a sanity check. Are the sequences that are predicted to be Human truly of a close sequence similarity to actual Human sequences? You may want to do a Multiple Sequence Alignment, or you might want to simply compute the Levenshtein or Hamming distance between the two sequences, as a sanity check.
How do we interpret what the classifier learned?
Depending on the classifier used, you can peer inside the model to get a feel for what the classifier learned about the features that best predict the class label.
The RandomForestClassifier provides a feature_importances_ attribute that we can access and plot.
|
plt.plot(clf.feature_importances_)
|
03 Classification.ipynb
|
ericmjl/scikit-learn-tutorial
|
mit
|
シグモイド関数
ロジット関数の逆関数。
$$ \phi(z) = \frac{1}{1+e^{-z}} $$
ステップ関数とは異なり緩やかに上昇していくため、例えば結果が降水確率が0.8なら80%であるということができる。
|
import matplotlib.pyplot as plt
import numpy as np


def sigmoid(z):
    """Logistic sigmoid: phi(z) = 1 / (1 + exp(-z)). Works element-wise on arrays."""
    return 1.0 / (1.0 + np.exp(-z))


# Plot the sigmoid over [-7, 7) with step 0.1.
z = np.arange(-7, 7, 0.1)
phi_z = sigmoid(z)
plt.plot(z, phi_z)
plt.axvline(0.0, color='k')  # mark the decision threshold z = 0
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
# Raw string: '\p' is an invalid escape sequence (DeprecationWarning, and a
# SyntaxWarning on modern Python); the rendered LaTeX label is unchanged.
plt.ylabel(r'$\phi (z)$')
# y axis ticks and gridline
plt.yticks([0.0, 0.5, 1.0])
ax = plt.gca()
ax.yaxis.grid(True)
plt.tight_layout()
# plt.savefig('./figures/sigmoid.png', dpi=300)
plt.show()
|
python-machine-learning/ch03/logistic-regression.ipynb
|
hide-tono/python-training
|
apache-2.0
|
ロジスティック回帰の重みの学習
尤度L:結果から見たところの条件のもっともらしさ
$$ L(w) = P(y|x;w) = \prod_{i=1}^nP(y^{(i)}|x^{(i)};w) = \prod_{i=1}^n(\phi(z^{(i)}))^{(y^{(i)})}(1-\phi(z^{(i)}))^{1-y^{(i)}} $$
\( P(y|x;w) \)の;wはwをパラメータに持つという意味。
対数尤度l:
* アンダーフローの可能性低下
* 積が和に変換されるため加算を用いて微分できるようになる
$$ l(w) = \log L(w) = \sum_{i=1}^n\bigl[(y^{(i)}\log(\phi(z^{(i)})))+({1-y^{(i)})\log(1-\phi(z^{(i)}))}\bigr] $$
上記関数は勾配上昇するので、コスト関数Jとしてはマイナスにする
$$ J(w) = \sum_{i=1}^n\bigl[(-y^{(i)}\log(\phi(z^{(i)})))-({1-y^{(i)})\log(1-\phi(z^{(i)}))}\bigr] $$
1つのサンプルで計算されるコストは、上式から\( \sum \)と\( (i) \)を取って、
$$ J(\phi(z),y;w) = -y\log(\phi(z))-(1-y)\log(1-\phi(z)) $$
上式から、y=0であれば1つ目の項が0になりy=1であれば2つ目の項が0になる。
$$ J(\phi(z),y;w) = \begin{cases}
-\log(\phi(z)) & \text{if } y=1 \\
-\log(1-\phi(z)) & \text{if } y=0
\end{cases}$$
|
from sklearn import datasets
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score

# Load the Iris dataset
iris = datasets.load_iris()
# Extract the features in columns 3 and 4 (petal length and petal width)
X = iris.data[:, [2, 3]]
# Get the class labels
y = iris.target
# print('Class labels:', np.unique(y))
# Split off 30% of the data as a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Feature scaling
sc = StandardScaler()
# Compute the mean and standard deviation of the training data
sc.fit(X_train)
# Standardize both sets using that mean and standard deviation
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import warnings


def versiontuple(v):
    """Convert a dotted version string into a tuple of ints for comparison."""
    return tuple(map(int, (v.split("."))))


def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the 2-D decision surface of ``classifier`` plus the samples.

    ``test_idx``, when given, is a range of row indices highlighted as the
    test set; ``resolution`` is the mesh step used for the contour grid.
    """
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # scatter the samples, one marker/color per class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.6,
                    c=cmap(idx),
                    edgecolor='black',
                    marker=markers[idx],
                    label=cl)

    # highlight test samples
    if test_idx:
        # plot all samples (fancy indexing with a range needs NumPy >= 1.9)
        if not versiontuple(np.__version__) >= versiontuple('1.9.0'):
            X_test, y_test = X[list(test_idx), :], y[list(test_idx)]
            warnings.warn('Please update to NumPy 1.9.0 or newer')
        else:
            X_test, y_test = X[test_idx, :], y[test_idx]

        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    c='',
                    alpha=1.0,
                    edgecolor='black',
                    linewidths=1,
                    marker='o',
                    s=55, label='test set')


from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
#print(X_combined_std)

# Implementation using scikit-learn
from sklearn.linear_model import LogisticRegression

# Create a logistic regression instance (large C = weak regularization)
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
# Plot the decision boundary; rows 105-149 are the test samples
plot_decision_regions(X_combined_std, y_combined, classifier=lr, test_idx=range(105, 150))
# Set the axis label
plt.xlabel('petal width(標準化済み)')
# Set the legend
plt.legend(loc='upper left')
plt.show()
|
python-machine-learning/ch03/logistic-regression.ipynb
|
hide-tono/python-training
|
apache-2.0
|
過学習が発生…高バリアンス
学習不足…高バイアス
共線性:特徴量の間の相関の高さ
正則化:共線性を根拠に過学習を防ぐ。極端なパラメータの重みにペナルティを科す。
L2正則化
$$ \frac{\lambda}{2}||w||^2 = \frac{\lambda}{2}\sum_{j=1}^m w^2_j $$
\( \lambda \)は正則化パラメータという。
ロジスティック回帰のコスト関数に重みをつける
$$ J(w) = \sum_{i=1}^n\bigl[(-y^{(i)}\log(\phi(z^{(i)})))-({1-y^{(i)})\log(1-\phi(z^{(i)}))}\bigr] + \frac{\lambda}{2}||w||^2 $$
正則化パラメータ\( \lambda \)の逆数をCとする
$$ C = \frac{1}{\lambda} $$
$$ J(w) = C\sum_{i=1}^n\bigl[(-y^{(i)}\log(\phi(z^{(i)})))-({1-y^{(i)})\log(1-\phi(z^{(i)}))}\bigr] + \frac{1}{2}||w||^2 $$
|
# Trace the L2-regularization path: fit logistic regression for C = 10^-5 .. 10^4
# and record how the weights of class index 1 shrink as C decreases
# (i.e. as regularization strength lambda = 1/C grows).
weights, params = [], []
# Do not use numpy.arange(-5, 5) here: 10**c fails for negative numpy integers.
# See https://github.com/numpy/numpy/issues/8917
for c in range(-5, 5):
    lr = LogisticRegression(C=10**c, random_state=0)
    lr.fit(X_train_std, y_train)
    weights.append(lr.coef_[1])  # coefficient row for the second class
    params.append(10**c)
weights = np.array(weights)
plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.ylabel('weight coefficient')
plt.xlabel('C')
plt.legend(loc='upper left')
plt.xscale('log')  # C spans nine orders of magnitude
plt.show()
|
python-machine-learning/ch03/logistic-regression.ipynb
|
hide-tono/python-training
|
apache-2.0
|
We use the sklearn.datasets.load_digits method to load the MNIST data.
|
# Load the MNIST-style digits bundled with scikit-learn and inspect its shape.
from sklearn.datasets import load_digits
digits_data = load_digits()

from IPython.display import display
display(dir(digits_data))          # available attributes of the Bunch
display(digits_data.data.shape)    # (1797, 64): 8x8 images, flat-packed
display(digits_data.target.shape)  # (1797,): one label per image
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
This dataset contains data for 1797 images. Each image is an 8*8 matrix stored as a flat-packed array.
Next we combine the data and target into a single dataframe.
|
# Combine labels (as the index) and pixel data into a single DataFrame.
mnist_df = pd.DataFrame(index=digits_data.target, data=digits_data.data)
mnist_df.head()
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
Next we find out How many images we have per label.
|
# Count how many images exist per digit label and plot the distribution.
image_counts = mnist_df.groupby(mnist_df.index)[0].count()
ax = image_counts.plot(kind='bar', title='Image count per label in data')
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
Next we scale mnist_df so that every feature has zero mean and unit variance.
Pairwise Distances, P and $\sigma_i$s
|
# Standardize every pixel column to zero mean and unit variance.
from sklearn.preprocessing import scale

mnist_df_scaled = pd.DataFrame(index=mnist_df.index,
                               columns=mnist_df.columns,
                               data=scale(mnist_df))
mnist_df_scaled.head()
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
From the scaled data, we must calculate the $P_{ij}$s. To do this we first calculate the pairwise distances between each pair of rows in the input data. For efficiency's sake, we use the sklearn.metrics.pairwise_distances library function.
Next, we start with a given perplexity target and then calculate the individual sigmas (one per data point) based on that.
|
# Machine epsilon for float64; a safe lower bound when taking logs of
# probabilities that may underflow to (near) zero.
MACHINE_PRECISION = np.finfo(float).eps

from sklearn.metrics import pairwise_distances
def optimal_sigma(dist_i, i, target_entropy, n_iter=100, entropy_diff=1E-7):
    """
    Binary-search for the Gaussian-kernel bandwidth ``sigma`` of point ``i``.

    For the pairwise distances between the i-th feature vector and every other
    feature vector in the original dataset, search for ``sigma`` such that the
    entropy of the conditional probability distribution ${P_i}$ equals
    ``target_entropy`` at ``entropy_diff`` precision, and return it.

    Parameters
    ----------
    dist_i : pandas.Series
        *Unsquared* distances from point i to every point; ``dist_i.loc[i]``
        (the distance of the point to itself) must be 0.
    i : hashable
        Index label of the point itself; p(i|i) is forced to 0.
    target_entropy : float
        Desired Shannon entropy, typically ``log(perplexity)``.
    n_iter : int
        Maximum number of search steps.
    entropy_diff : float
        Convergence tolerance on the entropy.

    Raises
    ------
    ValueError
        If no acceptable ``sigma`` is found within ``n_iter`` iterations.
    """
    assert dist_i.loc[i] == 0
    # Machine epsilon, used as a floor so we never take log of values below
    # machine precision (and end up with -inf). Computed locally so the
    # function is self-contained; same value as the module-level
    # MACHINE_PRECISION constant.
    eps = np.finfo(float).eps
    # initial value of sigma
    sigma = 1.0
    # initial left and right boundaries for the binary search
    sigma_min, sigma_max = -np.inf, np.inf
    for _ in range(1, n_iter + 1):
        # Evaluate the Gaussian kernel with the current sigma
        r = dist_i.pow(2).div(2 * (sigma ** 2))
        s = np.exp(-r)
        # Recall that p(j|i) = 0 if i = j
        s.loc[i] = 0
        p = s / s.sum()
        # Entropy of P_i; the np.maximum floor avoids log of sub-epsilon values.
        entropy = - p.dropna().dot(np.log(np.maximum(p.dropna(), eps)))
        if np.fabs(target_entropy - entropy) <= entropy_diff:
            break
        if entropy > target_entropy:
            # Kernel too wide: shrink. New boundary is [sigma_min, sigma].
            sigma_max = sigma
            # if sigma_min is still open, keep halving
            if not np.isfinite(sigma_min):
                sigma *= 0.5
        else:
            # Kernel too narrow: grow. New boundary is [sigma, sigma_max].
            sigma_min = sigma
            # if sigma_max is still open, keep doubling
            if not np.isfinite(sigma_max):
                sigma *= 2.0
        # Once both boundaries are closed, bisect the interval.
        if np.all(np.isfinite([sigma_min, sigma_max])):
            sigma = (sigma_min + sigma_max) / 2
    else:
        # for-else: loop exhausted without convergence
        raise ValueError("Unable to find a sigma after [{}] iterations that matches target entropy: [{}]".format(
            n_iter, target_entropy))
    return sigma
def calc_optimal_sigmas(df, target_purplexity):
    """
    From the DataFrame of feature vectors, ``df``, calculate pairwise distances and then find the optimal values
    for the Gaussian kernels for each conditional probability distribution {P_i}.

    Returns a tuple of (NxN DataFrame of unsquared L2 distances,
    Series of one optimal sigma per row).
    """
    # Entropy and perplexity are related by H = log(perplexity).
    target_entropy = np.log(target_purplexity)
    # NxN matrix of (unsquared) Euclidean distances between rows of df.
    paired_dists = pd.DataFrame(data=pairwise_distances(df.values, metric='l2'))
    # One bisection search per row; row.name is the row's own index label.
    optimal_sigmas = paired_dists.apply(lambda row: optimal_sigma(row, row.name, target_entropy), axis=1)
    # p_joint = (p_cond + p_cond.T) / (2 * df.shape[0])
    # return p_joint
    return paired_dists, optimal_sigmas
def calc_p(df, target_purplexity=30):
    """
    Return the symmetrized joint distribution P over pairs of input vectors.

    For each row i the conditional p(j|i) uses a Gaussian kernel
    exp(-d_ij^2 / (2 * sigma_i^2)) whose bandwidth sigma_i was tuned by
    ``calc_optimal_sigmas`` so that the entropy of P_i is
    log(target_purplexity). The conditionals are then symmetrized:
    P_ij = (p(j|i) + p(i|j)) / (2N).

    Parameters
    ----------
    df : pandas.DataFrame
        One feature vector per row.
    target_purplexity : float
        Desired perplexity of each conditional distribution.

    Returns
    -------
    pandas.DataFrame
        N x N symmetric joint probability matrix with zero diagonal.
    """
    paired_dists, optimal_sigmas = calc_optimal_sigmas(df, target_purplexity)
    # BUGFIX: the original computed exp(-d) / (2*sigma^2), i.e. it forgot to
    # square the (unsquared) distances and applied the bandwidth *outside*
    # the exponential with the wrong axis. Mirror the formula used inside
    # optimal_sigma: exp(-d^2 / (2*sigma_i^2)), dividing row i by its own
    # 2*sigma_i^2 (axis=0 aligns optimal_sigmas with the row index).
    p_cond = np.exp(-paired_dists.pow(2).div(2 * optimal_sigmas.pow(2), axis=0))
    # p(i|i) = 0 by definition
    p_cond.values[np.diag_indices_from(p_cond)] = 0
    # Normalize each row so that sum_j p(j|i) = 1.
    p_cond = p_cond.div(p_cond.sum(axis=1), axis=0)
    n_points = p_cond.shape[0]
    # Symmetrize to obtain the joint distribution.
    p_joint = (p_cond + p_cond.T) / (2 * n_points)
    return p_joint


p_joint = calc_p(mnist_df_scaled)
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
Now we're going to set up TensorFlow for the KLD minimization problem.
|
import tensorflow as tf
display(tf.__version__)
def pairwise_dist(tf_y):
    """Calculate pairwise distances between each pair of vectors in tf_y.

    Uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b so the whole
    (n, n) matrix comes from one matmul plus broadcasting. The values are
    *squared* Euclidean distances, as float32.
    """
    tf_norms = tf.square(tf.norm(tf_y, axis=1))
    tf_r1 = tf.expand_dims(tf_norms, axis=1)  # (n, 1) column of ||y_i||^2
    tf_r2 = tf.expand_dims(tf_norms, axis=0)  # (1, n) row of ||y_j||^2
    tf_y_dot_yT = tf.matmul(tf_y, tf_y, transpose_b=True)
    tf_dot = tf.cast(tf_y_dot_yT, dtype=tf.float32)
    tf_r = tf_r1 + tf_r2  # broadcasts to (n, n)
    tf_d1 = tf_r - 2 * tf_dot
    return tf_d1
def calc_q(tf_y):
    """
    Calculate the joint distribution of two embeddings y_i and y_j in tensorflow.
    Call from inside an active tensorflow session only.

    Uses the Student-t kernel 1 / (1 + ||y_i - y_j||^2) with q_ii = 0,
    normalized over all pairs.
    """
    tf_pdist = pairwise_dist(tf_y)
    tf_d = 1 / (1 + tf_pdist)
    # q_ii is defined to be 0: zero out the diagonal before normalizing.
    tf_d = tf.matrix_set_diag(tf_d, tf.zeros(tf.shape(tf_d)[0]))
    tf_q = tf.div(tf_d, tf.reduce_sum(tf_d))
    return tf_q
# Set up the KLD-minimization problem in (TF1-style) TensorFlow and run it.
embedding_size = 2
n_points = p_joint.shape[0]
losses = []
n_iter = 1000
loss_epsilon = 1E-8
learning_rate = 0.2

current_graph = tf.Graph()
with current_graph.as_default():
    # Placeholder for the joint distribution P of feature vectors in original space
    # This is a constant w.r.t the KLD minimization
    tf_p_joint = tf.placeholder(dtype=tf.float32, name='p_joint', shape=[n_points, n_points])

    # Feature vectors in the embedding space - initialized by sampling from random distribution
    tf_y = tf.Variable(name='y', validate_shape=False,
                       dtype=tf.float32,
                       initial_value=tf.random_normal([n_points, embedding_size]))

    # One step for iterative KLD minimization
    # calculate joint distribution Q of embeddings
    tf_q_joint = calc_q(tf_y)

    # Both P and Q have zeros in the diagonals. Since we want to calculate log{P/Q},
    # We temporarily replace the 0s with 1s, so the log of the diagonals are zeros
    # and they don't contribute to the KLD value.
    p_diag_1 = tf.matrix_set_diag(tf_p_joint, tf.ones(n_points))
    q_diag_1 = tf.matrix_set_diag(tf_q_joint, tf.ones(n_points))
    tf_log_p_by_q = tf.log(tf.div(p_diag_1, q_diag_1))
    kld = tf.reduce_sum(tf.multiply(tf_p_joint, tf_log_p_by_q))

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam')
    train_op = optimizer.minimize(kld, name='KLD_minimization')

    with tf.Session() as sess:
        # initialize tensorflow variables
        init = tf.global_variables_initializer()
        sess.run(init)
        feed_dict = {tf_p_joint: p_joint.astype(np.float32).values}

        # run the optimization step n_iter times, breaking out if two successive steps
        # produce an absolute change in the value of the loss function <= loss_epsilon
        for i in range(1, n_iter+1):
            _, loss_val = sess.run([train_op, kld], feed_dict=feed_dict)
            losses.append(loss_val)
            if i % 100 == 0:
                print("After iteration: {}, loss: {}".format(i, loss_val))
            if len(losses) >= 2:
                last_loss = losses[-2]
                loss_delta = np.abs(last_loss-loss_val)
                if loss_delta < loss_epsilon:
                    # BUGFIX: the original message mixed %-style and {}-style
                    # placeholders ("%s ... [{}] ... [{}") with a missing "]"
                    # and mismatched arguments; report the actual iteration
                    # count i at which we stopped.
                    print("Exiting after [{}] iterations, loss_delta [{}] <= loss_epsilon [{}]".format(
                        i, loss_delta, loss_epsilon))
                    break

        y_embeddings = sess.run(tf_y, feed_dict=feed_dict)

# Smoothed loss curve, then a scatter of the learned 2-D embeddings by label.
pd.Series(losses).rolling(10).mean().plot()

embeddings_df = pd.DataFrame(index=mnist_df_scaled.index, data=y_embeddings)
plot_source = embeddings_df.reset_index().rename(columns={
    'index': 'label', 0: 'x', 1:'y'})
fg = sns.FacetGrid(data=plot_source, hue='label', size=10)
fg.map(plt.scatter, 'x', 'y').add_legend()
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
Let's compare that against what the sklearn implementation gives us:
|
# Reference run: scikit-learn's t-SNE on the same scaled data, for visual
# comparison with the hand-rolled implementation above.
from sklearn.manifold import TSNE

# Extract the embeddings and convert into a DataFrame
sk_embedded = TSNE(n_components=2).fit_transform(mnist_df_scaled.values)
sk_embedded = pd.DataFrame(index=mnist_df_scaled.index, data=sk_embedded)

# Display
sk_embedded = sk_embedded.reset_index().rename(columns={'index': 'label', 0: 'x', 1:'y'})
fg = sns.FacetGrid(data=sk_embedded, hue='label', size=10)
fg.map(plt.scatter, 'x', 'y').add_legend()
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
Appendix: Vectorized Calculation of $Q_{ij}$ in TensorFlow
|
# Small random matrix (3 vectors of length 5) used to sanity-check the
# vectorized Q computation in the cells below.
y = pd.DataFrame(index=range(3), columns=range(5), data=np.random.uniform(1, 5, size=[3, 5]))
y
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
First we calculate Q using the direct iterative algorithm which requires iterating over rows and columns of y this gives us a reference to test our vectorized implementation for correctness.
|
# Reference (non-vectorized) computation of the symmetric Student-t kernel
# matrix: Q[i, j] = 1 / (1 + ||y_i - y_j||^2), with zeros on the diagonal.
Q_simple = pd.DataFrame(index=y.index, columns=y.index, data=0.0)
n_rows = y.shape[0]
for row_i in range(n_rows):
    # only visit the strict lower triangle; mirror into the upper triangle
    for row_j in range(row_i):
        assert row_i != row_j, (row_i, row_j)
        diff = y.loc[row_i, :].sub(y.loc[row_j, :])
        kernel = 1 / (1 + np.linalg.norm(diff) ** 2)
        Q_simple.loc[row_i, row_j] = kernel
        Q_simple.loc[row_j, row_i] = kernel
Q_simple
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
To calculate Q in a vectorized way, we note that
$D[i, j] = (y[i] - y[j]) (y[i] - y[j])^T = norm(y[i])^2 + norm(y[j])^2 - 2 \times dot(a[i], a[j])$
For the entire 2D array y, we can generalize this to:
D = np.atleast_2d(r) + np.atleast_2d(r).T - 2 * np.dot(y, y.T)
where r is (vector) of norms of each vector in y.
|
# Vectorized computation of the same kernel matrix via
# D[i, j] = ||y_i||^2 + ||y_j||^2 - 2 * y_i . y_j, then 1 / (1 + D).
norms = y.apply(np.linalg.norm, axis=1).values
r1 = np.atleast_2d(norms**2)   # (1, n) row of squared norms
r2 = r1.T                      # (n, 1) column of squared norms
d1 = r1 + r2                   # broadcasts to (n, n)
d2 = d1 - 2 * np.dot(y, y.T)   # squared pairwise distances
d2 += 1
d3 = 1 / d2
d3[np.diag_indices_from(d3)] = 0  # q_ii = 0 by definition
Q_vectorized = pd.DataFrame(d3)
Q_vectorized
# pandas.util.testing was deprecated and removed in pandas 2.0;
# pandas.testing has been the public home of assert_frame_equal since 0.20.
from pandas.testing import assert_frame_equal
# check_less_precise is deprecated; use an explicit relative tolerance instead.
assert_frame_equal(Q_simple, Q_vectorized, check_exact=False, rtol=1e-3)
|
simple_implementations/t-sne.ipynb
|
dipanjank/ml
|
gpl-3.0
|
First we load the iris data from task 1 and split it into training and validation set.
|
# load dataset from task 1 (UCI Iris) directly from the web
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pandas.read_csv(url, names=names)

# split-out dataset: first four columns are features, last is the class label
array = dataset.values
X = array[:,0:4]
y = array[:,4]
|
notebooks/robin_ue1/03_Cross_validation_and_grid_search.ipynb
|
hhain/sdap17
|
mit
|
Next we run a performance test on GridSearchCV. Therefor we search mulitple times to maximize the precision save the best time for later comparison. Each time we use a different number of jobs.
|
# parameter for performance test
max_jobs = 8   # try n_jobs = 1 .. max_jobs
best_in = 3    # repetitions per setting; keep the fastest run

# performance test: best-of-`best_in` wall-clock time of a full grid search
# for each level of parallelism.
measurements = []
i = 1
while i <= max_jobs:
    min_t = float("inf")
    for j in range(best_in):
        kneighbors = KNeighborsClassifier()
        grid_search = GridSearchCV(kneighbors, parameter_grid, cv=cross_val, scoring=scoring, n_jobs=i)
        start = timer()
        grid_search.fit(X, y)
        stop = timer()
        min_t = min(min_t, stop - start)
    measurements.append(min_t)
    i += 1
|
notebooks/robin_ue1/03_Cross_validation_and_grid_search.ipynb
|
hhain/sdap17
|
mit
|
Finally we plot our results:
|
# Runtime vs. number of parallel jobs.
fig = plt.figure()
fig.suptitle('Visualization of the runtime depending on the number of used jobs.')
plt.xticks(range(1, max_jobs + 1))
ax = fig.add_subplot(111)
ax.set_xlabel('used jobs')
ax.set_ylabel('runtime in seconds')
ax.plot(range(1, max_jobs + 1), measurements, 'ro')
plt.show()

# Mean validation score per n_neighbors from the last grid search.
# NOTE(review): grid_scores_ was removed in scikit-learn 0.20; newer versions
# expose cv_results_ instead.
neighbors = [s[0]["n_neighbors"] for s in grid_search.grid_scores_]
val_score = [s[1] for s in grid_search.grid_scores_]

fig = plt.figure()
fig.suptitle('Visualization of the precision depending on the used parameter n_neighbors.')
plt.xticks(range(1,max_n + 1))
ax = fig.add_subplot(111)
ax.set_xlabel('n_neighbors')
ax.set_ylabel('mean test score')
ax.plot(neighbors, val_score, 'ro')
plt.show()

# Report the best score and the n_neighbors value that achieved it.
max_score = max(val_score)
i = val_score.index(max_score)
n = neighbors[i]
print("Maximum precision:", max_score)
print("Is reached with:","n_neighbors =", n)
|
notebooks/robin_ue1/03_Cross_validation_and_grid_search.ipynb
|
hhain/sdap17
|
mit
|
Then we can display the final result:
|
# Time axis (seconds) matching the generated samples; `algorithm`, `data`,
# `duration_secs` and `samples` come from the eDSP setup in earlier cells.
t = algorithm.linspace(0, duration_secs, samples)
plt.plot(t, data)
plt.show()
|
notebooks/Oscillators.ipynb
|
mohabouje/eDSP
|
gpl-3.0
|
Sawtooth Signal
A sawtooth waveform increases linearly from -1 to 1 in the interval $ \left[ 0, 2 \pi w \right] $, and decreases linearly from 1 to
-1 in the interval $ \left[ 2 \pi w, 2 \pi \right] $, where $ w $ is the width of the periodic signal.
If $ w $ is 0.5, the function generates a standard triangular wave. The triangle wave shares many geometric
similarities with the sawtooth wave, except it has two sloping line segments.
A more general form, and with period T, is:
$$ {\displaystyle 2\left({\frac {t}{T}}-\left\lfloor {\frac {1}{2}}+{\frac {t}{T}}\right\rfloor \right)} $$
The class sawtooth_oscillator implements a basic sawtooth signal oscillator. In this example we generate a sawtooth signal with a frequency of 10 kHz sampled at 42.1 kHz:
|
# Sawtooth oscillator: width=0.7 means 70% of each period is the rising ramp.
width = 0.7
sawtooth = oscillator.Sawtooth(amp=amplitude, sr=sample_rate, f=frequency, width=width)
data = sawtooth.generate(N=samples)
|
notebooks/Oscillators.ipynb
|
mohabouje/eDSP
|
gpl-3.0
|
Then, to display:
|
# Plot the generated sawtooth against the time axis from the earlier cell.
plt.plot(t, data)
plt.show()
|
notebooks/Oscillators.ipynb
|
mohabouje/eDSP
|
gpl-3.0
|
Interactive mode
|
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets


# Interactive widget: choose waveform type, frequency and duration, plus a
# shape parameter alpha (duty cycle for square, width for sawtooth), then
# generate and plot the signal.
@interact(dtype=widgets.Dropdown(
    options=['square', 'sinusoidal', 'sawtooth'],
    value='square',
    description='Type:',
    disabled=False),
    frequency=widgets.IntSlider(min=1,max=20,step=1,value=10),
    duration=widgets.IntSlider(min=1,max=5,step=1,value=1),
    alpha=widgets.FloatSlider(min=0.0,max=1.0, value=0.3))
def display_oscillator(dtype, frequency, duration, alpha):
    # Fixed sample rate (Hz); alpha is ignored for the sinusoid.
    sr = 42000
    g = None
    if dtype == "square":
        g = oscillator.Square(amp=1, sr=sr, f=frequency, duty=alpha)
    elif dtype == "sinusoidal":
        g = oscillator.Sinusoidal(amp=1, sr=sr, f=frequency, p=0)
    else:
        g = oscillator.Sawtooth(amp=1, sr=sr, f=frequency, width=alpha)
    samples = int(duration * sr)
    data = g.generate(N=samples)
    t = algorithm.linspace(0, duration, samples)
    plt.plot(t, data)
    plt.show()
|
notebooks/Oscillators.ipynb
|
mohabouje/eDSP
|
gpl-3.0
|
Note that this works in the opposite direction too: let's say you want to find "rare" objects in 10 dimensions, where we'll define rare as <1% of the population. Then you'll need to accept objects from 63% of the distribution in all 10 dimensions! So are those really "rare" or are they just a particular 1% of the population?
|
import numpy as np

# Per-dimension acceptance fraction so that the joint probability over 10
# independent dimensions is 1% (i.e. 0.01**(1/10) ~= 0.63): each cut keeps
# ~63% of the population, so "rare in 10D" is not rare in any one dimension.
p = 10**(np.log10(0.01)/10.0)
# print() as a function call: the original bare "print p" statement is a
# SyntaxError on Python 3 (and print(p) behaves identically on Python 2).
print(p)
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
N.B. Dimensionality isn't just measuring $D$ parameters for $N$ objects. It could be a spectrum with $D$ values or an image with $D$ pixels, etc. In the book the examples used just happen to be spectra of galaxies from the SDSS project. But we can insert the data of our choice instead.
For example: the SDSS comprises a sample of 357 million sources:
- each source has 448 measured attributes
- selecting just 30 (e.g., magnitude, size..) and normalizing the data range $-1$ to $1$
yields a probability of only 1 in 1.4$\times 10^5$ that one of the 357 million sources resides within the unit hypersphere.
Principal Component Analysis (PCA)
In Principal Component Analysis (PCA) we seek to take a data set like the one shown below and apply a transform to the data such that the new axes are aligned with the maximal variance of the data. As can be seen in the Figure, this is basically just the same as doing regression by minimizing the square of the perpendicular distances to the new axes. Note that we haven't made any changes to the data, we have just defined new axes.
|
# Execute this cell
# Ivezic, Figure 7.2
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
# Draws a correlated 2-D Gaussian sample, the original (x, y) axes, the
# rotated (x', y') principal axes, each point's perpendicular projection
# onto x', and the 1/2/3-sigma ellipses of the generating distribution.
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse

#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)
r = 0.9

sigma1 = 0.25
sigma2 = 0.08
rotation = np.pi / 6
s = np.sin(rotation)
c = np.cos(rotation)

# Anisotropic Gaussian sample, rotated by 30 degrees via rotation matrix R.
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],[s, c]])
X = np.dot(R, X)

#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)

# draw axes
ax.annotate(r'$x$', (-r, 0), (r, 0),
            ha='center', va='center',
            arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
            ha='center', va='center',
            arrowprops=dict(arrowstyle='<->', color='k', lw=1))

# draw rotated axes
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
            ha='center', va='center',
            arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
            ha='center', va='center',
            arrowprops=dict(color='k', arrowstyle='<->', lw=1))

# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)

# draw lines: perpendicular projection of each point onto the x' axis
vnorm = np.array([s, -c])
for v in (X.T):
    d = np.dot(v, vnorm)
    v1 = v - d * vnorm
    ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')

# draw ellipses (1-, 2-, 3-sigma contours of the generating Gaussian)
for sigma in (1, 2, 3):
    ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
                         rotation * 180. / np.pi,
                         ec='k', fc='gray', alpha=0.2, zorder=1))

ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)

plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Note that the points are correlated along a particular direction which doesn't align with the initial choice of axes. So, we should rotate our axes to align with this correlation.
We'll choose the rotation to maximize the ability to discriminate between the data points:
* the first axis, or principal component, is direction of maximal variance
* the second principal component is orthogonal to the first component and maximizes the residual variance
* ...
PCA is a dimensional reduction process because we can generally account for nearly "all" of the variance in the data set with fewer than the original $K$ dimensions. See more below.
We start with a data set ${x_i}$ which consists of $N$ objects for which we measure $K$ features. We start by subtracting the mean for each feature in ${x_i}$ and write $X$ as a $N\times K$ matrix.
The covariance of this matrix is
$$C_X=\frac{1}{N-1}X^TX.$$
There are off-diagonal terms if there are correlations between the measurements (e.g., maybe two of the features are temperature dependent and the measurements were taken at the same time).
If $R$ is a projection of the data that is aligned with the maximal variance, then we have $Y= X R$ with covariance
$$ C_{Y} = R^T X^T X R = R^T C_X R.$$
$r_1$ is the first principal component of $R$, which can be derived using Langrange multipliers with the following cost function:
$$ \phi(r_1,\lambda_1) = r_1^TC_X r_1 - \lambda_1(r_1^Tr_1-1). $$
If we take derivative of $\phi(r_1,\lambda)$ with respect to $r_1$ and set it to 0, then we have
$$ C_Xr_1 - \lambda_1 r_1 = 0. $$
$\lambda_1$ (the largest eigenvalue of the matrix) is the root of the equation $\det(C_X -
\lambda_1 {\bf I})=0$ for which the eigenvalue is
$$ \lambda_1 = r_1^T C_X r_1.$$
The columns of the full matrix, $R$ are the eigenvectors (known here as principal components).
We aren't going to go through the linear algebra more than that here. But it would be a good group project for someone. See the end of 7.3.1 starting at the bottom on page 294 or go through Karen Leighly's PCA lecture notes if you want to walk through the math in more detail.
Preparing data for PCA
Subtract the mean of each dimension (to "center" the data)
Divide by the variance in each dimension (to "whiten" the data)
(For spectra and images) normalize each row to yield an integral of unity.
|
#Example call from 7.3.2
import numpy as np
from sklearn.decomposition import PCA

# Build a 10-D dataset with only 3 intrinsic dimensions, then fit PCA.
X = np.random.normal(size=(100,3)) # 100 points in 3D
R = np.random.random((3,10)) # projection matrix
X = np.dot(X,R) # X is now 10-dim, with 3 intrinsic dims

pca = PCA(n_components=4) # n_components can be optionally set
pca.fit(X)
comp = pca.transform(X) # compute the subspace projection of X

mean = pca.mean_ # length 10 mean of the data
components = pca.components_ # 4x10 matrix of components
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Scikit-Learn's decomposition module has a number of PCA type implementations.
Let's work through an example using spectra of galaxies take during the Sloan Digital Sky Survey. In this sample there are 4000 spectra with flux measurements in 1000 bins. 15 example spectra are shown below and our example will use half of the spectra chosen at random.
|
%matplotlib inline
# Example from Andy Connolly
# See Ivezic, Figure 7.4
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
print len(spectra), len(wavelengths)
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)
nrows = 2000 # We'll just look at 2000 random spectra
n_components = 5 # Do the fit with 5 components, which is the mean plus 4
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0) # Compute the mean spectrum, which is the first component
# spec_mean = spectra[:50].mean(0)
# use Randomized PCA for speed
pca = RandomizedPCA(n_components - 1)
pca.fit(spectra[ind])
pca_comp = np.vstack([spec_mean,pca.components_]) #Add the mean to the components
evals = pca.explained_variance_ratio_
print evals # Print the eigenvalues
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Now let's plot the components. See also Ivezic, Figure 7.4. The left hand panels are just the first 5 spectra for comparison with the first 5 PCA components, which are shown on the right. They are ordered by the size of their eigenvalues.
|
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
                    bottom=0.1, top=0.95, hspace=0.05)

titles = 'PCA components'

# One row per component: raw spectrum j on the left, eigenspectrum j on the
# right (row 0 shows the mean spectrum, which was prepended to pca_comp).
for j in range(n_components):
    # plot the components
    ax = fig.add_subplot(n_components, 2, 2*j+2)
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
    if j < n_components - 1:
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax.set_xlabel('wavelength (Angstroms)')
    ax.plot(wavelengths, pca_comp[j], '-k', lw=1)

    # plot zero line
    xlim = [3000, 7999]
    ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
    ax.set_xlim(xlim)

    # adjust y limits
    ylim = plt.ylim()
    dy = 0.05 * (ylim[1] - ylim[0])
    ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)

    # plot the first j spectra
    ax2 = fig.add_subplot(n_components, 2, 2*j+1)
    ax2.yaxis.set_major_formatter(plt.NullFormatter())
    ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
    if j < n_components - 1:
        ax2.xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax2.set_xlabel('wavelength (Angstroms)')
    ax2.plot(wavelengths, spectra[j], '-k', lw=1)

    # plot zero line
    ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
    ax2.set_xlim(xlim)

    if j == 0:
        ax.set_title(titles, fontsize='medium')

    if j == 0:
        label = 'mean'
    else:
        label = 'component %i' % j

    # adjust y limits
    ylim = plt.ylim()
    dy = 0.05 * (ylim[1] - ylim[0])
    ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)

    ax.text(0.02, 0.95, label, transform=ax.transAxes,
            ha='left', va='top', bbox=dict(ec='w', fc='w'),
            fontsize='small')

plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Now let's make "scree" plots. These plots tell us how much of the variance is explained as a function of the each eigenvector. Our plot won't look much like Ivezic, Figure 7.5, so I've shown it below to explain where "scree" comes from.
|
# Execute this cell
import numpy as np
from matplotlib import pyplot as plt

#----------------------------------------------------------------------
# Plot the results: scree plot (left) and cumulative explained variance (right).
fig = plt.figure(figsize=(10, 5))

ax = fig.add_subplot(121)
ax.plot(np.arange(n_components-1), evals)  # per-component variance ratio
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("eigenvalue ")

ax = fig.add_subplot(122)
ax.plot(np.arange(n_components-1), evals.cumsum())  # running total
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("cumulative eigenvalue")

plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
How much of the variance is explained by the first two components? How about all of the components?
|
# NOTE(review): exercise stubs -- the format() calls are intentionally left
# unfinished (the "# Complete" markers swallow the closing parentheses), so
# this cell is not valid Python until the student fills in expressions based
# on pca.explained_variance_ratio_.
print("The first component explains {:.3f} of the variance in the data.".format(# Complete
print("The second component explains {:.3f} of the variance in the data.".format(# Complete
print("All components explain {:.3f} of the variance in the data.".format(# Complete
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
This is why PCA enables dimensionality reduction.
How many components would we need to explain 99.5% of the variance?
|
# NOTE(review): exercise stub -- intentionally incomplete; fill in the
# cumulative-variance check and the format() argument to make it runnable.
for num_feats in np.arange(1,20, dtype = int):
    # complete
    print("{:d} features are needed to explain 99.5% of the variance".format(# Complete
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Note that we would need 1000 components to encode all of the variance.
Interpreting the PCA
The output eigenvectors are ordered by their associated eigenvalues
The eigenvalues reflect the variance within each eigenvector
The sum of the eigenvalues is total variance of the system
Projection of each spectrum onto the first few eigenspectra is a compression of the data
Once we have the eigenvectors, we can try to reconstruct an observed spectrum, ${x}(k)$, in the eigenvector basis, ${e}_i(k)$, as
$$ \begin{equation}
x_i(k) = \mu(k) + \sum_j^R \theta_{ij} \, e_j(k).
\end{equation}
$$
That would give a full (perfect) reconstruction of the data since it uses all of the eigenvectors. But if we truncate (i.e., $r<R$), then we will have reduced the dimensionality while still reconstructing the data with relatively little loss of information.
For example, we started with 4000x1000 floating point numbers. If we can explain nearly all of the variance with 8 eigenvectors, then we have reduced the problem to 4000x8+8x1000 floating point numbers!
Execute the next cell to see how the reconstruction improves by adding more components.
|
# Execute this cell
# Reconstruct a single SDSS spectrum from an increasing number of PCA
# eigenvectors and plot the sequence (mean, +4, +8, +20 components).
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#------------------------------------------------------------
# Compute PCA components
# Eigenvalues can be computed using PCA as in the commented code below:
#from sklearn.decomposition import PCA
#pca = PCA()
#pca.fit(spectra)
#evals = pca.explained_variance_ratio_
#evals_cs = evals.cumsum()
# because the spectra have been reconstructed from masked values, this
# is not exactly correct in this case: we'll use the values computed
# in the file compute_sdss_pca.py
evals = data['evals'] ** 2
evals_cs = evals.cumsum()
evals_cs /= evals_cs[-1]  # normalize so the cumulative sum ends at 1
evecs = data['evecs']
spec_mean = spectra.mean(0)
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
# Projection of the mean-subtracted spectrum onto each eigenvector.
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 4, 8, 20]):
    ax = fig.add_subplot(411 + i)
    ax.plot(wavelengths, spec, '-', c='gray')
    # Truncated reconstruction: mean + first n eigenvectors.
    ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
    if i < 3:
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.set_ylim(-2, 21)
    ax.set_ylabel('flux')
    if n == 0:
        text = "mean"
    elif n == 1:
        text = "mean + 1 component\n"
        text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
    else:
        text = "mean + %i components\n" % n
        text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
    ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Caveats I
PCA is a linear process, whereas the variations in the data may not be. So it may not always be appropriate to use and/or may require a relatively large number of components to fully describe any non-linearity.
Note also that PCA can be very impractical for large data sets which exceed the memory per core as the computational requirement goes as $\mathscr{O}(D^3$) and the memory requirement goes as $\mathscr{O}(2D^2)$.
Missing Data
We have assumed so far that there is no missing data (e.g., bad pixels in the spectrum, etc.). But often the data set is incomplete. Since PCA encodes the flux correlation with wavelength (or whatever parameters are in your data set), we can actually use it to determine missing values.
An example is shown below. Here, black are the observed spectra. Gray are the regions where we have no data. Blue is the PCA reconstruction, including the regions where there are no data. Awesome, isn't it?
|
# Execute this cell
# Show how PCA can fill in missing (masked) spectral regions: plot observed
# spectra (black), masked regions (gray shading), and the PCA reconstruction.
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from astroML.datasets import fetch_sdss_corrected_spectra
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Get spectra and eigenvectors used to reconstruct them
data = fetch_sdss_corrected_spectra()
spec = sdss_corrected_spectra.reconstruct_spectra(data)
lam = sdss_corrected_spectra.compute_wavelengths(data)
evecs = data['evecs']
mu = data['mu']
norms = data['norms']
mask = data['mask']
#------------------------------------------------------------
# plot the results
i_plot = ((lam > 5750) & (lam < 6350))  # restrict to a narrow wavelength window
lam = lam[i_plot]
specnums = [20, 8, 9]
subplots = [311, 312, 313]
fig = plt.figure(figsize=(8, 10))
fig.subplots_adjust(hspace=0)
for subplot, i in zip(subplots, specnums):
    ax = fig.add_subplot(subplot)
    # compute eigen-coefficients
    spec_i_centered = spec[i] / norms[i] - mu
    coeffs = np.dot(spec_i_centered, evecs.T)
    # blank out masked regions
    spec_i = spec[i]
    mask_i = mask[i]
    spec_i[mask_i] = np.nan  # NaN regions are not drawn, leaving visible gaps
    # plot the raw masked spectrum
    ax.plot(lam, spec_i[i_plot], '-', color='k', lw=2,
            label='True spectrum')
    # plot two levels of reconstruction
    for nev in [10]:
        if nev == 0:
            label = 'mean'
        else:
            label = 'N EV=%i' % nev
        # Reconstruction (undo the normalization): norms * (mean + sum of components)
        spec_i_recons = norms[i] * (mu + np.dot(coeffs[:nev], evecs[:nev]))
        ax.plot(lam, spec_i_recons[i_plot], label=label)
    # plot shaded background in masked region
    ylim = ax.get_ylim()
    mask_shade = ylim[0] + mask[i][i_plot].astype(float) * ylim[1]
    plt.fill(np.concatenate([lam[:1], lam, lam[-1:]]),
             np.concatenate([[ylim[0]], mask_shade, [ylim[0]]]),
             lw=0, fc='k', alpha=0.2)
    ax.set_xlim(lam[0], lam[-1])
    ax.set_ylim(ylim)
    ax.yaxis.set_major_formatter(ticker.NullFormatter())
    if subplot == 311:
        ax.legend(loc=1, prop=dict(size=14))
ax.set_xlabel('$\lambda\ (\AA)$')
ax.set_ylabel('normalized flux')
plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
The example that we have been using above is "spectral" PCA. Some examples from the literature include:
- Francis et al. 1992
- Connolly et al. 1995
- Yip et al. 2004
One can also do PCA on features that aren't ordered (as they were for the spectra). E.g., if you have $D$ different parameters measured for your objects. The classic example in astronomy is
Boroson & Green 1992
Caveats II
One of the things that I don't like about PCA is that the eigenvectors are just mathematical constructs. They often don't look anything like the spectra themselves. Whereas it is often the case that you might expect that the components would look like, well, the physical components. For example, quasars are fundamentally galaxies. So, part of their flux comes from the galaxy that they live in. But PCA doesn't return any component that looks like a typical galaxy. Essentially this is because the components can be both positive and negative.
Non-negative Matrix Factorization (NMF)
This is where Non-negative Matrix Factorization (NMF) comes in. Here we are treating the data as a linear sum of positive-definite components.
NMF assumes any data matrix can be factored into two matrices, $W$ and $Y$, with
$$\begin{equation}
X=W Y,
\end{equation}
$$
where both $W$ and $Y$ are nonnegative.
So, $WY$ is an approximation of $X$. Minimizing the reconstruction error $|| (X - W Y)^2 ||$,
nonnegative bases can be derived through an iterative process.
Note, however, that the iterative process is not guaranteed to find the global minimum (like $K$-means and EM, it can get stuck in local minima), but
random initialization and cross-validation can be used to search for the global minimum.
An example from the literature is Allen et al. 2008
In Scikit-Learn the NMF implementation looks like:
|
# Execute this cell
# Minimal NMF usage example on random 3-D data.
import numpy as np
from sklearn.decomposition import NMF
X = np.random.random((100,3)) # 100 points in 3D
nmf = NMF(n_components=3)
nmf.fit(X)
proj = nmf.transform(X) # (100, 3) projection of the data onto the components
comp = nmf.components_ # (3, 3) array: n_components x n_features (the earlier "3x10" comment was wrong)
err = nmf.reconstruction_err_ # how well 3 components capture the data
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
An example (and comparison to PCA) is given below.
|
# Execute the next 2 cells
# Example from Figure 7.4
# Author: Jake VanderPlas
# License: BSD
# Side-by-side comparison of the first PCA and NMF components of the SDSS spectra.
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import NMF
from sklearn.decomposition import RandomizedPCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#----------------------------------------------------------------------
# Compute PCA, and NMF components
def compute_PCA_NMF(n_components=5):
    """Return (pca_comp, nmf_comp): mean + first PCA components, and NMF components.

    NOTE(review): mutates the module-level `spectra` array in place (clips
    negatives to zero for NMF) -- subsequent cells see the clipped values.
    """
    spec_mean = spectra.mean(0)
    # PCA: use randomized PCA for speed
    # NOTE(review): RandomizedPCA is deprecated in modern scikit-learn;
    # PCA(n, svd_solver='randomized') is the current equivalent.
    pca = RandomizedPCA(n_components - 1)
    pca.fit(spectra)
    pca_comp = np.vstack([spec_mean,
                          pca.components_])
    # NMF requires all elements of the input to be greater than zero
    spectra[spectra < 0] = 0
    nmf = NMF(n_components)
    nmf.fit(spectra)
    nmf_comp = nmf.components_
    return pca_comp, nmf_comp
n_components = 5
decompositions = compute_PCA_NMF(n_components)
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
                    bottom=0.1, top=0.95, hspace=0.05)
titles = ['PCA components', 'NMF components']
for i, comp in enumerate(decompositions):
    for j in range(n_components):
        ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i)
        ax.yaxis.set_major_formatter(plt.NullFormatter())
        ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
        if j < n_components - 1:
            ax.xaxis.set_major_formatter(plt.NullFormatter())
        else:
            ax.set_xlabel('wavelength (Angstroms)')
        ax.plot(wavelengths, comp[j], '-k', lw=1)
        # plot zero line
        xlim = [3000, 7999]
        ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
        ax.set_xlim(xlim)
        if j == 0:
            ax.set_title(titles[i])
        # PCA rows start with the mean; NMF components are 1-indexed in the label.
        if titles[i].startswith('PCA') or titles[i].startswith('ICA'):
            if j == 0:
                label = 'mean'
            else:
                label = 'component %i' % j
        else:
            label = 'component %i' % (j + 1)
        ax.text(0.03, 0.94, label, transform=ax.transAxes,
                ha='left', va='top')
        for l in ax.get_xticklines() + ax.get_yticklines():
            l.set_markersize(2)
        # adjust y limits
        ylim = plt.ylim()
        dy = 0.05 * (ylim[1] - ylim[0])
        ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Independent Component Analysis (ICA)
For data where the components are statistically independent (or nearly so) Independent Component Analysis (ICA) has become a popular method for separating mixed components. The classical example is the so-called "cocktail party" problem. This is illustrated in the following figure from Hastie, Tibshirani, and Friedman (Figure 14.27 on page 497 in my copy, so they have clearly added some stuff!). Think of the "source signals" as two voices at a party. You are trying to concentrate on just one voice. What you hear is something like the "measured signals" pattern. You could run the data through PCA and that would do an excellent job of reconstructing the signal with reduced dimensionality, but it wouldn't actually isolate the different physical components (bottom-left panel) ICA on the other hand can (bottom-right panel).
ICA is a good choice for a complex system with relatively independent components. For example a galaxy is roughly a linear combination of cool stars and hot stars, and a quasar is just a galaxy with other components from an accretion disk and emission line regions. Ideally we want "eigenvectors" that are aligned with those physical traits/regions as opposed to mathematical constructs.
The basic call to the FastICA algoirthm in Scikit-Learn looks like:
|
# Execute this cell
# Minimal FastICA usage example: 2 latent sources mixed into 5 observed dimensions.
import numpy as np
from sklearn.decomposition import FastICA
X = np.random.normal(size=(100,2)) # 100 objects in 2D (the latent sources)
R = np.random.random((2,5)) # mixing matrix
X = np.dot(X,R) # mixed: 2D data embedded in 5D space
ica = FastICA(2) # fit 2 components
ica.fit(X)
proj = ica.transform(X) # (100, 2) projection of the data
comp = ica.components_ # (2, 5) matrix of independent components
## sources = ica.sources_ # 100x2 matrix of sources
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Execute the next 2 cells to produce a plot showing the ICA components.
|
%matplotlib inline
#Example from Andy Connolly
# Fit FastICA to a random subsample of SDSS spectra and plot, per row,
# an example spectrum (left) next to an ICA component (right).
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import FastICA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)  # fixed seed so the random subsample is reproducible
nrows = 500
n_components = 5
# Random subsample of 500 spectra (with replacement) to speed up the fit.
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0)
# spec_mean = spectra[:50].mean(0)
ica = FastICA(n_components - 1)
ica.fit(spectra[ind])
ica_comp = np.vstack([spec_mean,ica.components_]) #Add the mean to the components
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
                    bottom=0.1, top=0.95, hspace=0.05)
titles = 'ICA components'
for j in range(n_components):
    # plot the components (right-hand column)
    ax = fig.add_subplot(n_components, 2, 2*j+2)
    ax.yaxis.set_major_formatter(plt.NullFormatter())
    ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
    if j < n_components - 1:
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax.set_xlabel(r'wavelength ${\rm (\AA)}$')
    ax.plot(wavelengths, ica_comp[j], '-k', lw=1)
    # plot zero line
    xlim = [3000, 7999]
    ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
    ax.set_xlim(xlim)
    # adjust y limits
    ylim = plt.ylim()
    dy = 0.05 * (ylim[1] - ylim[0])
    ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
    # plot the first j spectra (left-hand column)
    ax2 = fig.add_subplot(n_components, 2, 2*j+1)
    ax2.yaxis.set_major_formatter(plt.NullFormatter())
    ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
    if j < n_components - 1:
        ax2.xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax2.set_xlabel(r'wavelength ${\rm (\AA)}$')
    ax2.plot(wavelengths, spectra[j], '-k', lw=1)
    # plot zero line
    ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
    ax2.set_xlim(xlim)
    if j == 0:
        ax.set_title(titles, fontsize='medium')
    # Row 0 shows the mean; subsequent rows show ICA component j.
    if j == 0:
        label = 'mean'
    else:
        label = 'component %i' % j
    # adjust y limits
    ylim = plt.ylim()
    dy = 0.05 * (ylim[1] - ylim[0])
    ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
    ax.text(0.02, 0.95, label, transform=ax.transAxes,
            ha='left', va='top', bbox=dict(ec='w', fc='w'),
            fontsize='small')
plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
As with PCA and NMF, we can similarly do a reconstruction:
|
# Execute this cell
# NOTE(review): despite appearing after the ICA section, this reconstruction
# uses `data['evecs']` (the PCA eigenvectors), not the ICA components, and
# `spec_mean` as left over from the previous cell (a 500-spectrum subsample
# mean) -- verify that is intended.
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
evecs = data['evecs']
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 2, 4, 8]):
    ax = fig.add_subplot(411 + i)
    ax.plot(wavelengths, spec, '-', c='gray')
    ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
    if i < 3:
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    ax.set_ylim(-2, 21)
    ax.set_ylabel('flux')
    if n == 0:
        text = "mean"
    elif n == 1:
        text = "mean + 1 component\n"
        #text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
    else:
        text = "mean + %i components\n" % n
        #text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
    ## GTR: had to comment this out for some reason
    ## ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Ivezic, Figure 7.4 compares the components found by the PCA, ICA, and NMF algorithms. Their differences and similarities are quite interesting.
If you think that I was pulling your leg about the cocktail problem, try it yourself!
Load the code instead of running it and see what effect changing some things has.
|
%run code/plot_ica_blind_source_separation.py
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Let's revisit the digits sample and see what PCA, NMF, and ICA do for it.
|
# Execute this cell to load the digits sample
%matplotlib inline
import numpy as np
from sklearn.datasets import load_digits
from matplotlib import pyplot as plt
digits = load_digits()
grid_data = np.reshape(digits.data[0], (8,8)) #reshape to 8x8
plt.imshow(grid_data, interpolation = "nearest", cmap = "bone_r")
print grid_data
X = digits.data
y = digits.target
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Do the PCA transform, projecting to 2 dimensions and plot the results.
|
# PCA
# Exercise cell (intentional stub): fit PCA with 2 components on the digits
# matrix X and scatter-plot the 2-D projection colored by label y.
from sklearn.decomposition import PCA
pca = # Complete
# Complete
X_reduced = # Complete
plt.scatter(# Complete, c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Similarly for NMF and ICA
|
# NMF
# Exercise cell (intentional stub): repeat the 2-D projection with NMF, then ICA.
# Complete
# ICA
from sklearn.decomposition import FastICA
# Complete
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Take a second to think about what ICA is doing. What if you had digits from digital clocks instead of handwritten?
I wasn't going to introduce Neural Networks yet, but it is worth noting that Scikit-Learn's Bernoulli Restricted Boltzman Machine (RBM) is discussed in the (unsupervised) neural network part of the User's Guide and is relevant here as the data input must be either binary or values between 0 and 1, which is the case that we have here.
We could think about doing dimensional reduction of the digits data set in another way. There are 64 pixels in each of our images. Presumably all of them aren't equally useful. Let's figure out exactly which pixels are the most relevant. We'll use Scikit-Learn's RandomForestRegressor. We won't get to regression until next week, but you don't need to understand the algorithm to do this, just look at the inputs and outputs. Which pixels are the most important? As a bonus see if you can plot digit images with those pixels highlighted.
|
# Exercise cell (intentional stub): fit a random forest regressor on the digit
# pixels and rank the per-pixel feature importances.
from sklearn.ensemble import RandomForestRegressor
RFreg = RandomForestRegressor()# Complete or leave blank as you see fit
# Do Fitting
importances = # Determine importances
np.argsort( # Complete to rank importances
|
DimensionReduction.ipynb
|
gtrichards/PHYS_T480
|
mit
|
Data-MC comparison
Table of contents
Data preprocessing
Weight simulation events to spectrum
S125 verification
$\log_{10}(\mathrm{dE/dX})$ verification
|
from __future__ import division, print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from icecube.weighting.weighting import from_simprod
from icecube import dataclasses
import comptools as comp
import comptools.analysis.plotting as plotting
color_dict = comp.analysis.get_color_dict()
%matplotlib inline
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
Data preprocessing
[ back to top ]
1. Load simulation/data dataframe and apply specified quality cuts
2. Extract desired features from dataframe
3. Get separate testing and training datasets
4. Feature selection
Load simulation, format feature and target matrices
|
# Load simulation and data dataframes for the chosen detector configuration,
# optionally restricting data to June/July, and look up the detector livetime.
config = 'IC86.2012'
# comp_list = ['light', 'heavy']
comp_list = ['PPlus', 'Fe56Nucleus']
june_july_data_only = False
sim_df = comp.load_dataframe(datatype='sim', config=config, split=False)
data_df = comp.load_dataframe(datatype='data', config=config)
# Drop data events with non-finite energy loss (log_dEdX).
data_df = data_df[np.isfinite(data_df['log_dEdX'])]
if june_july_data_only:
    print('Masking out all data events not in June or July')
    def is_june_july(time):
        # True if the event's (MJD) end time falls in June or July.
        i3_time = dataclasses.I3Time(time)
        return i3_time.date_time.month in [6, 7]
    june_july_mask = data_df.end_time_mjd.apply(is_june_july)
    data_df = data_df[june_july_mask].reset_index(drop=True)
# Livetime must match the (possibly month-restricted) data selection.
months = (6, 7) if june_july_data_only else None
livetime, livetime_err = comp.get_detector_livetime(config, months=months)
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
Weight simulation events to spectrum
[ back to top ]
For more information, see the IT73-IC79 Data-MC comparison wiki page.
First, we'll need to define a 'realistic' flux model
|
# 'Simple knee' broken power-law parameters: index gamma_1 below the knee
# (at 3 PeV), gamma_2 above, eps controls the sharpness of the transition.
phi_0 = 3.5e-6
# phi_0 = 2.95e-6
gamma_1 = -2.7
gamma_2 = -3.1
eps = 100

def flux(E, phi_0=phi_0, gamma_1=gamma_1, gamma_2=gamma_2, eps=eps):
    """Evaluate the 'simple knee' broken power-law cosmic-ray flux model.

    Parameters
    ----------
    E : array_like
        Energy (or energies) in GeV.
    phi_0, gamma_1, gamma_2, eps : float, optional
        Spectral parameters.  Defaults are the module-level values above,
        so existing ``flux(E)`` calls behave exactly as before; they are
        now parameters so alternative spectra can be tried without
        rebinding globals.

    Returns
    -------
    numpy.ndarray or float
        Differential flux at E.
    """
    E_pev = np.asarray(E) * 1e-6  # convert GeV -> PeV
    # Smooth transition between the two power-law indices at the 3 PeV knee.
    knee = (1 + (E_pev / 3.)**eps)**((gamma_2 - gamma_1) / eps)
    return 1e-6 * phi_0 * E_pev**gamma_1 * knee
# Compare candidate flux models, then weight each simulation event by
# flux / generation_probability so the MC mimics the chosen spectrum.
from icecube.weighting.weighting import PowerLaw
# Broken power law built from the weighting module (for comparison plots).
pl_flux = PowerLaw(eslope=-2.7, emin=1e5, emax=3e6, nevents=1e6) + \
    PowerLaw(eslope=-3.1, emin=3e6, emax=1e10, nevents=1e2)
pl_flux.spectra
from icecube.weighting.fluxes import GaisserH3a, GaisserH4a, Hoerandel5
flux_h4a = GaisserH4a()
energy_points = np.logspace(6.0, 9.0, 100)
fig, ax = plt.subplots()
# Plot E^2.7-scaled fluxes so spectral features are visible.
ax.plot(np.log10(energy_points), energy_points**2.7*flux_h4a(energy_points, 2212),
        marker='None', ls='-', lw=2, label='H4a proton')
ax.plot(np.log10(energy_points), energy_points**2.7*flux_h4a(energy_points, 1000260560),
        marker='None', ls='-', lw=2, label='H4a iron')
ax.plot(np.log10(energy_points), energy_points**2.7*flux(energy_points),
        marker='None', ls='-', lw=2, label='Simple knee')
ax.plot(np.log10(energy_points), energy_points**2.7*pl_flux(energy_points),
        marker='None', ls='-', lw=2, label='Power law (weighting)')
ax.set_yscale('log', nonposy='clip')
ax.set_xlabel('$\log_{10}(E/\mathrm{GeV})$')
ax.set_ylabel('$\mathrm{E}^{2.7} \ J(E) \ [\mathrm{GeV}^{1.7} \mathrm{m}^{-2} \mathrm{sr}^{-1} \mathrm{s}^{-1}]$')
ax.grid(which='both')
ax.legend()
plt.show()
# Build the combined generation spectrum for all simulation sets,
# weighting each set's generator by its number of files.
simlist = np.unique(sim_df['sim'])
for i, sim in enumerate(simlist):
    gcd_file, sim_files = comp.simfunctions.get_level3_sim_files(sim)
    num_files = len(sim_files)
    print('Simulation set {}: {} files'.format(sim, num_files))
    if i == 0:
        generator = num_files*from_simprod(int(sim))
    else:
        generator += num_files*from_simprod(int(sim))
energy = sim_df['MC_energy'].values
ptype = sim_df['MC_type'].values
num_ptypes = np.unique(ptype).size
cos_theta = np.cos(sim_df['MC_zenith']).values
# Inverse of the generated-event density: the "one-weight" per event.
weights = 1.0/generator(energy, ptype, cos_theta)
# weights = weights/num_ptypes
# Final per-event weight = assumed flux times the generation weight.
sim_df['weights'] = flux(sim_df['MC_energy'])*weights
# sim_df['weights'] = flux_h4a(sim_df['MC_energy'], sim_df['MC_type'])*weights
# Boolean masks selecting each MC composition.
MC_comp_mask = {}
for composition in comp_list:
    MC_comp_mask[composition] = sim_df['MC_comp'] == composition
    # MC_comp_mask[composition] = sim_df['MC_comp_class'] == composition
def plot_rate(array, weights, bins, xlabel=None, color='C0',
              label=None, legend=True, alpha=0.8, ax=None):
    """Histogram ``array`` into ``bins`` with event ``weights`` and draw the
    weighted rate as steps (with error bars) on a log-scaled y axis.

    Returns the matplotlib axis that was drawn on (the current axis when
    ``ax`` is None).
    """
    if ax is None:
        ax = plt.gca()
    # Weighted counts per bin; the statistical error of a weighted
    # histogram is sqrt(sum of squared weights) in each bin.
    counts, _ = np.histogram(array, bins=bins, weights=weights)
    counts_err = np.sqrt(np.histogram(array, bins=bins, weights=weights**2)[0])
    plotting.plot_steps(bins, counts, yerr=counts_err, color=color,
                        label=label, alpha=alpha, ax=ax)
    ax.set_yscale('log', nonposy='clip')
    ax.set_ylabel('Rate [Hz]')
    if xlabel:
        ax.set_xlabel(xlabel)
    if legend:
        ax.legend()
    ax.grid(True)
    return ax
def plot_data_MC_ratio(sim_array, sim_weights, data_array, data_weights, bins,
                       xlabel=None, color='C0', alpha=0.8, label=None,
                       legend=False, ylim=None, ax=None):
    """Plot the binned data/MC rate ratio (with propagated errors) as steps.

    Both samples are histogrammed into ``bins`` with their respective
    weights; bin errors are sqrt(sum of squared weights).  A dash-dot
    line at ratio = 1 marks perfect agreement.  Returns the axis used.
    """
    if ax is None:
        ax = plt.gca()
    sim_rate = np.histogram(sim_array, bins=bins, weights=sim_weights)[0]
    sim_rate_err = np.sqrt(np.histogram(sim_array, bins=bins, weights=sim_weights**2)[0])
    data_rate = np.histogram(data_array, bins=bins, weights=data_weights)[0]
    data_rate_err = np.sqrt(np.histogram(data_array, bins=bins, weights=data_weights**2)[0])
    # Ratio with error propagation handled by the analysis helper.
    ratio, ratio_err = comp.analysis.ratio_error(data_rate, data_rate_err, sim_rate, sim_rate_err)
    plotting.plot_steps(bins, ratio, yerr=ratio_err,
                        color=color, label=label, alpha=alpha, ax=ax)
    ax.grid(True)
    ax.set_ylabel('Data/MC')
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylim:
        ax.set_ylim(ylim)
    if legend:
        ax.legend()
    # Reference line at perfect data/MC agreement.
    ax.axhline(1, marker='None', ls='-.', color='k')
    return ax
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
$\log_{10}(\mathrm{S_{125}})$ verification
[ back to top ]
|
# log10(S125) distribution, then a two-panel rate (top) and data/MC ratio
# (bottom) comparison per simulated composition.
sim_df['log_s125'].plot(kind='hist', bins=100, alpha=0.6, lw=1.5)
plt.xlabel('$\log_{10}(\mathrm{S}_{125})$')
plt.ylabel('Counts');
log_s125_bins = np.linspace(-0.5, 3.5, 75)
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1], hspace=0.0)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1], sharex=ax1)
for composition in comp_list:
    sim_s125 = sim_df[MC_comp_mask[composition]]['log_s125']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    plot_rate(sim_s125, sim_weights, bins=log_s125_bins,
              color=color_dict[composition], label=composition, ax=ax1)
# Each data event is weighted by 1/livetime so the histogram is a rate in Hz.
data_weights = np.array([1/livetime]*len(data_df['log_s125']))
plot_rate(data_df['log_s125'], data_weights, bins=log_s125_bins,
          color=color_dict['data'], label='Data', ax=ax1)
for composition in comp_list:
    sim_s125 = sim_df[MC_comp_mask[composition]]['log_s125']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    ax2 = plot_data_MC_ratio(sim_s125, sim_weights,
                             data_df['log_s125'], data_weights, log_s125_bins,
                             xlabel='$\log_{10}(\mathrm{S}_{125})$', color=color_dict[composition],
                             label=composition, ax=ax2)
ax2.set_ylim((0, 2))
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0., frameon=False)
plt.savefig(os.path.join(comp.paths.figures_dir, 'data-MC-comparison', 's125.png'))
plt.show()
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
$\log_{10}(\mathrm{dE/dX})$ verification
|
# log10(dE/dX) distribution and data/MC comparison (same layout as the
# S125 cell: rate on top, data/MC ratio below).
sim_df['log_dEdX'].plot(kind='hist', bins=100, alpha=0.6, lw=1.5)
plt.xlabel('$\log_{10}(\mathrm{dE/dX})$')
plt.ylabel('Counts');
log_dEdX_bins = np.linspace(-2, 4, 75)
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1], hspace=0.0)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1], sharex=ax1)
for composition in comp_list:
    sim_dEdX = sim_df[MC_comp_mask[composition]]['log_dEdX']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    plot_rate(sim_dEdX, sim_weights, bins=log_dEdX_bins,
              color=color_dict[composition], label=composition, ax=ax1)
# Each data event weighted by 1/livetime to convert counts to a rate.
data_weights = np.array([1/livetime]*len(data_df))
plot_rate(data_df['log_dEdX'], data_weights, bins=log_dEdX_bins,
          color=color_dict['data'], label='Data', ax=ax1)
for composition in comp_list:
    sim_dEdX = sim_df[MC_comp_mask[composition]]['log_dEdX']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    ax2 = plot_data_MC_ratio(sim_dEdX, sim_weights,
                             data_df['log_dEdX'], data_weights, log_dEdX_bins,
                             xlabel='$\log_{10}(\mathrm{dE/dX})$', color=color_dict[composition],
                             label=composition, ylim=[0, 5.5], ax=ax2)
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0., frameon=False)
plt.savefig(os.path.join(comp.paths.figures_dir, 'data-MC-comparison', 'dEdX.png'))
plt.show()
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
$\cos(\theta)$ verification
|
# cos(reconstructed zenith) distribution and data/MC comparison
# (same two-panel layout as the S125 and dE/dX cells).
sim_df['lap_cos_zenith'].plot(kind='hist', bins=100, alpha=0.6, lw=1.5)
plt.xlabel('$\cos(\\theta_{\mathrm{reco}})$')
plt.ylabel('Counts');
cos_zenith_bins = np.linspace(0.8, 1.0, 75)
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1], hspace=0.0)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1], sharex=ax1)
for composition in comp_list:
    sim_cos_zenith = sim_df[MC_comp_mask[composition]]['lap_cos_zenith']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    plot_rate(sim_cos_zenith, sim_weights, bins=cos_zenith_bins,
              color=color_dict[composition], label=composition, ax=ax1)
# Each data event weighted by 1/livetime to convert counts to a rate.
data_weights = np.array([1/livetime]*len(data_df))
plot_rate(data_df['lap_cos_zenith'], data_weights, bins=cos_zenith_bins,
          color=color_dict['data'], label='Data', ax=ax1)
for composition in comp_list:
    sim_cos_zenith = sim_df[MC_comp_mask[composition]]['lap_cos_zenith']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    ax2 = plot_data_MC_ratio(sim_cos_zenith, sim_weights,
                             data_df['lap_cos_zenith'], data_weights, cos_zenith_bins,
                             xlabel='$\cos(\\theta_{\mathrm{reco}})$', color=color_dict[composition],
                             label=composition, ylim=[0, 3], ax=ax2)
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0., frameon=False)
plt.savefig(os.path.join(comp.paths.figures_dir, 'data-MC-comparison', 'zenith.png'))
plt.show()
# Average in-ice radius distribution and data/MC comparison
# (same two-panel layout as the previous cells).
sim_df['avg_inice_radius'].plot(kind='hist', bins=100, alpha=0.6, lw=1.5)
plt.xlabel('Average in-ice radius')  # presumably meters -- TODO confirm units
plt.ylabel('Counts');
inice_radius_bins = np.linspace(0.0, 200, 75)
gs = gridspec.GridSpec(2, 1, height_ratios=[2,1], hspace=0.0)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1], sharex=ax1)
for composition in comp_list:
    sim_inice_radius = sim_df[MC_comp_mask[composition]]['avg_inice_radius']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    plot_rate(sim_inice_radius, sim_weights, bins=inice_radius_bins,
              color=color_dict[composition], label=composition, ax=ax1)
# Each data event weighted by 1/livetime to convert counts to a rate.
data_weights = np.array([1/livetime]*len(data_df))
plot_rate(data_df['avg_inice_radius'], data_weights, bins=inice_radius_bins,
          color=color_dict['data'], label='Data', ax=ax1)
for composition in comp_list:
    sim_inice_radius = sim_df[MC_comp_mask[composition]]['avg_inice_radius']
    sim_weights = sim_df[MC_comp_mask[composition]]['weights']
    # Fix: the xlabel previously said $\cos(\theta_{reco})$ -- a copy-paste
    # from the zenith cell -- although this axis is the average in-ice radius.
    ax2 = plot_data_MC_ratio(sim_inice_radius, sim_weights,
                             data_df['avg_inice_radius'], data_weights, inice_radius_bins,
                             xlabel='Average in-ice radius', color=color_dict[composition],
                             label=composition, ylim=[0, 3], ax=ax2)
ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0., frameon=False)
# plt.savefig(os.path.join(comp.paths.figures_dir, 'data-MC-comparison', 'zenith.png'))
plt.show()
sim_df.columns
|
notebooks/data-MC-comparison.ipynb
|
jrbourbeau/cr-composition
|
mit
|
Search for hierarchy identifiers: FeatureSet and PhenotypeAssociationSet
The G2P dataset exists within the hierarchy of Ga4GH datasets and featuresets. This call returns phenotype association sets hosted by the API. Observe that we are querying all datasets hosted in the endpoint. The identifiers for the featureset and phenotype association set are used by all subsequent API calls.
|
# Locate a G2P phenotype-association set and its matching feature set.
# (Python 2 notebook: print statements are used throughout.)
datasets = c.search_datasets()
phenotype_association_set_id = None
phenotype_association_set_name = None
for dataset in datasets:
    phenotype_association_sets = c.search_phenotype_association_sets(dataset_id=dataset.id)
    for phenotype_association_set in phenotype_association_sets:
        # Take the first association set of each dataset (inner break only);
        # if several datasets have one, the last dataset's set wins.
        phenotype_association_set_id = phenotype_association_set.id
        phenotype_association_set_name = phenotype_association_set.name
        print 'Found G2P phenotype_association_set:', phenotype_association_set.id, phenotype_association_set.name
        break
assert phenotype_association_set_id
assert phenotype_association_set_name
# Find the feature set whose name contains the association set's name.
feature_set_id = None
datasets = c.search_datasets()
for dataset in datasets:
    featuresets = c.search_feature_sets(dataset_id=dataset.id)
    for featureset in featuresets:
        if phenotype_association_set_name in featureset.name:
            feature_set_id = featureset.id
            print 'Found G2P feature_set:', feature_set_id
            break
assert feature_set_id
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Use case: Find evidence for a Genomic Feature
Search for Features by location
Using the feature set id returned above, the following request returns a list of features that exactly match a location
|
# Look up the single feature at chr7:55249005-55249006 in the G2P feature set.
feature_generator = c.search_features(feature_set_id=feature_set_id,
                                      reference_name="chr7",
                                      start=55249005,
                                      end=55249006
                                      )
features = list(feature_generator)
# Exactly one feature is expected at this locus.
assert len(features) == 1
print "Found {} features in G2P feature_set {}".format(len(features),feature_set_id)
feature = features[0]
print [feature.name,feature.gene_symbol,feature.reference_name,feature.start,feature.end]
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Search features by name
Alternatively, if the location is not known, we can query using the name of the feature. Using the feature set id returned above, the following request returns a list of features that exactly match a given name - 'EGFR S768I missense mutation'.
|
# Look up the same feature by its exact name instead of its location.
feature_generator = c.search_features(feature_set_id=feature_set_id, name='EGFR S768I missense mutation')
features = list(feature_generator)
# Exactly one feature is expected for this name.
assert len(features) == 1
print "Found {} features in G2P feature_set {}".format(len(features),feature_set_id)
feature = features[0]
print [feature.name,feature.gene_symbol,feature.reference_name,feature.start,feature.end]
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Get evidence associated with that feature.
Once we have looked up the feature, we can then search for all evidence associated with that feature.
|
# Fetch all genotype-phenotype associations for the features found above.
feature_phenotype_associations = c.search_genotype_phenotype(
    phenotype_association_set_id=phenotype_association_set_id,
    feature_ids=[f.id for f in features])
associations = list(feature_phenotype_associations)
# Each feature should contribute at least one association.
assert len(associations) >= len(features)
print "There are {} associations".format(len(associations))
print "\n".join([a.description for a in associations])
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Display evidence
Explore the evidence. For example, a publication.
|
from IPython.display import IFrame

# Render the first publication cited as evidence inline in the notebook.
publication_url = associations[0].evidence[0].info['publications'][0]
IFrame(publication_url, "100%", 300)
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Use Case: Find evidence for a Phenotype
Search a phenotype
Alternatively, a researcher can query for a phenotype. In this case by the phenotype's description matching 'Adenosquamous carcinoma .*'
|
phenotypes_generator = c.search_phenotype(
phenotype_association_set_id=phenotype_association_set_id,
description="Adenosquamous carcinoma .*"
)
phenotypes = list(phenotypes_generator)
assert len(phenotypes) >= 0
print "\n".join(set([p.description for p in phenotypes]))
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Get evidence associated with those phenotypes.
The researcher can use those phenotype identifiers to query for evidence associations.
|
feature_phenotype_associations = c.search_genotype_phenotype(
phenotype_association_set_id=phenotype_association_set_id,
phenotype_ids=[p.id for p in phenotypes])
associations = list(feature_phenotype_associations)
assert len(associations) >= len(phenotypes)
print "There are {} associations. First five...".format(len(associations))
print "\n".join([a.description for a in associations][:5])
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Further constrain associations with environment
The researcher can limit the associations returned by introducing an environment constraint.
|
import ga4gh_client.protocol as protocol
evidence = protocol.EvidenceQuery()
evidence.description = "MEK inhibitors"
feature_phenotype_associations = c.search_genotype_phenotype(
phenotype_association_set_id=phenotype_association_set_id,
phenotype_ids=[p.id for p in phenotypes],
evidence = [evidence]
)
associations = list(feature_phenotype_associations)
print "There are {} associations. First five...".format(len(associations))
print "\n".join([a.description for a in associations][:5])
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Use Case: Association Heatmap
The bokeh package should be installed for graphing.
Find features
First, we collect a set of features.
|
feature_generator = c.search_features(feature_set_id=feature_set_id, name='.*KIT.*')
features = list(feature_generator)
assert len(features) > 0
print "Found {} features. First five...".format(len(features),feature_set_id)
print "\n".join([a.description for a in associations][:5])
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Get all associations
Then we select all the associations for those features.
|
feature_phenotype_associations = c.search_genotype_phenotype(
phenotype_association_set_id=phenotype_association_set_id,
feature_ids=[f.id for f in features])
associations = list(feature_phenotype_associations)
print "There are {} associations. First five...".format(len(associations))
print "\n".join([a.description for a in associations][:5])
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Association Heatmap
Developers can use the G2P package to create researcher friendly applications.
Here we take the results from the GA4GH queries and create a dataframe showing association counts.
|
# Build a bokeh heatmap of feature-vs-phenotype association counts, plus a
# companion table of the raw association descriptions.
from bokeh.charts import HeatMap, output_notebook, output_file, show
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.models import HoverTool

# Map feature id -> feature name so heatmap axes show readable labels.
feature_ids = {}
for feature in features:
    feature_ids[feature.id] = feature.name

# Parallel column lists: one entry per (association, feature) pair.
phenotype_descriptions = []
feature_names = []
association_count = []
association_descriptions = []
for association in associations:
    for feature_id in association.feature_ids:
        phenotype_descriptions.append(association.phenotype.description)
        feature_names.append(feature_ids[feature_id])
        association_count.append(1)  # unit weight; the chart aggregates with stat='sum'
        association_descriptions.append(association.description)

# Emit the chart both inline in the notebook and to a standalone HTML file.
output_notebook()
output_file("g2p_heatmap.html")

data = {'feature': feature_names,
        'association_count': association_count,
        'phenotype': phenotype_descriptions,
        'association_descriptions': association_descriptions
        }

# Tooltip shown when hovering a heatmap cell.
hover = HoverTool(
    tooltips=[
        ("associations", "@values")
    ]
)
hm = HeatMap(data, x='feature', y='phenotype', values='association_count',
             title='G2P Associations for KIT', stat='sum',
             legend=False, width=1024,
             tools=[hover],  # "hover,pan,wheel_zoom,box_zoom,reset,tap",
             toolbar_location="above")

# Table beneath the heatmap listing every association description.
source = ColumnDataSource(data)
columns = [
    TableColumn(field="association_descriptions", title="Description"),
]
data_table = DataTable(source=source, columns=columns, width=1024)
show(column(hm, data_table))
|
python_notebooks/g2p-example-notebook.ipynb
|
david4096/bioapi-examples
|
apache-2.0
|
Now that we have our electrode positions in MRI coordinates, we can create
our measurement info structure.
|
info = mne.create_info(ch_names, 1000., 'ecog').set_montage(montage)
|
0.20/_downloads/f760cc2f1a5d6c625b1e14a0b05176dd/plot_ecog.ipynb
|
mne-tools/mne-tools.github.io
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.