repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
mne-tools/mne-tools.github.io | 0.18/_downloads/4eb6243ca7f447169baac6cdad977ee8/plot_stats_spatio_temporal_cluster_sensors.ipynb | bsd-3-clause | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mne.viz import plot_topomap
import mne
from mne.stats import spatio_temporal_cluster_test
from mne.datasets import sample
from mne.channels import find_ch_connectivity
from mne.viz import plot_compare_evokeds
print(__doc__)
"""
Explanation: Spatiotemporal permutation F-test on full sensor data
Tests for differential evoked responses in at least
one condition using a permutation clustering test.
The FieldTrip neighbor templates will be used to determine
the adjacency between sensors. This serves as a spatial prior
to the clustering. Spatiotemporal clusters will then
be visualized using custom matplotlib code.
Caveat for the interpretation of "significant" clusters: see
the FieldTrip website_.
End of explanation
"""
# Locate (downloading on first use) the MNE "sample" audio/visual dataset.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Condition labels -> trigger codes (Auditory/Visual x Left/Right).
event_id = {'Aud/L': 1, 'Aud/R': 2, 'Vis/L': 3, 'Vis/R': 4}
# Epoch window in seconds relative to stimulus onset.
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# Band-pass 1-30 Hz with a linear-phase FIR filter before epoching.
raw.filter(1, 30, fir_design='firwin')
events = mne.read_events(event_fname)
"""
Explanation: Set parameters
End of explanation
"""
# Select magnetometers; EOG is included only so it can drive epoch rejection.
picks = mne.pick_types(raw.info, meg='mag', eog=True)
# Peak-to-peak rejection thresholds (magnetometers in Tesla, EOG in Volts).
reject = dict(mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=None, reject=reject, preload=True)
# The EOG channel has done its job (artifact rejection); drop it so only
# magnetometers enter the statistics.
epochs.drop_channels(['EOG 061'])
# Equalize trial counts across conditions to keep the F-test balanced.
epochs.equalize_event_counts(event_id)
X = [epochs[k].get_data() for k in event_id]  # as 3D matrix
# spatio_temporal_cluster_test expects (observations, times, channels)
X = [np.transpose(x, (0, 2, 1)) for x in X]  # transpose for clustering
"""
Explanation: Read epochs for the channel of interest
End of explanation
"""
# Build the magnetometer adjacency matrix from the FieldTrip neighbor
# template; it defines which sensors count as spatial neighbors when
# forming clusters.
connectivity, ch_names = find_ch_connectivity(epochs.info, ch_type='mag')
print(type(connectivity))  # it's a sparse matrix!
# Visualize the (n_channels x n_channels) boolean adjacency structure.
plt.imshow(connectivity.toarray(), cmap='gray', origin='lower',
           interpolation='nearest')
plt.xlabel('{} Magnetometers'.format(len(ch_names)))
plt.ylabel('{} Magnetometers'.format(len(ch_names)))
plt.title('Between-sensor adjacency')
"""
Explanation: Find the FieldTrip neighbor definition to setup sensor connectivity
End of explanation
"""
# set cluster threshold
threshold = 50.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01
# tail=1: one-sided test (F statistics are non-negative); `connectivity`
# supplies the spatial adjacency prior between sensors.
cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
                                             threshold=threshold, tail=1,
                                             n_jobs=1, buffer_size=None,
                                             connectivity=connectivity)
# T_obs: observed F map; clusters: per-cluster index arrays;
# p_values: one permutation p-value per cluster.
T_obs, clusters, p_values, _ = cluster_stats
# Keep only clusters surviving the family-wise significance threshold.
good_cluster_inds = np.where(p_values < p_accept)[0]
"""
Explanation: Compute permutation statistic
How does it work? We use clustering to bind together features which are
similar. Our features are the magnetic fields measured over our sensor
array at different times. This reduces the multiple comparison problem.
To compute the actual test-statistic, we first sum all F-values in all
clusters. We end up with one statistic for each cluster.
Then we generate a distribution from the data by shuffling our conditions
between our samples and recomputing our clusters and the test statistics.
We test for the significance of a given cluster by computing the probability
of observing a cluster of that size. For more background read:
Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
doi:10.1016/j.jneumeth.2007.03.024
End of explanation
"""
# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
linestyles = {"L": '-', "R": '--'}
# get sensor positions via layout
pos = mne.find_layout(epochs.info).pos
# organize data for plotting: one grand-average evoked per condition
evokeds = {cond: epochs[cond].average() for cond in event_id}
# loop over clusters, producing one topomap + time-course figure per cluster
for i_clu, clu_idx in enumerate(good_cluster_inds):
    # unpack cluster information, get unique indices
    # (each cluster is a pair of aligned arrays of time/sensor indices)
    time_inds, space_inds = np.squeeze(clusters[clu_idx])
    ch_inds = np.unique(space_inds)
    time_inds = np.unique(time_inds)
    # get topography for F stat, averaged over the cluster's time window
    f_map = T_obs[time_inds, ...].mean(axis=0)
    # get signals at the sensors contributing to the cluster
    sig_times = epochs.times[time_inds]
    # create spatial mask marking the cluster's sensors on the topomap
    mask = np.zeros((f_map.shape[0], 1), dtype=bool)
    mask[ch_inds, :] = True
    # initialize figure
    fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
    # plot average test statistic and mark significant sensors
    # (passing the np.min/np.max callables lets plot_topomap derive the
    # color limits from the data itself)
    image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo, cmap='Reds',
                            vmin=np.min, vmax=np.max, show=False)
    # create additional axes (for ERF and colorbar)
    divider = make_axes_locatable(ax_topo)
    # add axes for colorbar
    ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
    plt.colorbar(image, cax=ax_colorbar)
    ax_topo.set_xlabel(
        'Averaged F-map ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))
    # add new axis for time courses and plot time courses
    ax_signals = divider.append_axes('right', size='300%', pad=1.2)
    title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
    if len(ch_inds) > 1:
        title += "s (mean)"
    plot_compare_evokeds(evokeds, title=title, picks=ch_inds, axes=ax_signals,
                         colors=colors, linestyles=linestyles, show=False,
                         split_legend=True, truncate_yaxis='max_ticks')
    # plot temporal cluster extent as a shaded band over the time courses
    ymin, ymax = ax_signals.get_ylim()
    ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
                             color='orange', alpha=0.3)
    # clean up viz
    mne.viz.tight_layout(fig=fig)
    fig.subplots_adjust(bottom=.05)
    plt.show()
"""
Explanation: Note. The same functions work with source estimate. The only differences
are the origin of the data, the size, and the connectivity definition.
It can be used for single trials or for groups of subjects.
Visualize clusters
End of explanation
"""
|
dato-code/tutorials | notebooks/introduction_to_sframes.ipynb | apache-2.0 | import graphlab as gl
"""
Explanation: Introduction to SFrames
What is an SFrame?
Note: This notebook uses GraphLab Create 1.7.
An SFrame is a tabular data structure. If you are familiar with R or the pandas python package, SFrames behave similarly to the dataframes available in those frameworks. SFrames act like a table by consisting of 0 or more columns. Each column has its own datatype and every column of a particular SFrame must have the same number of entries as the other columns that already exist. There are two things that make SFrames very different from other dataframes:
Each column is an SArray, which is a series of elements stored on disk. This makes SFrames disk-based and therefore able to hold datasets that are too large to fit in your system's memory. You'll see this come in to play throughout this demo.
An SFrame's data is located on the server that is running the GraphLab toolkits, which is not necessarily on your client machine. While this example does not demonstrate working with a GraphLab server on a different machine, you can see that in action <a href="https://turi.com/learn/notebooks/running_in_the_cloud.html">here</a>.
This tutorial shows you how to import data into an SFrame, do some basic data cleaning/exploration, and save your work for later. If you are someone that likes to learn these things through reading comprehensive documentation instead of tutorials, then you can visit our <a href="https://turi.com/products/create/docs/generated/graphlab.SFrame.html">API Reference</a> first. If not, read on!
Getting Started: Creating SFrames
First we will get set up with import statements for this tutorial.
End of explanation
"""
# In order to interact with S3 we need to set our AWS credentials.
# You can use your own credentials or use the ones below.
# SECURITY NOTE (review): these are the demo keys published with the
# original tutorial. Never commit real AWS credentials to source control.
gl.aws.set_credentials('AKIAJMHKEZGY6YP24BXA', 'vf/miz2Zx7V7VkCai9ZeJR45ZSimqu6/W7qdRLmN')
# The below will download a 78 MB file.
# Only 'year' is hinted as int; unhinted columns are inferred (strings here).
song_sf = gl.SFrame.read_csv('https://static.turi.com/datasets/millionsong/song_data.csv',
                             column_type_hints = {'year' : int})
song_sf.num_rows()
"""
Explanation: Reading a csv file from an S3 bucket is just one way to import your data into an SFrame. The read_csv function gives you lots of control over where to read your data from and how to parse it, which you can read about <a href="https://turi.com/products/create/docs/generated/graphlab.SFrame.read_csv.html#graphlab.SFrame.read_csv">here</a>. The column_type_hints option is important to highlight though. By default, SFrame tries to infer the types of the values it is parsing and usually does well, but providing a hint for the type of a column ensures it is parsed the way you intend. If type inference fails on a particular column, SFrame will simply interpret that column as a string. Here, only the year column is of type int, while the rest are strings.
(The csv file of song metadata comes from the Million Song Dataset. This data set was used for a Kaggle challenge and includes data from The Echo Nest, SecondHandSongs, musiXmatch, and Last.fm.)
End of explanation
"""
#song_sf = gl.SFrame.read_csv('https://static.turi.com/datasets/millionsong/song_data.csv', header=False,
# column_type_hints = {'X5' : int})
#song_sf.head(1)
"""
Explanation: If the csv file we want to read does not have a header, we can still provide column_type_hints, but with GraphLab's default column names. Below is the code that would accomplish this, but I have commented it out because I don't want to affect the dataset we work with in the rest of this tutorial.
End of explanation
"""
song_sf.save('orig_song_data')
"""
Explanation: Before we start playing with this data, I want to highlight that you can save and load an SFrame for later use. This is great if you don't want to re-download a file from S3 a bunch of times, or re-parse a large csv file. Here's how to save to your current directory:
End of explanation
"""
song_sf = gl.load_sframe('orig_song_data')
"""
Explanation: That save operation takes some time because it copies the files SFrame uses to the given location (in this case, an auto-created directory called 'orig_song_data'). The load operation, however, is instantaneous. This is one of the perks of using a disk-backed dataframe.
End of explanation
"""
song_sf.head(5)
song_sf.tail(5)
song_sf.num_rows(), len(song_sf)
song_sf.num_cols()
song_sf.column_names()
song_sf.column_types()
"""
Explanation: Viewing data
I can emit several commands to see that we are working with a fairly tame dataset. After all, we only have five columns.
End of explanation
"""
year_i_was_born = 1988
# Count the number of words in each song title and add the word count as a new feature
song_sf['title_length'] = song_sf['title'].apply(lambda x: len(x.split()))
# Count how old I was when this song came out
# (rows with the sentinel year 0 produce a nonsensical negative age;
# those are cleaned up later in the tutorial)
song_sf.add_column(song_sf.select_column('year').apply(lambda x: x - year_i_was_born),
                   'how_old_was_i')
# Add a 0 rating for every song (a scalar broadcasts to every row)
song_sf['my_rating'] = 0
song_sf.head(5)
"""
Explanation: Modifying an SFrame
Alright, I want a little more out of this SFrame. I want to add a few columns. Let's say I care about the length of the title of each song, what I've rated the song, and how many years old I was when the song was created.
End of explanation
"""
song_sf['my_rating'] = 1
song_sf.head(5)
"""
Explanation: Clearly songs with a '0' year are a problem, but we'll cover that later.
A few things to cover from the snippet above:
I can either use 'add_column'/'select_column', or just use python's index syntax to complete the same task.
It is easy to create new columns from an existing one by using 'apply' to apply a function to each element.
If you want a column to have the same value for every entry, just assign a single value to it. We can do this with existing columns as well:
End of explanation
"""
song_sf[['dumb_col','dumb_col2']] = [song_sf['title_length'],song_sf['my_rating']]
song_sf.head(5)
"""
Explanation: We can also add several columns at a time:
End of explanation
"""
# rename takes an {old_name: new_name} dict; swap_columns reorders in place.
song_sf.rename({'dumb_col2' : 'another_dumb_col'})
song_sf.swap_columns('dumb_col', 'another_dumb_col')
# Deleting columns is cheap: an SFrame only holds references to SArrays.
del song_sf['dumb_col']
del song_sf['another_dumb_col']
song_sf.head(5)
"""
Explanation: But maybe that was a dumb idea. Let's get rid of those. Before I do that, I'll show you how to rename and swap column ordering. Why not?
End of explanation
"""
song_sf.column_types()
"""
Explanation: Still with me? Notice that the column types for the transformed columns are correct.
End of explanation
"""
# Cast the rating column from int to float by rebuilding it via astype.
song_sf['my_rating'] = song_sf['my_rating'].astype(float)
song_sf.column_types()
"""
Explanation: Hold on though, I think I'd actually like the rating to be a float.
End of explanation
"""
# Count occurrences of the word 'love' across album, title and artist name.
# When .apply runs on an SFrame (not a single SArray) each row arrives as a
# dict keyed by column name, hence row.values().
song_sf['love_count'] = song_sf[['release', 'title', 'artist_name']].apply(
    lambda row: sum(x.lower().split().count('love') for x in row.values()))
song_sf.topk('love_count').head(5)
"""
Explanation: To create even more interesting feature columns, you may want to apply a function using multiple (or all) columns. When you apply a function to an SFrame (instead of just an SArray like I did earlier), the input to the function is a dictionary where the keys are your column names. Here I'd like to know what combination of song title, album title, and artist name mentions the word 'love' the most:
End of explanation
"""
# Count songs per year; gl.aggregate.COUNT adds a 'Count' column.
year_count = song_sf.groupby('year', gl.aggregate.COUNT)
print year_count.head()
print "Number of unique years: " + str(len(year_count))
# NOTE(review): only the label is printed here; the count itself comes from
# the bare topk expression below (shown as notebook cell output).
print "Number of invalid years: "
# reverse=True, k=1 returns the smallest year -- presumably the sentinel
# year 0, whose Count is the number of invalid entries.
year_count.topk('year', reverse=True, k=1)
"""
Explanation: We can see from these examples that adding and deleting columns is a simple task for an SFrame. This is because an SFrame is essentially the keeper of references to columns (SArrays), so adding and deleting columns is a very cheap operation. However, the fact that SFrames store their data on disk produces some important limitations when thinking about editing an SFrame:
SFrames are immutable with respect to column size and data.
SFrames do not support random access of elements and are not indexed.
Sequential access is king on disk, and this is very useful to remember when working with SFrames. This means that inspecting a specific row would perform quite poorly and writing to a specific row is not possible. However, while working with SFrames you'll find that you can still accomplish nearly all of what you would have done with a more classic dataframe using transform and filter operations, yet you'll still reap the huge benefit of creating SFrames that are larger than the size of your machine's memory. So let's learn about filtering!
Filtering and Missing Values
I think I want to take care of those invalid year entries now. I don't really know how many there are, so I'll find out, as the answer to that may change what I do.
End of explanation
"""
# Map the sentinel year 0 to a missing value (None) instead of dropping rows.
song_sf['year'] = song_sf['year'].apply(lambda x :None if x == 0 else x)
song_sf.head(5)
"""
Explanation: Yikes, that's almost half of my dataset. Maybe I don't want to just get rid of that data. SFrames support missing values, and these are represented using 'None'. We will transform the appropriate values to missing here:
End of explanation
"""
song_sf['how_old_was_i'] = song_sf['year'].apply(lambda x : x - year_i_was_born)
song_sf.head(5)
"""
Explanation: To show that normal operations work on columns with missing values, we will do the 'how_old_was_i' transformation again.
End of explanation
"""
song_sf_valid_years = song_sf[song_sf['year'] > 0]
print "Length of trimmed data: " + str(len(song_sf_valid_years))
song_sf_valid_years.head(5)
"""
Explanation: However, if I actually did want to filter out these missing values, that is easy too.
End of explanation
"""
tmp = song_sf['year'] > 0
tmp
"""
Explanation: What I'm showing off here is that we can filter an SFrame by an SArray, where only the SFrame rows that correspond to the given SArray entries evaluating to True will make it through the filter. This happens when an SArray is given as the index of an SFrame. Furthermore, we can create a new SArray from an existing one by using any of the comparison operators. To execute this filter we did both of these things, but you can do them in isolation as well. Here I show the resulting SArray from running the '> 0' operation in isolation:
End of explanation
"""
# Filter with chained boolean SArrays. The bitwise operators (&, |) must be
# used because Python's 'and'/'or' cannot be overloaded, and each comparison
# needs parentheses since & and | bind more tightly than the comparisons.
my_fav_hs_songs = song_sf[((song_sf['artist_name'] == 'Relient K')
                           | (song_sf['artist_name'] == 'Streetlight Manifesto'))
                          & (song_sf['how_old_was_i'] >= 14) & (song_sf['how_old_was_i'] <= 18)]
my_fav_hs_songs
"""
Explanation: Keep in mind that the SArray must be the same length of the SFrame in order to filter. This also works with more complicated, chained filters with logical operators. Here's a list of songs that came out while I was in high school by a couple of my favorite bands in that period of my life:
End of explanation
"""
# Look at lots of descriptive statistics of title_length
# (these are exact, column-wise SArray reductions; compare with the
# approximate sketch_summary used later in the tutorial)
print "mean: " + str(song_sf['title_length'].mean())
print "std: " + str(song_sf['title_length'].std())
print "var: " + str(song_sf['title_length'].var())
print "min: " + str(song_sf['title_length'].min())
print "max: " + str(song_sf['title_length'].max())
print "sum: " + str(song_sf['title_length'].sum())
print "number of non-zero entries: " + str(song_sf['title_length'].nnz())
"""
Explanation: That's not all of them, but that's a pretty decent selection for a dataset of a million songs. Notice that I had to use the bitwise operators instead of the 'and'/'or' keyword. Python does not allow the overloading of logical operators, so remember to use the bitwise ones.
Descriptive Statistics
The descriptive statistics below are operations done on the SArray, and cannot be done on the SFrame.
End of explanation
"""
approx_sketch = song_sf['title_length'].sketch_summary()
print approx_sketch
"""
Explanation: We can accomplish essentially the same thing by getting a sketch_summary on this column. This will give the exact values of the descriptive statistics I asked for above, and then give approximate values of some other useful stuff like quantiles and counts of unique values. These values are approximate because performing the real operation on a dataset that is larger than your memory size could exhaust your memory or take too long to compute. Each operation has well-defined bounds on how wrong the answer will be, which are listed in our <a href="https://turi.com/products/create/docs/generated/graphlab.Sketch.html">API Reference</a>.
End of explanation
"""
print approx_sketch.quantile(.25)
print approx_sketch.quantile(.75)
print approx_sketch.quantile(.993)
print approx_sketch.quantile(.995)
print approx_sketch.quantile(.997)
"""
Explanation: Saving the return value from sketch_summary gives you a graphlab.Sketch object, which can be queried further (details <a href="https://turi.com/products/create/docs/generated/graphlab.Sketch.html">here</a>). Here, I can drill deeper into those quantiles:
End of explanation
"""
top10_titles = song_sf.topk('title_length')
top10_titles
top10_titles['title'][0]
"""
Explanation: But wow...47 words?!? I gotta see what that song is.
End of explanation
"""
song_sf.topk('title_length', k=5, reverse=True)
"""
Explanation: Makes sense...looks like a song with several movements. I'm somewhat curious about the titles with no words too.
End of explanation
"""
before_i_was_born = song_sf['how_old_was_i'] < 0
before_i_was_born.all(), before_i_was_born.any()
"""
Explanation: Here are a couple boolean operations too, with which I can prove that there were, in fact, songs before I was born. Just not all of them.
End of explanation
"""
# Group by (artist, album), count songs per group, then show the top albums.
song_sf.groupby(['artist_name', 'release'], {'num_songs_in_album' : gl.aggregate.COUNT}).topk('num_songs_in_album')
"""
Explanation: Perhaps let's try some deeper analysis, like what albums have the most songs?
End of explanation
"""
# Tab-separated usage triplets (user, song, play count). There is no header
# row, so columns get default names X1..X3, which we rename immediately.
usage_data = gl.SFrame.read_csv("https://static.turi.com/datasets/millionsong/10000.txt", header=False, delimiter='\t', column_type_hints={'X3':int})
usage_data.rename({'X1':'user_id', 'X2':'song_id', 'X3':'listen_count'})
"""
Explanation: Our groupby function only supports aggregation after grouping. The aggregation functions you can use are listed <a href="https://turi.com/products/create/docs/generated/graphlab.data_structures.html#module-graphlab.aggregate">here</a>.
You can only go so far in analyzing this data though. We might want to match this data with user information, like how many times a certain person played one of these songs. For that, we need the join function, but first we need to read this data in as an SFrame:
End of explanation
"""
# Total distinct listeners in the usage dataset.
num_users = len(usage_data['user_id'].unique())
print num_users
# Inner join on song_id keeps only listens of the high-school favorites.
fav_hs_listen_data = my_fav_hs_songs.join(usage_data, 'song_id')
num_fav_hs_users = len(fav_hs_listen_data['user_id'].unique())
print num_fav_hs_users
# Fraction of all users who listened to at least one of those songs.
print float(num_fav_hs_users) / float(num_users)
"""
Explanation: I could just join the listen data with the song data, but maybe I'll do something a bit more interesting. Let's find out how many users from this dataset have listened to any one of those songs from my high school times, compared to the total number of users. First we need the total number of users:
End of explanation
"""
# Per-song totals: summed play counts and number of distinct listeners.
most_popular = fav_hs_listen_data.groupby(['song_id'], {'total_listens':gl.aggregate.SUM('listen_count'),
                                                        'num_unique_users':gl.aggregate.COUNT('user_id')})
# Join back to the metadata to attach titles, then rank by total listens.
most_popular.join(song_sf, 'song_id').topk('total_listens',k=20)
"""
Explanation: That's really small. Those other people don't know what they're missing. Maybe the small proportion is simply because I'm only using a list of 42 songs. For kicks, what is the most popular song of that set of songs?
End of explanation
"""
# Fabricate a single listen record and append it (append returns a new SFrame).
me = gl.SFrame({'user_id':['evan'],'song_id':['SOSFAVU12A6D4FDC6A'],'listen_count':[4000]})
usage_data = usage_data.append(me)
# Recompute the join and popularity ranking with the injected row included.
fav_hs_listen_data = my_fav_hs_songs.join(usage_data, 'song_id')
most_popular = fav_hs_listen_data.groupby(['song_id'], {'total_listens':gl.aggregate.SUM('listen_count'),
                                                        'num_unique_users':gl.aggregate.COUNT('user_id')})
most_popular.join(song_sf, 'song_id').topk('total_listens',k=20)
"""
Explanation: ...and only 5 even got listens, but "Keasbey Nights" wins from this small subset. Now, suppose I was a cheater and wanted to make this look a little better? I'll pretend I am so you can see 'append' in action.
End of explanation
"""
# Randomly split data rows into two subsets
first_set, second_set = song_sf.random_split(0.8, seed = 1)
first_set.num_rows(), second_set.num_rows()
"""
Explanation: Splitting and Sampling
We're almost done with the tour of features. For easy splitting into training and test sets, we have the random_split function:
End of explanation
"""
songs_before = song_sf[song_sf['how_old_was_i'] < 0]
songs_after = song_sf[song_sf['how_old_was_i'] >= 0]
songs_before.num_rows(), songs_after.num_rows()
"""
Explanation: If you want to split on a predicate though, you'll have to do that manually.
End of explanation
"""
pct37 = song_sf.sample(.37)
pct37.num_rows()
"""
Explanation: We can also get a random sample of the dataset.
End of explanation
"""
sa = gl.SArray([1,2,3])
sa2 = sa * 2
print sa2
"""
Explanation: Other Cool Features
SArrays support lots of mathematical operations. They can be performed with a scalar
End of explanation
"""
add = sa + sa2
div = sa / sa2
print add
print div
"""
Explanation: ...or they can be performed element-wise with another SArray.
End of explanation
"""
# Iterating an SFrame yields one dict per row, keyed by column name.
for i in song_sf:
    if i['title_length'] >= 45:
        print "Whoa that's long!"
"""
Explanation: You can also iterate over SArrays and SFrames. When iterating over an SFrame, the returned element is a Python dictionary.
End of explanation
"""
song_sf.save('new_song_data.csv', format='csv')
"""
Explanation: Saving Our Work
I think I'm done exploring this dataset, but I'd like to save it for later. There's a couple ways I can do this. I can save it to a csv:
End of explanation
"""
song_sf.save('new_song_data_sframe')
"""
Explanation: Or I can just save it as an SFrame as I showed earlier.
End of explanation
"""
# In order to save to S3, you will need to use your own bucket and your own credentials.
# You can set your AWS credentials using the below function:
# graphlab.aws.set_credentials(<access_key_id>, <secret_access_key>)
#song_sf.save('https://static.turi.com/datasets/my_sframes/new_song_sframe') # S3://<bucket-name>/<file-path>
"""
Explanation: And of course, we can do all of this on S3. Note that if you download this notebook and run it, you won't be able to save to our datasets bucket. Simply set your AWS credentials and uncomment the code below (replacing our S3 bucket with yours) to see this in action.
End of explanation
"""
# The below will download about 78 MB.
#hello_again = gl.load_sframe('https://static.turi.com/datasets/my_sframes/new_song_sframe')
"""
Explanation: Now to load an SFrame back, we use the handy 'load_sframe' function like before. This takes the name of the sframe's top level directory:
End of explanation
"""
|
wdbm/Psychedelic_Machine_Learning_in_the_Cenozoic_Era | TensorFlow_introduction.ipynb | gpl-3.0 | import tensorflow as tf
print('TensorFlow version:', tf.__version__)
"""
Explanation: TensorFlow introduction: the art of the sesh
This introduction seeks to broach a few basic topics in TensorFlow: what it is, how operations and data are defined for its computational graphs and how its operations are visualized. In doing this, some basic examples are shown, involving linear regression and basic optimizer usage.
What the shit is TensorFlow?
TensorFlow is an open source software library for numerical computation using data flow graphs. In a data flow graph, nodes represent mathematical operations and edges represent the multidimensional data arrays (tensors) communicated between them.
TensorFlow is usually used in Python, though in the background it is using hardcore efficient code to parallelize its calculations a lot and is well-suited to GPU hardware. The Python convention to import TensorFlow is as follows:
End of explanation
"""
# Reduce TensorFlow's C++ logging spam. TF_CPP_MIN_LOG_LEVEL is an
# *environment variable* read by TensorFlow's native code, not an attribute
# of the tf module -- the previous `tf.TF_CPP_MIN_LOG_LEVEL = 3` merely
# created an unused attribute and silenced nothing.
# Levels: 0 = all messages, 1 = hide INFO, 2 = also hide WARNING,
# 3 = also hide ERROR.
# NOTE(review): for full effect this should be set before tensorflow is
# first imported.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
"""
Explanation: It can be helpful to hide some TensorFlow logging messages:
End of explanation
"""
node_1 = tf.constant(3.0, tf.float32)
node_2 = tf.constant(4.0) # (also tf.float32 by default)
print("node_1: {node}".format(node=node_1))
print("node_2: {node}".format(node=node_2))
"""
Explanation: tensors, ranks, shapes and types
The central unit of data in TensorFlow is the tensor, in the sense of it being an array of some arbitrary dimensionality. A tensor of rank 0 is a scalar, a tensor of rank 1 is a vector, a tensor of rank 2 is a matrix, a tensor of rank 3 is a 3-tensor, and so on.
|rank|mathamatical object|shape |example |
|--------|-----------------------|-----------|----------------------------------|
|0 |scalar |[] |3 |
|1 |vector |[3] |[1. ,2., 3.] |
|2 |matrix |[2, 3] |[[1., 2., 3.], [4., 5., 6.]] |
|3 |3-tensor |[2, 1, 3]|[[[1., 2., 3.]], [[7., 8., 9.]]]|
|n |n-tensor |... |... |
The various number types that TensorFlow can handle are as follows:
|data type|Python type|description |
|-------------|-----------|----------------------|
|DT_FLOAT |t.float32|32 bits floating point|
|DT_DOUBLE |t.float64|64 bits floating point|
|DT_INT8 |t.int8 |8 bits signed integer |
|DT_INT16 |t.int16 |16 bits signed integer|
|DT_INT32 |t.int32 |32 bits signed integer|
|DT_INT64 |t.int64 |64 bits signed integer|
TensorFlow mechanics: computational graphs and nodes
TensorFlow programs are defined as computational graphs. For TensorFlow, a computational graph is a series of TensorFlow operations arranged in a graph of nodes. A node takes zero or more tensors as inputs and produces a tensor as an output. Generally, a TensorFlow program consists of sections like these:
1 Build a graph using TensorFlow operations.
2 Feed data to TensorFlow and run the graph.
3 Update variables in the graph and return values.
A simple TensorFlow node is a constant. It takes no inputs and simply outputs a value that it stores internally. Here are some constants:
End of explanation
"""
sesh = tf.Session()
sesh.run([node_1, node_2])
"""
Explanation: The printouts of the nodes do not evaluate the outputs the nodes would produce, but show simply what the nodes would evaluate. To evaluate nodes, the computational graph is run in an encapsulation of the control and state of the TensorFlow runtime called a TensorFlow "session".
session
A Session is a class for running TensorFlow operations. A session object encapsulates the environment in which operations are executed and tensors are evaluated. For example, sesh.run(c) evaluates the tensor c.
A session is run using its run method:
Python
tf.Session.run(
fetches,
feed_dict = None,
options = None,
run_metadata = None
)
This method runs operations and evaluates tensors in fetches. It returns one epoch of TensorFlow computation, by running the necessary graph fragment to execute every operation and evaluate every tensor in fetches, substituting the values in feed_dict for the corresponding input values. The fetches option can be a single graph element, or an arbitrary nested list, tuple, namedtuple, dict or OrderedDict containing graph elements at its leaves. The value returned by run has the same shape as the fetches argument, where the leaves are replaced by the corresponding values returned by TensorFlow.
So, those constant nodes could be evaluated like this:
End of explanation
"""
node_3 = tf.add(node_1, node_2)
node_3
sesh.run(node_3)
"""
Explanation: More complicated nodes than constants are operations. For example, two constant nodes could be added:
End of explanation
"""
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a+b # + provides a shortcut for tf.add(a, b)
adder_node
"""
Explanation: This is, of course, a trivial mathematical operation, but it has been performed using very computationally efficient infrastructure. Far more complicated operations can be encoded in a computational graph and run using TensorFlow.
placeholders
A computational graph can be parameterized to accept external inputs. These entry points for data are called placeholders.
So, let's create some placeholders that can hold 32 bit floating point numbers and let's also make a node for the addition operation applied to these placeholders:
End of explanation
"""
print(sesh.run(adder_node, {a: 3, b: 4}))
"""
Explanation: The feed_dict parameter of a session run method is used to input data to these placeholders:
End of explanation
"""
print(sesh.run(adder_node, {a: [3, 4], b: [5, 6]}))
"""
Explanation: The same can be done with multiple values:
End of explanation
"""
add_and_triple = adder_node * 3.
sesh.run(add_and_triple, {a: 3, b: 4})
"""
Explanation: You can start to see now how parallelism is core to TensorFlow.
Further nodes can be added to the computational graph easily:
End of explanation
"""
# Trainable parameters (weight and bias) with initial values; x is a
# placeholder fed at run time, so one graph evaluates the model for any
# batch of inputs.
W = tf.Variable([ .3], dtype=tf.float32)
b = tf.Variable([- .3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W*x+b
"""
Explanation: variables
Variables are nodes that have values that can change. These are used to have variable values in models, to make models trainable. A variable is defined with a type and an initial value.
We can make a linear model featuring changable variables like this:
End of explanation
"""
sesh = tf.Session()
init = tf.global_variables_initializer()
sesh.run(init)
"""
Explanation: Constants are initialized when they are called and their value doesn't change, but variables are initialized in a TensorFlow program using a special operation:
End of explanation
"""
print(sesh.run(linear_model, {x: [1, 2, 3, 4]}))
"""
Explanation: Since x is a placeholder, this linear model can be evaluated for several x values in parallel:
End of explanation
"""
weights = tf.Variable(
tf.random_normal(
[784, 200],
stddev=0.35
),
name = "weights"
)
with tf.Session() as sesh:
sesh.run(tf.global_variables_initializer())
print(sesh.run(weights))
"""
Explanation: A large number of values can be stored in a variable easily, like this:
End of explanation
"""
# Demonstrate mutating a variable: tf.assign returns an op that, when run,
# writes the new value into the variable.
a = tf.Variable(10, dtype=tf.float32)
with tf.Session() as sesh:
    sesh.run(tf.global_variables_initializer())
    print("initial variable value: {value}".format(value = sesh.run(a)))
    sesh.run(tf.assign(a, 20))
    print("reassigned variable value: {value}".format(value = sesh.run(a)))
"""
Explanation: The value of a variable can be changed using operating like tf.assign:
End of explanation
"""
W = tf.Variable([ .3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W*x+b
"""
Explanation: loss function
A loss function measures how far a model is from provided data. For a linear regression model, a standard loss function is the sums of the squares of the deltas between the current model and the provided data.
So, here we create a linear model:
End of explanation
"""
y = tf.placeholder(tf.float32)
"""
Explanation: We create a placeholder for our target values:
End of explanation
"""
loss = tf.reduce_sum(tf.square(linear_model-y))
"""
Explanation: We create the loss function for the model:
End of explanation
"""
# Evaluate the sum-of-squares loss for four (x, y) training pairs using the
# untrained initial parameters (W=.3, b=-.3).
with tf.Session() as sesh:
    sesh.run(tf.global_variables_initializer())
    results = sesh.run(
        loss,
        {
            x: [1, 2, 3, 4],
            y: [0, -1, -2, -3]
        }
    )
    print(results)
"""
Explanation: We launch a TensorFlow session, initialize graph variables (W, b), specify the target values (y), specify the model parameters to try (x) and run:
End of explanation
"""
# Gradient descent nudges W and b along the negative gradient of the loss.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(loss)
with tf.Session() as sesh:
    sesh.run(tf.global_variables_initializer())
    # 1000 training steps over the same four points.
    for i in range(1000):
        sesh.run(
            train,
            {
                x: [1, 2, 3, 4],
                y: [0, -1, -2, -3]
            }
        )
    # The fit should approach the exact solution W = -1, b = 1.
    print(sesh.run([W, b]))
"""
Explanation: In machine learning, a simple linear model like this would be modified automatically by changing the variables W and b to try to find good model parameters. In this example, ideal values would be W = -1 and b = 1, which would result in the loss function being 0.
training optimizers
Optimizers change variables in models in order to minimize loss functions. There is a lot of study ongoing on optimizers and there are many types. A simple optimizer is gradient descent. It modifies each variable according to the magnitude of the derivative of loss with respect to that variable.
Let's see the gradient descent optimize our linear regression model. We can do this by defining the optimizer (including its learning rate), defining what it is trying to minimize and then running that minimization in TensorFlow (instead of just running the loss function):
End of explanation
"""
import tensorflow as tf
# Start from a clean graph so reruns of this cell do not accumulate ops.
tf.reset_default_graph()
# Inputs and targets are fed at run time; W and b are the trainable parameters.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
W = tf.Variable([ .3], dtype = tf.float32)
b = tf.Variable([-.3], dtype = tf.float32)
linear_model = W*x+b
# Sum-of-squared-errors loss.
loss = tf.reduce_sum(tf.square(linear_model-y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(loss)
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
with tf.Session() as sesh:
    sesh.run(tf.global_variables_initializer())
    # Run 1000 gradient-descent steps on the training data.
    for i in range(1000):
        sesh.run(
            train,
            {
                x: x_train,
                y: y_train
            }
        )
    # Fetch the fitted parameters and the final loss in a single run call.
    current_W, current_b, current_loss = sesh.run(
        [W, b, loss],
        {
            x: x_train,
            y: y_train
        }
    )
    print('W: {W}, b: {b}, loss: {loss}'.format(W=current_W, b=current_b, loss=current_loss))
"""
Explanation: Here we can see the linear model parameters that resulted from this gradient descent minimization, and they are pretty close to -1 and -1, which is cool. Tous en choeur maintenant, the code is as follows:
End of explanation
"""
|
QFinancier/blog | give_me_data_or_death/give_me_good_data_or_give_me_death.ipynb | mit | import pandas as pd
import numpy as np
# Create a sizeable benchmark dataset: 1M rows of random OHLC data indexed
# at millisecond frequency ('L' is the pandas alias for milliseconds).
n_obs = 1000000
idx = pd.date_range('2015-01-01', periods=n_obs, freq='L')
df = pd.DataFrame(np.random.randn(n_obs,4), index=idx,
                  columns=["Open", "High", "Low", "Close"])
df.head()
"""
Explanation: Give me good data, or give me death
A good discussion not too long ago led me to start a revolution against some data management aspects of my technology stack. Indeed, it is one of the areas where the decisions made will impact every project undertaken down the road. Time is one of our most valuable resources, and we need to minimize the amount of it we have to spend dealing with data issues. Messy and/or hard-to-use data is the greatest drag I have encountered when trying to produce research.
I had to keep a couple things in mind when deciding on a solution. First, I knew I did not want to depend on any database software. I also knew that I would not be the only one using that data and that although I use Python, other potential users still don't know better and use R. The ideal solution would be as close to language agnostic as possible. Furthermore, I wanted a solution stable enough that I did not have to worry too much about backward compatibility in case of future upgrade.
With those guidelines in mind, I could start to outline what the process would look like:
1. Fetch data from vendor (csv form)
2. Clean the data
3. Write the data on disk
The biggest decision I had to make at this stage was the format used to store the data. Based on the requirements listed above, I shortlisted a few formats that I tought would fit my purpose: csv, json, hdf5, and msgpack.
At this stage I wanted to get a feel for the performance of each of the options. In order to do that I created a simple dataset of 1M millisecond bars so 4M observations.
End of explanation
"""
# Write benchmarks: serialize the same DataFrame to each candidate format.
%timeit df.to_csv("csv_format")
%timeit df.to_json("json_format")
%timeit df.to_hdf("hdf_format", "df", mode="w")
%timeit df.to_msgpack("msgpack_format")
"""
Explanation: Let's now see how they perform for writing.
End of explanation
"""
# Read benchmarks for the same four files written above.
%timeit pd.read_csv("csv_format")
%timeit pd.read_json("json_format")
%timeit pd.read_hdf("hdf_format", "df")
%timeit pd.read_msgpack("msgpack_format")
"""
Explanation: And finally let's have a look at their read performance.
End of explanation
"""
|
eds-uga/csci1360-fa16 | assignments/A1/A1_Q5.ipynb | mit | import numpy as np
def magic():
    """Draw a pseudo-random integer uniformly from 0 through 9 (inclusive)."""
    drawn = np.random.randint(0, 10)
    return drawn
def how_many_loops(stop_val):
    """Count how many times magic() must run before it returns `stop_val`.

    Students implement a while loop between the solution markers, keeping
    the running count in `loops`, which is returned.
    """
    loops = 0
    ### BEGIN SOLUTION
    ### END SOLUTION
    return loops
# Autograder checks: with a fixed RNG seed the number of draws needed to
# hit the stop value is deterministic.
np.random.seed(3849)
s1 = 5
l1 = 6
assert l1 == how_many_loops(s1)
np.random.seed(895768)
s2 = 3
l2 = 20
assert l2 == how_many_loops(s2)
"""
Explanation: Q5
In this question, we'll work exclusively with while loops.
A
In the code below, there's a function named how_many_loops that takes one argument. That argument is a number (an integer); specifically, a stopping criterion. The other function, magic, returns a random number between 0 and 10 (both inclusive).
Your goal is to write a while loop that counts how many times it needed to run before the magic function magic returned the same number as stop_val. Store that count in the loops variable and return it.
End of explanation
"""
def first_negative(inlist):
    """Return the index of the first negative number in `inlist`.

    Students fill in the loop between the solution markers; the answer is
    stored in `index` and returned.
    """
    index = 0
    ### BEGIN SOLUTION
    ### END SOLUTION
    return index
# Autograder checks with seeded random lists.
import numpy as np
np.random.seed(85435)
i1 = np.random.randint(-50, 50, 10).tolist()
assert 0 == first_negative(i1)
np.random.seed(9893743)
i2 = np.random.randint(-25, 75, 10000).tolist()
assert 4 == first_negative(i2)
"""
Explanation: B
In this question, you'll loop over a list, looking for the first negative number. When you find a negative number, you'll return the index of that element in the list.
So if my input list is [3, -1, 14, -2], I would return 1. If my list is [13, 68, 2, -4, 6], I would return 3. Store this value in the variable index.
End of explanation
"""
def first_negative_limit(inlist, limit):
    """Return the index of the first negative number found within `limit` checks.

    Like first_negative, but the while loop must also stop once `limit`
    numbers have been examined, returning the current `index` either way.
    """
    index = 0
    ### BEGIN SOLUTION
    ### END SOLUTION
    return index
# Autograder checks: the limit caps how many elements are examined.
import numpy as np
np.random.seed(85435)
i1 = np.random.randint(-50, 50, 10).tolist()
assert 0 == first_negative_limit(i1, 10)
np.random.seed(9893743)
i2 = np.random.randint(-25, 75, 10000).tolist()
assert 3 == first_negative_limit(i2, 3)
"""
Explanation: C
Often when you're testing some boolean expression, the expression itself doesn't consist of just one check; it can consist of many conditions you're testing simultaneously. For example, just in Part B above, what would have happened if none of the numbers I passed in were negative? Rather than simply looping until you found a negative number (which would have gone on forever or until the program crashed), you could look for a negative number while also counting how many numbers you've looked at, and "giving up" after a certain number of checks to prevent going on forever.
You can string multiple boolean conditions together with the keyword and.
while x < 5 and x > 0:
In doing so, you're telling the loop to continue only as long as both conditions are True; once even one of them changes to False, break the loop.
In this problem, you'll fill in the code below for a new version of the Part B code that also maintains a counter to see how many checks it's made. If the number of checks surpass a given limit, the loop ends and the current value of index is returned.
In summary: you'll do the same thing as before, but check two conditions in your while loop: if the number you're looking at is negative, and if you've checked fewer than limit numbers, then return that index as before. But if you exceed the check limit, then just return whatever the value of index is when the loop ends.
End of explanation
"""
|
rgarcia-herrera/sistemas-dinamicos | human_immune.ipynb | gpl-3.0 | # Para hacer experimentos numéricos importamos numpy
# numpy for numerical experiments
import numpy as np
# plotting library
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# symbolic computation with sympy
from sympy import *
# init_printing(use_latex='matplotlib') # in emacs
init_printing()
"""
Explanation: Human immune response to infectious disease
Unos modelos dinámicos.
End of explanation
"""
# Symbols for the model parameters and the two lymphocyte populations E1, E2.
Lamda1, Lamda2, mu1, mu2, a1, a2, b1, b2, E1, E2 = symbols('Lamda1 Lamda2 mu1 mu2 a1 a2 b1 b2 E1 E2')
# dE1/dt: constant production, linear death, and a saturating cross-stimulation term.
dE1 = Lamda1 - mu1 * E1 + (a1 * E1 * E2)/(1 + b1 * E1 * E2)
dE1
# dE2/dt: symmetric expression with its own parameters.
dE2 = Lamda2 - mu2 * E2 + (a2 * E1 * E2)/(1 + b2 * E1 * E2)
dE2
"""
Explanation: Linfocitos sin virus
End of explanation
"""
# solve dE2 for E1, and vice versa
e1 = solve(dE2, E1)[0]
e2 = solve(dE1, E2)[0]
e1, e2
# substitute one expression from the previous cell into one of the original expressions, then solve
static_e1 = solve(e1.subs(E2, e2), E1)
static_e1
"""
Explanation: Búsqueda de Equilibrios
Abuso del cómputo simbólico
Los objetos creados dE1 y dE2 contienen operaciones representadas simbólicamente que conjugan todas las variables del sistema. Imprimirlas reconstruye las ecuaciones en su notación matemática, a partir de las expresiones simbólicas de Python. Para lograrlo hay que declararle a Python cuáles símbolos componen las expresiones matemáticas. Esto permite hacer operaciones simbólicas sobre las expresiones, como resolver, derivar, sustituir.
Todas las magnitudes del modelo se representan en las ecuaciones por símbolos, como $\Lambda_{1}, \mu_{2}, a{1}$, etc.
Por otro lado, las expresiones simbólicas dE1 y dE2 se pueden convertir en funciones ejecutables, pero estos símbolos no son argumentos de las funciones dE1 y dE2, se asume que serán constantes y que las únicas variables con cambios serán E1 y E2.
En el intento que sigue de encontrar algebráicamente los equilibrios del sistema, he incluido como símbolos todas esas constantes del sistema. Mi esperanza era usar operaciones de SymPy hasta tener una expresión simbólica que volver ejecutable, y sólo entonces alimentarle valores numéricos al modelo computacional.
Es que es una mala práctica de programación incluir en el código "números mágicos". O sea: números en medio de expresiones matemáticas, sin explicación, e.g.:
a = 3.14159 * r**2
Es mejor estilo escribir
pi = 3.141592
a = pi * r**2
Pero cada símbolo es una variable para SymPy, y el intento de encontrar soluciones al sistema de ecuaciones con tantas variables genera expresiones ilegibles.
End of explanation
"""
# delete the previous variables so they are no longer symbolic
#del(Lamda1, Lamda2, mu1, mu2, a1, a2, b1, b2, E1, E2, dE1, dE2)
# plain numeric Python variables
Lamda1 = 1
Lamda2 = 1
mu1 = 1.25
mu2 = 1.25
a1 = 0.252
a2 = 0.252
b1 = 0.008
b2 = 0.008
# this time only two symbols
E1, E2 = symbols('E1 E2')
# Same model as before, but with numeric parameters interpolated so SymPy
# can simplify the expressions.
dE1 = Lamda1 - mu1 * E1 + (a1 * E1 * E2)/(1 + b1 * E1 * E2)
dE1
dE2 = Lamda2 - mu2 * E2 + (a2 * E1 * E2)/(1 + b2 * E1 * E2)
dE2
"""
Explanation: Esto es una barbaridad, un error. A continuación cómo lo resolví.
Sustituir magnitudes al crear las expresiones simbólicas
Si declaro antes, ya no son números mágicos. Al crear las expresiones se interpolarán los valores numéricos así que las expresiones simbólicas serán simples: SymPy hace la aritmética necesaria para simplificar, lo que le era imposible con tanto símbolo.
End of explanation
"""
# Solve dE1 = 0 for E2, substitute into dE2 = 0, then solve for the
# equilibrium values of E1.
e2 = solve(dE1, E2)[0]
e2
solve(dE2.subs({E2: e2}), E1)
"""
Explanation: ¡Mucho mejor! Ahora sí:
Búsqueda de los equilibrios
End of explanation
"""
J = symbols("J")
J = Matrix([[diff(dE1, E1), diff(dE1, E2)],
[diff(dE2, E1), diff(dE2, E2)]])
J
"""
Explanation: Jacobiana
End of explanation
"""
Je1 = J.subs({E1: 1, E2:1})
Je1
Je1.det(), Je1.trace()
Je1.eigenvects()
"""
Explanation: Evaluada en los puntos de equilibrio
(1,1) un sumidero
End of explanation
"""
Je2 = J.subs({E1: 5, E2:5})
Je2
Je2.det(), Je2.trace()
Je2.eigenvects(), Je2.eigenvals()
"""
Explanation: (5, 5) un punto de ensilladura
End of explanation
"""
Je3 = J.subs({E1: 20, E2: 20})
Je3
Je3.det(), Je3.trace()
Je3.eigenvects()
"""
Explanation: (20, 20) otro sumidero
End of explanation
"""
# Drop the symbolic names so they can be rebound as plain functions and constants.
del(Lamda1, Lamda2, mu1, mu2, a1, a2, b1, b2, E1, E2, dE1, dE2)
def dE1(E1, E2):
    # dE1/dt: production - death + saturating stimulation by E2.
    return Lamda1 - mu1 * E1 + (a1 * E1 * E2)/(1 + b1*E1*E2)
def dE2(E1, E2):
    # dE2/dt: symmetric to dE1.
    return Lamda2 - mu2 * E2 + (a2 * E1 * E2)/(1 + b2*E1*E2)
# Numeric parameter values; dE1/dE2 read these as module globals at call time.
Lamda1 = 1
Lamda2 = 1
mu1 = 1.25
mu2 = 1.25
a1 = 0.252
a2 = 0.252
b1 = 0.008
b2 = 0.008
# Vector field around each of the three fixed points, drawn side by side.
plt.figure(figsize=(16, 7))
s = 1
for eq in [(1,1), (5,5), (20,20)]:
    ax = plt.subplot(1, 3, s)
    s += 1
    ax.set_title("(%s, %s)" % eq, fontsize=13)
    # 15x15 grid spanning +/- 2 around the fixed point.
    i, j = np.meshgrid(np.linspace(eq[0] - 2,
                                   eq[0] + 2, 15),
                       np.linspace(eq[1] - 2,
                                   eq[1] + 2, 15))
    u = dE1(i, j)
    v = dE2(i, j)
    plt.quiver(i, j, u, v)
"""
Explanation: Campos de vectores
End of explanation
"""
def step(x, y, dt, f, g):
    """One explicit-Euler update of the system x' = f(x, y), y' = g(x, y)."""
    next_x = x + dt * f(x, y)
    next_y = y + dt * g(x, y)
    return next_x, next_y
def trayectoria(x0, y0, f, g, dt=0.01, steps=100):
    """Integrate from (x0, y0) with explicit Euler; return the visited points.

    The result is a list of `steps` (x, y) tuples starting at (x0, y0);
    the state reached after the final update is not included.
    """
    points = []
    x, y = x0, y0
    for _ in range(steps):
        points.append((x, y))
        x, y = step(x, y, dt, f, g)
    return points
def linea(E1_0, E2_0, dt=0.1, steps=300, color='teal'):
    """Plot the trajectory starting at (E1_0, E2_0) with a direction arrow.

    Integrates the module-level dE1/dE2 field with `trayectoria` and draws
    the path plus an arrowhead over its final segment on the current axes.
    """
    path = trayectoria(E1_0, E2_0, dE1, dE2, dt, steps)
    E1 = tuple(point[0] for point in path)
    E2 = tuple(point[1] for point in path)
    plt.plot(E1, E2, color=color)
    # Arrow from the second-to-last point toward the last one shows flow direction.
    plt.arrow(E1[-2], E2[-2], E1[-1] - E1[-2], E2[-1] - E2[-2],
              color=color, head_width=0.3, head_length=0.3)
# Full phase portrait: vector field over [-3, 24]^2 with sample trajectories.
i, j = np.meshgrid(np.linspace(-3, 24, 25),
                   np.linspace(-3, 24, 25))
u = dE1(i, j)
v = dE2(i, j)
fig = plt.figure(figsize=(9,7))
plt.quiver(i, j, u, v)
linea(0,0, dt=0.01)
linea(0,14, dt=0.01)
linea(10,-1, dt=0.01, steps=120)
# Bundles of trajectories launched near (24, 0) and (0, 24).
for N in np.linspace(-0.5, 0.5, 32):
    linea(24, N, dt=0.01, color='orange')
for N in np.linspace(-0.5, 0.5, 32):
    linea(N, 24, dt=0.01, color='violet')
linea(22, 20, dt=0.01, color='purple')
linea(20, 22, dt=0.01, color='purple')
linea(10, 20, dt=0.01, color='purple')
linea(20, 6, dt=0.01, color='purple')
# Mark the three fixed points found above.
plt.plot(1, 1, 'teal', marker='o', markersize=10, alpha=0.4)
plt.plot(5, 5, 'orange', marker='o', markersize=10, alpha=0.4)
plt.plot(20, 20, 'purple', marker='o', markersize=10, alpha=0.4)
plt.show()
"""
Explanation: Trayectorias
End of explanation
"""
fig = plt.figure(figsize=(10, 6))
def pop(E1_0, E2_0, steps = 1500):
    """Euler-integrate the two populations from (E1_0, E2_0) and plot them over time."""
    dt = 0.01
    # Float arrays pre-filled with the initial values; entries 1..steps-1
    # are overwritten by the integration loop below.
    e1t = np.zeros(steps) + E1_0
    e2t = np.zeros(steps) + E2_0
    for t in range(1, steps):
        prev_e1, prev_e2 = e1t[t-1], e2t[t-1]
        e1t[t] = prev_e1 + dt * dE1(prev_e1, prev_e2)
        e2t[t] = prev_e2 + dt * dE2(prev_e1, prev_e2)
    plt.plot(range(steps), e1t, color='teal')
    plt.plot(range(steps), e2t, color='orange')
# Time courses from three different starting populations.
pop(2, 3)
pop(4,6)
pop(18,22)
"""
Explanation: Tamaños de población al paso del tiempo
End of explanation
"""
# del(a1, a2, b1, b2, dE1, dE2)
# Parameter values for the extended model with a virus population V.
Lamda1 = 1
Lamda2 = 1
mu1 = 1.25
mu2 = 1.25
a1 = 0.252
a2 = 0.252
b1 = 0.008
b2 = 0.008
r = 0.07
k = 0.01
K = 0.05
V, E1, E2 = symbols('V E1 E2')
# E1 gains a K*V*E1 term; the virus grows at rate r and is removed at rate k by E1.
dE1 = Lamda1 - mu1 * E1 + (a1 * E1 * E2)/(1 + b1 * E1 * E2) + K*V*E1
dE2 = Lamda2 - mu2 * E2 + (a2 * E1 * E2)/(1 + b2 * E1 * E2)
dV = r*V - k*V*E1
# 3x3 Jacobian of the extended system.
J = symbols("J")
J = Matrix([[diff(dE1, E1), diff(dE1, E2), diff(dE1, V)],
            [diff(dE2, E1), diff(dE2, E2), diff(dE2, V)],
            [diff(dV, E1), diff(dV, E2), diff(dV, V)]])
J
# Evaluate at the three virus-free equilibria (V = 0).
Je1 = J.subs({E1: 1, E2:1, V:0})
Je1
Je1 = J.subs({E1: 5, E2:5, V:0})
Je1
Je1 = J.subs({E1: 20, E2:20, V:0})
Je1
def dE1(E1, E2, V):
    # As before, plus K*V*E1: the virus term boosts E1 growth.
    return Lamda1 - mu1 * E1 + (a1 * E1 * E2)/(1 + b1*E1*E2) + K*V*E1
def dE2(E1, E2):
    # Unchanged from the virus-free model.
    return Lamda2 - mu2 * E2 + (a2 * E1 * E2)/(1 + b2*E1*E2)
def dV(V, E1):
    # Virus: growth r*V, removed at rate k by E1.
    return r*V - k*V*E1
from mpl_toolkits.mplot3d import Axes3D
# Same parameter values as before, plus the virus parameters r, k, K.
Lamda1 = 1
Lamda2 = 1
mu1 = 1.25
mu2 = 1.25
a1 = 0.252
a2 = 0.252
b1 = 0.008
b2 = 0.008
r = 0.07
k = 0.01
K = 0.05
def pops(e1_0, e2_0, v_0, steps=10000, dt=0.001):
    """Euler-integrate the three-variable model (E1, E2, V).

    Returns three float arrays of length `steps` holding E1, E2 and V at
    each time step, starting from (e1_0, e2_0, v_0).
    """
    # Broadcasting a one-element list against zeros yields a float array
    # pre-filled with the initial value; entries 1..steps-1 are overwritten.
    e1t = [e1_0, ] + np.zeros(steps)
    e2t = [e2_0, ] + np.zeros(steps)
    vt = [v_0, ] + np.zeros(steps)
    for t in range(1, steps):
        e1t[t] = e1t[t-1] + dt * dE1(e1t[t-1], e2t[t-1], vt[t-1])
        e2t[t] = e2t[t-1] + dt * dE2(e1t[t-1], e2t[t-1])
        # BUG FIX: dV is declared as dV(V, E1) but was called with the
        # arguments swapped (dV(e1t[t-1], vt[t-1])), which integrated
        # r*E1 - k*E1*V instead of the intended r*V - k*V*E1.
        vt[t] = vt[t-1] + dt * dV(vt[t-1], e1t[t-1])
    return e1t, e2t, vt
fig = plt.figure(figsize=(14, 13))
ax = fig.gca(projection='3d')
for i in np.linspace(0, 22, 10):
for j in np.linspace(0, 22, 10):
e1t, e2t, vt = pops(i, j, 0.1)
ax.plot(vt, e1t, e2t)
ax.set_xlabel("V")
ax.set_ylabel("$E_{1}$")
ax.set_zlabel("$E_{2}$")
plt.show()
fig = plt.figure(figsize=(10,6))
e1t, e2t, vt = pops(20, 20, 30)
plt.plot(e1t, color='teal')
plt.plot(e2t, color='orange')
plt.plot(vt, color='green')
fig = plt.figure(figsize=(10,6))
e1t, e2t, vt = pops(20, 10, 30)
plt.plot(e1t, color='teal')
plt.plot(e2t, color='orange')
plt.plot(vt, color='green')
fig = plt.figure(figsize=(10,6))
e1t, e2t, vt = pops(2, 1, 30)
plt.plot(e1t, color='teal')
plt.plot(e2t, color='orange')
plt.plot(vt, color='green')
fig = plt.figure(figsize=(10,6))
e1t, e2t, vt = pops(1, 1, 0.1, steps=60000)
plt.plot(e1t, color='teal')
plt.plot(e2t, color='orange')
plt.plot(vt, color='green')
"""
Explanation: En presencia del Virus
End of explanation
"""
|
arviz-devs/arviz | doc/source/user_guide/numpyro_refitting_xr_lik.ipynb | apache-2.0 | import arviz as az
import numpyro
import numpyro.distributions as dist
import jax.random as random
from numpyro.infer import MCMC, NUTS
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import xarray as xr
# Reserve 4 host devices so the 4 MCMC chains configured below can run in parallel.
numpyro.set_host_device_count(4)
"""
Explanation: Refitting NumPyro models with ArviZ (and xarray)
ArviZ is backend agnostic and therefore does not sample directly. In order to take advantage of algorithms that require refitting models several times, ArviZ uses SamplingWrappers to convert the API of the sampling backend to a common set of functions. Hence, functions like Leave Future Out Cross Validation can be used in ArviZ independently of the sampling backend used.
Below there is an example of SamplingWrapper usage for NumPyro.
End of explanation
"""
# Synthetic data: straight line y = x - 2 plus Gaussian noise (sigma = 3).
np.random.seed(26)
xdata = np.linspace(0, 50, 100)
b0, b1, sigma = -2, 1, 3
ydata = np.random.normal(loc=b1 * xdata + b0, scale=sigma)
plt.plot(xdata, ydata)
"""
Explanation: For this example, we will use a linear regression model.
End of explanation
"""
def model(N, x, y=None):
    """NumPyro linear-regression model: y ~ Normal(b0 + b1 * x, sigma_e).

    `y` defaults to None so the model can also run without conditioning on
    observations. N is accepted (it is part of the data dict) but is not
    used inside the model body.
    """
    b0 = numpyro.sample("b0", dist.Normal(0, 10))
    b1 = numpyro.sample("b1", dist.Normal(0, 10))
    sigma_e = numpyro.sample("sigma_e", dist.HalfNormal(10))
    numpyro.sample("y", dist.Normal(b0 + b1 * x, sigma_e), obs=y)
# Observed data fed to every fit/refit.
data_dict = {
    "N": len(ydata),
    "y": ydata,
    "x": xdata,
}
kernel = NUTS(model)
# Shared sampler settings so the original fit and all reloo refits match.
sample_kwargs = dict(
    sampler=kernel,
    num_warmup=1000,
    num_samples=1000,
    num_chains=4,
    chain_method="parallel"
)
mcmc = MCMC(**sample_kwargs)
mcmc.run(random.PRNGKey(0), **data_dict)
"""
Explanation: Now we will write the NumPyro Code:
End of explanation
"""
dims = {"y": ["time"], "x": ["time"]}
idata_kwargs = {
"dims": dims,
"constant_data": {"x": xdata}
}
idata = az.from_numpyro(mcmc, **idata_kwargs)
del idata.log_likelihood
idata
"""
Explanation: We have defined a dictionary sample_kwargs that will be passed to the SamplingWrapper in order to make sure that all
refits use the same sampler parameters. We follow the same pattern with {func}az.from_numpyro <arviz.from_numpyro>.
End of explanation
"""
def calculate_log_lik(x, y, b0, b1, sigma_e):
    """Pointwise Gaussian log-likelihood of y under the line b0 + b1 * x.

    The arguments broadcast against each other, so scalars and arrays of
    posterior draws work alike.
    """
    predicted_mean = b0 + b1 * x
    return stats.norm.logpdf(y, loc=predicted_mean, scale=sigma_e)
"""
Explanation: We are now missing the log_likelihood group because we have not used the log_likelihood argument in idata_kwargs. We are doing this to ease the job of the sampling wrapper. Instead of going out of our way to get Stan to calculate the pointwise log likelihood values for each refit and for the excluded observation at every refit, we will compromise and manually write a function to calculate the pointwise log likelihood.
Even though it is not ideal to lose part of the straight out of the box capabilities of PyStan-ArviZ integration, this should generally not be a problem. We are basically moving the pointwise log likelihood calculation from the Stan Code to the Python code, in both cases, we need to manually write the function to calculate the pointwise log likelihood.
Moreover, the Python computation could even be written to be compatible with Dask. Thus it will work even in cases where the large number of observations makes it impossible to store pointwise log likelihood values (with shape n_samples * n_observations) in memory.
End of explanation
"""
# Broadcast calculate_log_lik over the posterior draws and observations;
# apply_ufunc aligns the DataArray dimensions before calling the function.
log_lik = xr.apply_ufunc(
    calculate_log_lik,
    idata.constant_data["x"],
    idata.observed_data["y"],
    idata.posterior["b0"],
    idata.posterior["b1"],
    idata.posterior["sigma_e"],
)
idata.add_groups(log_likelihood=log_lik)
"""
Explanation: This function should work for any shape of the input arrays as long as their shapes are compatible and can broadcast. There is no need to loop over each draw in order to calculate the pointwise log likelihood using scalars.
Therefore, we can use {func}xr.apply_ufunc <xarray.apply_ufunc> to handle the broadcasting and preserve the dimension names:
End of explanation
"""
calculate_log_lik(
idata.constant_data["x"].values,
idata.observed_data["y"].values,
idata.posterior["b0"].values,
idata.posterior["b1"].values,
idata.posterior["sigma_e"].values
)
"""
Explanation: The first argument is the function, followed by as many positional arguments as needed by the function, 5 in our case. As this case does not have many different dimensions nor combinations of these, we do not need to use any extra kwargs passed to xr.apply_ufunc.
We are now passing the arguments to calculate_log_lik initially as xr.DataArrays. What is happening here behind the scenes is that xr.apply_ufunc is broadcasting and aligning the dimensions of all the DataArrays involved and afterwards passing NumPy arrays to calculate_log_lik. Everything works automagically.
Now let's see what happens if we were to pass the arrays directly to calculate_log_lik instead:
End of explanation
"""
idata
"""
Explanation: If you are still curious about the magic of xarray and apply_ufunc, you can also try to modify the dims used to generate the InferenceData a couple cells before:
dims = {"y": ["time"], "x": ["time"]}
What happens to the result if you use a different name for the dimension of x?
End of explanation
"""
class NumPyroSamplingWrapper(az.SamplingWrapper):
    """ArviZ SamplingWrapper adapter for NumPyro models.

    Holds a JAX PRNG key so every refit draws from a fresh subkey, and
    converts fits back to InferenceData using the stored `idata_kwargs`.
    """
    def __init__(self, model, **kwargs):
        # Pop our extra keyword before delegating to SamplingWrapper.
        self.rng_key = kwargs.pop("rng_key", random.PRNGKey(0))
        super(NumPyroSamplingWrapper, self).__init__(model, **kwargs)
    def sample(self, modified_observed_data):
        # Split the key so repeated refits do not reuse the same randomness.
        self.rng_key, subkey = random.split(self.rng_key)
        mcmc = MCMC(**self.sample_kwargs)
        mcmc.run(subkey, **modified_observed_data)
        return mcmc
    def get_inference_data(self, fit):
        # BUG FIX: this previously converted the module-level `mcmc` (the
        # original full-data fit) instead of the refit passed in as `fit`,
        # so every refit silently reused the original posterior.
        idata = az.from_numpyro(fit, **self.idata_kwargs)
        return idata
class LinRegWrapper(NumPyroSamplingWrapper):
    """Wrapper that knows how to subset the linear-regression data for reloo."""
    def sel_observations(self, idx):
        """Split the stored data into kept (refit) and excluded observations.

        data__i feeds the NumPyro refit -> dict of numpy arrays.
        data_ex feeds xr.apply_ufunc -> list of DataArrays.
        """
        xdata = self.idata_orig.constant_data["x"]
        ydata = self.idata_orig.observed_data["y"]
        excluded = np.isin(np.arange(len(xdata)), idx)
        kept = ~excluded
        y_kept = ydata[kept]
        data__i = {"x": xdata[kept].values, "y": y_kept.values, "N": len(y_kept)}
        data_ex = [xdata[excluded], ydata[excluded]]
        return data__i, data_ex
loo_orig = az.loo(idata, pointwise=True)
loo_orig
"""
Explanation: We will create a subclass of {class}~arviz.SamplingWrapper. Therefore, instead of having to implement all functions required by {func}~arviz.reloo we only have to implement {func}~arviz.SamplingWrapper.sel_observations (we are cloning {func}~arviz.SamplingWrapper.sample and {func}~arviz.SamplingWrapper.get_inference_data from the {class}~arviz.SamplingWrapper in order to use apply_ufunc instead of assuming the log likelihood is calculated within Stan).
Let's check the 2 outputs of sel_observations.
1. data__i is a dictionary because it is an argument of sample which will pass it as is to model.sampling.
2. data_ex is a list because it is an argument to log_likelihood__i which will pass it as *data_ex to apply_ufunc.
More on data_ex and apply_ufunc integration is given below.
End of explanation
"""
loo_orig.pareto_k[[13, 42, 56, 73]] = np.array([0.8, 1.2, 2.6, 0.9])
"""
Explanation: In this case, the Leave-One-Out Cross Validation (LOO-CV) approximation using Pareto Smoothed Importance Sampling (PSIS) works for all observations, so we will use modify loo_orig in order to make {func}~arviz.reloo believe that PSIS failed for some observations. This will also serve as a validation of our wrapper, as the PSIS LOO-CV already returned the correct value.
End of explanation
"""
# NOTE(review): the name `pystan_wrapper` is a holdover from the PyStan
# example; this instance wraps the NumPyro fit.
pystan_wrapper = LinRegWrapper(
    mcmc,
    rng_key=random.PRNGKey(7),
    log_lik_fun=calculate_log_lik,
    posterior_vars=("b0", "b1", "sigma_e"),
    idata_orig=idata,
    sample_kwargs=sample_kwargs,
    idata_kwargs=idata_kwargs
)
"""
Explanation: We initialize our sampling wrapper. Let's stop and analyze each of the arguments.
We use the log_lik_fun and posterior_vars argument to tell the wrapper how to call {func}~xarray:xarray.apply_ufunc. log_lik_fun is the function to be called, which is then called with the following positional arguments:
log_lik_fun(*data_ex, *[idata__i.posterior[var_name] for var_name in posterior_vars]
where data_ex is the second element returned by sel_observations and idata__i is the InferenceData object result of get_inference_data which contains the fit on the subsetted data. We have generated data_ex to be a tuple of DataArrays so it plays nicely with this call signature.
We use idata_orig as a starting point, and mostly as a source of observed and constant data which is then subsetted in sel_observations.
Finally, sample_kwargs and idata_kwargs are used to make sure all refits and corresponding InferenceData are generated with the same properties.
End of explanation
"""
loo_relooed = az.reloo(pystan_wrapper, loo_orig=loo_orig)
loo_relooed
loo_orig
"""
Explanation: And eventually, we can use this wrapper to call {func}~arviz.reloo, and compare the results with the PSIS LOO-CV results.
End of explanation
"""
|
wiheto/teneto | docs/tutorial/tctc.ipynb | gpl-3.0 | %matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from teneto.communitydetection import tctc
import pandas as pd
# Toy dataset: 4 node time series over 13 time points with rough groupings,
# jittered with uniform noise so the trajectories are not exactly equal.
data = np.array([[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 1, 2, 1],
                 [0, 0, 0, 0, 1, 1, 1, 0, 2, 2, 2, 2, 1],
                 [1, 0, 1, 1, 1, 1, 1, 1, 2, 2, 1, 0, 0], [-1, 0, 1, 1, 0, -1, 0, -1, 0, 2, 1, 0, -1]], dtype=float)
# Transpose to (time, node) orientation.
data = data.transpose()
np.random.seed(2019)
data += np.random.uniform(-0.2, 0.2, data.shape)
# Let's have a look at the data
fig, ax = plt.subplots(1)
p = ax.plot(data)
ax.legend(p, [0,1,2,3])
ax.set_xlabel('time')
ax.set_ylabel('amplitude')
print(data.shape)
Explanation: TCTC
Backgorund
TCTC stands for Temporal Communities by Trajectory Clustering.
It is an algorithm designed to find temporal communities on time series data.
The kind of data needed for TCTC are:
Multiple time series.
The time series are from nodes in a network.
Most community detection requires to first create an "edge inference" step where the edges of the different nodes are first calculated.
TCTC first finds clusters of trajectories in the time series without inferring edges. A trajectory is a time series moving through some space.
Trajectory clustering tries to group together nodes that have similar paths through a space.
The hyperparameters of TCTC dictate what type of trajectory is found in the data. There are four hyperparameters:
A maximum distance parameter ($\epsilon$). The distance between all nodes part of the same trajectory must be $\epsilon$ or lower.
A minimum size parameter ($\sigma$). All trajectories must include at least $\sigma$ many nodes.
A minimum time parameter ($\tau$). All trajectories must persist for $\tau$ time-points.
A tolerance parameter ($\kappa$). $\kappa$ consecutive "exception" time-points can exist before the trajectory ends.
Outline
This example shows only how TCTC is run and how the different hyperparameters effect the community detection.
These hyperparameters can be trained (saved for another example).
Read more
TCTC is outlined in more detail in this article
TCTC - example
We will start by generating some data and importing everything we need.
End of explanation
"""
# TCTC hyperparameters: epsilon = max distance, tau = min duration,
# sigma = min community size, kappa = tolerated exception time points.
parameters = {
    'epsilon': 0.5,
    'tau': 3,
    'sigma': 2,
    'kappa': 0
}
# Default output: binary (node, node, time) array of co-community assignments.
tctc_array = tctc(data, **parameters)
print(tctc_array.shape)
"""
Explanation: There are two different outputs that TCTC can produce.
TCTC allows for multilabel communities (i.e. the same node can belong to multiple communities).
The output of TCTC can either be:
As a binary array (dimensions: node,node,time) where each 1 designates that two nodes are in the same community.
As a dataframe where each row is a community.
The default output is option one.
So let us run TCTC on the data we have above.
End of explanation
"""
parameters = {
'epsilon': 0.5,
'tau': 3,
'sigma': 2,
'kappa': 0
}
tctc_df = tctc(data, **parameters, output='df')
print(tctc_df.head())
"""
Explanation: For now ignore the values in the "parameters" dictionary, we will go through that later.
In order to get the dataframe output, just add output='df'.
End of explanation
"""
def community_plot(df, data):
    """Plot the raw time series plus one panel per detected community.

    `df` is the TCTC dataframe output (rows with 'start', 'end' and
    'community' entries); `data` is the (time, node) array the communities
    were found in. Returns the figure and the flattened axes array.
    """
    n_panels = len(df) + 1  # one panel for the raw data, one per community
    nrows = int(np.ceil(n_panels / 2))
    fig, ax = plt.subplots(nrows, 2, sharex=True, sharey=True, figsize=(8, 2 + nrows))
    ax = ax.flatten()
    # First panel: every node's time series.
    p = ax[0].plot(data)
    ax[0].set_xlabel('time')
    ax[0].set_ylabel('amplitude')
    ax[0].set_title('Original data')
    # Remaining panels: all series in grey, community members highlighted
    # over the community's time span only.
    for i, (_, row) in enumerate(df.iterrows()):
        panel = ax[i + 1]
        panel.plot(data, alpha=0.15, color='gray')
        panel.plot(np.arange(row['start'], row['end']),
                   data[row['start']:row['end'], row['community']],
                   color=plt.cm.Set2.colors[i])
        panel.set_title('Community: ' + str(i))
    plt.tight_layout()
    return fig, ax
fig, ax = community_plot(tctc_df, data)
"""
Explanation: Here we can see when the different communities start, end, the size, and the length.
Below we define a function which plots each community on the original data.
End of explanation
"""
parameters = {
'epsilon': 1.5,
'tau': 3,
'sigma': 2,
'kappa': 0
}
tctc_df_largeep = tctc(data, **parameters, output='df')
fig, ax = community_plot(tctc_df_largeep, data)
"""
Explanation: The multiple community labels can be seed in 0 and 2 above. Where 2 contains three nodes and community 0 contains 2 nodes.
Changing the hyperparameters
Now we will rerun TCTC but change each of the parameters in turn and then display them on a community plot.
Changing $\epsilon$
If we make $\epsilon$ larger, we will include more time series in a trajectory.
This however can mean that the communities you detect are less "connected" than if $\epsilon$ was smaller
End of explanation
"""
parameters = {
'epsilon': 0.5,
'tau': 2,
'sigma': 2,
'kappa': 0
}
tctc_df_shorttau = tctc(data, **parameters, output='df')
fig, ax = community_plot(tctc_df_shorttau, data)
parameters = {
'epsilon': 0.5,
'tau': 5,
'sigma': 2,
'kappa': 0
}
tctc_df_longtau = tctc(data, **parameters, output='df')
fig, ax = community_plot(tctc_df_longtau, data)
"""
Explanation: Changing $\tau$
If we make $\tau$ larger, it requires that trajectories persist for more time points.
Shorter trajectories increase the change of more noisey connections.
End of explanation
"""
parameters = {
'epsilon': 0.5,
'tau': 3,
'sigma': 3,
'kappa': 0
}
tctc_df_longsigma = tctc(data, **parameters, output='df')
fig, ax = community_plot(tctc_df_longsigma, data)
"""
Explanation: Changing $\sigma$
If we make $\sigma$ larger, it requires that more nodes are part of the trajectory.
Smaller values of $\sigma$ will result in possible noiser connections.
End of explanation
"""
parameters = {
'epsilon': 0.5,
'tau': 3,
'sigma': 2,
'kappa': 1
}
tctc_df_withkappa = tctc(data, **parameters, output='df')
fig, ax = community_plot(tctc_df_withkappa, data)
"""
Explanation: Changing $\kappa$
If we make $\kappa$ larger, it allows for that many number of "noisey" time-points to exist to see if the trajectory continues.
In the data we have been looking at, node 0 and 1 are close to each other except for time-point 7 and 10. If we let $\kappa$ be 1, if will ignore these time-points and allow the trajectory to continue.
End of explanation
"""
|
pybel/pybel-notebooks | BEL to Natural Language.ipynb | apache-2.0 | import sys
import time
import indra
import indra.util.get_version
import ndex2
import pybel
from indra.assemblers.english_assembler import EnglishAssembler
from indra.sources.bel.bel_api import process_pybel_graph
from pybel.examples import sialic_acid_graph
from pybel_tools.visualization import to_jupyter
"""
Explanation: BEL to Natural Language
Author: Charles Tapley Hoyt
Estimated Run Time: 5 seconds
This notebook shows how the PyBEL-INDRA integration can be used to turn a BEL graph into natural language. Special thanks to John Bachman and Ben Gyori for all of their efforts in making this possible.
To view the interactive Javascript output in this notebook, open in the Jupyter NBViewer.
Imports
End of explanation
"""
print(sys.version)
print(time.asctime())
"""
Explanation: Environment
End of explanation
"""
pybel.utils.get_version()
indra.util.get_version.get_version()
"""
Explanation: Dependencies
End of explanation
"""
to_jupyter(sialic_acid_graph)
"""
Explanation: Data
The Sialic Acid graph is used as an example.
End of explanation
"""
pbp = process_pybel_graph(sialic_acid_graph)
"""
Explanation: Conversion
The PyBEL BELGraph instance is converted to INDRA statments with the function process_pybel_graph. It returns an instance of PybelProcessor, which stores the INDRA statments.
End of explanation
"""
stmts = pbp.statements
stmts
"""
Explanation: A list of INDRA statements is extracted from the BEL graph and stored in the field PybelProcessor.statements. Note that INDRA is built to consider mechanistic information, and therefore excludes most associative relationships.
End of explanation
"""
asm = EnglishAssembler(stmts)
print(asm.make_model(), sep='\n')
"""
Explanation: The list of INDRA statements is converted to plain english using the EnglishAssembler.
End of explanation
"""
|
tensorflow/tensorboard | tensorboard/plugins/mesh/Mesh_Plugin_Tensorboard.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Uninstall tensorboard and tensorflow
!pip uninstall -q -y tensorboard
!pip uninstall -q -y tensorflow
# Install nightly TensorFlow with nightly TensorBoard.
!pip install tf-nightly
# Install trimesh lib to read .PLY files.
!pip freeze | grep -qF 'trimesh==' || pip install trimesh
%load_ext tensorboard
import os
import numpy as np
import tensorflow as tf
import trimesh
import tensorboard
from tensorboard.plugins.mesh import summary as mesh_summary
sample_mesh = 'https://storage.googleapis.com/tensorflow-graphics/tensorboard/test_data/ShortDance07_a175_00001.ply'
log_dir = '/tmp/mesh_demo'
batch_size = 1
!rm -rf /tmp/mesh_demo
"""
Explanation: Overview
This is a simple demo of how to use Mesh plugin for TensorBoard. The demo will load static triangulated mesh (in PLY format), create a mesh summary with it and then display in TensorBoard.
Setup Imports
End of explanation
"""
# Camera and scene configuration.
config_dict = {
'camera': {'cls': 'PerspectiveCamera', 'fov': 75},
'lights': [
{
'cls': 'AmbientLight',
'color': '#ffffff',
'intensity': 0.75,
}, {
'cls': 'DirectionalLight',
'color': '#ffffff',
'intensity': 0.75,
'position': [0, -1, 2],
}],
'material': {
'cls': 'MeshStandardMaterial',
'roughness': 1,
'metalness': 0
}
}
# Read all sample PLY files.
mesh = trimesh.load_remote(sample_mesh)
vertices = np.array(mesh.vertices)
# Currently only supports RGB colors.
colors = np.array(mesh.visual.vertex_colors[:, :3])
faces = np.array(mesh.faces)
# Add batch dimension, so our data will be of shape BxNxC.
vertices = np.expand_dims(vertices, 0)
colors = np.expand_dims(colors, 0)
faces = np.expand_dims(faces, 0)
"""
Explanation: Read sample .PLY files
End of explanation
"""
# Create placeholder tensors with the same shapes as the numpy data;
# actual values are fed in at session.run time (TF1-style graph mode).
vertices_tensor = tf.placeholder(tf.float32, vertices.shape)
faces_tensor = tf.placeholder(tf.int32, faces.shape)
# NOTE(review): colors come from trimesh vertex_colors[:, :3] above, so these
# are presumably 0-255 integer RGB values per vertex — confirm against the
# mesh plugin's expected color format.
colors_tensor = tf.placeholder(tf.int32, colors.shape)
# Build the mesh summary op bundling vertices/faces/colors plus the scene
# configuration (camera, lights, material) for the TensorBoard mesh plugin.
meshes_summary = mesh_summary.op(
    'mesh_color_tensor', vertices=vertices_tensor, faces=faces_tensor,
    colors=colors_tensor, config_dict=config_dict)
# Create summary writer (targets log_dir) and a TF1 session.
writer = tf.summary.FileWriter(log_dir)
sess = tf.Session()
"""
Explanation: Create summaries and session
End of explanation
"""
summaries = sess.run([meshes_summary], feed_dict={
vertices_tensor: vertices,
faces_tensor: faces,
colors_tensor: colors,
})
# Save summaries.
for summary in summaries:
writer.add_summary(summary)
"""
Explanation: Run the model, save summaries to disk
End of explanation
"""
%tensorboard --logdir=/tmp/mesh_demo
"""
Explanation: TensorBoard
End of explanation
"""
|
johnpfay/environ859 | 07_DataWrangling/notebooks/00-Intro-to-NumPy.ipynb | gpl-3.0 | #Create a list of heights and weights
height = [1.73, 1.68, 1.17, 1.89, 1.79]
weight = [65.4, 59.2, 63.6, 88.4, 68.7]
print height
print weight
"""
Explanation: Intro to NumPy
This notebook demonstrates the limitations of Python's built-in data types in executing some scientific analyses.
Source: https://campus.datacamp.com/courses/intro-to-python-for-data-science
First, let's create a dummy datasets of heights and weights of 5 imaginary people.
End of explanation
"""
#[Attempt to] compute BMI from lists
bmi = weight/height ** 2
"""
Explanation: If we assume body mass index (BMI) = weight / height ** 2, what would it take to compute BMI for our data?
End of explanation
"""
#Compute BMI from lists
bmi = []
for idx in range(len(height)):
bmi.append(weight[idx] / height[idx] ** 2)
print bmi
"""
Explanation: The above attempt raises an error because we can't do this with lists.<br>
The only way around this is to iterate through each item in the lists...
End of explanation
"""
#Import numpy, often done using the alias 'np'
import numpy as np
#Convert the height and weight lists to arrays
arrHeight = np.array(height)
arrWeight = np.array(weight)
print arrHeight
print arrWeight
"""
Explanation: However with NumPy, we have access to more data types, specifically arrays, that can speed through this process.
End of explanation
"""
arrBMI = arrWeight / arrHeight ** 2
print arrBMI
"""
Explanation: NumPy arrays allow us to do computations on entire collections...
End of explanation
"""
|
Housebeer/Natural-Gas-Model | .ipynb_checkpoints/Matching Market-checkpoint.ipynb | mit | import random as rnd
class Supplier():
    """A seller holding a number of units, each with its own reservation price."""

    def __init__(self):
        # Willingness-to-ask: one minimum selling price per unit on offer.
        self.wta = []

    def set_quantity(self, n, l, u):
        """Add n units whose asking prices are drawn uniformly from [l, u]."""
        self.wta.extend(rnd.uniform(l, u) for _ in range(n))

    def get_ask(self):
        """Return the list of asking prices (one entry per unit)."""
        return self.wta
class Buyer():
    """A group of customers, each willing to pay up to some price for one unit."""

    def __init__(self):
        # Willingness-to-pay: one maximum purchase price per desired unit.
        self.wtp = []

    def set_quantity(self, n, l, u):
        """Add n units whose bid prices are drawn uniformly from [l, u]."""
        self.wtp.extend(rnd.uniform(l, u) for _ in range(n))

    def get_bid(self):
        """Return the list of bid prices (one entry per unit)."""
        return self.wtp
class Market():
    """Match sorted buyer bids against sorted seller asks at a single price.

    Fix over the original: state (`count`, `last_price`, `b`, `s`) was held in
    mutable class-level attributes, so it leaked across instances and
    `get_clearing_price` double-counted trades when called more than once.
    All state is now per-instance and counters reset on each clearing.
    """

    def __init__(self, b, s):
        # Number of units successfully matched in the last clearing.
        self.count = 0
        # Price of the last (lowest) winning bid; '' until the market clears.
        self.last_price = ''
        # Buyer bids sorted in descending order (most eager buyer first).
        self.b = sorted(b, reverse=True)
        # Seller asks sorted in ascending order (cheapest seller first).
        self.s = sorted(s, reverse=False)

    def get_clearing_price(self):
        """Return the price at which the market clears.

        Pairs the i-th most eager buyer with the i-th cheapest seller and
        counts a trade whenever the bid exceeds the ask (assumes sincere
        buyers and sellers). Returns '' if no trade is possible. Safe to
        call repeatedly: counters are reset on every call. Unequal list
        lengths are tolerated (extra participants simply go unmatched).
        """
        self.count = 0
        self.last_price = ''
        # Buyer makes a bid, starting with the buyer which wants it most.
        for bid, ask in zip(self.b, self.s):
            if bid > ask:
                self.count += 1
                self.last_price = bid
        return self.last_price

    def get_units_cleared(self):
        """Return the number of units exchanged in the last clearing."""
        return self.count
"""
Explanation: Matching Market
This simple model consists of a buyer, a supplier, and a market.
The buyer represents a group of customers whose willingness to pay for a single unit of the good is captured by a vector of prices wta. You can initiate the buyer with a set_quantity function which randomly assigns the willingness to pay according to your specifications. You may ask for these willingness to pay quantities with a get_bid function.
The supplier is similiar, but instead the supplier is willing to be paid to sell a unit of technology. The supplier for instance may have non-zero variable costs that make them unwilling to produce the good unless they receive a specified price. Similarly the supplier has a get_ask function which returns a list of desired prices.
The willingness to pay or sell are set randomly using uniform random distributions. The resultant lists of bids are effectively a demand curve. Likewise the list of asks is effectively a supply curve. A more complex determination of bids and asks is possible, for instance using time of year to vary the quantities being demanded.
Microeconomic Foundations
The market assumes the presence of an auctioneer which will create a book, which seeks to match the bids and the asks as much as possible. If the auctioneer is neutral, then it is incentive compatible for the buyer and the supplier to truthfully announce their bids and asks. The auctioneer will find a single price which clears as much of the market as possible. Clearing the market means that as many willing swaps happens as possible. You may ask the market object at what price the market clears with the get_clearing_price function. You may also ask the market how many units were exchanged with the get_units_cleared function.
Agent-Based Objects
The following section presents three objects which can be used to make an agent-based model of an efficient, two-sided market.
End of explanation
"""
# Demo: run one two-sided auction with 100 units on each side.
# Make a supplier and get the asks (reservation prices drawn uniformly from [0, 10]).
supplier = Supplier()
supplier.set_quantity(100,0,10)
ask = supplier.get_ask()
# Make a buyer and get the bids (willingness-to-pay drawn from the same range).
buyer = Buyer()
buyer.set_quantity(100,0,10)
bid = buyer.get_bid()
# Make a market where the buyers and suppliers can meet.
# The bids and asks are each a list of prices.
market = Market(bid,ask)
price = market.get_clearing_price()      # single price at which the market clears
quantity = market.get_units_cleared()    # number of units actually exchanged
# Output the results of the market.
print("Goods cleared for a price of ",price)
print("Units sold are ", quantity)
"""
Explanation: Example Market
In the following code example we use the buyer and supplier objects to create a market. At the market a single price is announced which causes as many units of goods to be swapped as possible. The buyers and sellers stop trading when it is no longer in their own interest to continue.
End of explanation
"""
|
darothen/py-mie | tutorials/Tutorial.ipynb | mit | import mie
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import seaborn as sns
rc = {
"figure.figsize": (12,6),
"xtick.major.size": 12.0,
"xtick.minor.size": 8.0,
"ytick.major.size": 12.0,
"ytick.minor.size": 8.0,
"axes.linewidth": 1.75,
"xtick.color": '0',
"ytick.color": '0',
"axes.labelcolor": '0'
}
sns.set("notebook", style="ticks", palette='deep', rc=rc, font_scale=1.75)
%matplotlib inline
"""
Explanation: Using the py-mie library
Below is a very quick, hacked together jupyter notebook that details how to use the py-mie python library to make basic Mie calculations for homogeneous and heterogeneous particles.
There are just three functions available in the library:
bhmie_scatter
core_shell_scatter
integrate_mode
End of explanation
"""
# Define the complex refractive index for Black Carbon (Data from Seinfeld and Pandis)
refr_BC = 1.95 + 0.79*1j
# Define the wavelength of incident light (658 nm)
wl = 0.658
# Let's plot over a range of particle diameters
diams = np.logspace(-2, 1, 500)
res = {
"Qabs": np.zeros(len(diams)),
"Qsca": np.zeros(len(diams)),
}
for i, dp in enumerate(diams):
qsca, qabs, _ = mie.bhmie_scatter(
particle_radius=dp/2.,
radiation_lambda=wl,
n_particle=refr_BC)
res["Qabs"][i] = qabs
res["Qsca"][i] = qsca
# Plot
fig, ax = plt.subplots(1, figsize=(14,7))
ax.plot(diams, res["Qabs"], label="$Q_{abs}$", lw=6)
ax.plot(diams, res["Qsca"], label="$Q_{sca}$", lw=6)
ax.set_title("Scattering and Absorption by BC at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$\eta$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
"""
Explanation: mie.bhmie_scatter(particle_radius, radiation_lambda, n_particle)
The bhmie_scatter function computes the scattering/absorption efficiency and asymmetry parameter for a homogeneous particle.
The function has three parameters:
particle_radius: total particle radius (core and shell) in microns
radiation_lambda: wavelength of the incident light in microns
n_particle: complex refractive index of the particle material
The function returns three floats:
$Q_{sca}$: Scattering efficiency
$Q_{abs}$: Absorption efficiency
$asym$: asymmetry parameter for the specified particle
Example: Scattering and Absorption by Homogeneous Black Carbon
End of explanation
"""
# Let's plot over a range of particle diameters
diams = np.logspace(-1, 1, 500)
particles = []
particles.append(("Amm. Sulfate", 1.521 + 0*1j)) # PubChem (589nm)
particles.append(("BC", 1.95 + 0.79*1j))
particles.append(("Sulfuric Acid", 1.4183 + 0*1j)) # CRC (589nm)
particles.append(("Water", 1.331 + 1.64e-8*1j)) # S+P T15.1 (650nm)
fig, ax = plt.subplots(1, figsize=(14,7))
for each in particles:
res = []
for i, dp in enumerate(diams):
qsca, _, _ = mie.bhmie_scatter(dp/2., wl, each[1])
res.append(qsca)
ax.plot(diams, res, label=each[0], lw=6)
ax.set_title("Scattering by Homogeneous Particles at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$Q_{sca}$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
"""
Explanation: Example: Scattering by Homogeneous Particles of Various Composition
Let's try plotting the scattering by various particles...
End of explanation
"""
core_frac = np.linspace(0.0, 1.0, 5)
refr_SO4 = [
1.53+imag*1j for imag in \
[0.158, 0.057, 0.003, 0.001, 0.001, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.551]
]
refr_BC = 1.95 + 0.79*1j
res = []
fig, ax = plt.subplots(1, figsize=(14,7))
for frac in core_frac:
res = []
for dp in diams:
qsca, qabs, _ = mie.core_shell_scatter(
particle_radius=dp/2.,
core_fraction=frac,
radiation_lambda=wl,
n_core=refr_BC,
n_shell=refr_SO4
)
res.append(qsca)
ax.plot(diams, res, label="Core Frac={:.1f}".format(frac), lw=6)
ax.set_title("Scattering by Heterogeneous BC/SO4 at $\lambda={:.0f}\;nm$".format(wl*1000))
ax.set_ylabel("$Q_{sca}$")
ax.set_xlabel("$D_P\;[\mu m]$")
ax.legend(loc='best')
ax.semilogx()
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.3g"))
sns.despine(offset=10)
"""
Explanation: mie.core_shell_scatter(particle_radius, core_fraction, radiation_lambda, n_core, n_shell)
The core_shell_scatter function computes the scattering/absorption efficiency and asymmetry parameter for a heterogeneous, core-shell mixed particle.
The function has five parameters:
particle_radius: total particle radius (core and shell) in microns
core_fraction: the fraction of the particle comprised by it's core (0.0-1.0)
radiation_lambda: wavelength of the incident light in microns
n_core: complex refractive index of the particle core material
n_shell: complex refractive index of the particle shell material
The function returns three floats:
$Q_{sca}$: Scattering efficiency
$Q_{abs}$: Absorption efficiency
$asym$: asymmetry parameter for the specified particle
Example: Scattering and Absorption by BC Coated with SO4
End of explanation
"""
refr_ammsulf = 1.521 + 0*1j
# Range of Geo. Means
gm_range = np.linspace(0.05, 0.3, 50)
# Range of Geo. Standard Deviations
gsd_range = np.linspace(1.25, 2.5, 50)
val_matrix = np.zeros((len(gm_range), len(gsd_range))) * np.nan
for i, gm in enumerate(gm_range):
for j, gsd in enumerate(gsd_range):
qsca, qabs, _ = mie.integrate_mode(
core_fraction=1,
n_shell=refr_BC,
n_core=refr_ammsulf,
radiation_lambda=wl,
mode_radius=gm,
mode_sigma=gsd
)
val_matrix[i][j] = qsca
# Plot the results
fig, ax = plt.subplots(1, figsize=(12,8))
im = plt.pcolormesh(gm_range, gsd_range, val_matrix.T, cmap="seismic")
ax.set_xlabel("$GM\;[\mu m]$")
ax.set_ylabel("$GSD$")
ax.set_title("Integrated Scattering of Amm. Sulfate at 658 nm", y=1.05)
ax.set_ylim([gsd_range.min(), gsd_range.max()])
plt.colorbar(im, label="$Q_{sca}$")
plt.show()
"""
Explanation: mie.integrate_mode(core_fraction, n_shell, n_core, radiation_lambda, mode_radius, mode_sigma, r_min=1e-3, r_max=100., nr=200)
The integrate_mode function integrates the Mie theory calculation over a lognormal aerosol mode with homogeneous particle properties, weighting by its size distribution.
The function has six required parameters:
core_fraction: the fraction of the particle comprised by it's core (0.0-1.0)
n_core: complex refractive index of the particle core material
n_shell: complex refractive index of the particle shell material
radiation_lambda: wavelength of the incident light in microns
mode_radius: the geometric mean or mode radius of the aerosol size distribution in microns
mode_sigma: the geometric standard deviation of the aerosol size distribution
The function also has three optional parameters:
r_min: the minimum radius to integrate over
r_max: the maximum radius to integrate over
nr: the number of particle radii to use in the integration
The function returns three floats:
$Q_{sca}$: Scattering efficiency
$Q_{abs}$: Absorption efficiency
$asym$: asymmetry parameter for the specified particle
Example: Scattering of Ammonium Sulfate at 658 nm for Various Size Distributions
End of explanation
"""
|
liganega/Gongsu-DataSci | notebooks/GongSu17-Pandas-tutorial-03.ipynb | gpl-3.0 | import pandas as pd
import matplotlib.pyplot as plt
import numpy.random as np
# 쥬피터 노트북에서 그래프를 직접 나타내기 위해 사용하는 코드
# 파이썬 전문 에디터에서는 사용하지 않음
%matplotlib inline
"""
Explanation: pandas 3
자료 안내:
pandas 라이브러리 튜토리얼에
있는 Lessons for new pandas users의 03-Lesson 내용을 담고 있다.
익명함수(lambda 함수), GroupBy, apply, transform에 대한 설명은 파이썬 튜토리얼,
pandas 튜토리얼과 한빛미디어의 <파이썬 라이브러리를 활용한 데이터 분석>책의 일부이다.
사분위수에 관한 내용은 자유아카데미의 <통계학>책의 일부이다.
End of explanation
"""
# seed 값을 111
np.seed(111)
# 테스트 데이터를 생성하는 함수 정의
def CreateDataSet(Number=1):
    """Generate `Number` batches of random (state, status, count, date) records.

    Each batch holds one record per Monday between 2009-01-01 and 2012-12-31.
    Returns a flat list of 4-tuples: (state, status, customer_count, timestamp).
    Note: `np` is `numpy.random` in this notebook (see the imports cell), and
    `np.randint`'s upper bound is exclusive.
    """
    # The weekly Monday date range and the candidate label lists never change
    # between batches, so build them once instead of once per loop iteration.
    rng = pd.date_range(start='1/1/2009', end='12/31/2012', freq='W-MON')
    status = [1, 2, 3]
    states = ['GA', 'FL', 'fl', 'NY', 'NJ', 'TX']

    Output = []
    for _ in range(Number):
        # Random customer counts in [25, 1000), one per date.
        data = np.randint(low=25, high=1000, size=len(rng))
        # One random status per date.
        random_status = [status[np.randint(low=0, high=len(status))]
                         for _ in range(len(rng))]
        # One random state per date.
        random_states = [states[np.randint(low=0, high=len(states))]
                         for _ in range(len(rng))]
        Output.extend(zip(random_states, random_status, data, rng))
    return Output
"""
Explanation: 분석을 위한 테스트 데이터를 만들어 보자.
End of explanation
"""
dataset = CreateDataSet(4)
df = pd.DataFrame(data=dataset, columns=['State','Status','CustomerCount','StatusDate'])
df.info()
df.head()
"""
Explanation: 위의 함수를 이용하여 테스트 데이터를 만들고, 이를 다시 데이터프레임으로 만들어보자.
End of explanation
"""
df.to_excel('Lesson3.xlsx', index=False)
print('Done')
"""
Explanation: 위의 데이터프레임을 Excel 파일로 저장하자. 이 때 인덱스 값은 원래의 테스트 데이터셋의 일부가 아니기 때문에 저장하지 않는다.
End of explanation
"""
# 파일의 위치
Location = 'Lesson3.xlsx'
# 아래의 코드에서 0은 첫번째 시트를 의미.
# index_col = 'StatusDate'는 StatusDate를 인덱스로 가져오라는 의미
df = pd.read_excel(Location, 0, index_col='StatusDate')
df.dtypes
# 데이터프레임의 인덱스를 확인
df.index
df.head()
"""
Explanation: 1. Excel로부터 데이터 가져오기
read_excel 함수를 이용하여 Excel 파일을 읽을 수 있다. 이 함수는 특정한 이름 또는 위치의 탭(tab)을 읽을 수 있다.
End of explanation
"""
df['State'].unique()
"""
Explanation: 2. 데이터 준비하기
분석을 위해서 데이터에 다음과 같은 전처리를 해보자.
1) state 열의 값이 모두 대문자인지를 확인
2) status 값이 1인 레코드만 선택
3) state열에서 NJ를 NY으로 변경
4) 이상치 제거
1) state 열의 값이 모두 대문자인지를 확인
: State 열의 값이 대문자인지, 소문자인지를 빠르게 확인해 보자.
End of explanation
"""
# A lambda function is written as follows:
# lambda arguments : expression
# For example, the code below returns the sum of its two arguments.
x = lambda a, b : a + b
x(3, 5)
"""
Explanation: State 열의 값을 모두 대문자로 변경하기 위해서 upper() 함수와 데이터프레임의 apply을 이용한다. apply 메소드를 통해서 각 로우(row)나 칼럼(column)의 1차원 배열에 함수를 적용할 수 있다. 그리고 lambda함수는 간단하게 State 열의 각 값을 대문자로 변경하도록 해준다.
먼저 lambda 함수에 대해서 간단히 알아보자.
[익명 함수 또는 lambda 함수]
파이썬은 익명 함수 또는 lambda 함수라고 하는, 값을 반환하는 단순한 한 문장으로 이루어진 함수를 지원한다. 람다 함수는 데이터 분석에서 특히 편리한데, 이는 람다 함수를 사용하면 코드를 적게 쓰며, 코드도 더 간결해지기 때문이다.
End of explanation
"""
# State 열의 값을 대문자로 변경
df['State'] = df.State.apply(lambda x: x.upper())
df['State'].unique()
"""
Explanation: 이제 State 열의 값을 대문자로 변경해 보자.
End of explanation
"""
# Only grab where Status == 1
mask = df['Status'] == 1
df = df[mask]
"""
Explanation: 2) status 값이 1인 레코드만 선택
End of explanation
"""
mask = df.State == 'NJ'
df['State'][mask] = 'NY'
"""
Explanation: 3) state열에서 NJ를 NY으로 변경
[df.State == 'NJ'] - State 열의 값이 NJ 인 모든 레코드를 찾기
df.State[df.State == 'NJ'] = 'NY' - State 열의 값이 NJ인 모든 레코드의 NJ를 NY으로 변경.
End of explanation
"""
df['State'].unique()
"""
Explanation: 이제 정리된 데이터의 State의 열의 유일한 값들을 확인해 보자.
End of explanation
"""
from IPython.display import Image
Image("python_for_data_analysis_p346.png")
"""
Explanation: 4) 이상치 제거
본 절에서는 데이터프레임을 State와 StatusDate의 연도를 기준으로 그룹을 분리한 후, 각 그룹에 있는 CustomeCount에 대해서 사분위수를 이용하여 이상치 제거를 하려고 한다.
먼저 GroupBy과 apply, transform 메소드를 간단하게 살펴보자.
[GroupBy]
pandas는 데이터셋을 자연스럽게 나누고 요약할 수 있는 groupby라는 유연한 방법을 제공한다.
그룹연산(분리-적용-결합)의 첫 번째 단계는 데이터프레임에 들어있는 데이터를 하나 이상의 색인을 기준으로 분리한다. 예를 들어, 데이터프레임은 로우(axis = 0)로 분리하거나 칼럼(axis = 1)로 분리할 수 있다. 분리하고 나면 함수를 각 그룹에 적용시켜 새로운 값을 얻어낸다. 그리고 마지막으로 함수를 적용한 결과를 하나의 객체로 결합한다.
[그림 9-1]은 그룹 연산의 예시이다.
End of explanation
"""
dftest = pd.DataFrame({'key': ['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C' ], 'data' : [0, 5, 10, 5, 10, 15, 10, 15, 20]})
dftest
# key라는 열에 대해서 그룹으로 분리하고, 각 그룹에 sum()를 적용
dftest.groupby('key').sum()
"""
Explanation: 실제로 데이터프레임을 만들어 그룹 연산을 시행해 보자.
End of explanation
"""
dftest.groupby('key')['data'].apply(lambda x : x.sum())
dftest.groupby('key')['data'].transform(lambda x : x.sum())
"""
Explanation: [apply 과 transform]
위에서 생성한 데이터프레임 dftest에 apply와 transform 메소드로 그룹 연산을 수행해보자.
End of explanation
"""
df.reset_index().head()
Daily = df.reset_index().groupby(['State','StatusDate']).sum()
Daily.head()
"""
Explanation: apply의 결과는 병합된 것을 볼 수 있는 반면 transform 메소드는 데이터프레임의 크기를 유지하는 것을 볼 수 있다.
이제 State와 StatusDate를 기준으로 CustomerCount 값을 합해보자. 이때, 데이터프레임 df에는 StatusDate가 index이므로 StatusDate를 기준으로 그룹화하기 위해서 이를 일반열로 보내야 한다. 이를 위해 reset_index()를 이용한다.
End of explanation
"""
del Daily['Status']
Daily.head()
"""
Explanation: Status의 값은 필요가 없으므로, 아래와 같이 삭제한다.
End of explanation
"""
Daily.index
"""
Explanation: 데이터프레임 Daily의 인덱스를 확인해 보자.
End of explanation
"""
# State 인덱스 확인
Daily.index.levels[0]
# StatusDate 인덱스 확인
Daily.index.levels[1]
"""
Explanation: 다음과 같이 각각의 인덱스도 확인할 수 있다.
End of explanation
"""
StateYear = Daily.groupby([Daily.index.get_level_values(0), Daily.index.get_level_values(1).year])
"""
Explanation: 이제 데이터프레임을 State와 StatusDate의 연도를 기준으로 그룹을 분리해 보자.
End of explanation
"""
dftest1 = pd.DataFrame({'A' : [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 100]})
dftest1
# A 열의 값에 대한 제 1사분위수는 3.5
Q1 = dftest1.quantile(q = 0.25)
Q1
# A 열의 값에 대한 제 2사분위수는 5.5
Q2 = dftest1.quantile(q = 0.5)
Q2
# A 열의 값에 대한 제 3사분위수는 8.5
Q3 = dftest1.quantile(q = 0.75)
Q3
# Lower = Q1 - 1.5 IQR
Lower = Q1 - 1.5*(Q3 - Q1)
Lower
# Upper = Q3 + 1.5 IQR
Upper = Q3 + 1.5*(Q3 - Q1)
Upper
"""
Explanation: StateYear의 각 그룹에 있는 CustomerCount에 대해서 사분위수를 이용하여 이상치를 제거를 시행해 보고자 한다. 이를 위해 먼저 사분위수를 이용하여 이상치를 제거하는 방법에 대해서 간단하게 살펴보자.
[사분위수를 이용하여 이상치를 제거하는 방법]
(a) 사분위수
전체 관측값을 작은 순서로 배열하였을 때, 사분위수는 전체를 사등분하는 값이다. 전체의 사분의 1, 사분의 2, 사분의 3은 각각 전체의 25%, 50%, 75%이고, 이를 제 1사분위수(Q1), 제 2사분위수(Q2) = 중앙값, 제 3사분위수(Q3)라고 한다.
(c) 사분위수 범위
제 3 사분위수와 제 1사분위수 사이의 거리를 퍼진 정도의 측도로 사용할 수 있는데, 이를 사분위수 범위(IQR)이라고 한다. 즉, IQR = 제 3사분위수 - 제 1사분위수 = Q3 - Q1
(d) 사분위수를 이용하여 이상치를 제거하는 방법
관측값이 Q1 - 1.5 IQR 보다 작거나 Q3 + 1.5 IQR 보다 크면, 이 값을 이상치라고 한다.
예제로 살펴보자.
End of explanation
"""
Daily['Lower'] = StateYear['CustomerCount'].transform( lambda x: x.quantile(q=.25) - 1.5*(x.quantile(q=.75)-x.quantile(q=.25)))
Daily['Upper'] = StateYear['CustomerCount'].transform( lambda x: x.quantile(q=.75) + 1.5*(x.quantile(q=.75)-x.quantile(q=.25)))
Daily['Outlier'] = (Daily['CustomerCount'] < Daily['Lower']) | (Daily['CustomerCount'] > Daily['Upper'])
# 이상치를 제거해 보자.
Daily = Daily[Daily['Outlier'] == False]
Daily.head()
"""
Explanation: dftest1의 A열의 자료 중 100은 Upper보다 크므로 이상치라고 할 수 있다.
이제 StateYear의 각 그룹에 있는 CustomerCount에 대해서 사분위수를 이용하여 이상치를 제거 해보자.
End of explanation
"""
|
christophebertrand/ada-epfl | HW01-Intro_to_Pandas/Intro to Pandas.ipynb | mit | import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
"""
Explanation: Table of Contents
<p><div class="lev1"><a href="#Introduction-to-Pandas"><span class="toc-item-num">1 </span>Introduction to Pandas</a></div><div class="lev2"><a href="#Pandas-Data-Structures"><span class="toc-item-num">1.1 </span>Pandas Data Structures</a></div><div class="lev3"><a href="#Series"><span class="toc-item-num">1.1.1 </span>Series</a></div><div class="lev3"><a href="#DataFrame"><span class="toc-item-num">1.1.2 </span>DataFrame</a></div><div class="lev3"><a href="#Exercise-1"><span class="toc-item-num">1.1.3 </span>Exercise 1</a></div><div class="lev3"><a href="#Exercise-2"><span class="toc-item-num">1.1.4 </span>Exercise 2</a></div><div class="lev2"><a href="#Importing-data"><span class="toc-item-num">1.2 </span>Importing data</a></div><div class="lev3"><a href="#Microsoft-Excel"><span class="toc-item-num">1.2.1 </span>Microsoft Excel</a></div><div class="lev2"><a href="#Pandas-Fundamentals"><span class="toc-item-num">1.3 </span>Pandas Fundamentals</a></div><div class="lev3"><a href="#Manipulating-indices"><span class="toc-item-num">1.3.1 </span>Manipulating indices</a></div><div class="lev2"><a href="#Indexing-and-Selection"><span class="toc-item-num">1.4 </span>Indexing and Selection</a></div><div class="lev3"><a href="#Exercise-3"><span class="toc-item-num">1.4.1 </span>Exercise 3</a></div><div class="lev2"><a href="#Operations"><span class="toc-item-num">1.5 </span>Operations</a></div><div class="lev2"><a href="#Sorting-and-Ranking"><span class="toc-item-num">1.6 </span>Sorting and Ranking</a></div><div class="lev3"><a href="#Exercise-4"><span class="toc-item-num">1.6.1 </span>Exercise 4</a></div><div class="lev2"><a href="#Hierarchical-indexing"><span class="toc-item-num">1.7 </span>Hierarchical indexing</a></div><div class="lev2"><a href="#Missing-data"><span class="toc-item-num">1.8 </span>Missing data</a></div><div class="lev3"><a href="#Exercise-5"><span class="toc-item-num">1.8.1 </span>Exercise 5</a></div><div class="lev2"><a href="#Data-summarization"><span 
class="toc-item-num">1.9 </span>Data summarization</a></div><div class="lev2"><a href="#Writing-Data-to-Files"><span class="toc-item-num">1.10 </span>Writing Data to Files</a></div><div class="lev3"><a href="#Advanced-Exercise:-Compiling-Ebola-Data"><span class="toc-item-num">1.10.1 </span>Advanced Exercise: Compiling Ebola Data</a></div><div class="lev2"><a href="#References"><span class="toc-item-num">1.11 </span>References</a></div>
# Introduction to Pandas
**pandas** is a Python package providing fast, flexible, and expressive data structures designed to work with *relational* or *labeled* data both. It is a fundamental high-level building block for doing practical, real world data analysis in Python.
pandas is well suited for:
- Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels
- Any other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure
Key features:
- Easy handling of **missing data**
- **Size mutability**: columns can be inserted and deleted from DataFrame and higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly aligned to a set of labels, or the data can be aligned automatically
- Powerful, flexible **group by functionality** to perform split-apply-combine operations on data sets
- Intelligent label-based **slicing, fancy indexing, and subsetting** of large data sets
- Intuitive **merging and joining** data sets
- Flexible **reshaping and pivoting** of data sets
- **Hierarchical labeling** of axes
- Robust **IO tools** for loading data from flat files, Excel files, databases, and HDF5
- **Time series functionality**: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc.
End of explanation
"""
counts = pd.Series([632, 1638, 569, 115])
counts
"""
Explanation: Pandas Data Structures
Series
A Series is a single vector of data (like a NumPy array) with an index that labels each element in the vector.
End of explanation
"""
counts.values
counts.index
"""
Explanation: If an index is not specified, a default sequence of integers is assigned as the index. A NumPy array comprises the values of the Series, while the index is a pandas Index object.
End of explanation
"""
bacteria = pd.Series([632, 1638, 569, 115],
index=['Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes'])
bacteria
"""
Explanation: We can assign meaningful labels to the index, if they are available:
End of explanation
"""
bacteria['Actinobacteria']
bacteria[[name.endswith('bacteria') for name in bacteria.index]]
[name.endswith('bacteria') for name in bacteria.index]
"""
Explanation: These labels can be used to refer to the values in the Series.
End of explanation
"""
bacteria[0]
"""
Explanation: Notice that the indexing operation preserved the association between the values and the corresponding indices.
We can still use positional indexing if we wish.
End of explanation
"""
bacteria.name = 'counts'
bacteria.index.name = 'phylum'
bacteria
"""
Explanation: We can give both the array of values and the index meaningful labels themselves:
End of explanation
"""
# natural logarithm
np.log(bacteria)
# log base 10
np.log10(bacteria)
"""
Explanation: NumPy's math functions and other operations can be applied to Series without losing the data structure.
End of explanation
"""
bacteria[bacteria>1000]
"""
Explanation: We can also filter according to the values in the Series:
End of explanation
"""
bacteria_dict = {'Firmicutes': 632, 'Proteobacteria': 1638, 'Actinobacteria': 569,
'Bacteroidetes': 115}
pd.Series(bacteria_dict)
"""
Explanation: A Series can be thought of as an ordered key-value store. In fact, we can create one from a dict:
End of explanation
"""
bacteria2 = pd.Series(bacteria_dict,
index=['Cyanobacteria','Firmicutes',
'Proteobacteria','Actinobacteria'])
bacteria2
bacteria2.isnull()
"""
Explanation: Notice that the Series is created in key-sorted order.
If we pass a custom index to Series, it will select the corresponding values from the dict, and treat indices without corrsponding values as missing. Pandas uses the NaN (not a number) type for missing values.
End of explanation
"""
bacteria + bacteria2
"""
Explanation: Critically, the labels are used to align data when used in operations with other Series objects:
End of explanation
"""
data = pd.DataFrame({'value':[632, 1638, 569, 115, 433, 1130, 754, 555],
'patient':[1, 1, 1, 1, 2, 2, 2, 2],
'phylum':['Firmicutes', 'Proteobacteria', 'Actinobacteria',
'Bacteroidetes', 'Firmicutes', 'Proteobacteria', 'Actinobacteria', 'Bacteroidetes']})
data
"""
Explanation: Contrast this with NumPy arrays, where arrays of the same length will combine values element-wise; adding Series combined values with the same label in the resulting series. Notice also that the missing values were propogated by addition.
DataFrame
Inevitably, we want to be able to store, view and manipulate data that is multivariate, where for every index there are multiple fields or columns of data, often of varying data type.
A DataFrame is a tabular data structure, encapsulating multiple series like columns in a spreadsheet. Data are stored internally as a 2-dimensional object, but the DataFrame allows us to represent and manipulate higher-dimensional data.
End of explanation
"""
data[['phylum','value','patient']]
"""
Explanation: Notice the DataFrame is sorted by column name. We can change the order by indexing them in the order we desire:
End of explanation
"""
data.columns
"""
Explanation: A DataFrame has a second index, representing the columns:
End of explanation
"""
data.dtypes
"""
Explanation: The dtypes attribute reveals the data type for each column in our DataFrame.
int64 is numeric integer values
object strings (letters and numbers)
float64 floating-point values
End of explanation
"""
data['patient']
data.patient
type(data.value)
data[['value']]
"""
Explanation: If we wish to access columns, we can do so either by dict-like indexing or by attribute:
End of explanation
"""
data.loc[3]
"""
Explanation: Notice this is different than with Series, where dict-like indexing retrieved a particular element (row).
If we want access to a row in a DataFrame, we index its loc attribute.
End of explanation
"""
data.head() # returns the first (5 by default) rows of data.
data.tail(3) # returns the 3 last rows of data
data.shape # returns the dimension of data (nbr rows, nbr cols)
"""
Explanation: Exercise 1
Try out these commands to see what they return:
data.head()
data.tail(3)
data.shape
End of explanation
"""
data = pd.DataFrame([{'patient': 1, 'phylum': 'Firmicutes', 'value': 632},
{'patient': 1, 'phylum': 'Proteobacteria', 'value': 1638},
{'patient': 1, 'phylum': 'Actinobacteria', 'value': 569},
{'patient': 1, 'phylum': 'Bacteroidetes', 'value': 115},
{'patient': 2, 'phylum': 'Firmicutes', 'value': 433},
{'patient': 2, 'phylum': 'Proteobacteria', 'value': 1130},
{'patient': 2, 'phylum': 'Actinobacteria', 'value': 754},
{'patient': 2, 'phylum': 'Bacteroidetes', 'value': 555}])
data
"""
Explanation: An alternative way of initializing a DataFrame is with a list of dicts:
End of explanation
"""
vals = data.value
vals
vals[5] = 0
vals
"""
Explanation: It's important to note that the Series returned when a DataFrame is indexed is merely a view on the DataFrame, and not a copy of the data itself. So you must be cautious when manipulating this data:
End of explanation
"""
vals = data.value.copy()
vals[5] = 1000
vals
"""
Explanation: If we plan on modifying an extracted Series, it's a good idea to make a copy.
End of explanation
"""
data.value[[3,4,6]] = [14, 21, 5]
data
data['year'] = 2013
data
"""
Explanation: We can create or modify columns by assignment:
End of explanation
"""
data.treatment = 1
data
data.treatment
"""
Explanation: But note, we cannot use the attribute indexing method to add a new column:
End of explanation
"""
colwitbacteria = [col for col in data['phylum'] if col.endswith('bacteria')]
colwitbacteria
"""
Explanation: Exercise 2
From the data table above, create an index to return all rows for which the phylum name ends in "bacteria" and the value is greater than 1000.
Find the values of 'phylum' ending in 'bacteria'
End of explanation
"""
rowswithbacteria = data[data['phylum'].isin(colwitbacteria)]
"""
Explanation: then filter the rows having one of the 'bacteria' values
End of explanation
"""
rowswithbacteria[rowswithbacteria.value > 1000]
"""
Explanation: then take the values bigger than 1000
End of explanation
"""
treatment = pd.Series([0]*4 + [1]*2)
treatment
data['treatment'] = treatment
data
"""
Explanation: Note that it is probably faster to first filter the values bigger than 1000 as it filters more values out.
Specifying a Series as a new columns cause its values to be added according to the DataFrame's index:
End of explanation
"""
month = ['Jan', 'Feb', 'Mar', 'Apr']
# data['month'] = month # raises an error: length 4 does not match the DataFrame (commented out on purpose)
data['month'] = ['Jan']*len(data)
data
"""
Explanation: Other Python data structures (ones without an index) need to be the same length as the DataFrame:
End of explanation
"""
data_nomonth = data.drop('month', axis=1)
data_nomonth
"""
Explanation: We can use the drop method to remove rows or columns, which by default drops rows. We can be explicit by using the axis argument:
End of explanation
"""
data.values
"""
Explanation: We can extract the underlying data as a simple ndarray by accessing the values attribute:
End of explanation
"""
df = pd.DataFrame({'foo': [1,2,3], 'bar':[0.4, -1.0, 4.5]})
df.values
"""
Explanation: Notice that because of the mix of string and integer (and NaN) values, the dtype of the array is object. The dtype will automatically be chosen to be as general as needed to accommodate all the columns.
End of explanation
"""
data.index
"""
Explanation: Pandas uses a custom data structure to represent the indices of Series and DataFrames.
End of explanation
"""
# data.index[0] = 15 # throws error
"""
Explanation: Index objects are immutable:
End of explanation
"""
bacteria2.index = bacteria.index
bacteria2
"""
Explanation: This is so that Index objects can be shared between data structures without fear that they will be changed.
End of explanation
"""
!cat Data/microbiome.csv
"""
Explanation: Importing data
A key, but often under-appreciated, step in data analysis is importing the data that we wish to analyze. Though it is easy to load basic data structures into Python using built-in tools or those provided by packages like NumPy, it is non-trivial to import structured data well, and to easily convert this input into a robust data structure:
genes = np.loadtxt("genes.csv", delimiter=",", dtype=[('gene', '|S10'), ('value', '<f4')])
Pandas provides a convenient set of functions for importing tabular data in a number of formats directly into a DataFrame object. These functions include a slew of options to perform type inference, indexing, parsing, iterating and cleaning automatically as data are imported.
Let's start with some more bacteria data, stored in csv format.
End of explanation
"""
mb = pd.read_csv("Data/microbiome.csv")
mb
"""
Explanation: This table can be read into a DataFrame using read_csv:
End of explanation
"""
pd.read_csv("Data/microbiome.csv", header=None).head()
"""
Explanation: Notice that read_csv automatically considered the first row in the file to be a header row.
We can override default behavior by customizing some of the arguments, like header, names or index_col.
End of explanation
"""
mb = pd.read_table("Data/microbiome.csv", sep=',')
"""
Explanation: read_csv is just a convenience function for read_table, since csv is such a common format:
End of explanation
"""
mb = pd.read_csv("Data/microbiome.csv", index_col=['Patient','Taxon'])
mb.head()
"""
Explanation: The sep argument can be customized as needed to accommodate arbitrary separators. For example, we can use a regular expression to define a variable amount of whitespace, which is unfortunately very common in some data formats:
sep='\s+'
For a more useful index, we can specify the first two columns, which together provide a unique index to the data.
End of explanation
"""
pd.read_csv("Data/microbiome.csv", skiprows=[3,4,6]).head()
"""
Explanation: This is called a hierarchical index, which we will revisit later in the section.
If we have sections of data that we do not wish to import (for example, known bad data), we can populate the skiprows argument:
End of explanation
"""
pd.read_csv("Data/microbiome.csv", nrows=4)
"""
Explanation: If we only want to import a small number of rows from, say, a very large data file we can use nrows:
End of explanation
"""
pd.read_csv("Data/microbiome.csv", chunksize=14)
data_chunks = pd.read_csv("Data/microbiome.csv", chunksize=14)
mean_tissue = pd.Series({chunk.Taxon[0]: chunk.Tissue.mean() for chunk in data_chunks})
mean_tissue
"""
Explanation: Alternately, if we want to process our data in reasonable chunks, the chunksize argument will return an iterable object that can be employed in a data processing loop. For example, our microbiome data are organized by bacterial phylum, with 14 patients represented in each:
End of explanation
"""
!cat Data/microbiome_missing.csv
pd.read_csv("Data/microbiome_missing.csv").head(20)
"""
Explanation: Most real-world data is incomplete, with values missing due to incomplete observation, data entry or transcription error, or other reasons. Pandas will automatically recognize and parse common missing data indicators, including NA and NULL.
End of explanation
"""
pd.isnull(pd.read_csv("Data/microbiome_missing.csv")).head(20)
"""
Explanation: Above, Pandas recognized NA and an empty field as missing data.
End of explanation
"""
pd.read_csv("Data/microbiome_missing.csv", na_values=['?', -99999]).head(20)
"""
Explanation: Unfortunately, there will sometimes be inconsistency with the conventions for missing data. In this example, there is a question mark "?" and a large negative number where there should have been a positive integer. We can specify additional symbols with the na_values argument:
End of explanation
"""
mb = pd.read_excel('Data/microbiome/MID2.xls', sheetname='Sheet 1', header=None)
mb.head()
"""
Explanation: These can be specified on a column-wise basis using an appropriate dict as the argument for na_values.
Microsoft Excel
Since so much financial and scientific data ends up in Excel spreadsheets (regrettably), Pandas' ability to directly import Excel spreadsheets is valuable. This support is contingent on having one or two dependencies (depending on what version of Excel file is being imported) installed: xlrd and openpyxl (these may be installed with either pip or easy_install).
The read_excel convenience function in pandas imports a specific sheet from an Excel file
End of explanation
"""
baseball = pd.read_csv("Data/baseball.csv", index_col='id')
baseball.head()
"""
Explanation: There are several other data formats that can be imported into Python and converted into DataFrames, with the help of built-in or third-party libraries. These include JSON, XML, HDF5, relational and non-relational databases, and various web APIs. These are beyond the scope of this tutorial, but are covered in Python for Data Analysis.
Pandas Fundamentals
This section introduces the new user to the key functionality of Pandas that is required to use the software effectively.
For some variety, we will leave our digestive tract bacteria behind and employ some baseball data.
End of explanation
"""
player_id = baseball.player + baseball.year.astype(str)
baseball_newind = baseball.copy()
baseball_newind.index = player_id
baseball_newind.head()
"""
Explanation: Notice that we specified the id column as the index, since it appears to be a unique identifier. We could try to create a unique index ourselves by combining player and year:
End of explanation
"""
baseball_newind.index.is_unique
"""
Explanation: This looks okay, but let's check:
End of explanation
"""
pd.Series(baseball_newind.index).value_counts()
"""
Explanation: So, indices need not be unique. Our choice is not unique because some players change teams within years.
End of explanation
"""
baseball_newind.loc['wickmbo012007']
"""
Explanation: The most important consequence of a non-unique index is that indexing by label will return multiple values for some labels:
End of explanation
"""
player_unique = baseball.player + baseball.team + baseball.year.astype(str)
baseball_newind = baseball.copy()
baseball_newind.index = player_unique
baseball_newind.head()
baseball_newind.index.is_unique
"""
Explanation: We will learn more about indexing below.
We can create a truly unique index by combining player, team and year:
End of explanation
"""
baseball.reindex(baseball.index[::-1]).head()
"""
Explanation: We can create meaningful indices more easily using a hierarchical index; for now, we will stick with the numeric id field as our index.
Manipulating indices
Reindexing allows users to manipulate the data labels in a DataFrame. It forces a DataFrame to conform to the new index, and optionally, fill in missing data if requested.
A simple use of reindex is to alter the order of the rows:
End of explanation
"""
# Sequence covering every id from the smallest to the largest, INCLUSIVE.
# range() excludes its stop value, hence the + 1 (without it the last id
# in the database would be dropped from the reindexed table).
id_range = range(baseball.index.values.min(), baseball.index.values.max() + 1)
baseball.reindex(id_range).head()
"""
Explanation: Notice that the id index is not sequential. Say we wanted to populate the table with every id value. We could specify and index that is a sequence from the first to the last id numbers in the database, and Pandas would fill in the missing data with NaN values:
End of explanation
"""
baseball.reindex(id_range, method='ffill', columns=['player','year']).head()
baseball.reindex(id_range, fill_value='charliebrown', columns=['player']).head()
"""
Explanation: Missing values can be filled as desired, either with selected values, or by rule:
End of explanation
"""
baseball.shape
baseball.drop([89525, 89526])
baseball.drop(['ibb','hbp'], axis=1)
"""
Explanation: Keep in mind that reindex does not work if we pass a non-unique index series.
We can remove rows or columns via the drop method:
End of explanation
"""
# Sample Series object
hits = baseball_newind.h
hits
# Numpy-style indexing
hits[:3]
# Indexing by label
hits[['womacto01CHN2006','schilcu01BOS2006']]
"""
Explanation: Indexing and Selection
Indexing works analogously to indexing in NumPy arrays, except we can use the labels in the Index object to extract values in addition to arrays of integers.
End of explanation
"""
hits['womacto01CHN2006':'gonzalu01ARI2006']
hits['womacto01CHN2006':'gonzalu01ARI2006'] = 5
hits
"""
Explanation: We can also slice with data labels, since they have an intrinsic order within the Index:
End of explanation
"""
baseball_newind[['h','ab']]
baseball_newind[baseball_newind.ab>500]
"""
Explanation: In a DataFrame we can slice along either or both axes:
End of explanation
"""
baseball_newind.query('ab > 500')
"""
Explanation: For a more concise (and readable) syntax, we can use the new query method to perform selection on a DataFrame. Instead of having to type the fully-specified column, we can simply pass a string that describes what to select. The query above is then simply:
End of explanation
"""
min_ab = 450
baseball_newind.query('ab > @min_ab')
"""
Explanation: The DataFrame.index and DataFrame.columns are placed in the query namespace by default. If you want to refer to a variable in the current namespace, you can prefix the variable with @:
End of explanation
"""
baseball_newind.loc['gonzalu01ARI2006', ['h','X2b', 'X3b', 'hr']]
baseball_newind.loc[:'myersmi01NYA2006', 'hr']
"""
Explanation: The indexing field loc allows us to select subsets of rows and columns in an intuitive way:
End of explanation
"""
baseball_newind.iloc[:5, 5:8]
"""
Explanation: In addition to using loc to select rows and columns by label, pandas also allows indexing by position using the iloc attribute.
So, we can query rows and columns by absolute position, rather than by name:
End of explanation
"""
baseball[baseball['team'].isin(['LAN', 'SFN'])]
"""
Explanation: Exercise 3
You can use the isin method query a DataFrame based upon a list of values as follows:
data['phylum'].isin(['Firmacutes', 'Bacteroidetes'])
Use isin to find all players that played for the Los Angeles Dodgers (LAN) or the San Francisco Giants (SFN). How many records contain these values?
End of explanation
"""
hr2006 = baseball.loc[baseball.year==2006, 'hr']
hr2006.index = baseball.player[baseball.year==2006]
hr2007 = baseball.loc[baseball.year==2007, 'hr']
hr2007.index = baseball.player[baseball.year==2007]
hr2007
"""
Explanation: 15 records contain those values
Operations
DataFrame and Series objects allow for several operations to take place either on a single object, or between two or more objects.
For example, we can perform arithmetic on the elements of two objects, such as combining baseball statistics across years. First, let's (artificially) construct two Series, consisting of home runs hit in years 2006 and 2007, respectively:
End of explanation
"""
hr_total = hr2006 + hr2007
hr_total
"""
Explanation: Now, let's add them together, in hopes of getting 2-year home run totals:
End of explanation
"""
hr_total[hr_total.notnull()]
"""
Explanation: Pandas' data alignment places NaN values for labels that do not overlap in the two Series. In fact, there are only 6 players that occur in both years.
End of explanation
"""
hr2007.add(hr2006, fill_value=0)
"""
Explanation: While we do want the operation to honor the data labels in this way, we probably do not want the missing values to be filled with NaN. We can use the add method to calculate player home run totals by using the fill_value argument to insert a zero for home runs where labels do not overlap:
End of explanation
"""
baseball.hr - baseball.hr.max()
"""
Explanation: Operations can also be broadcast between rows or columns.
For example, if we subtract the maximum number of home runs hit from the hr column, we get how many fewer than the maximum were hit by each player:
End of explanation
"""
baseball.loc[89521, "player"]
stats = baseball[['h','X2b', 'X3b', 'hr']]
diff = stats - stats.loc[88641]
diff[:10]
"""
Explanation: Or, looking at things row-wise, we can see how a particular player compares with the rest of the group with respect to important statistics
End of explanation
"""
stats.apply(np.median)
def range_calc(x):
    """Return the spread (maximum minus minimum) of the values in x."""
    spread = x.max() - x.min()
    return spread
stat_range = lambda x: x.max() - x.min()
stats.apply(stat_range)
"""
Explanation: We can also apply functions to each column or row of a DataFrame
End of explanation
"""
def slugging(x):
    """Compute a player's slugging percentage: total bases per at-bat."""
    # Singles are the hits that were not doubles, triples, or home runs.
    singles = x['h'] - x['X2b'] - x['X3b'] - x['hr']
    total_bases = singles + 2 * x['X2b'] + 3 * x['X3b'] + 4 * x['hr']
    # The tiny epsilon guards against division by zero when ab == 0.
    at_bats = x['ab'] + 1e-6
    return total_bases / at_bats
baseball.apply(slugging, axis=1).round(3)
"""
Explanation: Lets use apply to calculate a meaningful baseball statistics, slugging percentage:
$$SLG = \frac{1B + (2 \times 2B) + (3 \times 3B) + (4 \times HR)}{AB}$$
And just for fun, we will format the resulting estimate.
End of explanation
"""
baseball_newind.sort_index().head()
baseball_newind.sort_index(ascending=False).head()
"""
Explanation: Sorting and Ranking
Pandas objects include methods for re-ordering data.
End of explanation
"""
baseball_newind.sort_index(axis=1).head()
"""
Explanation: Try sorting the columns instead of the rows, in ascending order:
End of explanation
"""
baseball.hr.sort_values(ascending=False)
"""
Explanation: We can also use sort_values to sort a Series by value, rather than by label.
End of explanation
"""
baseball[['player','sb','cs']].sort_values(ascending=[False,True],
by=['sb', 'cs']).head(10)
"""
Explanation: For a DataFrame, we can sort according to the values of one or more columns using the by argument of sort_values:
End of explanation
"""
baseball.hr.rank()
"""
Explanation: Ranking does not re-arrange data, but instead returns an index that ranks each value relative to others in the Series.
End of explanation
"""
pd.Series([100,100]).rank()
"""
Explanation: Ties are assigned the mean value of the tied ranks, which may result in decimal values.
End of explanation
"""
baseball.hr.rank(method='first')
"""
Explanation: Alternatively, you can break ties via one of several methods, such as by the order in which they occur in the dataset:
End of explanation
"""
baseball.rank(ascending=False).head()
baseball[['r','h','hr']].rank(ascending=False).head()
"""
Explanation: Calling the DataFrame's rank method results in the ranks of all columns:
End of explanation
"""
def on_base_perc(pl):
    """Return a player's on-base percentage (OBP).

    OBP = (H + BB + HBP) / (AB + BB + HBP + SF).

    pl is a row-like mapping with keys 'h', 'bb', 'hbp', 'ab', and 'sf'.
    Returns NaN when the denominator is zero (no qualifying plate
    appearances), since the statistic is undefined in that case.
    """
    numerator = pl['h'] + pl['bb'] + pl['hbp']
    denominator = pl['ab'] + pl['bb'] + pl['hbp'] + pl['sf']
    if denominator == 0:
        # Avoid a ZeroDivisionError; NaN marks the value as undefined.
        return np.nan
    return numerator / denominator
baseball.apply(on_base_perc, axis=1).round(3)
"""
Explanation: Exercise 4
Calculate on base percentage for each player, and return the ordered series of estimates.
$$OBP = \frac{H + BB + HBP}{AB + BB + HBP + SF}$$
define the function and appy it.
End of explanation
"""
baseball.apply(on_base_perc, axis=1).round(3).sort_values(ascending=False)
"""
Explanation: and again but ordered
End of explanation
"""
baseball_h = baseball.set_index(['year', 'team', 'player'])
baseball_h.head(10)
"""
Explanation: Hierarchical indexing
In the baseball example, I was forced to combine 3 fields to obtain a unique index that was not simply an integer value. A more elegant way to have done this would be to create a hierarchical index from the three fields.
End of explanation
"""
baseball_h.index[:10]
baseball_h.index.is_unique
"""
Explanation: This index is a MultiIndex object that consists of a sequence of tuples, the elements of which is some combination of the three columns used to create the index. Where there are multiple repeated values, Pandas does not print the repeats, making it easy to identify groups of values.
End of explanation
"""
baseball_h.loc[(2007, 'ATL', 'francju01')]
"""
Explanation: Try using this hierarchical index to retrieve Julio Franco (francju01), who played for the Atlanta Braves (ATL) in 2007:
End of explanation
"""
mb = pd.read_csv("Data/microbiome.csv", index_col=['Taxon','Patient'])
mb.head(10)
"""
Explanation: Recall earlier we imported some microbiome data using two index columns. This created a 2-level hierarchical index:
End of explanation
"""
mb.loc['Proteobacteria']
"""
Explanation: With a hierarchical index, we can select subsets of the data based on a partial index:
End of explanation
"""
frame = pd.DataFrame(np.arange(12).reshape(( 4, 3)),
index =[['a', 'a', 'b', 'b'], [1, 2, 1, 2]],
columns =[['Ohio', 'Ohio', 'Colorado'], ['Green', 'Red', 'Green']])
frame
"""
Explanation: Hierarchical indices can be created on either or both axes. Here is a trivial example:
End of explanation
"""
frame.index.names = ['key1', 'key2']
frame.columns.names = ['state', 'color']
frame
"""
Explanation: If you want to get fancy, both the row and column indices themselves can be given names:
End of explanation
"""
frame.loc['a', 'Ohio']
"""
Explanation: With this, we can do all sorts of custom indexing:
End of explanation
"""
frame.loc['b', 2]['Colorado']
"""
Explanation: Try retrieving the value corresponding to b2 in Colorado:
fetch b2 and then Colorado
End of explanation
"""
mb.swaplevel('Patient', 'Taxon').head()
"""
Explanation: Additionally, the order of the set of indices in a hierarchical MultiIndex can be changed by swapping them pairwise:
End of explanation
"""
mb.sortlevel('Patient', ascending=False).head()
"""
Explanation: Data can also be sorted by any index level, using sort_index with a level argument (the older sortlevel method that did this is deprecated):
End of explanation
"""
foo = pd.Series([np.nan, -3, None, 'foobar'])
foo
foo.isnull()
"""
Explanation: Missing data
The occurence of missing data is so prevalent that it pays to use tools like Pandas, which seamlessly integrates missing data handling so that it can be dealt with easily, and in the manner required by the analysis at hand.
Missing data are represented in Series and DataFrame objects by the NaN floating point value. However, None is also treated as missing, since it is commonly used as such in other contexts (e.g. NumPy).
End of explanation
"""
bacteria2
bacteria2.dropna()
bacteria2.isnull()
bacteria2[bacteria2.notnull()]
"""
Explanation: Missing values may be dropped or indexed out:
End of explanation
"""
data.dropna()
"""
Explanation: By default, dropna drops entire rows in which one or more values are missing.
End of explanation
"""
data.dropna(how='all')
"""
Explanation: This can be overridden by passing the how='all' argument, which only drops a row when every field is a missing value.
End of explanation
"""
data.loc[7, 'year'] = np.nan
data
data.dropna(thresh=5)
"""
Explanation: This can be customized further by specifying how many values need to be present before a row is dropped via the thresh argument.
End of explanation
"""
data.dropna(axis=1)
"""
Explanation: This is typically used in time series applications, where there are repeated measurements that are incomplete for some subjects.
Exercise 5
Try using the axis argument to drop columns with missing values:
End of explanation
"""
bacteria2.fillna(0)
data.fillna({'year': 2013, 'treatment':2})
"""
Explanation: Rather than omitting missing data from an analysis, in some cases it may be suitable to fill the missing value in, either with a default value (such as zero) or a value that is either imputed or carried forward/backward from similar data points. We can do this programmatically in Pandas with the fillna argument.
End of explanation
"""
data.year.fillna(2013, inplace=True)
data
"""
Explanation: Notice that fillna by default returns a new object with the desired filling behavior, rather than changing the Series or DataFrame in place (in general, we like to do this, by the way!).
We can alter values in-place using inplace=True.
End of explanation
"""
bacteria2.fillna(method='bfill')
"""
Explanation: Missing values can also be interpolated, using any one of a variety of methods:
End of explanation
"""
baseball.sum()
"""
Explanation: Data summarization
We often wish to summarize data in Series or DataFrame objects, so that they can more easily be understood or compared with similar data. The NumPy package contains several functions that are useful here, but several summarization or reduction methods are built into Pandas data structures.
End of explanation
"""
baseball.mean()
"""
Explanation: Clearly, sum is more meaningful for some columns than others. For methods like mean for which application to string variables is not just meaningless, but impossible, these columns are automatically excluded:
End of explanation
"""
bacteria2
bacteria2.mean()
"""
Explanation: The important difference between NumPy's functions and Pandas' methods is that the latter have built-in support for handling missing data.
End of explanation
"""
bacteria2.mean(skipna=False)
"""
Explanation: Sometimes we may not want to ignore missing values, and allow the nan to propagate.
End of explanation
"""
extra_bases = baseball[['X2b','X3b','hr']].sum(axis=1)
extra_bases.sort_values(ascending=False)
"""
Explanation: Passing axis=1 will summarize over rows instead of columns, which only makes sense in certain situations.
End of explanation
"""
baseball.describe()
"""
Explanation: A useful summarization that gives a quick snapshot of multiple statistics for a Series or DataFrame is describe:
End of explanation
"""
baseball.player.describe()
"""
Explanation: describe can detect non-numeric data and sometimes yield useful information about it.
End of explanation
"""
baseball.hr.cov(baseball.X2b)
"""
Explanation: We can also calculate summary statistics across multiple columns, for example, correlation and covariance.
$$cov(x,y) = \sum_i (x_i - \bar{x})(y_i - \bar{y})$$
End of explanation
"""
baseball.hr.corr(baseball.X2b)
baseball.ab.corr(baseball.h)
"""
Explanation: $$corr(x,y) = \frac{cov(x,y)}{(n-1)s_x s_y} = \frac{\sum_i (x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_i (x_i - \bar{x})^2 \sum_i (y_i - \bar{y})^2}}$$
End of explanation
"""
baseball.corr()
"""
Explanation: Try running corr on the entire baseball DataFrame to see what is returned:
End of explanation
"""
mb.head()
mb.sum(level='Taxon')
"""
Explanation: it returns the correlation matrix for all features
If we have a DataFrame with a hierarchical index (or indices), summary statistics can be applied with respect to any of the index levels:
End of explanation
"""
mb.to_csv("mb.csv")
"""
Explanation: Writing Data to Files
As well as being able to read several data input formats, Pandas can also export data to a variety of storage formats. We will bring your attention to just a couple of these.
End of explanation
"""
baseball.to_pickle("baseball_pickle")
"""
Explanation: The to_csv method writes a DataFrame to a comma-separated values (csv) file. You can specify custom delimiters (via sep argument), how missing values are written (via na_rep argument), whether the index is written (via index argument), whether the header is included (via header argument), among other options.
An efficient way of storing data to disk is in binary format. Pandas supports this using Python’s built-in pickle serialization.
End of explanation
"""
pd.read_pickle("baseball_pickle")
"""
Explanation: The complement to to_pickle is the read_pickle function, which restores the pickle to a DataFrame or Series:
End of explanation
"""
|
jamesfolberth/NGC_STEM_camp_AWS | notebooks/data8_notebooks/lab10/lab10.ipynb | bsd-3-clause | # Run this cell to set up the notebook, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic.
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import warnings
warnings.simplefilter('ignore', FutureWarning)
# This line loads the visualization code for this lab.
import visualizations
# These lines load the tests.
from client.api.assignment import load_assignment
tests = load_assignment('lab10.ok')
"""
Explanation: Conditional Probability
This lab is an introduction to visualizing conditional probabilities. We will cover icon arrays. These do not appear in the textbook and will not appear on any exam, but they will help you gain intuition about conditional probability.
Administrative details
This lab will not be collected. Conditional probability will appear on the final exam, and this is an opportunity to understand it better. We recommend going through at least part 2. You can complete the rest later as an exercise when you're studying.
End of explanation
"""
probability_large_green = ...
_ = tests.grade("q21")
"""
Explanation: 1. What is conditional probability good for?
Suppose we have a known population, like all dogs in California. So far, we've seen 3 ways of predicting something about an individual in that population, given incomplete knowledge about the identity of the individual:
If we know nothing about the individual dog, we could predict that its speed is the average or median of all the speeds in the population.
If we know the dog's height but not its speed, we could use linear regression to predict its speed from its height. The resulting prediction is still imperfect, but it might be more accurate than the population average.
If we know the dog's breed, height, and age, we could use nearest-neighbor classification to predict its speed by comparing to a collection of dogs with known speed.
Computing conditional probabilities is a different way of making predictions. It differs in at least two important ways from the methods we've seen:
1. Rather than producing a single answer that might be wrong, we just figure out how likely each possible answer is.
2. In the simple (but important) cases we'll look at today, conditional probabilities can be calculated exactly from assumptions rather than estimated from data. By contrast, there are many techniques for classification, and even once we choose k-Nearest Neighbors, we get different results for different values of k.
2. Icon arrays
Parts 3 and 4 of this lab are about cancer, but first let's start with a simple, contrived example.
Imagine you are a marble. You don't know what you look like (since you obviously have no eyes), but you know that Samantha drew you uniformly at random from a bag that contained the following marbles:
* 4 large green marbles,
* 1 large red marble,
* 6 small green marbles, and
* 2 small red marbles.
Question 2.1. Knowing only what we've told you so far, what's the probability that you're a large green marble?
End of explanation
"""
marbles = Table.read_table("marbles.csv")
marbles
"""
Explanation: Here's a table with those marbles:
End of explanation
"""
# Run this cell.
#######################################################################
# The functions you'll need to actually use are in here. Each is a
# way of making an icon array from a differently-formatted table.
#######################################################################
def display_icon_array(table, groups, individuals_name):
    """
    Display an icon array for a table, grouped on the given columns.

    groups is an array of labels of columns in table to group on.

    individuals_name is what to call a single row of table.  For a
    table describing a population of people, that would be "people".

    Example:

      display_icon_array(marbles, make_array("color", "size"), "marbles")
    """
    counts = table.groups(groups)
    display_grouped_icon_array(counts, individuals_name)
def display_grouped_icon_array(grouped_data, individuals_name):
    """
    Display an icon array built from a table of group counts.

    grouped_data is a table of counts or frequencies for data grouped
    by one or more categories, such as the result of calling the
    groups method on some table.

    individuals_name is what to call a single member of the dataset.
    For a dataset describing a population of people, that would be
    "people".

    Example:

      display_grouped_icon_array(marbles.groups(make_array("color", "size")), "marbles")
    """
    visualizations.display_combinations(grouped_data, individuals_name=individuals_name)
def display_crosstab_icon_array(crosstabulation, x_label, individuals_name):
    """
    Display an icon array for the groups represented in a
    crosstabulation (pivot) table.

    crosstabulation should be a table of frequencies or counts created
    by calling pivot on some table.
    x_label should be the label of the categories listed as columns (on
    the "x axis" when the crosstabulation table is printed).
    individuals_name is your name for the individual members of the
    dataset. For example, if we're talking about a population of
    people, individuals_name should be "people".

    For example:
      display_crosstab_icon_array(marbles.pivot("color", "size"), "color", "marbles")
    """
    # Convert the pivot-table layout back into grouped counts, then draw.
    groups = visualizations.pivot_table_to_groups(crosstabulation, x_label)
    display_grouped_icon_array(groups, individuals_name)
"""
Explanation: We've included some code to display something called an icon array. The functions in the cell below create icon arrays from various kinds of tables. Refer back to this cell later when you need to make an icon array.
End of explanation
"""
# Run this cell.
display_grouped_icon_array(marbles.groups(make_array("color", "size")), "marbles")
"""
Explanation: Here's an icon array of all the marbles, grouped by color and size:
End of explanation
"""
...
"""
Explanation: Note that the icon colors don't correspond to the colors of the marbles they represent.
You (the marble) should imagine that you are a random draw from these 13 icons.
Question 2.2. Make an icon array of the marbles, grouped only by color.
End of explanation
"""
probability_green = ...
_ = tests.grade("q23")
"""
Explanation: Knowing nothing else about yourself, you're equally likely to be any of the marbles pictured.
Question 2.3. What's the probability that you're a green marble? Calculate this by hand (using Python for arithmetic) by looking at your icon array.
End of explanation
"""
display_grouped_icon_array(marbles.groups(make_array("color", "size")), "marbles")
"""
Explanation: 2.1. Conditional probability
Suppose you overhear Samantha saying that you're a large marble. (Little-known fact: though marbles lack eyes, they possess rudimentary ears.) Does this somehow change the likelihood that you're green? Let's find out.
Go back to the full icon array, displayed below for convenience.
End of explanation
"""
# Just run this cell.
display_grouped_icon_array(marbles.where("size", "large").group("color"), "large marbles")
"""
Explanation: In question 2.3, we assumed you were equally likely to be any of the marbles, because we didn't know any better. That's why we looked at all the marbles to compute the probability you were green.
But assuming you're a large marble, we can eliminate some of these possibilities. In particular, you can't be a small green marble or a small red marble.
You're still equally likely to be any of the remaining marbles, because you don't know anything that says otherwise. So here's an icon array of those remaining possibilities:
End of explanation
"""
probability_green_given_large = ...
_ = tests.grade("q211")
"""
Explanation: Question 2.1.1. What's the probability you're a green marble, knowing that you're a large marble? Calculate it by hand, using the icon array.
End of explanation
"""
# Make an icon array to help you compute the answer.
...
# Now compute the answer.
probability_large_given_green = ...
_ = tests.grade("q212")
"""
Explanation: You should have found that this is different from the probability that you're a green marble, which you computed earlier. The distribution of colors among the large marbles is a little different from the distribution of colors among all the marbles.
Question 2.1.2. Suppose instead Samantha had said you're a green marble. What's the probability you're large? Make an icon array to help you compute this probability, then compute it.
Hint: Look at the code we wrote to generate an icon array for question 2.1.1.
End of explanation
"""
# Just run this cell. The next cell is where you should write your answer.
display_grouped_icon_array(marbles.groups(make_array("color", "size")), "marbles")
"""
Explanation: Question 2.1.3. How could you answer the last two questions just by looking at the full icon array? (You can run the cell below to see it again.)
End of explanation
"""
people = Table().with_columns(
"cancer status", make_array("sick", "sick", "healthy", "healthy"),
"test status", make_array("positive", "negative", "positive", "negative"),
"count", make_array(90, 10, 198, 9702))
people
"""
Explanation: Write your answer here, replacing this text.
3. Cancer screening
Now let's look at a much more realistic application.
Background
Medical tests are an important but surprisingly controversial topic. For years, women have been advised to get regular mammograms (tests for breast cancer). Today, there is controversy over whether the tests are useful at all.
Part of the problem with such tests is that they are not perfectly reliable. Someone without cancer, or with only a benign form of cancer, can see a positive result on a test for cancer. Someone with cancer can receive a negative result. ("Positive" means "pointing toward cancer," so in this context it's bad!) Doctors and patients often deal poorly with the first case, called false positives. For example, a patient may receive dangerous treatment like chemotherapy or radiation despite having no cancer or, as happens more frequently, having a cancer that would not have impacted her health.
Conditional probability is a good way to think about such situations. For example, you can compute the chance that you have cancer, given the result of a test, by combining information from different probability distributions. You'll see that the chance you have cancer can be far from 100% even if you have a positive test result from a test that is usually accurate.
3.1. Basic cancer statistics
Suppose that, in a representative group of 10,000 people who are tested for cancer ("representative" meaning that the frequencies of different things are the same as the frequencies in the whole population):
1. 100 have cancer.
2. Among those 100, 90 have positive results on a cancer test and 10 have negative results. ("Negative" means "not pointing toward cancer.")
3. The other 9,900 don't have cancer.
4. Among these, 198 have positive results on a cancer test and the other 9,702 have negative results. (So 198 see "false positive" results.)
Below we've generated a table with data from these 10,000 hypothetical people.
End of explanation
"""
cancer = ...
cancer
_ = tests.grade("q311")
"""
Explanation: One way to visualize this dataset is with a contingency table, which you've seen before.
Question 3.1.1. Create a contingency table that looks like this:
|cancer status|negative|positive|
|-|-|-|
|sick|||
|healthy|||
...with the count of each group filled in, according to what we've told you above. The counts in the 4 boxes should sum to 10,000.
Hint: Use pivot with the sum function.
End of explanation
"""
...
"""
Explanation: Question 3.1.2. Display the people data in an icon array. The name of the population members should be "people who've taken a cancer test".
End of explanation
"""
by_health = people.select(0, 2).group(0, sum).relabeled(1, 'count')
display_grouped_icon_array(by_health, "people who've taken a cancer test")
"""
Explanation: Now let's think about how you can use this kind of information when you're tested for cancer.
Before you know any information about yourself, you could imagine yourself as a uniform random sample of one of the 10,000 people in this imaginary population of people who have been tested.
What's the chance that you have cancer, knowing nothing else about yourself? It's $\frac{100}{10000}$, or 1%. We can see that more directly with this icon array:
End of explanation
"""
# We first made an icon array in the 2 lines below.
by_test = ...
display_grouped_icon_array(by_test, "people who've taken a cancer test")
# Fill in the probability of having a positive test result.
probability_positive_test = ...
_ = tests.grade("q313")
"""
Explanation: Question 3.1.3. What's the chance that you have a positive test result, knowing nothing else about yourself?
Hint: Make an icon array.
End of explanation
"""
# Just run this cell.
display_grouped_icon_array(people.where("test status", are.equal_to("positive")).drop(1), "people who have a positive test result")
"""
Explanation: 3.2. Interpreting test results
Suppose you have a positive test result. This means you can now narrow yourself down to being part of one of two groups:
1. The people with cancer who have a positive test result.
2. The people without cancer who have a positive test result.
Here's an icon array for those two groups:
End of explanation
"""
# Set this to one of the numbers above.
rough_prob_sick_given_positive = ...
_ = tests.grade("q321")
"""
Explanation: The conditional probability that you have cancer given your positive test result is the chance that you're in the first group, assuming you're in one of these two groups.
Question 3.2.1. Eyeballing it, is the conditional probability that you have cancer given your positive test result closest to:
1. 9/10
2. 2/3
3. 1/2
4. 1/3
5. 1/100
End of explanation
"""
prob_sick_given_positive = ...
prob_sick_given_positive
_ = tests.grade("q322")
"""
Explanation: Question 3.2.2. Now write code to calculate that probability exactly, using the original contingency table you wrote.
End of explanation
"""
# The full icon array is given here for your convenience.
# Write your answer in the next cell.
display_grouped_icon_array(people, "people who've taken a cancer test")
"""
Explanation: Question 3.2.3. Look at the full icon array again. Using that, how would you compute (roughly) the conditional probability of cancer given a positive test?
End of explanation
"""
# Hint: You may find these two tables useful:
has_cancer = cancer.where("cancer status", are.equal_to("sick"))
no_cancer = cancer.where("cancer status", are.equal_to("healthy"))
X = .01
Y = ...
Z = ...
print('X:', X, ' Y:', Y, ' Z:', Z)
_ = tests.grade("q41")
# For your convenience, you can run this cell to run all the tests at once!
import os
_ = [tests.grade(q[:-3]) for q in os.listdir("tests") if q.startswith('q')]
# Run this cell to submit your work *after* you have passed all of the test cells.
# It's ok to run this cell multiple times. Only your final submission will be scored.
!TZ=America/Los_Angeles jupyter nbconvert --output=".lab10_$(date +%m%d_%H%M)_submission.html" lab10.ipynb
"""
Explanation: Write your answer here, replacing this text.
Question 3.2.4. Is your answer to question 3.2.2 bigger than the overall proportion of people in the population who have cancer? Does that make sense?
Write your answer here, replacing this text.
4. Tree diagrams
A tree diagram is another useful visualization for conditional probability. It is easiest to draw a tree diagram when the probabilities are presented in a slightly different way. For example, people often summarize the information in your cancer table using 3 numbers:
The overall probability of having cancer is X. (This is called the base rate or marginal probability of the disease.)
Given that you have cancer, the probability of a positive test result is Y. (This is called the sensitivity of the test. Higher values of Y mean the test is more useful.)
Given that you don't have cancer, the probability of a positive test result is Z. (This is called the false positive rate of the test. Higher values of Z mean the test is less useful.)
This corresponds to this tree diagram:
/\
/ \
1-X / \ X
/ \
no cancer cancer
/ \1-Z / \
Z / \ Y/ \ 1-Y
/ \ / \
+ - + -
You already saw that the base rate of cancer (which we'll call X for short) was .01 in the previous section. Y and Z can be computed using the same method you used to compute the conditional probability of cancer given a positive test result.
Question 4.1. Compute Y and Z for the data in section 3. You can use an icon array or compute them only with code. You can run the tests to see the right answers.
End of explanation
"""
|
allandieguez/teaching | Matplotlib e Seaborn/Modulo 2 - Scatter Plot + Text.ipynb | gpl-3.0 | import numpy as np
import os
import pandas as pd
""" habilitando plots no notebook """
%matplotlib inline
""" plot libs """
import matplotlib.pyplot as plt
import seaborn as sns
""" Configurando o Matplotlib para o modo manual """
plt.interactive(False)
"""
Explanation: Módulo 2: Scatter Plot + Text
Tutorial
Imports
End of explanation
"""
""" Dataset com Distribuição Normal 2D """
d1 = pd.DataFrame(
columns=["x", "y"],
data=np.random.randn(20, 2) + np.array([5, 5])
)
d2 = pd.DataFrame(
columns=["x", "y"],
data=np.random.randn(30, 2) + np.array([1, 2])
)
"""
Explanation: Scatter Plot
Variáveis
End of explanation
"""
plt.figure(figsize=(12, 8))
plt.scatter(d1.x, d1.y)
plt.show()
"""
Explanation: Plot dos datasets
scatter plot simples
End of explanation
"""
plt.figure(figsize=(12, 8))
plt.scatter(
d1.x, d1.y, # pares de coordenadas (x, y)
c="darkorange", # cor
s=100, # tamanho em pixels
marker="s" # simbolo a ser usado
)
plt.show()
"""
Explanation: Customizando formas, cores e tamanho
End of explanation
"""
plt.figure(figsize=(12, 8))
plt.scatter(d1.x, d1.y, c="darkorange", s=100, marker="s", label="golden squares")
plt.scatter(d2.x, d2.y, c="purple", s=200, marker="*", label="purple stars")
plt.legend() # captura os nomes das series em 'label'
plt.show()
"""
Explanation: Adicionando mais um dataset
End of explanation
"""
""" inicialização como antes """
plt.figure(figsize=(12, 8))
""" plt.gca() retorna a janela mais recente """
d1.plot(
ax=plt.gca(),
kind="scatter",
x="x", y="y",
c="k",
s=100,
marker="o",
label="black circles"
)
d2.plot(
ax=plt.gca(),
kind="scatter",
x="x", y="y",
c="cyan",
s=200,
marker="v",
label="cyan triangles"
)
""" visualizar com legenda """
plt.legend()
plt.show()
"""
Explanation: Facilidades no Pandas
End of explanation
"""
df = pd.DataFrame(
columns=["S1", "S2", "S3", "S4"],
data=np.random.randn(100, 4)
)
df["S2"] += 2 * df.S1
df["S3"] -= 2 * df.S2
df.describe()
""" Tabela de Correlação """
df.corr().unstack().drop_duplicates()
""" Visualização Simples """
df.corr().unstack().drop_duplicates()
"""
Explanation: Case: Visualização da Correlação
Correlação de Pearson:
Medida de quanto duas séries numéricas estão alinhadas, i.e., o quanto elas variam em relação uma à outra em comparação com o quanto elas variam em relação a si mesmas.
Mais infos aqui: <a href="https://pt.wikipedia.org/wiki/Coeficiente_de_correla%C3%A7%C3%A3o_de_Pearson" target=_blank> https://pt.wikipedia.org/wiki/Coeficiente_de_correla%C3%A7%C3%A3o_de_Pearson <a/>
Matematicamente...
... é a covariância dividida pelo produto dos desvios padrão de ambas.
Também pode ser pensada como o cosseno do ângulo entre dois vetores multidimensionais.
A fórmula é dada por:
<img src="images/correlacao_pearson_formula.svg">
Na prática:
Número (float) entre -1 e 1 indicando (fonte: Wikipédia):
* 0.9 para mais ou para menos indica uma correlação muito forte.
* 0.7 a 0.9 positivo ou negativo indica uma correlação forte.
* 0.5 a 0.7 positivo ou negativo indica uma correlação moderada.
* 0.3 a 0.5 positivo ou negativo indica uma correlação fraca.
* 0 a 0.3 positivo ou negativo indica uma correlação desprezível.
Séries e suas Correlações
End of explanation
"""
""" Visualização por Scatter Plot """
pd.plotting.scatter_matrix(df, s=400, color="red", figsize=(13,13))
plt.show()
"""
Explanation: Visualização com Pandas + Matplotlib
End of explanation
"""
""" Visualização por Scatter Plot """
sns.pairplot(df)
plt.show()
"""
Explanation: Visualização com Pandas + Seaborn
End of explanation
"""
""" Plot Simples """
plt.figure(figsize=(12,8))
plt.text(x=0.5, y=0.5, s="Texto a ser mostrado")
plt.show()
""" Plot Simples """
plt.figure(figsize=(12,8))
plt.text(x=1, y=1, s="Texto a ser mostrado")
plt.show()
"""
Explanation: Text Plot
Posicionando Texto na Visualização
End of explanation
"""
plt.figure(figsize=(12,8))
font = {
'family': 'serif',
'color': 'darkred',
'weight': 'bold',
'size': 26,
}
plt.text(x=0.5, y=0.5, s="Texto a ser mostrado", fontdict=font)
plt.show()
"""
Explanation: Aumentando a fonte
End of explanation
"""
plt.figure(figsize=(12,8))
plt.xlim(-10, 10)
plt.ylim(-10, 10)
font = {
'family': 'serif',
'color': 'darkred',
'weight': 'bold',
'size': 26,
}
plt.text(x=-5, y=-2, s="Texto a ser mostrado", fontdict=font)
plt.show()
"""
Explanation: Mudando a Escala
End of explanation
"""
plt.figure(figsize=(12,8))
plt.xlim(-10, 10)
plt.ylim(-10, 10)
font = {
'family': 'serif',
'color': 'darkred',
'weight': 'bold',
'size': 26,
}
plt.text(x=-5, y=-2, s="Texto a ser mostrado", fontdict=font)
plt.xlabel("X Axis", fontdict=font)
plt.ylabel("Y Axis", fontdict=font)
plt.show()
"""
Explanation: Aplicando 'fontdict' em outros textos
End of explanation
"""
plt.figure(figsize=(12,8))
plt.xlim(-10, 10)
plt.ylim(-10, 10)
font = {
'family': 'serif',
'color': 'darkred',
'weight': 'bold',
'size': 26,
}
plt.text(x=-5, y=-2, s="Texto a ser mostrado", fontdict=font)
plt.scatter(x=-5, y=-2, s=400, c="darkorange", marker="o")
plt.xlabel("X Axis", fontdict=font)
plt.ylabel("Y Axis", fontdict=font)
plt.show()
"""
Explanation: Juntando com Scatter Plot
End of explanation
"""
""" Tamanho do Dataset """
df = pd.read_csv(
os.path.join("data", "produtos_ecommerce.csv"),
sep=";",
encoding="utf-8"
)
df[["coord_x", "coord_y"]] = df[["coord_x", "coord_y"]].astype(float)
df.head()
""" Tamanho da tabela """
df.shape
""" Contagem de Produtos por Categoria"""
df.label.value_counts()
"""
Explanation: Desafio
Objetivo:
Visualizar dados de um problema de classificação de produtos de e-commerce projetados em 2D.
Dataset:
End of explanation
"""
""" Escreva a a Solução Aqui """
"""
Explanation: [ A ]: Visualizar Todo o Dataset
Plotar todos os produtos usando as coordenadas coord_x e coord_y.
Cada categoria deve ser representada por uma cor diferente.
A figura deve ter uma legenda mostrando as categorias e suas cores.
End of explanation
"""
""" Escreva a a Solução Aqui """
"""
Explanation: [ B ]: Visualizar uma amostra com o texto
Plotar duas categorias, apenas 10 produtos de cada, usando o campo product para plotar também o texto.
A figura deve manter a legenda mostrando apenas as duas categorias e suas cores.
End of explanation
"""
|
shead-custom-design/pipecat | docs/gps-receivers.ipynb | gpl-3.0 | # nbconvert: hide
from __future__ import absolute_import, division, print_function
import sys
sys.path.append("../features/steps")
import test
socket = test.mock_module("socket")
path = "../data/gps"
client = "172.10.0.20"
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, stop=6)
import pipecat.record
import pipecat.udp
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
for record in pipe:
pipecat.record.dump(record)
"""
Explanation: .. gps-receivers:
GPS Receivers
Most GPS receivers have data logging capabilities that you can use with Pipecat to view navigational information. Some receivers connect to your computer via a serial port or a serial-over-USB cable that acts like a traditional serial port. Others can push data to a network socket. For this demonstration, we will receive GPS data sent from an iPhone to a UDP socket:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, stop=6)
import pipecat.device.gps
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
for record in pipe:
pipecat.record.dump(record)
"""
Explanation: Here, we used :func:pipecat.udp.receive to open a UDP socket listening on port 7777 on all available network interfaces ("0.0.0.0") and convert the received messages into Pipecat :ref:records, which we dump to the console. Note that each record includes the address of the client (the phone in this case), along with a "message" field containing the raw data of the message. In this case the raw data is in NMEA format, a widely-used standard for exchanging navigational data. To decode the contents of each message, we add the appropriate Pipecat device to the end of the pipe:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, start=100, stop=110)
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
for record in pipe:
if "latitude" in record:
print("Latitude:", record["latitude"], "Longitude:", record["longitude"])
"""
Explanation: As you can see, :func:pipecat.device.gps.nmea has converted the raw NMEA messages into records containing human-readable navigational fields with appropriate physical units. Note that unlike the :ref:battery-chargers example, not every record produced by the GPS receiver has the same fields. The NMEA standard includes many different types of messages, and most GPS receivers will produce more than one type. This will increase the complexity of our code - for example, we might have to test for the presence of a field before extracting it from a record:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, start=100, stop=120)
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
for record in pipe:
if record["id"] == "PASHR":
print("Pitch:", record["pitch"])
"""
Explanation: Alternatively, we might use the record id field to key our code off a specific type of NMEA message:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, start=100, stop=120)
import pipecat.filter
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
pipe = pipecat.filter.keep(pipe, key="id", value="GPGLL")
for record in pipe:
pipecat.record.dump(record)
"""
Explanation: Another alternative would be to add a filter to our pipe so we only receive records that match some criteria:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, start=100, stop=120)
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
for record in pipe:
if "speed" in record:
print(record["speed"].to(pipecat.units.mph))
"""
Explanation: Note that :func:pipecat.filter.keep discards all records that don't meet the given criteria, which allows our downstream code to rely on the availability of specific fields.
Regardless of the logic you employ to identify fields of interest, Pipecat always makes it easy to convert units safely and explicitly:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client, start=100, stop=115)
import pipecat.utility
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
pipe = pipecat.filter.keep(pipe, key="id", value="GPGLL")
pipe = pipecat.utility.add_field(pipe, "serial", "1237V")
for record in pipe:
pipecat.record.dump(record)
"""
Explanation: Let's explore other things we can do with our pipe. To begin, you might want to add additional metadata to the records returned from a device. For example, if you were collecting data from multiple devices you might want to "tag" records with a user-specific unique identifier:
End of explanation
"""
# nbconvert: hide
socket.socket().recvfrom.side_effect = test.recvfrom_file(path=path, client=client)
import pipecat.store
pipe = pipecat.udp.receive(address=("0.0.0.0", 7777), maxsize=1024)
pipe = pipecat.device.gps.nmea(pipe, key="message")
pipe = pipecat.store.cache(pipe)
for record in pipe:
pass
print(pipe.table["speed"])
"""
Explanation: Now let's consider calculating some simple statistics, such as our average speed on a trip. When we iterate over the contents of a pipe using a for loop, we receive one record at-a-time until the pipe is empty. We could keep track of a "running" average during iteration, and there are use-cases where that is the best way to solve the problem. However, for moderately-sized data, Pipecat provides a more convenient approach:
End of explanation
"""
print("Average speed:", pipe.table["speed"].mean().to(pipecat.units.mph))
"""
Explanation: Here, :func:pipecat.store.cache creates an in-memory cache that stores every record it receives. We have a do-nothing for loop that reads data from the charger to populate the cache. Once that's complete, we can use the cache table attribute to retrieve data from the cache using the same keys and syntax we would use with a record. Unlike a record, the cache returns every value for a given key at once (using a Numpy array), which makes it easy to compute the statistics we're interested in:
End of explanation
"""
import toyplot
canvas = toyplot.Canvas(width=600, height=400)
axes = canvas.cartesian(grid=(2, 1, 0), xlabel="Record #", ylabel="Speed (MPH)")
axes.plot(pipe.table["speed"].to(pipecat.units.mph))
axes = canvas.cartesian(grid=(2, 1, 1), xlabel="Record #", ylabel="Track")
axes.plot(pipe.table["track"]);
"""
Explanation: Consolidating fields using the cache is also perfect for generating plots with a library like Toyplot (http://toyplot.readthedocs.io):
End of explanation
"""
|
vinitsamel/udacitydeeplearning | first-neural-network/Your_first_neural_network.ipynb | mit | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
# One-hot encode each categorical column and append the resulting
# indicator columns to the frame.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)

# Drop identifiers, the raw categorical columns that are now encoded,
# and a few unused fields.
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
# Standardize each continuous variable to zero mean and unit standard
# deviation.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save data for approximately the last 21 days
# (24 rows per day, so 21*24 rows are held out for final testing).
test_data = data[-21*24:]

# Now remove the test data from the data set
data = data[:-21*24]

# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
error = y - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Calculate the hidden layer's contribution to the error
hidden_error = error * self.weights_hidden_to_output.T * (hidden_outputs * (1 - hidden_outputs))
# Weight step (input to hidden)
delta_weights_i_h += hidden_error * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += error * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step
def run(self, features):
    ''' Run a forward pass through the network with input features.

    Arguments
    ---------
    features: 1D array of feature values (or 2D array of records)

    Returns
    -------
    The network output(s) for the given features.
    '''
    hidden_inputs = np.dot(features, self.weights_input_to_hidden)  # signals into hidden layer
    hidden_outputs = self.activation_function(hidden_inputs)        # signals from hidden layer

    # The output layer uses the identity activation, so the output equals
    # the weighted sum of the hidden activations.
    final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
    final_outputs = final_inputs
    return final_outputs
def MSE(y, Y):
    """Mean squared error between predictions `y` and targets `Y`."""
    squared_errors = (y - Y) ** 2
    return np.mean(squared_errors)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
<img src="assets/neural_network.png" width=300px>
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
# Unit tests for the bike-sharing NeuralNetwork project: data loading,
# activation function, one training step, and the forward pass.
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
# Expected weights after a single gradient-descent step with lr=0.5
# on the `inputs`/`targets` fixture defined above.
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
# Expected forward-pass output for the fixture weights and inputs.
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Unit tests
Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
End of explanation
"""
import sys
### Set the hyperparameters here ###
### Commented code used to identify hyper params - iterations and learning rate. Psuedo GridSearchCV type.
#iterations_list = [600,650,700]
#learning_rate_list = [1.6, 1.7, 1.8, 1.85, 1.9]
iterations = 1800
learning_rate = 1.7
hidden_nodes = 8
output_nodes = 1
#min_iter = 1000
#min_lr = 5
#min_val_loss = 1.0
#for iterations in iterations_list:
# for learning_rate in learning_rate_list:
#### Original Code Start
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
#### Original Code End
# if val_loss < min_val_loss:
# min_lr = learning_rate
# min_iter = iterations
# min_val_loss = val_loss
#print ("Learning Rate =", min_lr)
#print ("Iterations =", min_iter)
#print ("Validation Loss =", min_val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of iterations
This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.24/_downloads/81e58e463fcd949fd4ab7ab7ab8ef317/left_cerebellum_volume_source.ipynb | bsd-3-clause | # Author: Alan Leggitt <alan.leggitt@ucsf.edu>
#
# License: BSD-3-Clause
import os.path as op
import mne
from mne import setup_source_space, setup_volume_source_space
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'sample'
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
"""
Explanation: Generate a left cerebellum volume source space
Generate a volume source space of the left cerebellum and plot its vertices
relative to the left cortical surface source space and the FreeSurfer
segmentation file.
End of explanation
"""
# setup a cortical surface source space and extract left hemisphere
surf = setup_source_space(subject, subjects_dir=subjects_dir, add_dist=False)
lh_surf = surf[0]
# setup a volume source space of the left cerebellum cortex
volume_label = 'Left-Cerebellum-Cortex'
sphere = (0, 0, 0, 0.12)
lh_cereb = setup_volume_source_space(
subject, mri=aseg_fname, sphere=sphere, volume_label=volume_label,
subjects_dir=subjects_dir, sphere_units='m')
# Combine the source spaces
src = surf + lh_cereb
"""
Explanation: Setup the source spaces
End of explanation
"""
fig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,
surfaces='white', coord_frame='mri',
src=src)
mne.viz.set_3d_view(fig, azimuth=180, elevation=90,
distance=0.30, focalpoint=(-0.03, -0.01, 0.03))
"""
Explanation: Plot the positions of each source space
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.19/_downloads/e5c0288e15772e4fb31189b766e9d7be/plot_metadata_epochs.ipynb | bsd-3-clause | # Authors: Chris Holdgraf <choldgraf@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import mne
import numpy as np
import matplotlib.pyplot as plt
# Load the data from the internet
path = mne.datasets.kiloword.data_path() + '/kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
# The metadata exists as a Pandas DataFrame
print(epochs.metadata.head(10))
"""
Explanation: Pandas querying and metadata with Epochs objects
Demonstrating pandas-style string querying with Epochs metadata.
For related uses of :class:mne.Epochs, see the starting tutorial
tut-epochs-class.
Sometimes you may have a complex trial structure that cannot be easily
summarized as a set of unique integers. In this case, it may be useful to use
the metadata attribute of :class:mne.Epochs objects. This must be a
:class:pandas.DataFrame where each row corresponds to an epoch, and each
column corresponds to a metadata attribute of each epoch. Columns must
contain either strings, ints, or floats.
In this dataset, subjects were presented with individual words
on a screen, and the EEG activity in response to each word was recorded.
We know which word was displayed in each epoch, as well as
extra information about the word (e.g., word frequency).
Loading the data
First we'll load the data. If metadata exists for an :class:mne.Epochs
fif file, it will automatically be loaded in the metadata attribute.
End of explanation
"""
av1 = epochs['Concreteness < 5 and WordFrequency < 2'].average()
av2 = epochs['Concreteness > 5 and WordFrequency > 2'].average()
joint_kwargs = dict(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
av1.plot_joint(show=False, **joint_kwargs)
av2.plot_joint(show=False, **joint_kwargs)
"""
Explanation: We can use this metadata attribute to select subsets of Epochs. This
uses the Pandas :meth:pandas.DataFrame.query method under the hood.
Any valid query string will work. Below we'll make two plots to compare
between them:
End of explanation
"""
words = ['film', 'cent', 'shot', 'cold', 'main']
epochs['WORD in {}'.format(words)].plot_image(show=False)
"""
Explanation: Next we'll choose a subset of words to keep.
End of explanation
"""
epochs['cent'].average().plot(show=False, time_unit='s')
"""
Explanation: Note that traditional epochs sub-selection still works. The traditional
MNE methods for selecting epochs will supersede the rich metadata querying.
End of explanation
"""
# Create two new metadata columns
metadata = epochs.metadata
is_concrete = metadata["Concreteness"] > metadata["Concreteness"].median()
metadata["is_concrete"] = np.where(is_concrete, 'Concrete', 'Abstract')
is_long = metadata["NumberOfLetters"] > 5
metadata["is_long"] = np.where(is_long, 'Long', 'Short')
epochs.metadata = metadata
"""
Explanation: Below we'll show a more involved example that leverages the metadata
of each epoch. We'll create a new column in our metadata object and use
it to generate averages for many subsets of trials.
End of explanation
"""
query = "is_long == '{0}' & is_concrete == '{1}'"
evokeds = dict()
for concreteness in ("Concrete", "Abstract"):
for length in ("Long", "Short"):
subset = epochs[query.format(length, concreteness)]
evokeds["/".join((concreteness, length))] = list(subset.iter_evoked())
# For the actual visualisation, we store a number of shared parameters.
style_plot = dict(
colors={"Long": "Crimson", "Short": "Cornflowerblue"},
linestyles={"Concrete": "-", "Abstract": ":"},
split_legend=True,
ci=.68,
show_sensors='lower right',
legend='lower left',
truncate_yaxis="auto",
picks=epochs.ch_names.index("Pz"),
)
fig, ax = plt.subplots(figsize=(6, 4))
mne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)
plt.show()
"""
Explanation: Now we can quickly extract (and plot) subsets of the data. For example, to
look at words split by word length and concreteness:
End of explanation
"""
letters = epochs.metadata["NumberOfLetters"].unique().astype(int).astype(str)
evokeds = dict()
for n_letters in letters:
evokeds[n_letters] = epochs["NumberOfLetters == " + n_letters].average()
style_plot["colors"] = {n_letters: int(n_letters)
for n_letters in letters}
style_plot["cmap"] = ("# of Letters", "viridis_r")
del style_plot['linestyles']
fig, ax = plt.subplots(figsize=(6, 4))
mne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)
plt.show()
"""
Explanation: To compare words which are 4, 5, 6, 7 or 8 letters long:
End of explanation
"""
evokeds = dict()
query = "is_concrete == '{0}' & NumberOfLetters == {1}"
for concreteness in ("Concrete", "Abstract"):
for n_letters in letters:
subset = epochs[query.format(concreteness, n_letters)]
evokeds["/".join((concreteness, n_letters))] = subset.average()
style_plot["linestyles"] = {"Concrete": "-", "Abstract": ":"}
fig, ax = plt.subplots(figsize=(6, 4))
mne.viz.plot_compare_evokeds(evokeds, axes=ax, **style_plot)
plt.show()
"""
Explanation: And finally, for the interaction between concreteness and continuous length
in letters:
End of explanation
"""
data = epochs.get_data()
metadata = epochs.metadata.copy()
epochs_new = mne.EpochsArray(data, epochs.info, metadata=metadata)
"""
Explanation: <div class="alert alert-info"><h4>Note</h4><p>Creating an :class:`mne.Epochs` object with metadata is done by passing
a :class:`pandas.DataFrame` to the ``metadata`` kwarg as follows:</p></div>
End of explanation
"""
|
yingchi/fastai-notes | deeplearning1/rnn/rnn-modu.ipynb | apache-2.0 | from theano.sandbox import cuda
cuda.use('gpu1')
%matplotlib inline
import utils;
from utils import *
from keras.layers import TimeDistributed, Activation
from keras.callbacks import ModelCheckpoint
from numpy.random import choice
"""
Explanation: Auto Generate Text for <<默读>>
End of explanation
"""
path = 'text/modu.txt'
text = open(path).read()
text = text.replace(' ', '')
text = text[-200000:]
print('corpus length:', len(text))
!tail {path} -n10
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars: ', vocab_size)
"""
Explanation: Setup
End of explanation
"""
chars.insert(0, "\0")
''.join(chars[:16])
char_indices = dict((c, i) for i,c in enumerate(chars))
indices_char = dict((i, c) for i,c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:20])
"""
Explanation: Sometimes it's useful to have a zero value in the dataset, e.g. for padding
End of explanation
"""
seq_length = 100
dataX = []
dataY = []
for i in range(0, len(idx) - seq_length, 1):
seq_in = idx[i:i+seq_length]
seq_out = idx[i+seq_length]
dataX.append(seq_in)
dataY.append(seq_out)
n_patterns = len(dataX)
n_patterns
"""
Explanation: Our LSTM RNN!
Now, we will try to implement the typical structure of RNN - i.e. the rolled one.
That is, we cannot use c1, c2, c.... Instead, we will need an array of inputs all at once.
End of explanation
"""
X = np.reshape(dataX, (n_patterns, seq_length, 1))
print(X.shape)
X = X / float(vocab_size)
y = np_utils.to_categorical(dataY)
print(y.shape)
"""
Explanation: Now that we have prepared our training data we need to transform it so that is it suitable for use with Keras.
First we must transform the list of input sequences into the form [samples, time steps, features] expected by an LSTM network
Next, we need to rescale the integers to [0, 1] to make the patterns easiers to learn by the LSTM network that uses the sigmoid activation function by default
Finally, we need to convert the output patterns into one-hot encoding. This is so that we can configure the network to predict the probability of each of the 47 different characters in the vocabulary (an easier representation) rather than trying to force it to predict precisely the next character.
End of explanation
"""
model = Sequential()
model.add(LSTM(512, input_shape=(X.shape[1], X.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adam())
"""
Explanation: We can now define our LSTM model. Here we define a single hidden LSTM layer with 256 memory units. The network uses dropout with a probability of 20. The output layer is a Dense layer using the softmax activation function to output a probability prediction for each of the 3000+ characters between 0 and 1.
End of explanation
"""
# define the checkpoint
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.summary()
model.fit(X, y, nb_epoch=4, batch_size=256, callbacks=callbacks_list)
# pick a random seed
start = np.random.randint(0, len(dataX)-1)
pattern = dataX[start]
print("Seed:")
print("\"", ''.join([indices_char[value] for value in pattern]), "\"")
# generate characters one at a time, feeding each prediction back as input
for i in range(1000):
    x = np.reshape(pattern, (1, len(pattern), 1))
    # scale to [0, 1] with the same factor used for the training data
    # (was `n_vocab`, which is undefined in this notebook)
    x = x / float(vocab_size)
    prediction = model.predict(x, verbose=0)
    index = np.argmax(prediction)
    result = indices_char[index]
    sys.stdout.write(result)
    # slide the window: append the prediction, drop the oldest index
    pattern.append(index)
    pattern = pattern[1:len(pattern)]
# `print "..."` is Python 2 syntax; this notebook otherwise uses print()
print("\nDone.")
"""
Explanation: The network is slow to train (about 300 seconds per epoch on an Nvidia K520 GPU). Because of the slowness and because of our optimization requirements, we will use model checkpointing to record all of the network weights to file each time an improvement in loss is observed at the end of the epoch. We will use the best set of weights (lowest loss) to instantiate our generative model in the next section.
End of explanation
"""
bs=64
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs, batch_input_shape=(bs,cs)),
BatchNormalization(),
LSTM(n_hidden, return_sequences=True, stateful=True),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
"""
Explanation: Stateful model with keras
stateful=True means that at end of each sequence, don't reset the hidden activations to 0, but leave them as they are. And also make sure that you pass shuffle=False when you train the model.
A stateful model is easy to create (just add "stateful=True") but harder to train. We had to add batchnorm and use LSTM to get reasonable results.
When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.
End of explanation
"""
mx = len(x_rnn)//bs*bs
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=10, shuffle=False)
"""
Explanation: Since we're using a fixed batch shape, we have to ensure our inputs and outputs are a even multiple of the batch size.
End of explanation
"""
def get_next_keras(inp):
    """Predict the character that follows the input string `inp`.

    Maps each character to its vocabulary index, adds a batch dimension,
    and returns the character with the highest predicted probability.
    """
    idxs = [char_indices[c] for c in inp]
    # np.newaxis adds a leading batch dimension of size 1
    arrs = np.array(idxs)[np.newaxis, :]
    p = model.predict(arrs)[0]
    return chars[np.argmax(p)]
model.predict(x_rnn[-64:])[0]
"""
Explanation: Test model
End of explanation
"""
|
tpin3694/tpin3694.github.io | python/pandas_select_rows_when_column_has_certain_values.ipynb | mit | # Import modules
import pandas as pd
# Set ipython's max row display
pd.set_option('display.max_row', 1000)
# Set iPython's max column width to 50
pd.set_option('display.max_columns', 50)
"""
Explanation: Title: Select Rows When Columns Contain Certain Values
Slug: pandas_select_rows_when_column_has_certain_values
Summary: Select Rows When Columns Contain Certain Values
Date: 2016-05-01 12:00
Category: Python
Tags: Data Wrangling
Authors: Chris Albon
Preliminaries
End of explanation
"""
# Create an example dataframe
data = {'name': ['Jason', 'Molly', 'Tina', 'Jake', 'Amy'],
'year': [2012, 2012, 2013, 2014, 2014],
'reports': [4, 24, 31, 2, 3]}
df = pd.DataFrame(data, index = ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'])
df
"""
Explanation: Create an example dataframe
End of explanation
"""
value_list = ['Tina', 'Molly', 'Jason']
#Grab DataFrame rows where column has certain values
df[df.name.isin(value_list)]
#Grab DataFrame rows where column doesn't have certain values
df[~df.name.isin(value_list)]
"""
Explanation: Grab rows based on column values
End of explanation
"""
|
InsightLab/data-science-cookbook | 2019/12-spark/12-spark-intro/Actions.ipynb | mit | data = sc.parallelize(range(1, 11))
# Commutative, associative combiners as required by RDD.reduce.
def summation(a, b): return a + b
# NOTE: this shadows the built-in `max` in the notebook's namespace;
# `data.reduce(max)` below relies on this definition.
def max(a, b): return a if a > b else b
# reduce to find the sum
print (data.reduce(summation))
# reduce to find the max
print (data.reduce(max))
"""
Explanation: reduce(func)
Agrega os elementos do RDD usando uma função func (que leva dois argumentos e retorna um). A função deve ser comutativa e associativa para que possa ser computada corretamente em paralelo.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 1001))
print 'data: ', data.filter(lambda i: i % 10 == 0).collect()
"""
Explanation: collect()
Retornar todos os elementos do RDD como uma lista (do python, no caso). Isso geralmente é útil após um filtro ou outra operação que retorna um subconjunto suficientemente pequeno dos dados.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 1001))
print ('data: ', data.count())
print ('data filtered: ', data.filter(lambda i: i % 10 == 0).count())
"""
Explanation: count()
Retorna o número de elementos no RDD.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 1001))
print ('data first 10: ', data.take(10))
"""
Explanation: take(n)
Retorna uma lista com os primeiros n elementos do RDD.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 11))
# sampling with replacement
print (data.takeSample(True, 4))
# sampling without replacement
print (data.takeSample(False, 4))
"""
Explanation: takeSample(withReplacement, num, [seed])
Retorna uma lista com uma amostra aleatória de num elementos do RDD, com ou sem substituição (withReplacement), opcionalmente pré-especificando uma semente (seed) de gerador de números aleatórios.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 1001))
data.saveAsTextFile("range_1_to_100")
# Visualize o diretório deste notebook. Existirá um pasta com o nome range_1_to_100.
# Nela haverá arquivos que do RDD. Cada partição do processamento tem um arquivo correspondente.
"""
Explanation: saveAsTextFile(path)
Escreve os elementos do RDD como um arquivo de texto (ou conjunto de arquivos de texto) em um determinado diretório no sistema de arquivos local, no HDFS ou qualquer outro sistema de arquivos suportado pelo Hadoop. O Spark chamará toString em cada elemento para convertê-lo em uma linha de texto no arquivo.
Exemplo:
End of explanation
"""
data = sc.parallelize(range(1, 1001))
# accumulator
summation_20_mult = sc.accumulator(0)
def conditional_print(i):
    """Print `i` and fold it into the `summation_20_mult` Spark accumulator
    when it is a multiple of 20; otherwise do nothing."""
    if i % 20 == 0:
        print(i)
        summation_20_mult.add(i)
data.foreach(conditional_print)
print ('Summation of multiple of 20 from 1 to 1000: ', summation_20_mult.value)
"""
Explanation: foreach(func)
Executa uma função func para cada elemento do RDD. Isso geralmente é feito para operações sem tanto efeito, como atualizar um acumulador ou interagir com sistemas de armazenamento externo.
Exemplo:
End of explanation
"""
|
keras-team/autokeras | docs/ipynb/load.ipynb | apache-2.0 | dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz" # noqa: E501
local_file_path = tf.keras.utils.get_file(
origin=dataset_url, fname="image_data", extract=True
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After check mannually, we know the extracted data is in 'flower_photos'.
data_dir = os.path.join(local_dir_path, "flower_photos")
print(data_dir)
"""
Explanation: Load Images from Disk
If the data is too large to put in memory all at once, we can load it batch by
batch into memory from disk with tf.data.Dataset. This
function
can help you build such a tf.data.Dataset for image data.
First, we download the data and extract the files.
End of explanation
"""
batch_size = 32
img_height = 180
img_width = 180
train_data = ak.image_dataset_from_directory(
data_dir,
# Use 20% data as testing data.
validation_split=0.2,
subset="training",
# Set seed to ensure the same split when loading testing data.
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
test_data = ak.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
"""
Explanation: The directory should look like this. Each folder contains the images in the
same class.
flowers_photos/
daisy/
dandelion/
roses/
sunflowers/
tulips/
We can split the data into training and testing as we load them.
End of explanation
"""
clf = ak.ImageClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=1)
print(clf.evaluate(test_data))
"""
Explanation: Then we just do one quick demo of AutoKeras to make sure the dataset works.
End of explanation
"""
dataset_url = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
local_file_path = tf.keras.utils.get_file(
fname="text_data",
origin=dataset_url,
extract=True,
)
# The file is extracted in the same directory as the downloaded file.
local_dir_path = os.path.dirname(local_file_path)
# After check mannually, we know the extracted data is in 'aclImdb'.
data_dir = os.path.join(local_dir_path, "aclImdb")
# Remove the unused data folder.
shutil.rmtree(os.path.join(data_dir, "train/unsup"))
"""
Explanation: Load Texts from Disk
You can also load text datasets in the same way.
End of explanation
"""
print(data_dir)
train_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "train"), batch_size=batch_size
)
test_data = ak.text_dataset_from_directory(
os.path.join(data_dir, "test"), shuffle=False, batch_size=batch_size
)
clf = ak.TextClassifier(overwrite=True, max_trials=1)
clf.fit(train_data, epochs=2)
print(clf.evaluate(test_data))
"""
Explanation: For this dataset, the data is already split into train and test.
We just load them separately.
End of explanation
"""
N_BATCHES = 30
BATCH_SIZE = 100
N_FEATURES = 10
def get_data_generator(n_batches, batch_size, n_features):
    """Get a generator function yielding n_batches * batch_size random samples.

    Each sample is a pair (x, y) where x has shape (n_features,) and y is a
    boolean label that is True when the mean of x exceeds 0.5.  The flat
    stream is batched downstream via tf.data.Dataset.batch(batch_size).
    """
    def data_generator():
        for _ in range(n_batches * batch_size):
            x = np.random.randn(n_features)
            # label: True when the feature mean exceeds 0.5
            y = x.sum(axis=0) / n_features > 0.5
            yield x, y

    return data_generator
dataset = tf.data.Dataset.from_generator(
get_data_generator(N_BATCHES, BATCH_SIZE, N_FEATURES),
output_types=(tf.float32, tf.float32),
output_shapes=((N_FEATURES,), tuple()),
).batch(BATCH_SIZE)
clf = ak.StructuredDataClassifier(overwrite=True, max_trials=1, seed=5)
clf.fit(x=dataset, validation_data=dataset, batch_size=BATCH_SIZE)
print(clf.evaluate(dataset))
"""
Explanation: Load Data with Python Generators
If you want to use generators, you can refer to the following code.
End of explanation
"""
|
hetaodie/hetaodie.github.io | assets/media/uda-ml/supervisedlearning/jc/为慈善机构寻找捐助者/charity_finish/charity/titanic_survival_exploration/titanic_survival_exploration.ipynb | mit | # 检查你的Python版本
from sys import version_info
if version_info.major != 2 and version_info.minor != 7:
raise Exception('请使用Python 2.7来完成此项目')
import numpy as np
import pandas as pd
# 数据可视化代码
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# 加载数据集
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# 显示数据列表中的前几项乘客数据
display(full_data.head())
"""
Explanation: 机器学习工程师纳米学位
机器学习基础
项目 0: 预测泰坦尼克号乘客生还率
1912年,泰坦尼克号在第一次航行中就与冰山相撞沉没,导致了大部分乘客和船员身亡。在这个入门项目中,我们将探索部分泰坦尼克号旅客名单,来确定哪些特征可以最好地预测一个人是否会生还。为了完成这个项目,你将需要实现几个基于条件的预测并回答下面的问题。我们将根据代码的完成度和对问题的解答来对你提交的项目的进行评估。
提示:这样的文字将会指导你如何使用 iPython Notebook 来完成项目。
点击这里查看本文件的英文版本。
了解数据
当我们开始处理泰坦尼克号乘客数据时,会先导入我们需要的功能模块以及将数据加载到 pandas DataFrame。运行下面区域中的代码加载数据,并使用 .head() 函数显示前几项乘客数据。
提示:你可以通过单击代码区域,然后使用键盘快捷键 Shift+Enter 或 Shift+ Return 来运行代码。或者在选择代码后使用播放(run cell)按钮执行代码。像这样的 MarkDown 文本可以通过双击编辑,并使用这些相同的快捷键保存。Markdown 允许你编写易读的纯文本并且可以转换为 HTML。
End of explanation
"""
# 从数据集中移除 'Survived' 这个特征,并将它存储在一个新的变量中。
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# 显示已移除 'Survived' 特征的数据集
display(data.head())
"""
Explanation: 从泰坦尼克号的数据样本中,我们可以看到船上每位旅客的特征
Survived:是否存活(0代表否,1代表是)
Pclass:社会阶级(1代表上层阶级,2代表中层阶级,3代表底层阶级)
Name:船上乘客的名字
Sex:船上乘客的性别
Age:船上乘客的年龄(可能存在 NaN)
SibSp:乘客在船上的兄弟姐妹和配偶的数量
Parch:乘客在船上的父母以及小孩的数量
Ticket:乘客船票的编号
Fare:乘客为船票支付的费用
Cabin:乘客所在船舱的编号(可能存在 NaN)
Embarked:乘客上船的港口(C 代表从 Cherbourg 登船,Q 代表从 Queenstown 登船,S 代表从 Southampton 登船)
因为我们感兴趣的是每个乘客或船员是否在事故中活了下来。可以将 Survived 这一特征从这个数据集移除,并且用一个单独的变量 outcomes 来存储。它也做为我们要预测的目标。
运行该代码,从数据集中移除 Survived 这个特征,并将它存储在变量 outcomes 中。
End of explanation
"""
def accuracy_score(truth, pred):
    """Return a message reporting the accuracy of `pred` against `truth`.

    Both arguments are pandas Series; if their lengths differ, an error
    message is returned instead of a score.
    """
    # Guard clause: a score only makes sense for matching lengths.
    if len(truth) != len(pred):
        return "Number of predictions does not match number of outcomes!"
    # Element-wise equality gives a boolean Series; its mean is the
    # fraction of correct predictions.
    pct_correct = (truth == pred).mean() * 100
    return "Predictions have an accuracy of {:.2f}%.".format(pct_correct)
# Sanity-check the 'accuracy_score' function on the first five passengers.
predictions = pd.Series(np.ones(5, dtype = int)) # all five predicted as 1, i.e. survived
# NOTE: Python 2 print statement -- this notebook pins Python 2.7.
print accuracy_score(outcomes[:5], predictions)
"""
Explanation: 这个例子展示了如何将泰坦尼克号的 Survived 数据从 DataFrame 移除。注意到 data(乘客数据)和 outcomes (是否存活)现在已经匹配好。这意味着对于任何乘客的 data.loc[i] 都有对应的存活的结果 outcome[i]。
计算准确率
为了验证我们预测的结果,我们需要一个标准来给我们的预测打分。因为我们最感兴趣的是我们预测的准确率,既正确预测乘客存活的比例。运行下面的代码来创建我们的 accuracy_score 函数以对前五名乘客的预测来做测试。
思考题:在前五个乘客中,如果我们预测他们全部都存活,你觉得我们预测的准确率是多少?
End of explanation
"""
def predictions_0(data):
    """Baseline model: predict that no passenger survived.

    Returns a pandas Series of zeros, one entry per row of `data`.
    """
    # Emit 0 ("did not survive") for every passenger in the frame.
    return pd.Series([0 for _ in data.iterrows()])
# Run the all-perished baseline predictor over the full dataset.
predictions = predictions_0(data)
"""
Explanation: 提示:如果你保存 iPython Notebook,代码运行的输出也将被保存。但是,一旦你重新打开项目,你的工作区将会被重置。请确保每次都从上次离开的地方运行代码来重新生成变量和函数。
最简单的预测
如果我们要预测泰坦尼克号上的乘客是否存活,但是我们又对他们一无所知,那么最好的预测就是船上的人无一幸免。这是因为,我们可以假定当船沉没的时候大多数乘客都遇难了。下面的 predictions_0 函数就预测船上的乘客全部遇难。
End of explanation
"""
# Report the accuracy of the all-perished baseline (Python 2 print statement).
print accuracy_score(outcomes, predictions)
"""
Explanation: 问题1:对比真实的泰坦尼克号的数据,如果我们做一个所有乘客都没有存活的预测,这个预测的准确率能达到多少?
回答: 请用预测结果来替换掉这里的文字
提示:运行下面的代码来查看预测的准确率。
End of explanation
"""
# Bar chart of survival outcomes broken down by passenger sex.
survival_stats(data, outcomes, 'Sex')
"""
Explanation: 考虑一个特征进行预测
我们可以使用 survival_stats 函数来看看 Sex 这一特征对乘客的存活率有多大影响。这个函数定义在名为 titanic_visualizations.py 的 Python 脚本文件中,我们的项目提供了这个文件。传递给函数的前两个参数分别是泰坦尼克号的乘客数据和乘客的 生还结果。第三个参数表明我们会依据哪个特征来绘制图形。
运行下面的代码绘制出依据乘客性别计算存活率的柱形图。
End of explanation
"""
def predictions_1(data):
    """Single-feature model: predict survival (1) if and only if the
    passenger is female.

    Defect fixed: the TODO body was left as `pass`, so the function
    returned an empty Series regardless of the input.  The rule
    implemented here is the one stated by the original docstring.
    """
    predictions = []
    for _, passenger in data.iterrows():
        # Females predicted to survive, everyone else to perish.
        predictions.append(1 if passenger['Sex'] == 'female' else 0)
    # Return the predictions as a pandas Series.
    return pd.Series(predictions)
# Run the sex-based predictor over the full dataset.
predictions = predictions_1(data)
"""
Explanation: 观察泰坦尼克号上乘客存活的数据统计,我们可以发现大部分男性乘客在船沉没的时候都遇难了。相反的,大部分女性乘客都在事故中生还。让我们以此改进先前的预测:如果乘客是男性,那么我们就预测他们遇难;如果乘客是女性,那么我们预测他们在事故中活了下来。
将下面的代码补充完整,让函数可以进行正确预测。
提示:您可以用访问 dictionary(字典)的方法来访问船上乘客的每个特征对应的值。例如, passenger['Sex'] 返回乘客的性别。
End of explanation
"""
# Survival by age, restricted to male passengers only.
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
"""
Explanation: 问题2:当我们预测船上女性乘客全部存活,而剩下的人全部遇难,那么我们预测的准确率会达到多少?
回答: 用预测结果来替换掉这里的文字
提示:你需要在下面添加一个代码区域,实现代码并运行来计算准确率。
考虑两个特征进行预测
仅仅使用乘客性别(Sex)这一特征,我们预测的准确性就有了明显的提高。现在再看一下使用额外的特征能否更进一步提升我们的预测准确度。例如,综合考虑所有在泰坦尼克号上的男性乘客:我们是否找到这些乘客中的一个子集,他们的存活概率较高。让我们再次使用 survival_stats 函数来看看每位男性乘客的年龄(Age)。这一次,我们将使用第四个参数来限定柱形图中只有男性乘客。
运行下面这段代码,把男性基于年龄的生存结果绘制出来。
End of explanation
"""
def predictions_2(data):
    """Two-feature model: predict survival (1) for every female
    passenger and for every male passenger younger than 10.

    Defect fixed: the TODO body was left as `pass`, so the function
    returned an empty Series regardless of the input.  The rule
    implemented here is the one stated by the original docstring.
    """
    predictions = []
    for _, passenger in data.iterrows():
        # Female, or any passenger under 10 (covers "male and < 10"
        # since females already survive).  NaN ages compare False.
        if passenger['Sex'] == 'female' or passenger['Age'] < 10:
            predictions.append(1)
        else:
            predictions.append(0)
    # Return the predictions as a pandas Series.
    return pd.Series(predictions)
# Run the sex+age predictor over the full dataset.
predictions = predictions_2(data)
"""
Explanation: 仔细观察泰坦尼克号存活的数据统计,在船沉没的时候,大部分小于10岁的男孩都活着,而大多数10岁以上的男性都随着船的沉没而遇难。让我们继续在先前预测的基础上构建:如果乘客是女性,那么我们就预测她们全部存活;如果乘客是男性并且小于10岁,我们也会预测他们全部存活;所有其它我们就预测他们都没有幸存。
将下面缺失的代码补充完整,让我们的函数可以实现预测。
提示: 您可以用之前 predictions_1 的代码作为开始来修改代码,实现新的预测函数。
End of explanation
"""
# Survival by age for male passengers under 18.
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age < 18"])
"""
Explanation: 问题3:当预测所有女性以及小于10岁的男性都存活的时候,预测的准确率会达到多少?
回答: 用预测结果来替换掉这里的文字
提示:你需要在下面添加一个代码区域,实现代码并运行来计算准确率。
你自己的预测模型
添加年龄(Age)特征与性别(Sex)的结合比单独使用性别(Sex)也提高了不少准确度。现在该你来做预测了:找到一系列的特征和条件来对数据进行划分,使得预测结果提高到80%以上。这可能需要多个特性和多个层次的条件语句才会成功。你可以在不同的条件下多次使用相同的特征。Pclass,Sex,Age,SibSp 和 Parch 是建议尝试使用的特征。
使用 survival_stats 函数来观测泰坦尼克号上乘客存活的数据统计。
提示: 要使用多个过滤条件,把每一个条件放在一个列表里作为最后一个参数传递进去。例如: ["Sex == 'male'", "Age < 18"]
End of explanation
"""
def predictions_3(data):
    """Exercise template: combine several features (e.g. Sex, Age,
    Pclass, SibSp, Parch) to reach at least 80% prediction accuracy.

    NOTE: intentionally left unimplemented for the learner -- with the
    `pass` body below the function returns an empty Series.
    """
    predictions = []
    for _, passenger in data.iterrows():
        # TODO 3
        # Remove the `pass` statement below and
        # add your own prediction conditions here.
        pass
    # Return the predictions as a pandas Series.
    return pd.Series(predictions)
# Run the multi-feature predictor over the full dataset.
predictions = predictions_3(data)
"""
Explanation: 当查看和研究了图形化的泰坦尼克号上乘客的数据统计后,请补全下面这段代码中缺失的部分,使得函数可以返回你的预测。
在到达最终的预测模型前请确保记录你尝试过的各种特征和条件。
提示: 您可以用之前 predictions_2 的代码作为开始来修改代码,实现新的预测函数。
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions | Sessions/Session13/Day2/02-Fast-GPs.ipynb | mit | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
from matplotlib import rcParams
rcParams["figure.dpi"] = 100
rcParams["figure.figsize"] = 12, 4
"""
Explanation: Fast GP implementations
End of explanation
"""
import numpy as np
# Fixed seed so the benchmark data (and likelihood values) are reproducible.
np.random.seed(0)
# Large (10,000-point) synthetic dataset: evenly spaced times,
# standard-normal "observations", and unit per-point uncertainties.
t = np.linspace(0, 10, 10000)
y = np.random.randn(10000)
sigma = np.ones(10000)
"""
Explanation: Benchmarking GP codes
Implemented the right way, GPs can be super fast! Let's compare the time it takes to evaluate our GP likelihood and the time it takes to evaluate the likelihood computed with the snazzy george and celerite packages. We'll learn how to use both along the way. Let's create a large, fake dataset for these tests:
End of explanation
"""
def ExpSquaredCovariance(t, A=1.0, l=1.0, tprime=None):
    """Exponential-squared (RBF) kernel matrix.

    Returns the ``N x M`` covariance between times ``t`` (length N) and
    ``tprime`` (length M; defaults to ``t``), for amplitude ``A`` and
    lengthscale ``l``.
    """
    if tprime is None:
        tprime = t
    # Pairwise differences via broadcasting: entry (i, j) is t[i] - tprime[j].
    dt = np.asarray(t)[:, None] - np.asarray(tprime)[None, :]
    return A ** 2 * np.exp(-0.5 * dt ** 2 / l ** 2)

def ln_gp_likelihood(t, y, sigma=0, A=1.0, l=1.0):
    """Log marginal likelihood of the dataset y(t) under a GP with an
    exponential-squared kernel (amplitude ``A``, lengthscale ``l``) and
    observational uncertainty ``sigma``.
    """
    n = len(t)
    # Kernel plus the white-noise diagonal from the measurement errors.
    cov = ExpSquaredCovariance(t, A=A, l=l) + sigma ** 2 * np.eye(n)
    # Standard GP marginal likelihood: data term, log-determinant
    # (complexity) term, and the Gaussian normalisation constant.
    data_term = -0.5 * np.dot(y.T, np.linalg.solve(cov, y))
    logdet_term = -0.5 * np.linalg.slogdet(cov)[1]
    norm_term = -0.5 * n * np.log(2 * np.pi)
    return data_term + logdet_term + norm_term
"""
Explanation: Our GP
End of explanation
"""
%%time
ln_gp_likelihood(t, y, sigma)
"""
Explanation: Time to evaluate the GP likelihood:
End of explanation
"""
import george
%%time
kernel = george.kernels.ExpSquaredKernel(1.0)
gp = george.GP(kernel)
gp.compute(t, sigma)
%%time
print(gp.log_likelihood(y))
"""
Explanation: george
Let's time how long it takes to do the same operation using the george package (pip install george).
The kernel we'll use is
python
kernel = amp ** 2 * george.kernels.ExpSquaredKernel(tau ** 2)
where amp = 1 and tau = 1 in this case.
To instantiate a GP using george, simply run
python
gp = george.GP(kernel)
The george package pre-computes a lot of matrices that are re-used in different operations, so before anything else, we'll ask it to compute the GP model for our timeseries:
python
gp.compute(t, sigma)
Note that we've only given it the time array and the uncertainties, so as long as those remain the same, you don't have to re-compute anything. This will save you a lot of time in the long run!
Finally, the log likelihood is given by gp.log_likelihood(y).
How do the speeds compare? Did you get the same value of the likelihood?
End of explanation
"""
%%time
gp = george.GP(kernel, solver=george.HODLRSolver)
gp.compute(t, sigma)
%%time
gp.log_likelihood(y)
"""
Explanation: george also offers a fancy GP solver called the HODLR solver, which makes some approximations that dramatically speed up the matrix algebra. Let's instantiate the GP object again by passing the keyword solver=george.HODLRSolver and re-compute the log likelihood. How long did that take? Did we get the same value for the log likelihood?
End of explanation
"""
import celerite
from celerite import terms
%%time
kernel = terms.Matern32Term(np.log(1), np.log(1))
gp = celerite.GP(kernel)
gp.compute(t, sigma)
%%time
gp.log_likelihood(y)
"""
Explanation: celerite
The george package is super useful for GP modeling, and I recommend you read over the docs and examples. It implements several different kernels that come in handy in different situations, and it has support for multi-dimensional GPs. But if all you care about are GPs in one dimension (in this case, we're only doing GPs in the time domain, so we're good), then celerite is what it's all about:
bash
pip install celerite
Check out the docs here, as well as several tutorials. There is also a paper that discusses the math behind celerite. The basic idea is that for certain families of kernels, there exist extremely efficient methods of factorizing the covariance matrices. Whereas GP fitting typically scales with the number of datapoints $N$ as $N^3$, celerite is able to do everything in order $N$ (!!!) This is a huge advantage, especially for datasets with tens or hundreds of thousands of data points. Using george or any homebuilt GP model for datasets larger than about 10,000 points is simply intractable, but with celerite you can do it in a breeze.
Next we repeat the timing tests, but this time using celerite. Note that the Exponential Squared Kernel is not available in celerite, because it doesn't have the special form needed to make its factorization fast. Instead, we'll use the Matern 3/2 kernel, which is qualitatively similar and can be approximated quite well in terms of the celerite basis functions:
python
kernel = celerite.terms.Matern32Term(np.log(1), np.log(1))
Note that celerite accepts the log of the amplitude and the log of the timescale. Other than this, we can compute the likelihood using the same syntax as george.
How much faster did it run? Is the value of the likelihood different from what you found above? Why?
End of explanation
"""
import matplotlib.pyplot as plt
# Load the synthetic transit light curve: time, relative flux, flux error.
t, y, yerr = np.loadtxt("data/sample_transit.txt", unpack=True)
# Black-dot errorbar plot of relative flux vs. time.
plt.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0)
plt.xlabel("time")
plt.ylabel("relative flux");
"""
Explanation: <div style="background-color: #D6EAF8; border-left: 15px solid #2E86C1;">
<h1 style="line-height:2.5em; margin-left:1em;">Exercise (the one and only)</h1>
</div>
Let's use what we've learned about GPs in a real application: fitting an exoplanet transit model in the presence of correlated noise.
Here is a (fictitious) light curve for a star with a transiting planet:
End of explanation
"""
|
yevheniyc/Python | 1m_ML_Security/notebooks/day_1/Worksheet 1 - Working with One Dimensional Data.ipynb | mit | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Worksheet 1: Working with One Dimensional Data
This worksheet covers concepts covered in the first half of Module 1 - Exploratory Data Analysis in One Dimension.
There are many ways to accomplish the tasks that you are presented with, however you will find that by using the techniques covered in class, the exercises should be relatively simple.
Import the Libraries
For this exercise, we will be using:
* Pandas (http://pandas.pydata.org/pandas-docs/stable/)
* Numpy (https://docs.scipy.org/doc/numpy/reference/)
* Matplotlib (http://matplotlib.org/api/pyplot_api.html)
End of explanation
"""
# Sample email addresses for the filtering exercise.
emails = ['alawrence0@prlog.org',
          'blynch1@businessweek.com',
          'mdixon2@cmu.edu',
          'rvasquez3@1688.com',
          'astone4@creativecommons.org',
          'mcarter5@chicagotribune.com',
          'dcole6@vinaora.com',
          'kpeterson7@topsy.com',
          'ewebb8@cnet.com',
          'jtaylor9@google.ru',
          'ecarra@buzzfeed.com',
          'jjonesb@arizona.edu',
          'jbowmanc@disqus.com',
          'eduardo_sanchezd@npr.org',
          'emooree@prweb.com',
          'eberryf@brandeis.edu',
          'sgardnerh@wikipedia.org',
          'balvarezi@delicious.com',
          'blewisj@privacy.gov.au']
# Your code here...
# Keep only accounts whose domain ENDS in .edu, then take the part before '@'.
# Fix: str.contains('.edu') treats the argument as a regex ('.' matches any
# character) and matches anywhere in the address; str.endswith('.edu')
# implements the stated "domains that end in .edu" requirement.
emails_records = pd.Series(emails)
emails_records[emails_records.str.endswith('.edu')].str.split('@').str[0]
"""
Explanation: Exercise 1: Splitting and Filtering a Series
In this exercise, you are given a list of email addresses called emails. Your goal is to find the email accounts from domains that end in .edu. To accomplish this, you will need to:
1. Filter the series to remove the emails that do not end in .edu
2. Extract the accounts.
If you get stuck, refer to the documentation for Pandas string manipulation (http://pandas.pydata.org/pandas-docs/stable/text.html) or the slides. Note that there are various functions to accomplish this task.
End of explanation
"""
# Weights in pounds.
weights = [31.09, 46.48, 24.0, 39.99, 19.33, 39.61, 40.91, 52.24, 30.77, 17.23, 34.87 ]
# Convert each weight from pounds to kilograms (1 lb = 0.45359237 kg).
pd.Series(weights).apply(lambda x: x * 0.45359237)
"""
Explanation: Exercise 2: Applying a Function
In this exercise you are given a list of weights in pounds and a function to convert the measures into kilograms. For this exercise, apply the conversion function to the original series and convert the measures into kilograms.
End of explanation
"""
# Fix: `ipaddress` was used below but never imported anywhere in this
# worksheet (only pandas/numpy/matplotlib are imported at the top), so the
# filter raised NameError at runtime.
import ipaddress
df_hosts = pd.Series([
    '192.168.1.2', '10.10.10.2', '172.143.23.34',
    '34.34.35.34', '172.15.0.1', '172.17.0.1'])
# Keep only the private (non-routable) addresses.
df_hosts[df_hosts.apply(lambda x: ipaddress.ip_address(x).is_private)]
"""
Explanation: Exercise 3: Putting it all together
You are given a Series of IP Addresses and the goal is to limit this data to private IP addresses. Python has an ipaddress module which provides the capability to create, manipulate and operate on IPv4 and IPv6 addresses and networks. Complete documentation is available here: https://docs.python.org/3/library/ipaddress.html.
Here are some examples of how you might use this module:
```python
import ipaddress
myIP = ipaddress.ip_address( '192.168.0.1' )
myNetwork = ipaddress.ip_network( '192.168.0.0/28' )
Check membership in network
if myIP in myNetwork: #This works
print "Yay!"
Loop through CIDR blocks
for ip in myNetwork:
print( ip )
192.168.0.0
192.168.0.1
…
…
192.168.0.13
192.168.0.14
192.168.0.15
Testing to see if an IP is private
if myIP.is_private:
print( "This IP is private" )
else:
print( "Routable IP" )
```
First, write a function which takes an IP address and returns true if the IP is private, false if it is public. HINT: use the ipaddress module.
Next, use this to create a Series of true/false values in the same sequence as your original Series.
Finally, use this to filter out the original Series so that it contains only private IP addresses.
End of explanation
"""
|
jsignell/MpalaTower | inspection/.ipynb_checkpoints/inspect_raw_netcdf-checkpoint.ipynb | mit | usr = 'Julia'
# Per-user Dropbox root that holds the tower data tree.
FILEDIR = 'C:/Users/%s/Dropbox (PE)/KenyaLab/Data/Tower/TowerData/'%usr
# Raw netCDF output inside the Dropbox tree.
NETCDFLOC = FILEDIR + 'raw_netcdf_output/'
# Raw logger data on the external F: drive.
DATALOC = 'F:/towerdata/'
"""
Explanation: Inspect Raw Netcdf
Playing around with efficient ways to merge and view netcdf data from the tower. This ipython notebook depends on the python script of the same name.
End of explanation
"""
import datetime as dt
from inspect_raw_netcdf import *
import matplotlib.pyplot as plt
%matplotlib inline
ds, start, end = process(NETCDFLOC)
L, places, ps, depths, colors, data_options = clean_Table1(ds)
data, data_list = pick_type(L, data_options)
fig = make_plots(FILEDIR,ds,start,end,places,ps,depths,colors,data,data_list)
"""
Explanation: Using the xray dataset directly (only works for Table1)
End of explanation
"""
from __future__ import print_function
import pandas as pd
import datetime as dt
import xray
def one_week(input_dir):
    """Inspect each known data table over a fixed date range.

    Attempts to load and summarise every table listed in ``datas``;
    tables with no data in the interval are reported and skipped.
    Returns the ``(ds, df, params)`` triple of the last table that
    loaded successfully.

    Fixes: ``dt.datetime(2014,01,1)`` used leading-zero integer
    literals, which are a syntax error on Python 3; the bare
    ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    """
    datas = ['lws','licor','Table1','Table1_rain']
    #start = dt.datetime.utcnow()-dt.timedelta(7)
    start = dt.datetime(2014, 1, 1)
    end = dt.datetime(2014, 1, 10)
    for data in datas:
        try:
            ds,df,params = inspect_raw(input_dir,data,start,end)
        except Exception:
            # A missing table should not abort the sweep over the others.
            print('\nThere doesn\'t seem to be any %s data for this interval'%data)
    # NOTE(review): if every table fails, ds/df/params are unbound here and
    # this raises NameError -- behaviour preserved from the original.
    return ds,df,params
def inspect_raw(input_dir, data, start, end):
    """Load the ``data`` table for [start, end] and report its coverage.

    Prints the time span of the loaded dataset together with the
    variables that are entirely null over the interval, then returns
    ``(ds, df, params)``: the raw dataset, a dataframe with all-null
    columns dropped, and the full set of variable names.
    """
    ds = grabDateRange(input_dir, data, start, end)
    # Drop columns that contain nothing but NaN over this interval.
    df = ds.to_dataframe().dropna(axis=1, how='all')
    params = set(ds.vars)
    # Variables declared in the dataset but empty in the dataframe.
    null_params = sorted(params - set(df.columns))
    print('\n%s data ranges from:\n'%data,
          ds.coords['time'].values[0], 'to\n',
          ds.coords['time'].values[-1],
          '\n and contains null values for:' )
    for p in null_params:
        print('  ', p)
    return ds, df, params
"""
Explanation: Using a pandas dataframe with xray
End of explanation
"""
|
muatik/dm | SVM-comparision.ipynb | mit | from sklearn import svm, linear_model, neighbors, ensemble
from sklearn import cross_validation, grid_search
from sklearn import datasets
import numpy as npes
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pylab as plt
import time
from IPython.display import YouTubeVideo
%matplotlib inline
"""
Explanation: Mustafa Atik - SWE 577
End of explanation
"""
# Two toy point clouds: class 1 as yellow circles, class 2 as green crosses.
x1 = [1, 2, 3, 4, 5, 6]
y1 = [12, 18, 14, 8, 10, 6]
x2 = [4, 5, 7, 8]
y2 = [47, 37, 22, 24]
plt.plot(x1, y1, "o", color="yellow")
plt.plot(x2, y2, "x", color="green")
# Three candidate separating lines with different slopes/intercepts.
plt.plot(range(3, 8),[ i * (-14.2) + 100 for i in range(3, 8)], color="blue")
#plt.plot(range(12),[ i * - 4.4 + 45 for i in range(12)], color="blue")
plt.plot(range(9),[ i * - 2.0 + 24 for i in range(9)], color="red")
plt.plot(range(9),[ i * - 4.6 + 44 for i in range(9)], color="k")
plt.title("Figure 1: possible hyperplanes")
"""
Explanation: Support vector machines (SVM)
SVM is a method that is widely used for classification and regression tasks. The most important characteristic of svm is that it tries to find a decision boundary which separates classes with equal distance. SVM decision boundaries are as wide as possible so that's why sometimes this method is called widest street approach, or large margin classifier.
SVM has other nice properties such as it is convex, theoretically motivated, nonlinear with kernels.
Let's have a look at the space shown below.
End of explanation
"""
x1 = [1, 2, 3, 4, 5, 6]
y1 = [12, 18, 14, 8, 10, 6]
x2 = [4, 5, 7, 8]
y2 = [47, 37, 22, 24]
plt.plot(x1, y1, "o", color="yellow")
plt.plot(x2, y2, "x", color="green")
plt.plot([4], [40], "x", color="red")
plt.plot([2], [22], "o", color="red")
plt.plot(range(3, 8),[ i * (-14.2) + 100 for i in range(3, 8)], color="blue")
plt.plot(range(9),[ i * - 4.6 + 44 for i in range(9)], color="k")
plt.plot(range(9),[ i * - 2.0 + 24 for i in range(9)], color="red")
plt.title("Figure 2: misclassification")
"""
Explanation: As it can be seen, all the lines separate the classes perfectly. What's more, we can draw more straight lines still separating perfectly. But only one of them will give us the minimum misclassification error. So which line is that?
End of explanation
"""
x1 = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
y1 = [12.0, 18.0, 14.0, 8.0, 10.0, 6.0]
x2 = [4, 5, 7, 8]
y2 = [47, 37, 22, 24]
plt.plot(x1, y1, "o", color="yellow")
plt.plot(x2, y2, "x", color="green")
plt.plot([4], [40], "x", color="red")
plt.plot([2], [22], "o", color="red")
plt.plot(range(9),[ i * - 4.6 + 54 for i in range(9)], ":", color="blue")
plt.plot(range(9),[ i * - 4.6 + 44 for i in range(9)], color="blue")
plt.plot(range(8),[ i * - 4.6 + 34 for i in range(8)], ":", color="blue")
plt.title("Figure 3: maximum margin")
"""
Explanation: Since the red and blue lines are too close to the classes, they are prone to misclassification. However, intuitively, the black one that has the largest margin to the nearest training data points does a better job at classification.
So next question is how do we get the black line?
End of explanation
"""
# Embed a YouTube video referenced by the notebook.
YouTubeVideo("3liCbRZPrZA")
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris:
# light shades fill decision regions, bold shades mark the data points.
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_estimator(estimator, X, y, title=""):
    """Fit `estimator` on (X, y) and draw its 2-D decision surface.

    X must have exactly two feature columns; the surface is evaluated on
    a 100x100 grid spanning the data range (padded by 0.1) and the
    training points are overlaid on top.
    """
    try:
        X, y = X.values, y.values
    except AttributeError:
        # Inputs are already plain numpy arrays (no .values attribute).
        pass
    estimator.fit(X, y)
    x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
    y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
                         np.linspace(y_min, y_max, 100))
    # Predict a class for every grid cell to colour the regions.
    Z = estimator.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.axis('tight')
    plt.axis('off')
    plt.title(title)
    plt.tight_layout()
def classify(clfs, X, y):
    """Fit every (title, estimator) pair on a fixed 70/30 split and plot
    each decision surface, titled with its held-out accuracy."""
    for name, estimator in clfs:
        # Fixed random_state so every estimator sees the same split.
        split = cross_validation.train_test_split(
            X, y, test_size=0.3, random_state=0)
        X_train, X_test, y_train, y_test = split
        estimator.fit(X_train, y_train)
        score = estimator.score(X_test, y_test)
        plot_estimator(estimator, X, y, "{} - score: {}".format(name, score))
        plt.show()
def timeit(clfs, X, y):
    """Benchmark training and prediction speed of each estimator.

    Each (title, clf) pair is fit 500 times on a fixed 70/30 split, and
    then used to predict every held-out row 500 times.  Returns a list
    of ``[title, training_time, prediction_time]`` rows.

    Fixes: the Python-2-only ``print "..."`` statements were replaced
    with version-agnostic ``print(...)`` calls (identical output), and
    the unused ``score`` local in the prediction loop was removed.
    """
    results = []
    for title, clf in clfs:
        X_train, X_test, y_train, y_test = cross_validation.train_test_split(
            X, y, test_size=0.3, random_state=0)
        trial = 500
        startTime = time.time()
        for _ in range(trial):
            clf.fit(X_train, y_train)
        trainingTime = time.time() - startTime
        print("{} times training {} with the data that has shape {} took {}".format(
            trial, title, X_train.shape, trainingTime))
        startTime = time.time()
        for _ in range(trial):
            # One predict() call per row, on purpose: this measures
            # per-sample prediction latency rather than batch throughput.
            for row in X_test:
                clf.predict(row.reshape(1, -1))
        predictionTime = time.time() - startTime
        print("{} prediction with {} took {}".format(trial * X_test.shape[0], title, predictionTime))
        print("")
        results.append([title, trainingTime, predictionTime])
    return results
def compareWRTScore(X, y):
    """Plot decision surfaces and held-out scores for a fixed roster of
    classifiers on the two-feature dataset (X, y)."""
    roster = [
        ("Logistic Regression", linear_model.LogisticRegression()),
        ("SVM with Poly Kernel", svm.SVC(kernel="poly", degree=3)),
        ("SVM with RBF Kernel", svm.SVC(kernel="rbf", gamma=10)),
        ("SVM with Linear Kernel", svm.SVC(kernel="linear")),
        ("KNN with k=5", neighbors.KNeighborsClassifier()),
        ("Random Forest", ensemble.RandomForestClassifier()),
    ]
    return classify(roster, X, y)
def compareWRTTime(X, y):
    """Benchmark training/prediction time for the same fixed roster of
    classifiers used by compareWRTScore, on the dataset (X, y)."""
    roster = [
        ("Logistic Regression", linear_model.LogisticRegression()),
        ("SVM with Poly Kernel", svm.SVC(kernel="poly", degree=3)),
        ("SVM with RBF Kernel", svm.SVC(kernel="rbf", gamma=10)),
        ("SVM with Linear Kernel", svm.SVC(kernel="linear")),
        ("KNN with k=5", neighbors.KNeighborsClassifier()),
        ("Random Forest", ensemble.RandomForestClassifier()),
    ]
    return timeit(roster, X, y)
"""
Explanation: Let w is a vector perpendicular to the median line. But we do not know its length yet. And u is an unknown example. We project the u vector down on to the w vector so that we have a distance. It goes further out, it ends up on the right side of the street. Or it is a short distance which means it is on the left side. So we can formulate it like this:
$$ \vec{w} \cdot \vec{u} \geq c $$
or can be rewritten as: $$ c = -b $$
$$ \vec{w} \cdot \vec{u}{+} + b \geq 1 \ \ \ {{for + samples}}$$
$$ \vec{w} \cdot \vec{u}{-} + b \leq -1 \ \ \ {{for - samples}}$$
to make it mathematically convenient, reformulate it as:
$$ y \in \left { -1, 1 \right } \ \ \ and \ \ \ y(\vec{w} \cdot \vec{u} + b) + 1 = 0 $$
It is equal to 0 when the sample is on the street.
$$ width\ of\ the\ street = (\vec{x}{+} - \vec{x}{-}) \cdot \frac{\vec{w}}{\left \| \vec{w} \right \|}$$
$$ width\ of\ the\ street = \frac{2}{\left \| \vec{w} \right \|}$$
So we want to maximize the margin, or the minimize the following:
$$ \frac{1}{2} \left \| \vec{w} \right \|^{2} $$
So far, we've come up with the two constraints. By using Lagrange multipliers, we can get a formula which gives maximum/minimum without thinking about the constraints anymore.
$$ L = \frac{1}{2} \left \| \vec{w} \right \|^{2} -
\sum \lambda \left [ y_{i} (\vec{w} \cdot \vec{x_{i}+b} - 1 ) \right ] $$
After takin the derivates of L with respect to w and b:
$$ \frac{\partial L}{\partial \vec{w}} = \vec{w} - \sum \lambda \cdot y_{i} \cdot \vec{x_{i}} $$
$$ \frac{\partial L}{\partial b} = - \sum \vec{x_{i}} \cdot y_{i} = 0$$
Now the decision rule will be as:
$$ \sum \lambda {i} \cdot y{i} \cdot \vec{x_{i}} \cdot \vec{u} + b \geq 0 $$
Kernel trick
SVM can also work with non-linear data with the help of kernel trick. Kernel trick maps data points to a higher dimension in which classification problem becomes linearly separable.
Most popular kernel tricks are:
linear: $$\langle x, x'\rangle$$.
polynomial: $$(\gamma \langle x, x'\rangle + r)^d.d$$ is specified by keyword degree, r by coef0.
rbf: $$\exp(-\gamma |x-x'|^2). \gamma$$ is specified by keyword gamma, must be greater than 0.
sigmoid: $$(\tanh(\gamma \langle x,x'\rangle + r))$$, where r is specified by coef0.
End of explanation
"""
# Iris benchmark, using only the first two features so the decision
# surfaces can be drawn in 2-D.
iris = datasets.load_iris()
compareWRTScore(iris.data[:,:2], iris.target)
"""
Explanation: Comparing classifiers
1. Iris data
End of explanation
"""
# Whitespace-separated wine dataset; includes a 'region' label column.
wine = pd.read_table("data/wine/wine.txt", sep='\s+')
attributes = ['Alcohol',
'Malic acid',
'Ash',
'Alcalinity of ash',
'Magnesium',
'Total phenols',
'Flavanoids',
'Nonflavanoid phenols',
'Proanthocyanins',
'Color intensity',
'Hue',
'OD280/OD315 of diluted wines',
'Proline']
# Separate the label ('region', the grape origin) from the features.
grape = wine.pop('region')
y = grape.values
wine.columns = attributes
# Use just two features (Alcohol, Proline) so decision surfaces are 2-D.
X = wine[['Alcohol', 'Proline']].values
compareWRTScore(X, y)
"""
Explanation: 2. Wine Classification
End of explanation
"""
# Concentric-circles data: a 2-class problem that is NOT linearly
# separable, so kernel choice matters.
from sklearn.datasets.samples_generator import make_circles
X, y = make_circles(100, factor=.3, noise=.3)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')
compareWRTScore(X, y)
"""
Explanation: 3. Not-linearly separable data
End of explanation
"""
# Run the timing benchmark and chart training vs. prediction time per model.
results = compareWRTTime(X, y)
results = np.array(results)
index = np.arange(results.shape[0])
bar_width = 0.30
opacity = 0.6
# Column 1 = training time (red bars), column 2 = prediction time (blue bars).
rects1 = plt.bar(index, results[:, 1], bar_width, color="r", alpha=opacity)
rects2 = plt.bar(index + bar_width, results[:, 2], bar_width, color="b", alpha=opacity)
plt.legend((rects1[0], rects2[0]), ('Training', 'Prediction'), loc=2)
plt.xticks(index + bar_width, results[:,0], rotation=70)
plt.grid(True)
# Training/prediction times of the first roster entry (Logistic Regression).
results[0, 1], results[0, 2]
"""
Explanation: Benchmark
End of explanation
"""
df = pd.read_csv("data/tinanic/train.csv")
"""
VARIABLE DESCRIPTIONS:
survival Survival
(0 = No; 1 = Yes)
pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare
cabin Cabin
embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
SPECIAL NOTES:
Pclass is a proxy for socio-economic status (SES)
1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower
Age is in Years; Fractional if Age less than One (1)
If the Age is Estimated, it is in the form xx.5
With respect to the family relation variables (i.e. sibsp and parch)
some relations were ignored. The following are the definitions used
for sibsp and parch.
Sibling: Brother, Sister, Stepbrother, or Stepsister of Passenger Aboard Titanic
Spouse: Husband or Wife of Passenger Aboard Titanic (Mistresses and Fiances Ignored)
Parent: Mother or Father of Passenger Aboard Titanic
Child: Son, Daughter, Stepson, or Stepdaughter of Passenger Aboard Titanic
Other family relatives excluded from this study include cousins,
nephews/nieces, aunts/uncles, and in-laws. Some children travelled
only with a nanny, therefore parch=0 for them. As well, some
travelled with very close friends or neighbors in a village, however,
the definitions do not support such relations.
"""
# Quick look: a random sample of rows, then column dtypes and null counts.
df.sample(6)
df.info()
"""
Explanation: 4. Case study: Predict survivors in Titanic data
End of explanation
"""
# Survival counts and rates by port of embarkation, followed by several
# views of the age distribution split by survival outcome.
sns.countplot(data=df, hue="Survived", x="Embarked")
sns.barplot(data=df, x="Embarked", y="Survived")
sns.countplot(data=df, x="Age")
sns.boxplot(data=df, x="Survived", y="Age")
sns.stripplot(x="Survived", y="Age", data=df, jitter=True, edgecolor="gray")
sns.FacetGrid(df, hue="Survived", size=6).map(sns.kdeplot, "Age").add_legend()
"""
Explanation: Embarked feature
End of explanation
"""
# Gender distribution, then survival counts broken down by gender.
sns.countplot(data=df, x="Sex")
sns.countplot(data=df, hue="Survived", x="Sex")
"""
Explanation: Sex feature
First, let's have a look at which gender is dominant in the population by a countplot.
End of explanation
"""
# Mean survival rate per gender (bar height = fraction that survived).
sns.barplot(data=df, x="Sex", y="Survived")
"""
Explanation: According to sex vs. survived chart, most of men did not survived while the majority of women did. The following chart also supports this claim by showing us that 70% of women survived.
End of explanation
"""
# Survival counts by passenger class, then by gender within 1st class.
sns.countplot(data=df, hue="Survived", x="Pclass")
sns.countplot(data=df[df['Pclass'] == 1], hue="Survived", x="Sex")
"""
Explanation: The inference is that this sex feature can be used in a classification task to determine whether a given person survived or not.
Pclass feature
This stands for Passenger Class. There are three classes as 1 = 1st; 2 = 2nd; 3 = 3rd. We can make a guess saying most probably the first class passengers survived thanks to their nobility. This guess is based on the domain knowledge; in that time classes among the people is more obvious and severe than now. Let's have a look at the data to see the truth.
End of explanation
"""
# Gender split within 3rd class, then survival rate by gender and class.
sns.countplot(data=df[df['Pclass'] == 3], hue="Survived", x="Sex")
sns.barplot(x="Sex", y="Survived", hue="Pclass", data=df);
def titanicFit(df):
    """Grid-search an SVM survival classifier on the Titanic frame.

    Encodes Sex (male=1, female=0) and Embarked (S=1, C=2, Q=3) as
    integers, fills missing ages with the mean age, then runs a 5-fold
    GridSearchCV over linear and RBF SVMs and returns the fitted
    search object.

    Fixes: the hyper-parameter grid was corrupted by stray quote
    characters (a syntax error); feature encoding now happens on an
    explicit copy so it no longer mutates a view of ``df`` (pandas
    SettingWithCopy); the dead ``clf = svm.SVC(kernel="rbf")``
    assignment was removed.
    """
    X = df[["Sex", "Age", "Pclass", "Embarked"]].copy()
    y = df["Survived"]
    X.Age.fillna(X.Age.mean(), inplace=True)
    X.Sex.replace(to_replace="male", value=1, inplace=True)
    X.Sex.replace(to_replace="female", value=0, inplace=True)
    X.Embarked.replace(to_replace="S", value=1, inplace=True)
    X.Embarked.replace(to_replace="C", value=2, inplace=True)
    X.Embarked.replace(to_replace="Q", value=3, inplace=True)
    # Held-out split kept for ad-hoc scoring (see commented line below).
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(
        X, y, test_size=0.3, random_state=0)
    parameters = [
        {
            "kernel": ["linear"]
        }, {
            "kernel": ["rbf"],
            "C": [1, 10, 100],
            "gamma": [0.001, 0.002, 0.01]
        }
    ]
    clf = grid_search.GridSearchCV(
        svm.SVC(), param_grid=parameters, cv=5).fit(X, y)
    return clf
    # print clf.score(X_test, y_test)
# Fit only on rows whose embarkation port is known, then inspect the
# cross-validation score of every hyper-parameter candidate.
clf = titanicFit(df[df.Embarked.isnull() == False])
clf.grid_scores_
"""
Explanation: The chart above corrects the guess: unfortunatelly, passenger class plays a crucial role.
End of explanation
"""
|
WNoxchi/Kaukasos | FADL1/lesson3-rossman-Copy1-old.ipynb | mit | %matplotlib inline
%reload_ext autoreload
%autoreload 2
from fastai.structured import *
from fastai.column_data import *
np.set_printoptions(threshold=50, edgeitems=20)
PATH='data/rossmann/'
"""
Explanation: Structured and time series data
This notebook contains an implementation of the third place result in the Rossman Kaggle competition as detailed in Guo/Berkhahn's Entity Embeddings of Categorical Variables.
The motivation behind exploring this architecture is it's relevance to real-world application. Most data used for decision making day-to-day in industry is structured and/or time-series data. Here we explore the end-to-end process of using neural networks with practical structured data problems.
End of explanation
"""
def concat_csvs(dirname):
    """Concatenate every CSV under ``PATH/dirname`` into ``PATH/dirname.csv``.

    The merged file gains a leading ``file`` column holding each source
    file's path stem.  The header row is taken from the first input file
    only, and a newline is emitted after each input file's rows
    (preserved from the original behaviour).
    """
    path = f'{PATH}{dirname}'
    filenames = glob.glob(f"{path}/*.csv")
    header_done = False
    with open(f"{path}.csv", "w") as merged:
        for filename in filenames:
            # Path stem (everything before the first '.') tags each row.
            stem = filename.split(".")[0]
            with open(filename) as src:
                first_line = src.readline()
                if not header_done:
                    header_done = True
                    merged.write("file," + first_line)
                for row in src:
                    merged.write(stem + "," + row)
                merged.write("\n")
# concat_csvs('googletrend')
# concat_csvs('weather')
"""
Explanation: Create datasets
In addition to the provided data, we will be using external datasets put together by participants in the Kaggle competition. You can download all of them here.
For completeness, the implementation used to put them together is included below.
End of explanation
"""
# CSV base-names (relative to PATH) that make up the feature space.
table_names = ['train', 'store', 'store_states', 'state_names',
               'googletrend', 'weather', 'test']
"""
Explanation: Feature Space:
* train: Training set provided by competition
* store: List of stores
* store_states: mapping of store to the German state they are in
* List of German state names
* googletrend: trend of certain google keywords over time, found by users to correlate well w/ given data
* weather: weather
* test: testing set
End of explanation
"""
# Read every table into a DataFrame, in the same order as table_names.
tables = [pd.read_csv(f'{PATH}{fname}.csv', low_memory=False) for fname in table_names]
from IPython.display import HTML
"""
Explanation: We'll be using the popular data manipulation framework pandas. Among other things, pandas allows you to manipulate tables/data frames in python as one would in a database.
We're going to go ahead and load all of our csv's as dataframes into the list tables.
End of explanation
"""
# Peek at the first rows of each table (display() is IPython's rich renderer).
for t in tables: display(t.head())
"""
Explanation: We can use head() to get a quick look at the contents of each table:
* train: Contains store information on a daily basis, tracks things like sales, customers, whether that day was a holiday, etc.
* store: general info about the store including competition, etc.
* store_states: maps store to state it is in
* state_names: Maps state abbreviations to names
* googletrend: trend data for particular week/state
* weather: weather conditions for each state
* test: Same as training table, w/o sales and customers
End of explanation
"""
# Per-column aggregate statistics for every table (pandas-summary package).
for t in tables: display(DataFrameSummary(t).summary())
"""
Explanation: This is very representative of a typical industry dataset.
The following returns summarized aggregate information to each table accross each field.
End of explanation
"""
# Unpack the list of DataFrames into named variables (same order as table_names).
train, store, store_states, state_names, googletrend, weather, test = tables
# Show the train/test sample sizes.
len(train),len(test)
"""
Explanation: Data Cleaning / Feature Engineering
As a structured data problem, we necessarily have to go through all the cleaning and feature engineering, even though we're using a neural network.
End of explanation
"""
# Collapse the StateHoliday codes ('a'/'b'/'c'/'0') into a boolean:
# True for any holiday, False for '0' (no holiday).
train.StateHoliday = train.StateHoliday!='0'
test.StateHoliday = test.StateHoliday!='0'
"""
Explanation: We turn state Holidays to booleans, to make them more convenient for modeling. We can do calculations on pandas fields using notation very similar (often identical) to numpy.
End of explanation
"""
def join_df(left, right, left_on, right_on=None, suffix='_y'):
    """Left-outer-join ``right`` onto ``left``.

    ``left_on``/``right_on`` name the join keys on each side; when
    ``right_on`` is omitted, the same key name is used on both sides.
    Duplicate column names keep the left-hand name untouched and get
    ``suffix`` appended on the right-hand side.
    """
    keys_right = left_on if right_on is None else right_on
    return left.merge(right, how='left', left_on=left_on,
                      right_on=keys_right, suffixes=("", suffix))
"""
Explanation: join_df is a function for joining tables on specific fields. By default, we'll be doing a left outer join of right on the left argument using the given fields for each table.
Pandas does joins using the merge method. The suffixes argument describes the naming convention for duplicate fields. We've elected to leave the duplicate field names on the left untouched, and append a "_y" to those on the right.
End of explanation
"""
# Attach full state names to the weather table via its 'file' column.
weather = join_df(weather, state_names, "file", "StateName")
"""
Explanation: Join weather/state names.
End of explanation
"""
# Derive Date (start of the trend week) and State from the googletrend fields.
googletrend['Date'] = googletrend.week.str.split(' - ', expand=True)[0]
googletrend['State'] = googletrend.file.str.split('_', expand=True)[2]
# Normalize the state code 'NI' to 'HB,NI' as used by the other tables.
googletrend.loc[googletrend.State=='NI', "State"] = 'HB,NI'
"""
Explanation: In pandas you can add new columns to a dataframe by simply defining it. We'll do this for googletrends by extracting dates and state names from the given data and adding those columns.
We're also going to replace all instances of state name 'NI' to match the usage in the rest of the data: 'HB,NI'. This is a good opportunity to highlight pandas indexing. We can use .loc[rows, cols] to select a list of rows and a list of columns from the dataframe. In this case, we're selecting rows w/ statename 'NI' by using a boolean list googletrend.State=='NI' and selecting "State".
End of explanation
"""
# Expand every table's "Date" into derived categorical fields (Year, Month,
# Week, Day, ...); drop=False keeps the original Date column.
# NOTE: the original cell called add_datepart on googletrend twice -- the
# redundant second call has been removed.
add_datepart(weather, "Date", drop=False)
add_datepart(googletrend, "Date", drop=False)
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
"""
Explanation: The following extracts particular date fields from a complete datetime for the purpose of constructing categoricals.
You should always consider this feature extraction step when working with date-time. Without expanding your date-time into these additional fields, you can't capture any trend/cyclical behavior as a function of time at any of these granularities. We'll add to every table with a date field.
End of explanation
"""
# Pull out the country-wide trend series (file == 'Rossmann_DE') for later use.
trend_de = googletrend[googletrend.file == 'Rossmann_DE']
"""
Explanation: The Google trends data has a special category for the whole of Germany - we'll pull that out so we can use it explicitly.
End of explanation
"""
# Chain of left joins building the single modeling table; after each join the
# null count of a right-hand column is inspected to verify nothing was lost.
store = join_df(store, store_states, "Store")
len(store[store.State.isnull()])
joined = join_df(train, store, "Store")
joined_test = join_df(test, store, "Store")
len(joined[joined.StoreType.isnull()]),len(joined_test[joined_test.StoreType.isnull()])
joined = join_df(joined, googletrend, ["State","Year", "Week"])
joined_test = join_df(joined_test, googletrend, ["State","Year", "Week"])
len(joined[joined.trend.isnull()]),len(joined_test[joined_test.trend.isnull()])
# Country-wide trend joined with a '_DE' suffix to distinguish it.
joined = joined.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
joined_test = joined_test.merge(trend_de, 'left', ["Year", "Week"], suffixes=('', '_DE'))
len(joined[joined.trend_DE.isnull()]),len(joined_test[joined_test.trend_DE.isnull()])
joined = join_df(joined, weather, ["State","Date"])
joined_test = join_df(joined_test, weather, ["State","Date"])
len(joined[joined.Mean_TemperatureC.isnull()]),len(joined_test[joined_test.Mean_TemperatureC.isnull()])
# Drop the duplicate '_y' columns created by the joins; the inner membership
# check guards against a column already removed while iterating.
for df in (joined, joined_test):
    for c in df.columns:
        if c.endswith('_y'):
            if c in df.columns: df.drop(c, inplace=True, axis=1)
"""
Explanation: Now we can outer join all of our data into a single dataframe. Recall that in outer joins everytime a value in the joining field on the left table does not have a corresponding value on the right table, the corresponding row in the new table has Null values for all right table fields. One way to check that all records are consistent and complete is to check for Null values post-join, as we do here.
Aside: Why not just do an inner join?
If you are assuming that all records are complete and match on the field you desire, an inner join will do the same thing as an outer join. However, in the event you are wrong or a mistake is made, an outer join followed by a null-check will catch it. (Comparing before/after # of rows for inner join is equivalent, but requires keeping track of before/after row #'s. Outer join is easier.)
End of explanation
"""
# Fill missing competition/promo start dates with sentinel values (year 1900,
# month 1, week 1) that do not otherwise occur in the data.
for df in (joined,joined_test):
    df['CompetitionOpenSinceYear'] = df.CompetitionOpenSinceYear.fillna(1900).astype(np.int32)
    df['CompetitionOpenSinceMonth'] = df.CompetitionOpenSinceMonth.fillna(1).astype(np.int32)
    df['Promo2SinceYear'] = df.Promo2SinceYear.fillna(1900).astype(np.int32)
    df['Promo2SinceWeek'] = df.Promo2SinceWeek.fillna(1).astype(np.int32)
"""
Explanation: Next we'll fill in missing values to avoid complications with NA's. NA (not available) is how Pandas indicates missing values; many models have problems when missing values are present, so it's always important to think about how to deal with them. In these cases, we are picking an arbitrary signal value that doesn't otherwise appear in the data.
End of explanation
"""
# Build a CompetitionOpenSince date (day fixed to the 15th, since only
# year/month are known) and the number of days the competition has been open.
for df in (joined,joined_test):
    df["CompetitionOpenSince"] = pd.to_datetime(dict(year=df.CompetitionOpenSinceYear,
                                                     month=df.CompetitionOpenSinceMonth, day=15))
    df["CompetitionDaysOpen"] = df.Date.subtract(df.CompetitionOpenSince).dt.days
"""
Explanation: Next we'll extract features "CompetitionOpenSince" and "CompetitionDaysOpen". Note the use of apply() in mapping a function across dataframe values.
End of explanation
"""
# Clamp erroneous/outlying values: negative spans (competition opens in the
# future) and rows using the 1900 sentinel year are zeroed out.
for df in (joined,joined_test):
    df.loc[df.CompetitionDaysOpen<0, "CompetitionDaysOpen"] = 0
    df.loc[df.CompetitionOpenSinceYear<1990, "CompetitionDaysOpen"] = 0
"""
Explanation: We'll replace some erroneous / outlying data.
End of explanation
"""
# Convert to months and cap at 24 to limit the number of categorical levels.
for df in (joined,joined_test):
    df["CompetitionMonthsOpen"] = df["CompetitionDaysOpen"]//30
    df.loc[df.CompetitionMonthsOpen>24, "CompetitionMonthsOpen"] = 24
joined.CompetitionMonthsOpen.unique()
"""
Explanation: We add "CompetitionMonthsOpen" field, limiting the maximum to 2 years to limit number of unique categories.
End of explanation
"""
# Same treatment for the Promo2 start: take the Monday of the ISO week and
# derive days/weeks since the promo started, clamped to [0, 25] weeks.
# pd.to_datetime parses the datetime.date objects returned by Week.monday()
# directly; the old `.astype(pd.datetime)` relied on the deprecated
# `pd.datetime` alias (removed in pandas 2.0) and was redundant.
for df in (joined,joined_test):
    df["Promo2Since"] = pd.to_datetime(df.apply(lambda x: Week(
        x.Promo2SinceYear, x.Promo2SinceWeek).monday(), axis=1))
    df["Promo2Days"] = df.Date.subtract(df["Promo2Since"]).dt.days
for df in (joined,joined_test):
    df.loc[df.Promo2Days<0, "Promo2Days"] = 0
    df.loc[df.Promo2SinceYear<1990, "Promo2Days"] = 0
    df["Promo2Weeks"] = df["Promo2Days"]//7
    df.loc[df.Promo2Weeks<0, "Promo2Weeks"] = 0
    df.loc[df.Promo2Weeks>25, "Promo2Weeks"] = 25
df.Promo2Weeks.unique()
# Checkpoint the engineered tables to feather.
joined.to_feather(f'{PATH}joined')
joined_test.to_feather(f'{PATH}joined_test')
"""
Explanation: Same process for Promo dates.
End of explanation
"""
def get_elapsed(fld, pre):
    """Add a column ``pre+fld`` to the module-global ``df`` (which must be
    sorted by Store, then Date) holding the number of days since column
    ``fld`` was last truthy for that store.

    NOTE(review): before the first occurrence of `fld` per store, `last_date`
    is NaT (bare np.datetime64()), so the subtraction yields NaN days and
    `.astype(int)` produces an arbitrary large integer -- confirm that
    downstream fillna/normalization handles those rows.
    """
    day1 = np.timedelta64(1, 'D')
    last_date = np.datetime64()  # NaT sentinel until fld is first seen
    last_store = 0
    res = []
    for s,v,d in zip(df.Store.values,df[fld].values, df.Date.values):
        if s != last_store:
            # New store encountered: reset the tracker.
            last_date = np.datetime64()
            last_store = s
        if v: last_date = d
        res.append(((d-last_date).astype('timedelta64[D]') / day1).astype(int))
    df[pre+fld] = res
"""
Explanation: Durations
It is common when working with time series data to extract data that explains relationships across rows as opposed to columns, e.g.:
* Running averages
* Time until next event
* Time since last event
This is often difficult to do with most table manipulation frameworks, since they are designed to work with relationships across columns. As such, we've created a class to handle this type of data.
We'll define a function get_elapsed for cumulative counting across a sorted dataframe. Given a particular field fld to monitor, this function will start tracking time since the last occurrence of that field. When the field is seen again, the counter is set to zero.
Upon initialization, this will result in datetime na's until the field is encountered. This is reset every time a new store is seen. We'll see how to use this shortly.
End of explanation
"""
# Subset of columns the elapsed-time features are computed from.
columns = ["Date", "Store", "Promo", "StateHoliday", "SchoolHoliday"]
df = train[columns]
# NOTE(review): this immediately overwrites the train subset above -- in the
# original notebook these assignments are run alternately for train and test;
# as a straight script only the test subset survives. Verify intent.
df = test[columns]
"""
Explanation: We'll be applying this to a subset of columns:
End of explanation
"""
fld = 'SchoolHoliday'
# Sort ascending by store/date to count days since the last school holiday...
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
# ...then sort dates descending to count days until the next one.
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
"""
Explanation: Let's walk through an example.
Say we're looking at School Holiday. We'll first sort by Store, then Date, and then call add_elapsed('SchoolHoliday', 'After'):
This will apply to each row with School Holiday:
* A applied to every row of the dataframe in order of store and date
* Will add to the dataframe the days since seeing a School Holiday
* If we sort in the other direction, this will count the days until another holiday.
End of explanation
"""
# Same before/after elapsed-time treatment for state holidays and promos.
fld = 'StateHoliday'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
fld = 'Promo'
df = df.sort_values(['Store', 'Date'])
get_elapsed(fld, 'After')
df = df.sort_values(['Store', 'Date'], ascending=[True, False])
get_elapsed(fld, 'Before')
"""
Explanation: We'll do this for two more fields.
End of explanation
"""
# Index by Date so the rolling-window aggregations below align on time.
df = df.set_index("Date")
"""
Explanation: We're going to set the active index to Date.
End of explanation
"""
# Replace the NaNs left by the elapsed-time calculations with 0.
columns = ['SchoolHoliday', 'StateHoliday', 'Promo']
for prefix in ('Before', 'After'):
    for base in columns:
        col = prefix + base
        df[col] = df[col].fillna(0)
"""
Explanation: Then set null values from elapsed field calculations to 0.
End of explanation
"""
# 7-day rolling event counts per store: bwd (ascending dates) looks back over
# the previous week, fwd (descending dates) looks forward over the next week.
bwd = df[['Store']+columns].sort_index().groupby("Store").rolling(7, min_periods=1).sum()
fwd = df[['Store']+columns].sort_index(ascending=False
                                       ).groupby("Store").rolling(7, min_periods=1).sum()
"""
Explanation: Next we'll demonstrate window functions in pandas to calculate rolling quantities.
Here we're sorting by date (sort_index()) and counting the number of events of interest (sum()) defined in columns in the following week (rolling()), grouped by Store (groupby()). We do the same in the opposite direction.
End of explanation
"""
# Drop the duplicated 'Store' column (it also lives in the group index) and
# restore flat integer indexes for the merges below. `axis` must be passed by
# keyword: the positional form drop('Store', 1) was deprecated in pandas 1.x
# and removed in pandas 2.0.
bwd.drop('Store', axis=1, inplace=True)
bwd.reset_index(inplace=True)
fwd.drop('Store', axis=1, inplace=True)
fwd.reset_index(inplace=True)
df.reset_index(inplace=True)
"""
Explanation: Next we want to drop the Store indices grouped together in the window function.
Often in pandas, there is an option to do this in place. This is time and memory efficient when working with large datasets.
End of explanation
"""
# Merge the backward/forward rolling sums onto df by store and date, then drop
# the raw event columns (only the _bw/_fw aggregates are kept). `axis` is
# keyword-only since pandas 2.0.
df = df.merge(bwd, 'left', ['Date', 'Store'], suffixes=['', '_bw'])
df = df.merge(fwd, 'left', ['Date', 'Store'], suffixes=['', '_fw'])
df.drop(columns, axis=1, inplace=True)
df.head()
"""
Explanation: Now we'll merge these values onto the df.
End of explanation
"""
# Checkpoint the duration features, then reload them.
df.to_feather(f'{PATH}df')
# pd.read_feather() takes no `index_col` argument (that keyword belongs to
# read_csv); passing it raises TypeError. Feather round-trips with a fresh
# RangeIndex, which is what the joins below expect.
df = pd.read_feather(f'{PATH}df')
df["Date"] = pd.to_datetime(df.Date)
df.columns
joined = join_df(joined, df, ['Store', 'Date'])
joined_test = join_df(joined_test, df, ['Store', 'Date'])
"""
Explanation: It's usually a good idea to back up large tables of extracted / wrangled features before you join them onto another one, that way you can go back to it easily if you need to make changes to it.
End of explanation
"""
# Drop closed/zero-sales days; see the caveat discussed in the text below.
joined = joined[joined.Sales!=0]
"""
Explanation: The authors also removed all instances where the store had zero sales / was closed. We speculate that this may have cost them a higher standing in the competition. One reason this may be the case is that a little exploratory data analysis reveals that there are often periods where stores are closed, typically for refurbishment. Before and after these periods, there are naturally spikes in sales that one might expect. By omitting this data from their training, the authors gave up the ability to leverage information about these periods to predict this otherwise volatile behavior.
End of explanation
"""
# Feather requires a default integer index, so flatten before saving.
joined.reset_index(inplace=True)
joined_test.reset_index(inplace=True)
joined.to_feather(f'{PATH}joined')
joined_test.to_feather(f'{PATH}joined_test')
"""
Explanation: We'll back this up as well.
End of explanation
"""
# Reload the fully engineered feature tables from disk.
joined = pd.read_feather(f'{PATH}joined')
joined_test = pd.read_feather(f'{PATH}joined_test')
# Transposed head: one row per column, first 40 columns.
joined.head().T.head(40)
"""
Explanation: We now have our final set of engineered features.
While these steps were explicitly outlined in the paper, these are all fairly typical feature engineering steps for dealing with time series data and are practical in any similar setting.
Create features
End of explanation
"""
# Columns treated as categoricals (embedded) vs. continuous (normalized).
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
    'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
    'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
    'SchoolHoliday_fw', 'SchoolHoliday_bw']
contin_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
   'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
   'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
   'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
n = len(joined); n
# Dependent variable; the test set gets a dummy 0 so both frames share columns.
dep = 'Sales'
joined_test[dep] = 0
joined = joined[cat_vars+contin_vars+[dep, 'Date']].copy()
joined_test = joined_test[cat_vars+contin_vars+[dep, 'Date', 'Id']].copy()
# Encode categoricals; apply_cats reuses the train category codes on test so
# the integer encodings agree between the two frames.
for v in cat_vars: joined[v] = joined[v].astype('category').cat.as_ordered()
apply_cats(joined_test, joined)
for v in contin_vars:
    joined[v] = joined[v].astype('float32')
    joined_test[v] = joined_test[v].astype('float32')
"""
Explanation: Now that we've engineered all our features, we need to convert to input compatible with a neural network.
This includes converting categorical variables into contiguous integers or one-hot encodings, normalizing continuous features to standard normal, etc...
End of explanation
"""
# Random ~150k-row sample used only for faster experimentation.
idxs = get_cv_idxs(n, val_pct=150000/n)
joined_samp = joined.iloc[idxs].set_index("Date")
samp_size = len(joined_samp); samp_size
"""
Explanation: We're going to run on a sample.
End of explanation
"""
# Alternative to the sampling cell above: use the full dataset.
samp_size = n
joined_samp = joined.set_index("Date")
"""
Explanation: To run on the full dataset, use this instead:
End of explanation
"""
joined_samp.head(2)
# proc_df splits off the target, scales continuous columns and returns the
# NA-fill dictionary and scaling mapper, which are reused on the test set so
# both frames are transformed identically.
df, y, nas, mapper = proc_df(joined_samp, 'Sales', do_scale=True)
# Train on log-sales (the competition metric is RMSPE on sales).
yl = np.log(y)
joined_test = joined_test.set_index("Date")
df_test, _, nas, mapper = proc_df(joined_test, 'Sales', do_scale=True, skip_flds=['Id'],
                                  mapper=mapper, na_dict=nas)
df.head(2)
"""
Explanation: We can now process our data...
End of explanation
"""
# Hold out the most recent 25% of rows (date-ordered) as the validation set.
train_ratio = 0.75
# train_ratio = 0.9
train_size = int(samp_size * train_ratio); train_size
val_idx = list(range(train_size, len(df)))
"""
Explanation: In time series data, cross-validation is not random. Instead, our holdout data is generally the most recent data, as it would be in real application. This issue is discussed in detail in this post on our web site.
One approach is to take the last 25% of rows (sorted by date) as our validation set.
End of explanation
"""
# Validation window matching the test period's length (same weeks of the year).
val_idx = np.flatnonzero(
    (df.index<=datetime.datetime(2014,9,17)) & (df.index>=datetime.datetime(2014,8,1)))
# NOTE(review): this discards the date-based split computed just above --
# presumably a leftover quick-run/debugging line; verify before training.
val_idx=[0]
"""
Explanation: An even better option for picking a validation set is using the exact same length of time period as the test set uses - this is implemented here:
End of explanation
"""
|
yhilpisch/dx | 07_dx_portfolio_risk.ipynb | agpl-3.0 | import dx
import datetime as dt
import time
import numpy as np
"""
Explanation: <img src="http://hilpisch.com/tpq_logo.png" alt="The Python Quants" width="45%" align="right" border="4">
Derivatives Portfolio Risk Statistics
From a risk management perspective it is important to know how sensitive derivatives portfolios are with regard to certain parameter values (market quotes, model assumptions, etc.). This part illustrates how to generate certain risk reports for derivatives_portfolio objects.
End of explanation
"""
# Set up the two geometric-Brownian-motion risk factors and the valuation
# environment they share.
# constant short rate
r = dx.constant_short_rate('r', 0.01)
# market environment
me_gbm_1 = dx.market_environment('gbm_1', dt.datetime(2015, 1, 1))
# geometric Brownian motion
me_gbm_1.add_constant('initial_value', 40.)
me_gbm_1.add_constant('volatility', 0.2)
me_gbm_1.add_constant('currency', 'EUR')
me_gbm_1.add_constant('model', 'gbm')
me_gbm_2 = dx.market_environment('gbm_2', me_gbm_1.pricing_date)
# valuation environment
val_env = dx.market_environment('val_env', dt.datetime(2015, 1, 1))
val_env.add_constant('paths', 25000)
# 25,000 paths
val_env.add_constant('frequency', 'W')
# weekly frequency
val_env.add_curve('discount_curve', r)
val_env.add_constant('starting_date', dt.datetime(2015, 1, 1))
val_env.add_constant('final_date', dt.datetime(2015, 12, 31))
# add valuation environment to market environments
me_gbm_1.add_environment(val_env)
# gbm_2 copies gbm_1's settings, then overrides initial value and volatility.
me_gbm_2.add_environment(me_gbm_1)
me_gbm_2.add_constant('initial_value', 40.)
me_gbm_2.add_constant('volatility', 0.5)
# higher volatility
risk_factors = {'gbm_1' : me_gbm_1, 'gbm_2' : me_gbm_2}
# market with two risk factors
"""
Explanation: Risk Factors
The example is based on two risk factors, both modeled as geometric Brownian motions.
End of explanation
"""
# Single market environment shared by all six derivatives positions below.
# market environment for the options
me_option = dx.market_environment('put', dt.datetime(2015, 1, 1))
me_option.add_constant('maturity', dt.datetime(2015, 12, 31))
me_option.add_constant('currency', 'EUR')
me_option.add_environment(val_env)
"""
Explanation: Derivatives Positions
We are going to model total of 6 derivatives positions.
Market Environment
All derivatives instruments (positions) share the same market_environment object.
End of explanation
"""
# Build six positions: three American single-asset options on gbm_1 and three
# European maximum options on both risk factors.
positions = {}
half = 3 # 2 times that many options
for i in range(half):
    name = 'am_put_pos_%s' %i # same name for position key and name
    # NOTE(review): despite the 'put' naming, the payoff string below is a
    # call-style payoff max(S - 40, 0); confirm whether a put payoff
    # max(40 - S, 0) was intended.
    positions[name] = dx.derivatives_position(
        name=name,
        quantity=1,
        underlyings=['gbm_1'],
        mar_env=me_option,
        otype='American single',
        payoff_func='np.maximum(instrument_values - 40., 0)')
# Payoff of the European option on the maximum of the two risk factors.
multi_payoff = "np.maximum(np.maximum(maturity_value['gbm_1'], maturity_value['gbm_2']) - 40., 0)"
for i in range(half, 2 * half):
    name = 'multi_pos_%s' %i # same name for position key and name
    positions[name] = dx.derivatives_position(
        name=name,
        quantity=1,
        underlyings=['gbm_1', 'gbm_2'],
        mar_env=me_option,
        otype='European multi',
        payoff_func=multi_payoff)
"""
Explanation: Derivatives Positions
Two different kinds of derivatives make up the portfolio---an American put option and a European maximum call option. Both types of derivatives populate three positions, respectively.
End of explanation
"""
# Assemble the portfolio (no risk-factor correlation yet) and value it via
# Monte Carlo; fixed_seed makes the estimates reproducible.
portfolio = dx.derivatives_portfolio(
    name='portfolio',
    positions=positions,
    val_env=val_env,
    risk_factors=risk_factors,
    correlations=None,
    parallel=False)
%time res = portfolio.get_values(fixed_seed=True)
"""
Explanation: Portfolio Modeling and Valuation
The instantiation of the derivatives_portfolio object is as usual.
End of explanation
"""
# Display the per-position value estimates.
res
"""
Explanation: Here, the value estimates from the Monte Carlo simulation and valuation.
End of explanation
"""
# Cholesky factor of the risk-factor correlation matrix used for simulation.
portfolio.val_env.get_list('cholesky_matrix')
"""
Explanation: Portfolio Risk Reports
Portfolio risk reports are meant to provide a broad overview of how sensitive the value of a portfolio is with regard to the value of certain input parameters (market data, model parameters). While Greeks provide the same information with regard to marginal changes in the input paramters, risk reports provide a wider range input-output (parameter-portfolio value) combinations.
No Correlation
First, consider the portfolio from before, i.e. without correlation.
End of explanation
"""
%%time
# Vega report: revalue the portfolio under volatility shifts of each factor.
vegas, benchvalue = portfolio.get_port_risk(Greek='Vega',
                                            fixed_seed=True)
"""
Explanation: Calling the method get_port_risk and providing a key for the respective Greek yields sensitivities with regard to all risk factors (here: gbm_1 and gbm_2).
End of explanation
"""
# Inspect the raw Vega results.
vegas
"""
Explanation: The return object is a pandas Panel object.
End of explanation
"""
# Pretty-print the volatility-sensitivity report.
dx.risk_report(vegas)
"""
Explanation: Using the helper function risk_report allows the easy, readable printout of the results, i.e. the portfolio volatility sensitivities. In this case you can see that, for example, the increase in the first risk factor's (gbm_1) volatility by 10% leads to a portfolio value increase by a bit less than 1 currency unit. Decreasing the same input parameter by 10% reduces the portfolio value by a bit less than 1 currency unit.
End of explanation
"""
# Delta report: revalue under shifts of each factor's initial value.
%time deltas, benchvalue = portfolio.get_port_risk(Greek='Delta', fixed_seed=True)
"""
Explanation: Of course, you can generate the same risk report for the portfolio initial value sensitivities.
End of explanation
"""
deltas
# Difference of each shifted portfolio value from the benchmark value.
deltas.loc(axis=0)[:, 'value'] - benchvalue
"""
Explanation: For example, increasing the initial value of the first risk factor (gbm_1) by 10% increases the portfolio value by about 11 currency units.
End of explanation
"""
# Rebuild the portfolio with a strong negative correlation between the two
# risk factors; the Cholesky matrix reflects it.
correlations = [['gbm_1', 'gbm_2', -0.9]]
portfolio = dx.derivatives_portfolio(
    'portfolio', positions, val_env,
    risk_factors, correlations, parallel=False)
portfolio.val_env.get_list('cholesky_matrix')
"""
Explanation: With Correlation
Consider now a highly negative correlation case.
End of explanation
"""
# Revalue under correlation; the multi-asset options' values change.
%time portfolio.get_values(fixed_seed=True)
"""
Explanation: Since the value of the European maximum call option is dependent on the risk factor correlation you see a significant change in this derivative's value estimate.
End of explanation
"""
%%time
# Finer-grained Delta report: step controls the shift granularity (5% here).
deltas, benchvalue = portfolio.get_port_risk(Greek='Delta',
                                             fixed_seed=True,
                                             step=0.05)
"""
Explanation: Via the step parameter, you can influence the granularity of the risk report.
End of explanation
"""
deltas
# Shifted values relative to the benchmark under the correlated setup.
deltas.loc(axis=0)[:, 'value'] - benchvalue
"""
Explanation: In this case, an increase in the initial value of the first risk factor (gbm_1) by 10% leads to a much higher increase
in the portfolio value of about 15 currency units.
End of explanation
"""
|
liquidSVM/liquidSVM | bindings/python/demo.ipynb | agpl-3.0 | from liquidSVM import *
"""
Explanation: liquidSVM for Python
We give a demonstration of the capabilities of liquidSVM from a Python viewpoint.
More information can be found in the help (e.g. ?mcSVM).
Disclaimer: liquidSVM and the Python-bindings are in general quite stable and well tested by several people.
However, use in production is at your own risk.
If you run into problems please check first the documentation for more details,
or report the bug to the maintainer.
Table of Contents:
Installation
liquidSVM in one Minute
LS-Regression
Multi-class
Cells
Saving and loading Solutions
Learning Scenarios
Multiclass classification
Quantile regression
Expectile regression
Neyman-Pearson-Learning
ROC curve
LiquidData
Installation
Install it using any of the following variants:
easy_install --user --upgrade liquidSVM
pip install --user --upgrade liquidSVM
If you want to compile liquidSVM for your machine download http://www.isa.uni-stuttgart.de/software/python/liquidSVM-python.tar.gz.
For Windows there are binaries at liquidSVM-python.win-amd64.zip,
for Mac at liquidSVM-python.macosx.tar.gz
More information on the installation is in the README.
liquidSVM in one Minute
End of explanation
"""
# Inline plot rendering plus the numeric/plotting stack for this demo.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Some stuff we need for this notebook
End of explanation
"""
# Load test and training data
# reg.train / reg.test each expose features in .data and labels in .target.
reg = LiquidData('reg-1d')
"""
Explanation: LS-Regression
End of explanation
"""
# Train a least-squares SVM with hyperparameter selection; display=1 prints
# progress (on the command line only).
model = lsSVM(reg.train,display=1)
"""
Explanation: Now reg.train contains the training data and reg.test the testing data.
Both have the labels in its .target and the feature is in .data.
To train on the data and select the best hyperparameters do
(display=1 gives some information as the training progresses, but only on the command line, not in this jupyter notebook.)
End of explanation
"""
# Evaluate on held-out data; err[0,0] is the test error.
result, err = model.test(reg.test)
err[0,0]
"""
Explanation: Now you can test with any test set:
End of explanation
"""
# Scatter the test points and overlay the fitted regression curve.
plt.plot(reg.test.data, reg.test.target, '.')
x = np.linspace(-.2,1.4)
y = model.predict(x)
plt.plot(x, y, 'r-', linewidth=2)
plt.ylim(-.2,.8);
"""
Explanation: We also can plot the regression:
End of explanation
"""
# Convenience form: passing the LiquidData object trains on .train and tests
# on .test in one call; the evaluation is stored in model.lastResult.
model = lsSVM(reg, display=1)
result, err = model.lastResult
err[0,0]
"""
Explanation: As a convenience, since reg already contains .train and .test
you can do the whole experiment in one line.
Then the result is stored in model.lastResult:
End of explanation
"""
# Multi-class toy dataset (2D features, four classes).
banana = LiquidData('banana-mc')
"""
Explanation: Multi-class
End of explanation
"""
# Train a multi-class SVM, scatter the training points colored by class, and
# draw the decision boundaries on a regular grid.
model = mcSVM(banana.train)
print(banana.train.data.T.shape)
plt.scatter(banana.train.data[:,0],banana.train.data[:,1], c=banana.train.target)
x = np.arange(-1.1,1.1,.05)
X,Y = np.meshgrid(x, x)
# Grid points as an (n*n, 2) array for batch prediction.
z = np.array(np.meshgrid(x,x)).reshape(2,-1).T
print(x.shape,X.shape, z.shape)
Z = model.predict(z).reshape(len(x),len(x))
#contour(x,x,z, add=T, levels=1:4,col=1,lwd=4)
# matplotlib's contour takes `linewidths` (plural); the singular `linewidth`
# used previously is not a valid contour keyword.
CS = plt.contour(X, Y, Z, 4, linewidths=4)
"""
Explanation: The following performs multi-class classification
End of explanation
"""
result,err = model.test(banana.test)
err[:,0]
"""
Explanation: In this case err[:,0] shows both the global miss-classification error as well the errors
of the underlying binary tasks, for more details see [Multiclass classification]:
End of explanation
"""
covtype = LiquidData('covtype.5000')
model = mcSVM(covtype, display=1, useCells=True)
result, err = model.lastResult
err[0,0]
"""
Explanation: Cells
If data gets too big for the memory on your machine:
End of explanation
"""
co = LiquidData('covtype.10000')
%time mcSVM(co.train);
%time mcSVM(co.train, useCells=True);
"""
Explanation: A major issue with SVMs is that for larger sample sizes the kernel matrix
does not fit into the memory any more.
Classically this gives an upper limit for the class of problems that traditional
SVMs can handle without significant runtime increase.
The concept of cells makes it possible to circumvent these issues.
If you specify useCells=True then the sample space $X$ gets partitioned into
a number of cells.
The training is done first for cell 1 then for cell 2 and so on.
Now, to predict the label for a value $x\in X$ liquidSVM first finds out
to which cell this $x$ belongs and then uses the SVM of that cell to predict
a label for it.
We first consider a medium size sample of the covtype data set.
LiquidData will download this from http://www.isa.uni-stuttgart.de/LiquidData/:
End of explanation
"""
co = LiquidData('covtype.50000')
%time mcSVM(co.train,useCells=True);
"""
Explanation: This is about 5 times faster! (The user time is about three times the elapsed time since we are using 2 threads.)
By using the partitioning facility of liquidSVM you can even bigger problems:
End of explanation
"""
co = LiquidData('covtype-full')
%time mcSVM(co.train,useCells=True);
"""
Explanation: Note that with this data set useCells=F here only works if your system has enough free memory (~26GB).
Even the full covtype data set with over 460'000 rows
(about 110'000 samples retained for testing) is now treatable in under 9 minutes from within python:
End of explanation
"""
banana = LiquidData('banana-mc')
# Compare all four multi-class strategies: {AvA, OvA} x {hinge, least squares}.
# Fixed: the original listed "OvA_hinge" twice and never ran "OvA_ls",
# although the surrounding text discusses OvA_ls explicitly.
for mcType in ["AvA_hinge", "OvA_hinge", "AvA_ls", "OvA_ls"]:
    print("\n======", mcType, "======")
    model = mcSVM(banana.train, mcType=mcType)
    result, err = model.test(banana.test)
    # err[0,0] is the overall test error; the remaining rows are the
    # errors of the underlying binary tasks.
    print("global err:", err[0,0])
    print("task errs:", err[1:,0])
    print(result[:3,])
"""
Explanation: If you run into memory issues turn cells on: useCells=True.
If you have less than 10GB of RAM use store_solutions_internally=False for the latter.
Learning Scenarios
liquidSVM organizes its work into tasks:
E.g. in multiclass classification the problem has to be reduced into
several binary classification problems. Or in Quantile regression,
the SVM is learned simultaneously for different weights and
then the selection of hyperparameters produces different tasks.
Behind the scenes svm(formula, data, ...) does the following:
python
model = SVM(data)
model.train(...)
model.select(...)
The following learning scenarios hide these in higher level functions.
Multiclass classification
Multiclass classification has to be reduced to binary classification
There are two strategies for this:
all-vs-all: for every pairing of classes a binary SVM is trained
one-vs-all: for every class a binary SVM is trained with that class as one label and all other classes
are clumped together to another label
Then for any point in the test set, the winning label is chosen.
A second choice to make is whether the hinge or the least-squares loss should be used
for the binary classification problems.
Let us look at the example dataset banana-mc which has 4 labels.
Since there are 6 pairings, AvA trains 6 tasks, whereas
OvA trains 4 tasks:
End of explanation
"""
banana_bc = LiquidData('banana-bc')
m = mcSVM(banana_bc.train, mcType="OvA_ls",display=1)
result, err = m.test(banana_bc.test)
probs = (result+1) / 2.0
print(probs[:5,:])
plt.hist(probs, 100);
"""
Explanation: The first element in the errors gives the overall test error.
The other errors correspond to the tasks.
Also the result displays in the first column the final decision for
a test sample, and in the other columns the results of the binary classifications.
One can see nicely how the final prediction vote for any sample
is based on the 4 or 6 binary tasks.
NOTE AvA is usually faster, since every binary SVM just trains
on the data belonging to only two labels.
On the other hand OvA_ls can give better results at the cost of longer training time.
OvA_hinge should not be used as it is not universally consistent.
Probability estimation
If labels have values -1 or 1, then using the least-squares will estimate the conditional expectation.
Hence, this can be used to estimate probabilities:
End of explanation
"""
banana = LiquidData('banana-mc')
m = mcSVM(banana.train, mcType="OvA_ls",display=1)
result, err = m.test(banana.test)
probs = (result[:,1:]+1) / 2.0
print(result.shape, probs.shape)
print(np.hstack((result,probs))[:5,:].round(2))
"""
Explanation: And for multi-class classification it is similar:
End of explanation
"""
reg = LiquidData('reg-1d')
quantiles_list = [ 0.05, 0.1, 0.5, 0.9, 0.95 ]
model = qtSVM(reg.train, weights=quantiles_list)
result, err = model.test(reg.test)
err[:,0]
plt.plot(reg.test.data,reg.test.target,'.')
plt.ylim(-.2,.8)
x = np.arange(-0.2,1.4,0.05).reshape((-1,1))
lines = model.predict(x)
for i in range(len(quantiles_list)):
plt.plot(x, lines[:,i], '-', linewidth=2)
"""
Explanation: Quantile regression
This uses the quantile solver with pinball loss and performs selection for every quantile provided.
End of explanation
"""
reg = LiquidData('reg-1d')
expectiles_list = [ .05, 0.1, 0.5, 0.9, 0.95 ]
model = exSVM(reg.train, weights=expectiles_list)
result, err = model.test(reg.test)
err[:,0]
plt.plot(reg.test.data,reg.test.target,'.')
plt.ylim(-.2,.8)
x = np.arange(-0.2,1.4,0.05).reshape((-1,1))
lines = model.predict(x)
for i in range(len(expectiles_list)):
plt.plot(x, lines[:,i], '-', linewidth=2)
"""
Explanation: In this plot you see estimations for two lower and upper quantiles as well as the median
of the distribution of the label $y$ given $x$.
Expectile regression
This uses the expectile solver with weighted least squares loss and performs selection for every weight.
The 0.5-expectile in fact is just the ordinary least squares regression and hence estimates
the mean of $y$ given $x$.
And in the same way as quantiles generalize the median, expectiles generalize the mean.
End of explanation
"""
banana = LiquidData('banana-bc')
constraint = 0.08
constraintFactors = np.array([1/2,2/3,1,3/2,2])
# class=-1 specifies the normal class
model = nplSVM(banana.train, nplClass=-1, constraintFactors=constraintFactors, constraint=constraint)
result, err = model.test(banana.test)
false_alarm_rate = (result[banana.test.target==-1,]==1).mean(0)
detection_rate = (result[banana.test.target==1,]==1).mean(0)
np.vstack( (constraint * constraintFactors,false_alarm_rate,detection_rate) ).round(3)
"""
Explanation: Neyman-Pearson-Learning
Neyman-Pearson-Learning attempts classification under the constraint
that the probability of false positives (Type-I error) is bound by a significance
level alpha, which is called here the NPL-constraint.
End of explanation
"""
banana = LiquidData('banana-bc')
model = rocSVM(banana.train,display=1)
result, err = model.test(banana.test)
false_positive_rate = (result[banana.test.target==-1,:]==1).mean(0)
detection_rate = (result[banana.test.target==1,]==1).mean(0)
print(err.T.round(3))
print("1-DR:", 1-detection_rate)
print("FPR:",false_positive_rate)
plt.plot(false_positive_rate, detection_rate, 'x-')
plt.xlim(0,1); plt.ylim(0,1)
plt.plot([0,1],[0,1], '--');
"""
Explanation: You can see that the false alarm rate in the test set meets the
NPL-constraints quite nicely, and on the other hand
the detection rate is increasing.
ROC curve
Receiver Operating Characteristic curve (ROC curve) plots trade-off between
the false alarm rate and the detection rate
for different weights (default is 9 weights).
End of explanation
"""
LiquidData('reg-1d');
"""
Explanation: This shows nice learning, since the ROC curve is near the north-west corner.
LiquidData
As a convenience we provide several datasets prepared for training and testing.
http://www.isa.uni-stuttgart.de/LiquidData
They can be imported by name e.g. using:
End of explanation
"""
|
srnas/barnaba | examples/example_03_annotate.ipynb | gpl-3.0 | import barnaba as bb
# annotate
pdb = "../test/data/SARCIN.pdb"
stackings, pairings, res = bb.annotate(pdb)
# list base pairings
print("BASE-PAIRS")
for p in range(len(pairings[0][0])):
res1 = res[pairings[0][0][p][0]]
res2 = res[pairings[0][0][p][1]]
interaction = pairings[0][1][p]
print("%10s %10s %4s" % (res1,res2,interaction))
# list base-stackings
print()
print("STACKING")
for p in range(len(stackings[0][0])):
res1 = res[stackings[0][0][p][0]]
res2 = res[stackings[0][0][p][1]]
interaction = stackings[0][1][p]
print("%10s %10s %4s" % (res1,res2,interaction))
"""
Explanation: Annotate structures and simulations
We here show how to find base-pairs and stacking interactions in structures and simulations.
The function
python
stackings, pairings, res = bb.annotate(pdb)
returns three lists:
- a list of stacking interactions
- a list of pairing interactions
- the list of residue names following the usual convention RESNAME_RESNUMBER_CHAININDEX
stackings and pairings contain the list of interactions for the N frames in the PDB/trajectory file and it is organized in the following way: for a given frame $i=1..N$ there are $k=1..Q$ interactions between residues with index pairings[i][0][k][0] and pairings[i][0][k][1]. The type of interaction is specified at the element pairings[i][1][k].
But let's make an example by annotating a PDB file containing a sarcin-ricin motif:
End of explanation
"""
dotbr, seq = bb.dot_bracket(pairings,res)
print(">",seq)
for j in range(len(dotbr)):
print(dotbr[j])
"""
Explanation: Decypher the annotation
Base-pairing are classified according to the Leontis-Westhof classification, where
- W = Watson-Crick edge
- H = Hoogsteeen edge
- S= Sugar edge
- c/t = cis/trans
- XXx = when two bases are close in space, but they do not fall in any of the categories. This happens frequently for low-resolution structures or from molecular simulations.
WWc pairs between complementary bases are called WCc or GUc.
Stacking are classified according to the MCannotate classification:
- ">>" Upward
- "<<" Downward
- "<>" Outward
- "><" Inward
Criteria for stacking/pairing
First, we consider only bases that are "close" in space, i.e. $R_{ij} < 1.7$ and $R_{ji} < 1.7$.
$R_{ij} = (x_{ij}/5, y_{ij}/5, z_{ij}/3)$ is the SCALED position vector with components ${x,y,z}$ (in $\mathring{A}$) of base j constructed on base i.
The criteria for base-stacking are the following:
$( |z_{ij}| \; AND \; |z_{ji}| > 2 \mathring{A} ) \; AND \;
(\rho_{ij} \; OR\; \rho_{ji} < 2.5 \mathring{A}) \; AND\;
(|\theta_{ij}| < 40^{\circ} ) $
where
- $ \rho_{ij} = \sqrt{x_{ij}^2 + y_{ij}^2} $
- $\theta_{ij}$ = angle between the vectors normal to the base plane
The criteria for base-pairing are the following:
non stacked AND $|\theta_{ij}| < 60^{\circ}$ AND (number of hydrogen bonds $> 0$)
The number of hydrogen bonds is calculated as the number of donor-acceptor pairs with distance $< 3.3 \mathring{A}$.
If bases are complementary and the number of hydrogen bonds is > 1 (AU/GU) or > 2 (GC), the pair is considered WCc (or GUc).
cis/trans is calculated according to the value of the dihedral angle defined by $C1'{i}-N1/N9{i}-N1/N9_{j}-C1'_{j}$
edges are defined according to the value of $\psi = \arctan{(\hat{y}_{ij}/\hat{x}_{ij})}$.
Watson-Crick edge: $0.16 <\psi \le 2.0 rad$
Hoogsteen edge: $2.0 <\psi \le 4.0 rad $.
Sugar edge: $\psi > 4.0, \psi \le 0.16$
ATT!
- These criteria are slightly different from the ones used in other popular software for annotating three-dimensional structures (e.g. X3DNA, MCAnnotate, Fr3D, etc.). From my experience, all these packages give slightly different results, especially for non-Watson-Crick base-pairs.
- Stacking is also problematic, as it relies on arbitrary criteria.
- In all cases, criteria for stacking and pairing were calibrated to work well for high resolution structures. These criteria might not be optimal for low-resolution structures and to describe nearly-formed interactions such the ones that are often encountered in molecular simulations.
Dot-bracket annotation
From the list of base-pairing, we can obtain the dot-bracket annotation using the function
python
dotbracket = bb.dot_bracket(pairings,res)
this function returns a string for each frame in the PDB/simulation. Let's see this in action:
End of explanation
"""
|
numenta/nupic.research | projects/archive/dynamic_sparse/notebooks/ExperimentAnalysis-TestRestoration.ipynb | agpl-3.0 | %load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rcParams
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(style="whitegrid")
sns.set_palette("colorblind")
"""
Explanation: Experiment: test_restoration
Evaluate if restoration affected existing capabilities. Comparing two approaches to calculate coactivations to see if they are getting to the same values.
Conclusion
End of explanation
"""
# Load every trial of the experiment into a single dataframe and take a
# first look at its shape and contents.
exps = ['test_restoration_5']
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
# replace hebbian prune NaNs with 0.0 (left disabled)
# df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
# df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
df.columns
df.shape
df.iloc[1]
# Number of trials per model type.
df.groupby('model')['model'].count()
"""
Explanation: Load and check data
End of explanation
"""
num_epochs=100
# Did any trials failed?
df[df["epochs"]<num_epochs]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=num_epochs]
df.shape
# which ones failed?
# failed, or still ongoing?
df_origin['failed'] = df_origin["epochs"]<num_epochs
df_origin[df_origin['failed']]['epochs']
# helper functions
def mean_and_std(s):
    """Render a series as "mean ± std", each to three decimal places."""
    summary = (s.mean(), s.std())
    return "{:.3f} ± {:.3f}".format(*summary)
def round_mean(s):
    """Return the series mean rounded to the nearest whole number, as text."""
    nearest = round(s.mean())
    return "{:.0f}".format(nearest)
# Aggregations applied to the best-validation-accuracy column.
stats = ['min', 'max', 'mean', 'std']

def agg(columns, filter=None, round=3):
    """Group trials of the global ``df`` and summarize val accuracy.

    Parameters
    ----------
    columns : label or list of labels
        Column(s) of ``df`` to group by.
    filter : boolean mask, optional
        Row selection applied before grouping; ``None`` keeps every row.
        (Parameter names kept for backward compatibility even though
        ``filter`` and ``round`` shadow builtins.)
    round : int
        Number of decimals in the returned table.

    Returns
    -------
    pandas.DataFrame
        Per-group epoch of the best val-acc, min/max/mean/std of the best
        val-acc, and the trial count.
    """
    # Select the rows once instead of duplicating the groupby in each branch.
    data = df if filter is None else df[filter]
    summary = data.groupby(columns).agg({
        'val_acc_max_epoch': round_mean,
        'val_acc_max': stats,
        'model': ['count'],
    })
    return summary.round(round)
"""
Explanation: ## Analysis
Experiment Details
End of explanation
"""
agg(['on_perc', 'network'])
"""
Explanation: Does improved weight pruning outperforms regular SET
End of explanation
"""
|
bureaucratic-labs/yargy | docs/index.ipynb | mit | from yargy import Parser, rule, and_
from yargy.predicates import gram, is_capitalized, dictionary
GEO = rule(
and_(
gram('ADJF'), # так помечается прилагательное, остальные пометки описаны в
# http://pymorphy2.readthedocs.io/en/latest/user/grammemes.html
is_capitalized()
),
gram('ADJF').optional().repeatable(),
dictionary({
'федерация',
'республика'
})
)
parser = Parser(GEO)
text = '''
В Чеченской республике на день рождения ...
Донецкая народная республика провозгласила ...
Башня Федерация — одна из самых высоких ...
'''
for match in parser.findall(text):
print([_.value for _ in match.tokens])
"""
Explanation: Yargy
Yargy — библиотека для извлечения структурированной информации из текстов на русском языке. Правила описываются контекстно-свободными грамматиками и словарями ключевых слов. Банк готовых правил для имён, дат, адресов и других сущностей доступен в репозитории <a href="https://github.com/natasha/natasha">Natasha</a>.
Парсер реализует алгоритм <a href="https://en.wikipedia.org/wiki/Earley_parser">Earley parser</a>. Библиотека написана на чистом Python, поддерживает Python 3.5+ и Pypy 3, использует <a href="https://pymorphy2.readthedocs.io/en/latest/">Pymorphy2</a> для работы с морфологией.
Томита-парсер
<a href="https://tech.yandex.ru/tomita/">Томита-парсер</a> — популярный инструмент для извлечения структурированный информации из текстов на русском языке. Грубо говоря, Yargy — версия Томита-парсера для Python, написанная с нуля:
<table>
<tr>
<th>Томита-парсер</th>
<th>Yargy</th>
</tr>
<tr>
<td>Разрабатывался много лет внутри Яндекса</td>
<td>Open source, разрабатывается сообществом</td>
</tr>
<tr>
<td>10 000+ строк кода на C++</td>
<td>1000+ на Python</td>
</tr>
<tr>
<td>CLI</td>
<td>Python-библиотека</td>
</tr>
<tr>
<td>Protobuf + конфигурационные файлы</td>
<td>Python DSL</td>
</tr>
<tr>
<td>Нет готовых правил</td>
<td><a href="https://github.com/natasha/natasha">Natasha</a> — готовые правила для извлечения имён, дат, адресов и других сущностей</td>
</tr>
<tr>
<td>Медленный</td>
<td>Очень медленный</td>
</tr>
</table>
Примеры
Для работы с русским языком в Yargy встроен морфологический анализатор <a href="http://pymorphy2.readthedocs.io/en/latest">Pymorphy2</a>. Найдём в тексте топонимы, которые начинаются прилагательным и заканчиваются словами "федерация" или "республика". Например, "Российская Федерация", "Донецкая народная республика":
End of explanation
"""
from yargy import Parser
from yargy.predicates import gram
from yargy.pipelines import morph_pipeline
from yargy.interpretation import fact
from IPython.display import display
Person = fact(
'Person',
['position', 'name']
)
Name = fact(
'Name',
['first', 'last']
)
POSITION = morph_pipeline([
'премьер министр',
'президент'
])
NAME = rule(
gram('Name').interpretation(
Name.first.inflected()
),
gram('Surn').interpretation(
Name.last.inflected()
)
).interpretation(
Name
)
PERSON = rule(
POSITION.interpretation(
Person.position.inflected()
),
NAME.interpretation(
Person.name
)
).interpretation(
Person
)
parser = Parser(PERSON)
text = '''
12 марта по приказу президента Владимира Путина ...
'''
for match in parser.findall(text):
display(match.fact)
"""
Explanation: Настоящие сложные грамматики для топонимов собраны в репозитории <a href="https://github.com/natasha/natasha">Natasha</a>.
Найти подстроку в тексте не достаточно, нужно разбить её на поля и нормализовать. Например, из фразы "12 марта по приказу президента Владимира Путина ...", извлечём объект Person(position='президент', Name(first='Владимир', last='Путин')).
End of explanation
"""
from yargy.tokenizer import MorphTokenizer
tokenizer = MorphTokenizer()
text = '''Ростов-на-Дону
Длительностью 18ч. 10мин.
Яндекс.Такси
π ≈ 3.1415
1 500 000$
http://vk.com
'''
for line in text.splitlines():
print([_.value for _ in tokenizer(line)])
"""
Explanation: Грамматики для имён собраны в репозитории Natasha
Токенизатор
Парсер работает с последовательностью токенов. Встроенный в Yargy токенизатор простой и предсказуемый:
End of explanation
"""
tokenizer = MorphTokenizer()
list(tokenizer('марки стали'))
"""
Explanation: Для каждого токена Pymorphy2 возвращает набор граммем. Например, "NOUN, sing, femn" — "существительное в единственном числе женского рода". Полный список в <a href="https://pymorphy2.readthedocs.io/en/latest/user/grammemes.html">документации Pymorphy2</a>.
Вне контекста слово имеет несколько вариантов разбора. Например, "стали" — глагол (VERB) во фразе "мы стали лучше" и существительное (NOUN) в "марки стали":
End of explanation
"""
from yargy import and_, not_
from yargy.tokenizer import MorphTokenizer
from yargy.predicates import is_capitalized, eq
tokenizer = MorphTokenizer()
token = next(tokenizer('Стали'))
predicate = is_capitalized()
assert predicate(token) == True
predicate = and_(
is_capitalized(),
not_(eq('марки'))
)
assert predicate(token) == True
"""
Explanation: Токенизатор работает на правилах. В <a href="ref.ipynb#Токенизатор">справочнике</a> показано, как менять стандартные правила и добавлять новые.
Предикаты
Предикат принимает токен, возвращает True или False. В Yargy встроен <a href="ref.ipynb#Предикаты">набор готовых предикатов</a>. Операторы and_, or_ и not_ комбинируют предикаты:
End of explanation
"""
from pymorphy2.shapes import is_roman_number
from yargy.parser import Context
from yargy.tokenizer import Tokenizer
from yargy.predicates import custom
tokenizer = Tokenizer()
token = next(tokenizer('XL'))
predicate = custom(is_roman_number, types='LATIN')
predicate = predicate.activate(Context(tokenizer)) # проверяется, что tokenizer поддерживает тип 'LATIN'
assert predicate(token) == True
token = next(tokenizer('XS'))
assert predicate(token) == False
"""
Explanation: <a href="ref.ipynb#predicates.custom">custom</a> создаёт предикат из произвольной функции. Например, предикат для римских цифр:
End of explanation
"""
from yargy import or_, rule
from yargy.predicates import normalized
RULE = or_(
rule(normalized('dvd'), '-', normalized('диск')),
rule(normalized('видео'), normalized('файл'))
)
"""
Explanation: Газеттир
Газеттир работает с последовательностью слов. Например, вместо:
End of explanation
"""
from yargy import Parser
from yargy.pipelines import morph_pipeline
RULE = morph_pipeline([
'dvd-диск',
'видео файл',
'видеофильм',
'газета',
'электронный дневник',
'эссе',
])
parser = Parser(RULE)
text = 'Видео файл на dvd-диске'
for match in parser.findall(text):
print([_.value for _ in match.tokens])
"""
Explanation: удобно использовать morph_pipeline:
End of explanation
"""
from yargy import rule, or_
KEY = or_(
rule('р', '.'),
rule('размер')
).named('KEY')
VALUE = or_(
rule('S'),
rule('M'),
rule('L'),
).named('VALUE')
SIZE = rule(
KEY,
VALUE
).named('SIZE')
SIZE.normalized.as_bnf
"""
Explanation: Список газеттиров в <a href="ref.ipynb#Газеттир">справочнике</a>.
Грамматики
В Yargy контекстно-свободная грамматика описывается конструкциями Python. Например, традиционная запись грамматики размеров одежды:
KEY -> р. | размер
VALUE -> S | M | L
SIZE -> KEY VALUE
Так она выглядит в Yargy:
End of explanation
"""
from yargy.predicates import in_
VALUE = rule(
in_('SML')
).named('VALUE')
SIZE = rule(
KEY,
VALUE
).named('SIZE')
SIZE.normalized.as_bnf
"""
Explanation: В Yargy терминал грамматики — предикат. Используем встроенный предикат in_, сократим запись VALUE:
End of explanation
"""
from yargy import forward
EXPR = forward()
EXPR.define(or_(
rule('a'),
rule('(', EXPR, '+', EXPR, ')')
).named('EXPR'))
EXPR.normalized.as_bnf
"""
Explanation: Как быть, когда правая часть правила ссылается на левую? Например:
EXPR -> a | ( EXPR + EXPR )
В Python нельзя использовать необъявленные переменные. Для рекурсивных правил, есть конструкция forward:
End of explanation
"""
from yargy import not_
from yargy.predicates import eq
WORD = not_(eq('»'))
TEXT = forward()
TEXT.define(or_(
rule(WORD),
rule(WORD, TEXT)
))
TITLE = rule(
'«',
TEXT,
'»'
).named('TITLE')
TITLE.normalized.as_bnf
"""
Explanation: Рекурсивные правила описывают последовательности токенов произвольной длины. Грамматика для текста в кавычках:
End of explanation
"""
TITLE = rule(
'«',
not_(eq('»')).repeatable(),
'»'
).named('TITLE')
TITLE.normalized.as_bnf
"""
Explanation: Для удобства в Yargy есть метод repeatable с ним запись короче. Библиотека автоматически добавит forward:
End of explanation
"""
parser = Parser(
or_(
PERSON,
TITLE
)
)
text = 'Президент Владимир Путин в фильме «Интервью с Путиным» ..'
for match in parser.findall(text):
print([_.value for _ in match.tokens])
"""
Explanation: Парсер
У парсера есть два метода: findall и match. findall находит все непересекающиеся подстроки, которые удовлетворяют грамматике:
End of explanation
"""
match = parser.match('Президент Владимир Путин')
print([_.value for _ in match.tokens])
match = parser.match('Президент Владимир Путин 25 мая')
print(match)
"""
Explanation: match — пытается разобрать весь текст целиком:
End of explanation
"""
from IPython.display import display
from yargy.predicates import (
lte,
gte,
dictionary
)
MONTHS = {
'январь',
'февраль',
'март',
'апрель',
'мая',
'июнь',
'июль',
'август',
'сентябрь',
'октябрь',
'ноябрь',
'декабрь'
}
MONTH_NAME = dictionary(MONTHS)
MONTH = and_(
gte(1),
lte(12)
)
DAY = and_(
gte(1),
lte(31)
)
YEAR = and_(
gte(1900),
lte(2100)
)
DATE = or_(
rule(DAY, MONTH_NAME, YEAR),
rule(YEAR, '-', MONTH, '-', DAY),
rule(YEAR, 'г', '.')
).named('DATE')
parser = Parser(DATE)
text = '''2015г.
18 июля 2016
2016-01-02
'''
for line in text.splitlines():
match = parser.match(line)
display(match.tree.as_dot)
"""
Explanation: Интерпретация
Результат работы парсера — это дерево разбора. Грамматика и деревья разбора для дат:
End of explanation
"""
from yargy.interpretation import fact
Date = fact(
'Date',
['year', 'month', 'day']
)
DATE = or_(
rule(
DAY.interpretation(
Date.day
),
MONTH_NAME.interpretation(
Date.month
),
YEAR.interpretation(
Date.year
)
),
rule(
YEAR.interpretation(
Date.year
),
'-',
MONTH.interpretation(
Date.month
),
'-',
DAY.interpretation(
Date.day
)
),
rule(
YEAR.interpretation(
Date.year
),
'г', '.'
)
).interpretation(
Date
).named('DATE')
parser = Parser(DATE)
for line in text.splitlines():
match = parser.match(line)
display(match.tree.as_dot)
"""
Explanation: Интерпретация — процесс преобразования дерева разбора в объект с набором полей. Для даты, например, нужно получить структуры вида Date(year=2016, month=1, day=2). Пользователь размечает дерево на вершины-атрибуты и вершины-конструкторы методом interpretation:
End of explanation
"""
for line in text.splitlines():
match = parser.match(line)
display(match.fact)
"""
Explanation: Из размеченного дерева библиотека собирает объект:
End of explanation
"""
# Map month names (in their dictionary/nominative form, as produced by
# .normalized()) to month numbers 1-12.
MONTHS = {
    'январь': 1,
    'февраль': 2,
    'март': 3,
    'апрель': 4,
    # Fixed: .normalized() yields the nominative 'май', so the key must be
    # 'май' -- the original genitive key 'мая' could never be looked up.
    'май': 5,
    'июнь': 6,
    'июль': 7,
    'август': 8,
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12
}
# DATE parses "<day> <month name> <year>" and normalizes every field:
# day/year become ints, the month name becomes its number via MONTHS.
DATE = rule(
    DAY.interpretation(
        Date.day.custom(int)
    ),
    MONTH_NAME.interpretation(
        Date.month.normalized().custom(MONTHS.get)
    ),
    YEAR.interpretation(
        Date.year.custom(int)
    )
).interpretation(
    Date
)
parser = Parser(DATE)
match = parser.match('18 июня 2016')
match.fact
"""
Explanation: Подробнее об интерпретации в <a href="ref.ipynb#Интерпретация">справочнике</a>.
Нормализация
Содержание полей фактов нужно нормировать. Например, не Date('июня', '2018'), а Date(6, 2018); не Person('президента', Name('Владимира', 'Путина')), а Person('президент', Name('Владимир', 'Путин')). В Yargy пользователь при разметке дерева разбора указывает, как нормировать вершины-атрибуты. В примере слово "июня" будет приведено к нормальной форме "июнь" и заменится на число "6" с помощью словаря MONTHS. Год и день просто приводятся к int:
End of explanation
"""
NAME = rule(
gram('Name').interpretation(
Name.first.inflected()
),
gram('Surn').interpretation(
Name.last.inflected()
)
).interpretation(
Name
)
"""
Explanation: Подробнее в <a href="ref.ipynb#Нормализация">справочнике</a>.
Согласование
Примитивная грамматика имён:
End of explanation
"""
parser = Parser(NAME)
for match in parser.findall('Илье Ивановым, Павлом Семенов'):
print([_.value for _ in match.tokens])
"""
Explanation: У неё есть две проблемы. Она срабатывает на словосочетаниях, где имя и фамилия в разных падежах:
End of explanation
"""
parser = Parser(NAME)
for match in parser.findall('Сашу Иванову, Саше Иванову'):
display(match.fact)
"""
Explanation: Имя и фамилия приводятся к нормальной форме независимо, получается женщина "Иванов":
End of explanation
"""
from yargy.relations import gnc_relation
gnc = gnc_relation()
NAME = rule(
gram('Name').interpretation(
Name.first.inflected()
).match(gnc),
gram('Surn').interpretation(
Name.last.inflected()
).match(gnc)
).interpretation(
Name
)
parser = Parser(NAME)
for match in parser.findall('Илье Ивановым, Павлом Семенов, Саша Быков'):
print([_.value for _ in match.tokens])
parser = Parser(NAME)
for match in parser.findall('Сашу Иванову, Саше Иванову'):
display(match.fact)
"""
Explanation: В Yargy связь между словами и словосочетаниями устанавливается методом match. Для согласования по числу в match передаём number_relation, для согласования по падежу, роду и числу — gnc_relation:
End of explanation
"""
|
jdhp-docs/python-notebooks | photography_fr.ipynb | mit | %matplotlib inline
import math
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets
from ipywidgets import interact
"""
Explanation: Photographie
TODO
* ...
End of explanation
"""
CAPTEUR_DICT = {"APS-C Canon (15x23 mm)": (14.9, 22.3),
"Full frame (24x36 mm)": (24, 36)}
"""
Explanation: Constantes
Sources:
* Obtenez le maximum du Canon EOS 80D, Vincent Burgeon, ed. Dunod, p.14
End of explanation
"""
def angle(focale, capteur):
    """Return the angle of view (in radians) for a lens/sensor pair.

    Implements angle = 2 * arctan(D / (2 f)), with D the sensor dimension
    (horizontal, vertical or diagonal) and f the focal length, both in mm.

    Parameters
    ----------
    focale : float
        Focal length of the lens, in mm.
    capteur : float
        Sensor dimension, in mm.
    """
    # Return directly: the original re-bound the function's own name
    # to a local variable, shadowing `angle` inside its body.
    return 2. * math.atan(float(capteur) / (2. * focale))
@interact(focale=(8, 300, 1), capteur=CAPTEUR_DICT)
def square(focale, capteur):
    """Interactive widget: print the field of view for a focal/sensor pair."""
    hauteur, largeur = capteur

    def degres(cote):
        # Field-of-view angle for one sensor dimension, in degrees, 1 decimal.
        return round(math.degrees(angle(focale, cote)), 1)

    print("Capteur: {}x{} mm".format(hauteur, largeur))
    print("Focale: {} mm".format(focale))
    print("Angle horizontale: {}°".format(degres(largeur)))
    print("Angle verticale: {}°".format(degres(hauteur)))
"""
Explanation: Angle de champ
$$\text{angle} = 2 \arctan \left( \frac{\frac{D}{2}}{f} \right) = 2 \arctan \left( \frac{D}{2f} \right)$$
avec $D$ la dimension (horizontale, verticale ou diagonale) du capteur et $f$ la focale de l'objectif.
Sources:
* Tous photographes !, Jacques Croizer, ed. Dunod, p.247
End of explanation
"""
|
gschivley/ERCOT_power | Group classification/Group classification.ipynb | mit | %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import sklearn as sk
from cluster import Clusters
import os
filename = 'Cluster_Data_2.csv'
path = '../Clean Data'
fullpath = os.path.join(path, filename)
cluster = Clusters(fullpath)
cluster.make_clusters(n_clusters=range(4,26))
cluster.evaluate_clusters()
"""
Explanation: Notebook to classify power plants
Variables include:
- Fuel type
- Historical ramp speeds
- Historical efficiency
- Location?
End of explanation
"""
cluster_labels = cluster.export_cluster_id(k=16)
cluster_labels[:5]
labeled_plants = cluster.label_and_export(k=16)
labeled_plants.to_clipboard()
export_df = fossil_with_ramp.loc[:,['year', 'plant_id', 'cluster_id_6']]
export_df.to_csv('Cluster labels.csv', index = False)
"""
Explanation: From the figures above, it is difficult to determine an optimal number of clusters. The silhouette score clearly shows that we need more than 5 clusters. 6 looks like a good number, but one of the clusters shows odd behavior when plotting generation change vs net demand change (looks like 2 solid regression lines in the figure).
End of explanation
"""
# cols = ['year', 'fuel type', '1-hr ramp rate', '3-hr ramp rate',
# 'efficiency', 'efficiency std', 'CF', 'CF std']
# #Add index of power plant IDs
# df = pd.DataFrame(columns=cols)
filename = 'Cluster_Data.csv'
path = '../Clean Data'
fullpath = os.path.join(path, filename)
cluster_df = pd.read_csv(fullpath)
"""
Explanation: PREVIOUS WORK BELOW
Code above now does everything needed, below is kept for reference
Variable definitions
These variables will be used to cluster the power plants.
- year: year that the data is for
- fuel type: primary fuel type used at the facility (SUB, LIG, NG, etc)
- 1-hr ramp rate: maximum or 95 percentile rate of generation increase over 1 hour
- 3-hr ramp rate: maximum or 95 percentile rate of generation increase over 3 hours
- efficiency: might just change this to heat rate (MMBTU/MWh)
- efficiency std: standard deviation in efficiency based on monthly values for the year
- CF: capacity factor, which is the amount generated in a year divided by the theoretical maximum generation possible $MWh/(Capacity * 8760)$ (or hours per month for monthly values used in calculating the standard deviation)
- CF std: standard deviation of CF based on monthly values for the year
End of explanation
"""
cluster_df[cluster_df.plant_id==127]
cluster_df[cluster_df.plant_id==3466]
cluster_df[cluster_df.plant_id==3584]
cluster_df['fuel_type'].unique()
"""
Explanation: missing '1-hr ramp rate'
End of explanation
"""
fossil_codes = ['SUB', 'LIG', 'NG', 'DFO', 'PC']
fossil_df = cluster_df.loc[cluster_df['fuel_type'].isin(fossil_codes)]
fossil_df.describe()
# Unique plants
len(fossil_df.dropna().loc[:,'plant_id'].unique())
sns.distplot(fossil_df['capacity'].dropna())
fossil_with_ramp = fossil_df.dropna()
sns.distplot(fossil_with_ramp['capacity'])
"""
Explanation: Filter out non-fossil plants
End of explanation
"""
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score, calinski_harabaz_score
from sklearn import preprocessing
fossil_with_ramp.columns
sns.pairplot(fossil_with_ramp, hue='fuel_type', vars=[u'capacity', u'capacity_factor', u'efficiency',
u'ramp_rate'])
"""
Explanation: Start classifying only with all nan values dropped
End of explanation
"""
cluster_data = pd.DataFrame(index=range(3,15), columns=['n_clusters', 'score', 'silhouette'])
cluster_labels = {}
X = fossil_with_ramp[['capacity', 'capacity_factor', 'efficiency', 'ramp_rate']]
X_scaled = preprocessing.StandardScaler().fit_transform(X)
for idx, n_clusters in enumerate(range(3,15)):
cluster_data.loc[n_clusters, 'n_clusters'] = n_clusters
clusterer = KMeans(n_clusters, random_state=42)
# fit_clusters = clusterer.fit(X)
# cluster_labels = clusterer.fit_predict(X)
cluster_labels[n_clusters] = clusterer.fit_predict(X_scaled)
# http://scikit-learn.org/stable/modules/clustering.html#calinski-harabaz-index
# The score is higher when clusters are dense and well separated
# score[idx] = metrics.calinski_harabaz_score(X, cluster_labels)
cluster_data.loc[n_clusters, 'score'] = calinski_harabaz_score(X_scaled, cluster_labels[n_clusters])
# silhouette[idx] = silhouette_score(X, cluster_labels)
cluster_data.loc[n_clusters, 'silhouette'] = silhouette_score(X_scaled, cluster_labels[n_clusters])
# print 'For ', n_clusters, ' clusters, the average silhouette score is :', silhouette[idx], \
# ' and the score is :', score[idx]
"""
Explanation: The code below begins by scaling the X-vector data to mean 0 and a standard variance. It then loops through a wide range of k values for the number of clusters (3 to 14), calculating the labels for each plant, the Calinski Harabaz score, and the Silhouette score for each value of k.
End of explanation
"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,3))
cluster_data.plot(y='score', ax=ax1)
# ax1.plot(range(3,15), cluster_data['score'])
ax1.set_title('Calinski Harabaz score\nHigher is better')
cluster_data.plot(y='silhouette', ax=ax2)
# ax2.plot(range(3,15), silhouette)
ax2.set_title('Silhouette score\nLower is better')
"""
Explanation: The two score values are plotted below. They don't agree exactly on the optimal number of centers, but it looks like 6 or 8 is probably best.
End of explanation
"""
for n_clusters in cluster_labels.keys():
fossil_with_ramp.loc[:,'cluster_id_{}'.format(n_clusters)] = cluster_labels[n_clusters]
fossil_with_ramp.head()
sns.pairplot(fossil_with_ramp, hue='cluster_id_6', vars=[u'capacity', u'capacity_factor', u'efficiency',
u'ramp_rate'])
"""
Explanation: The integer appended to each of the cluster_id column labels is equal to the number of clusters.
End of explanation
"""
drop_columns = ['cluster_id_{}'.format(i) for i in [3,4,5,7,8,9,10,11,12,13,14]]
grouped = fossil_with_ramp.drop(drop_columns, axis=1).groupby(['cluster_id_6', 'fuel_type'])
grouped.mean()
grouped.count()
grouped.std()
sns.countplot('cluster_id_6', hue='fuel_type', data=fossil_with_ramp)
plt.title('Count of plants in each cluster')
sns.barplot('cluster_id_6', 'capacity', data=fossil_with_ramp, hue='fuel_type',
estimator=sum)
plt.title('Total Capacity of plants in each cluster')
# data = fossil_with_ramp.drop(drop_columns, axis=1)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize=(10,8))
sns.barplot('cluster_id_6', 'capacity', data=fossil_with_ramp, hue='fuel_type', ax=ax1)
ax1.set_title('Mean Capacity')
sns.barplot('cluster_id_6', 'capacity_factor', data=fossil_with_ramp, hue='fuel_type', ax=ax2)
ax2.set_title('Mean Capacity Factor')
sns.barplot('cluster_id_6', 'efficiency', data=fossil_with_ramp, hue='fuel_type', ax=ax3)
ax3.set_title('Mean Efficiency')
sns.barplot('cluster_id_6', 'ramp_rate', data=fossil_with_ramp, hue='fuel_type', ax=ax4)
ax4.set_title('Mean Ramp Rate')
# sns.barplot('Climate', 'HDD65', data=house, ax=ax2)
# # Shrink the point sizes (scale), change the estimator from mean to median
# sns.pointplot('Climate', 'HDD65', data=house, scale=0.7, estimator=np.median, ax=ax3)
# # Adjust the bandwidth (smoothing)
# sns.violinplot('Climate', 'HDD65', data=house, ax=ax4, bw=0.4)
plt.tight_layout()
"""
Explanation: Some basic information about each of the clusters
I've created a bunch of tables and figures to better understand each of the clusters (k=6). The tables show mean, count, and variance. The first figure shows the number of power plants in each group (remember that each power plant is included for every year it operates and data is available). The second looks at the size (capacity). Finally, there is a figure with facets for mean Capacity, Capacity Factor, Efficiency, and Ramp Rate.
End of explanation
"""
|
leoferres/prograUDD | clases/09-Iteradores.ipynb | mit | for i in range(10):
print(i, end=' ')
"""
Explanation: Iteradores
Una de las cosas más maravillosas de las compus es que podemos repetir un mismo cálculo para muchos valores de forma automática. Ya hemos visto al menos un iterator (iterador), que no es una lista... es otro objeto.
End of explanation
"""
for value in [2, 4, 6, 8, 10]:
# do some operation
print(value + 1, end=' ')
"""
Explanation: Pero range, la verdad no es una lista. Es un iterador y aprender cómo funciona es útil en varios ámbitos.
Iterar sobre listas
End of explanation
"""
iter([2, 4, 6, 8, 10])
I = iter([2, 4, 6, 8, 10])
print(next(I))
print(next(I))
"""
Explanation: En este caso, lo primero que hace el iterador es chequear si el objeto del otro lado del in es un iterador. Esto se puede chequear con la función iter, parecida a type.
End of explanation
"""
range(10)
iter(range(10))
"""
Explanation: range(): Una lista no siempre es una lista
range() como una lista, expone un iterador:
End of explanation
"""
N = 10 ** 12
for i in range(N):
if i >= 10: break
print(i, end=', ')
"""
Explanation: Y es así como Python lo trata como si fuera una lista:
End of explanation
"""
L = [2, 4, 6, 8, 10]
for i in range(len(L)):
print(i, L[i])
"""
Explanation: Si una lista fuera a crear un trillón de valores ($10^{12}$), necesitaríamos terabytes de memoria para almacenarlos.
Algunos iteradores útiles
enumerate
Algunas veces queremos no solo iterar sobre los valores en una lista, sino también imprimir el índice de ellos.
End of explanation
"""
for i, val in enumerate(L):
print(i, val)
"""
Explanation: Pero hay una sintaxis más limpia para esto:
End of explanation
"""
L = [2, 4, 6, 8, 10]
R = [3, 6, 9, 12, 15]
for lval, rval in zip(L, R):
print(lval, rval)
"""
Explanation: zip
La función zip itera sobre dos iterables y produce una tupla:
End of explanation
"""
# find the first 10 square numbers
square = lambda x: x ** 2
for val in map(square, range(10)):
print(val, end=' ')
"""
Explanation: Si las listas son de diferente largo, el largo del zip va a estar dado por la lista más corta.
map y filter
Un poco más intenso: el iterador map toma una función y la aplica sobre todos los valores de un iterador:
End of explanation
"""
# find values up to 10 for which x % 2 is zero
is_even = lambda x: x % 2 == 0
for val in filter(is_even, range(10)):
print(val, end=' ')
"""
Explanation: El iterador filter toma una función y la aplica sobre todos los valores de un iterador devolviendo sólo aquellos valores que "pasan" el filtro.
End of explanation
"""
from itertools import permutations
p = permutations(range(3))
print(*p)
from itertools import combinations
c = combinations(range(4), 2)
print(*c)
from itertools import product
p = product('ab', range(3))
print(*p)
"""
Explanation: iteradores especializados: itertools
Ya vimos el count de itertools. Este módulo contiene un montón de funciones útiles. Por ejemplo, aqui veremos itertools.permutations, itertools.combinations, itertools.product.
End of explanation
"""
|
google-research/google-research | privacy_poison/svm_pois_mi.ipynb | apache-2.0 | import sklearn
import numpy as np
from sklearn import svm
from tensorflow.keras.datasets import fashion_mnist
(trn_x, trn_y), (tst_x, tst_y) = fashion_mnist.load_data()
twoclass_inds = np.where(trn_y<=1)[0]
trn_x, trn_y = trn_x[twoclass_inds], trn_y[twoclass_inds]
trn_x = trn_x.reshape((trn_x.shape[0], -1))/255.0 - .5
trn_ct, pois_ct = 2000, 200
print(trn_x.shape, trn_y.shape)
"""
Explanation: Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
End of explanation
"""
# linear
print("Total Samples: {}".format(trn_ct))
total_sv = []
for _ in range(5):
    # Draw a fresh random subsample of the training data for each repetition.
    fm_trn_inds = np.random.choice(trn_x.shape[0], trn_ct, replace=False)
    fm_trn_x, fm_trn_y = trn_x[fm_trn_inds], trn_y[fm_trn_inds]
    clean_linear = svm.SVC(kernel='linear')
    clean_linear.fit(fm_trn_x, fm_trn_y)
    duals = np.array(clean_linear.dual_coef_)
    # A training point is a support vector when its dual coefficient is non-zero.
    # NOTE: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    zero_duals = np.isclose(duals, 0).astype(int)
    is_sv = (zero_duals.sum(axis=0)<=1).astype(int)
    total_sv.append(is_sv.sum())
print("Total Support Vectors: {}".format(np.mean(total_sv)))
# poly (the original comment said "linear", but this cell fits a polynomial kernel)
print("Total Samples: {}".format(trn_ct))
total_sv = []
for _ in range(5):
    # Draw a fresh random subsample of the training data for each repetition.
    fm_trn_inds = np.random.choice(trn_x.shape[0], trn_ct, replace=False)
    fm_trn_x, fm_trn_y = trn_x[fm_trn_inds], trn_y[fm_trn_inds]
    clean_poly = svm.SVC(kernel='poly')
    clean_poly.fit(fm_trn_x, fm_trn_y)
    duals = np.array(clean_poly.dual_coef_)
    # NOTE: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    zero_duals = np.isclose(duals, 0).astype(int)
    is_sv = (zero_duals.sum(axis=0)<=1).astype(int)
    total_sv.append(is_sv.sum())
print("Total Support Vectors: {}".format(np.mean(total_sv)))
"""
Explanation: FMNIST clean
End of explanation
"""
# linear
print("Total Samples: {}".format(trn_ct))
total_sv = []
for _ in range(5):
    # Sample clean training points plus a disjoint set of points to poison.
    fm_inds = np.random.choice(trn_x.shape[0], trn_ct+pois_ct, replace=False)
    fm_trn_x, fm_trn_y = trn_x[fm_inds[:trn_ct]], trn_y[fm_inds[:trn_ct]]
    fm_pois_x = trn_x[fm_inds[trn_ct:]]
    fm_pois_y = 1-trn_y[fm_inds[trn_ct:]]  # label-flip the poison points
    fm_poised_x = np.concatenate((fm_trn_x, fm_pois_x))
    fm_poised_y = np.concatenate((fm_trn_y, fm_pois_y))
    pois_linear = svm.SVC(kernel='linear')
    pois_linear.fit(fm_poised_x, fm_poised_y)
    # Only count support vectors among the clean training points
    # (indices below trn_ct); poison points are appended after them.
    trn_support_inds = np.where(pois_linear.support_<trn_ct)[0]
    duals = np.array(pois_linear.dual_coef_[:, trn_support_inds])
    # NOTE: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    zero_duals = np.isclose(duals, 0).astype(int)
    is_sv = (zero_duals.sum(axis=0)<=1).astype(int)
    total_sv.append(is_sv.sum())
print("Total Support Vectors: {}".format(np.mean(total_sv)))
# poly
print("Total Samples: {}".format(fm_trn_x.shape[0]))
total_sv = []
for _ in range(5):
    fm_inds = np.random.choice(trn_x.shape[0], trn_ct+pois_ct, replace=False)
    fm_trn_x, fm_trn_y = trn_x[fm_inds[:trn_ct]], trn_y[fm_inds[:trn_ct]]
    fm_pois_x = trn_x[fm_inds[trn_ct:]]
    fm_pois_y = 1-trn_y[fm_inds[trn_ct:]]  # label-flip the poison points
    fm_poised_x = np.concatenate((fm_trn_x, fm_pois_x))
    fm_poised_y = np.concatenate((fm_trn_y, fm_pois_y))
    pois_poly = svm.SVC(kernel='poly')
    pois_poly.fit(fm_poised_x, fm_poised_y)
    trn_support_inds = np.where(pois_poly.support_<fm_trn_x.shape[0])[0]
    duals = np.array(pois_poly.dual_coef_[:, trn_support_inds])
    # NOTE: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    zero_duals = np.isclose(duals, 0).astype(int)
    is_sv = (zero_duals.sum(axis=0)<=1).astype(int)
    total_sv.append(is_sv.sum())
print("Total Support Vectors: {}".format(np.mean(total_sv)))
trn_ct, pois_ct = 1000, 1
fm_inds = np.random.choice(trn_x.shape[0], trn_ct+1, replace=False)
fm_trn_x, fm_trn_y = trn_x[fm_inds[:trn_ct]], trn_y[fm_inds[:trn_ct]]
targ_x, targ_y = trn_x[fm_inds[trn_ct:trn_ct+1]], trn_y[fm_inds[trn_ct:trn_ct+1]]
pois_x, pois_y = np.concatenate([targ_x.copy() for _ in range(pois_ct)]), np.ones(pois_ct)-targ_y
print(fm_trn_x.shape, fm_trn_y.shape, targ_x.shape, targ_y.shape, pois_x.shape, pois_y.shape)
clean_svm = svm.SVC(kernel='linear')
clean_svm.fit(fm_trn_x, fm_trn_y)
in_x, in_y = np.concatenate([targ_x, fm_trn_x]), np.concatenate([targ_y, fm_trn_y])
in_svm = svm.SVC(kernel='linear')
in_svm.fit(in_x, in_y)
print(in_svm.support_)
poised_x, poised_y = np.concatenate([pois_x, fm_trn_x]), np.concatenate([pois_y, fm_trn_y])
poised_inx, poised_iny = np.concatenate([targ_x, pois_x, fm_trn_x]), np.concatenate([targ_y, pois_y, fm_trn_y])
p_svm = svm.SVC(kernel='linear')
p_svm.fit(poised_x, poised_y)
print(p_svm.support_)
pin_svm = svm.SVC(kernel='linear')
pin_svm.fit(poised_inx, poised_iny)
print(pin_svm.support_)
def poison(model, target_x, target_y):
    """Craft poisoning points against *model* for the given target sample.

    Placeholder — not yet implemented; the cells above perform the
    poisoning inline instead.
    """
    pass
"""
Explanation: FMNIST poison
End of explanation
"""
|
snth/split-apply-combine | The Split-Apply-Combine Pattern in Data Science and Python.ipynb | mit | import os
import gzip
import ujson as json
directory = 'data/github_archive'
filename = '2015-01-29-16.json.gz'
path = os.path.join(directory, filename)
with gzip.open(path) as f:
events = [json.loads(line) for line in f]
#print json.dumps(events[0], indent=4)
"""
Explanation: The Split-Apply-Combine Pattern in Data Science and Python
Tobias Brandt
<img src="img/argon_logo.png" align=left width=200>
<!-- <img src="http://www.argonassetmanagement.co.za/css/img/logo.png" align=left width=200> -->
<img src='img/argon_website.png' align='middle'>
Google trends chart
Data Science
According to https://en.wikipedia.org/wiki/Data_science:
In November 1997, C.F. Jeff Wu gave the inaugural lecture entitled "Statistics = Data Science?"[5] for his appointment to the H. C. Carver Professorship at the University of Michigan.[6] In this lecture, he characterized statistical work as a trilogy of data collection, data modeling and analysis, and decision making. In his conclusion, he initiated the modern, non-computer science, usage of the term "data science" and advocated that statistics be renamed data science and statisticians data scientists.[5]
The Github Archive Dataset
https://www.githubarchive.org/
Open-source developers all over the world are working on millions of projects: writing code & documentation, fixing & submitting bugs, and so forth. GitHub Archive is a project to record the public GitHub timeline, archive it, and make it easily accessible for further analysis.
GitHub provides 20+ event types, which range from new commits and fork events, to opening new tickets, commenting, and adding members to a project. These events are aggregated into hourly archives, which you can access with any HTTP client:
gzipped json files
yyyy-mm-dd-HH.json.gz
End of explanation
"""
new_repo_count = 0
for event in events:
new_repo_count += \
1 if event['type']=="CreateEvent" else 0
print new_repo_count
"""
Explanation: <pre>
{
<b>"payload": {</b>
"master_branch": "master",
"ref_type": "branch",
"ref": "disable_dropdown",
"description": "OOI UI Source Code",
"pusher_type": "user"
},
<b>"created_at": "2015-01-29T16:00:00Z", </b>
"actor": {
"url": "https://api.github.com/users/birdage",
<b>"login": "birdage", </b>
"avatar_url": "https://avatars.githubusercontent.com/u/547228?",
"id": 547228,
"gravatar_id": ""
},
"id": "2545235518",
"repo": {
"url": "https://api.github.com/repos/birdage/ooi-ui",
"id": 23796192,
<b>"name": "birdage/ooi-ui"</b>
},
<b>"type": "CreateEvent", </b>
"public": true
}
</pre>
Typical Questions
How many Github repositories are created per hour/day/month?
To which repositories are the most commits are pushed per hour/day/month?
Which projects receive the most pull requests?
What are the most popular languages on Github?
Example 1 - Number of Repositories Created
End of explanation
"""
repo_commits = {}
for event in events:
if event['type']=="PushEvent":
repo = event['repo']['name']
commits = event['payload']['size']
repo_commits[repo] = \
repo_commits.get(repo, 0) + commits
def print_top_items(dct, N=5):
    """Print the N largest entries of a mapping, sorted by value descending.

    Each line shows the key left-justified in a 40-character column
    followed by its value.
    """
    # `iteritems()` is Python-2-only; `items()` works on both 2 and 3.
    sorted_items = sorted(
        dct.items(), key=lambda t: t[1], reverse=True)
    for key, value in sorted_items[:N]:
        # print(...) of a single string behaves identically on py2 and py3.
        print("{:40} {}".format(key, value))
print_top_items(repo_commits)
"""
Explanation: Example 2 - Number of commits pushed per repository
End of explanation
"""
from IPython.display import HTML
HTML('<iframe src="http://www.jstatsoft.org/v40/i01" width=800 height=400></iframe>')
"""
Explanation: The Split-Apply-Combine Pattern
Hadley Wickham <img src="http://pix-media.s3.amazonaws.com/blog/1001/HadleyObama.png" width=250 align=left>
Hadley Wickham, the man who revolutionized R
*If you don’t spend much of your time coding in the open-source statistical programming language R,
his name is likely not familiar to you -- but the statistician Hadley Wickham is,
in his own words, “nerd famous.” The kind of famous where people at statistics conferences
line up for selfies, ask him for autographs, and are generally in awe of him.
End of explanation
"""
repo_commits = {}
for event in events:
if event['type']=="PushEvent":
repo = event['repo']['name']
commits = event['payload']['size']
repo_commits[repo] = \
repo_commits.get(repo, 0) + commits
print_top_items(repo_commits)
"""
Explanation: <img src="http://i.imgur.com/CoJHnAF.jpg">
StackOverflow: split-apply-combine tag
Pandas documentation: Group By: split-apply-combine
PyTools documentation: Split-apply-combine with groupby and reduceby
Blaze documentation: Split-Apply-Combine - Grouping
R plyr: plyr: Tools for Splitting, Applying and Combining Data
Julia documentation: The Split-Apply-Combine Strategy
The Basic Pattern
Split the data by some grouping variable
Apply some function to each group independently
Combine the data into some output dataset
The apply step is usually one of
aggregate
transform
or filter
Example 2 - examined
End of explanation
"""
import numpy as np
import pandas as pd
from collections import namedtuple
GithubEvent = namedtuple('GithubEvent', ['type_', 'user', 'repo', 'created_at', 'commits'])
def make_record(event):
    """Flatten a raw Github event dict into a GithubEvent named tuple.

    The commit count is only defined for push events; every other
    event type gets NaN in the ``commits`` slot.
    """
    if event['type'] == 'PushEvent':
        n_commits = event['payload']['size']
    else:
        n_commits = np.nan
    return GithubEvent(
        event['type'],
        event['actor']['login'],
        event['repo']['name'],
        pd.Timestamp(event['created_at']),
        n_commits,
    )
df = pd.DataFrame.from_records(
(make_record(ev) for ev in events),
columns=GithubEvent._fields, index='created_at')
df.head()
"""
Explanation: This
filters out only the "PushEvent"s
splits the dataset by repository
sums the commits for each group
combines the groups and their sums into a dictionary
Pandas - Python Data Analysis Library
<p><a href="http://pandas.pydata.org/"><img src="http://pandas.pydata.org/_static/pandas_logo.png" align=right width=400></a></p>
Provides high-performance, easy-to-use data structures and data analysis tools.
Provides core data structure DataFrame
pandas.DataFrame
Basically in-memory database tables (or spreadsheets!)
Tabular data that allows for columns of different dtypes
Labeled rows and columns (index)
Hierarchical indexing allows for representing Panel data
End of explanation
"""
df[df.type_=='CreateEvent'].head()
len(df[df.type_=='CreateEvent'])
"""
Explanation: Example 1 (using Pandas) - Number of Repositories Created
End of explanation
"""
repo_commits = {}
for event in events:
if event['type']=="PushEvent":
repo = event['repo']['name']
commits = event['payload']['size']
repo_commits[repo] = \
repo_commits.get(repo, 0) + commits
print_top_items(repo_commits)
repo_commits = df[df.type_=='PushEvent'].groupby('repo').commits.sum()
repo_commits.sort(ascending=False)
repo_commits.head(5)
"""
Explanation: Example 2 (using Pandas) - Number of commits pushed per repo
End of explanation
"""
event_counts = df.groupby('type_').repo.count()
event_counts.sort(ascending=False)
event_counts.head()
"""
Explanation: Example 1 - revisited
End of explanation
"""
new_repo_count = 0
for event in events:
new_repo_count += \
1 if event['type']=="CreateEvent" else 0
print new_repo_count
reduce(lambda x,y: x+y,
map(lambda ev: 1 if ev['type']=='CreateEvent' else 0,
events))
"""
Explanation: Great for interactive work:
tab-completion!
inspect data with df.head() & df.tail()
quick overview of data ranges with df.describe()
<img src="http://i.imgur.com/6b2AF7e.jpg" width=300 align='middle'>
However ...
Pandas currently only handles in-memory datasets!
So not suitable for big data!
<img src="img/devops_borat.jpg">
MapReduce
"If you want to process Big Data, you need some MapReduce framework like one of the following"
<p>
<a href="https://hadoop.apache.org/"><img src="https://hadoop.apache.org/images/hadoop-logo.jpg" width=200 align=left></a>
<a href="http://spark.apache.org/"><img src="http://spark.apache.org/images/spark-logo.png" width=100 align=left></a>
</p>
<img src="https://mitpress.mit.edu/sicp/full-text/book/cover.jpg" align=right width=150>
The key to these frameworks is adopting a functional [programming] mindset. In Python this means, think iterators!
See The Structure and Interpretation of Computer Programs
(the "Wizard book")
in particular Chapter 2 Building Abstractions with Data
and Section 2.2.3 Sequences as Conventional Interfaces
Luckily, the Split-Apply-Combine pattern is well suited to this!
Example 1 - revisited
End of explanation
"""
def datapipe(data, *transforms):
    """Feed *data* through a chain of single-argument callables.

    Equivalent to ``transforms[-1](...transforms[0](data)...)``; with no
    transforms, *data* is returned unchanged.
    """
    if not transforms:
        return data
    first, remaining = transforms[0], transforms[1:]
    return datapipe(first(data), *remaining)
datapipe(
events,
lambda events: map(lambda ev: 1 if ev['type']=='CreateEvent' else 0, events),
lambda counts: reduce(lambda x,y: x+y, counts)
)
"""
Explanation: Would prefer to write
events | map(...) | reduce(...)
Example 1 - pipelined
End of explanation
"""
from toolz.curried import pipe, map, reduce
pipe(events,
map(lambda ev: 1 if ev['type']=='CreateEvent' else 0),
reduce(lambda x,y: x+y)
)
"""
Explanation: PyToolz
Example 1 - pipeline using PyToolz
End of explanation
"""
repo_commits = {}
for event in events:
if event['type']=="PushEvent":
repo = event['repo']['name']
commits = event['payload']['size']
repo_commits[repo] = \
repo_commits.get(repo, 0) + commits
print_top_items(repo_commits)
from toolz.curried import filter, reduceby
pipe(events,
filter(lambda ev: ev['type']=='PushEvent'),
reduceby(lambda ev: ev['repo']['name'],
lambda commits, ev: commits+ev['payload']['size'],
init=0),
print_top_items
)
"""
Explanation: Example 2 - pipelined with PyToolz
End of explanation
"""
def count_commits(filename):
    """Read one gzipped Github Archive file and tally commits per repository.

    Imports are done inside the function so it can be shipped to remote
    workers (e.g. via IPython.parallel or multiprocessing) as a fully
    self-contained task.

    Returns a dict mapping repo full name -> total commits pushed in the
    PushEvents recorded in this file.
    """
    import gzip
    import json
    from toolz.curried import pipe, filter, reduceby
    with gzip.open(filename) as f:
        repo_commits = pipe(
            # Parse each JSON line into an event dict (lazily, line by line).
            map(json.loads, f),
            # Keep only push events; other event types carry no commits.
            filter(lambda ev: ev['type']=='PushEvent'),
            # Group by repository name, summing the per-event commit counts.
            reduceby(lambda ev: ev['repo']['name'],
                     lambda commits, e: commits+e['payload']['size'],
                     init=0)
        )
    return repo_commits
print_top_items(count_commits(path))
import glob
files = glob.glob('C:/ARGO/talks/split-apply-combine/data/github_archive/2015-01-*')
print len(files)
N = 24 #len(files) # 10
%%time
from toolz.curried import reduceby
from __builtin__ import map as pmap
repo_commits = \
pipe(pmap(count_commits, files[:N]),
lambda lst: reduce(lambda out, dct: out + dct.items(), lst, []),
reduceby(lambda t: t[0], lambda s,t: s+t[1], init=0)
)
print_top_items(repo_commits)
%%time
# Remember to start the ipcluster!
# ipcluster start -n 4
from IPython.parallel import Client
p = Client()[:]
pmap = p.map_sync
repo_commits = \
pipe(pmap(count_commits, files[:N]),
lambda lst: reduce(lambda out, dct: out + dct.items(), lst, []),
reduceby(lambda t: t[0], lambda s,t: s+t[1], init=0)
)
print_top_items(repo_commits)
"""
Explanation: The Point of Learning Patterns
From Cosma Shalizi's Statistical Computing course:
Distinguish between what you want to do and how you want to do it.
Focusing on what brings clarity to intentions.
How also matters, but can obscure the high level problem.
Learn the pattern, recognize the pattern, love the pattern!
Re-use good solutions!
Iteration Considered Unhelpful
Could always do the same thing with for loops, but those are
verbose - lots of "how" obscures the "what"
painful/error-prone bookkeeping (indices, placeholders, ...)
clumsy - hard to parallelize
Out-of-core processing - toolz example
End of explanation
"""
repo_commits = df[df.type_=='PushEvent'].groupby('repo').commits.sum()
repo_commits.sort(ascending=False)
repo_commits.head(5)
from blaze import Symbol, by
event = Symbol('event', 'var * {created_at: datetime, type_: string, user: string, repo: string, commits: int}')
push_events = event[event.type_=='PushEvent']
repo_commits = by(push_events.repo, commits=push_events.commits.sum())
top_repos = repo_commits.sort('commits', ascending=False).head(5)
from blaze import compute
print compute(top_repos, df)
"""
Explanation: New tools
Blaze
<img src="img/blaze_med.png" width=400>
Dask
<img src="img/dask-collections-schedulers.png">
Example 2 - using blaze (and pandas)
End of explanation
"""
from odo import odo
uri = 'sqlite:///data/github_archive.sqlite::event'
odo(df, uri)
from blaze import Data
db = Data(uri)
compute(top_repos, db)
import os
if os.path.exists('data/github_archive.sqlite'):
os.remove('data/github_archive.sqlite')
"""
Explanation: You can run the same computation on different backends!
End of explanation
"""
from castra import Castra
castra = Castra('data/github_archive.castra',
template=df, categories=categories)
castra.extend_sequence(map(to_df, files), freq='1h')
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
pbar = ProgressBar()
pbar.register()
df = dd.from_castra('data/github_archive.castra')
df.head()
df.type.value_counts().nlargest(5).compute()
df[df.type=='PushEvent'].groupby('repo').commits.resample('h', how='count').compute()
"""
Explanation: Dask and Castra
End of explanation
"""
|
trungdong/datasets-provanalytics-dmkd | Cross Validation Code.ipynb | mit | # The 'combined' list has all the 22 metrics
feature_names_combined = (
'entities', 'agents', 'activities', # PROV types (for nodes)
'nodes', 'edges', 'diameter', 'assortativity', # standard metrics
'acc', 'acc_e', 'acc_a', 'acc_ag', # average clustering coefficients
'mfd_e_e', 'mfd_e_a', 'mfd_e_ag', # MFDs
'mfd_a_e', 'mfd_a_a', 'mfd_a_ag',
'mfd_ag_e', 'mfd_ag_a', 'mfd_ag_ag',
'mfd_der', # MFD derivations
'powerlaw_alpha' # Power Law
)
# The 'generic' list has 6 generic network metrics (that do not take provenance information into account)
feature_names_generic = (
'nodes', 'edges', 'diameter', 'assortativity', # standard metrics
'acc',
'powerlaw_alpha' # Power Law
)
# The 'provenance' list has 16 provenance-specific network metrics
feature_names_provenance = (
'entities', 'agents', 'activities', # PROV types (for nodes)
'acc_e', 'acc_a', 'acc_ag', # average clustering coefficients
'mfd_e_e', 'mfd_e_a', 'mfd_e_ag', # MFDs
'mfd_a_e', 'mfd_a_a', 'mfd_a_ag',
'mfd_ag_e', 'mfd_ag_a', 'mfd_ag_ag',
'mfd_der', # MFD derivations
)
# The utitility of above threes set of metrics will be assessed in our experiements to
# understand whether provenance type information help us improve data classification performance
feature_name_lists = (
('combined', feature_names_combined),
('generic', feature_names_generic),
('provenance', feature_names_provenance)
)
"""
Explanation: Common Cross Validation Test Code
We used the same cross validation test procedure for the three applications described in the paper. This document provides explanations for the code in analytics.py used in those tests.
See the tests carried out in each application:
* Application 1: ProvStore Documents
* Application 2: CollabMap
* Applicaiton 3: HAC-ER Messages
Lists of features
In our experiments, we first test our trained classifiers using all 22 provenance network metrics as defined in the paper. We then repeat the test using only the generic network metrics (6) and only the provenance-specific network metrics (16). Comparing the performance from all three tests will help verify whether the provenance-specific
network metrics bring added benefits to the classification application being discussed.
The lists of metrics combined, generic, and provenance are defined below.
End of explanation
"""
from imblearn.over_sampling import SMOTE
from collections import Counter
def balance_smote(df):
    """Oversample minority classes with SMOTE until all classes are equal.

    Parameters
    ----------
    df : pandas.DataFrame
        Feature columns plus a 'label' column.

    Returns
    -------
    pandas.DataFrame
        Same layout as *df*, with synthetic rows added so every class has
        the same number of samples.
    """
    X = df.drop('label', axis=1)
    Y = df.label
    print('Original data shapes:', X.shape, Y.shape)
    smoX, smoY = X, Y
    c = Counter(smoY)
    # SMOTE balances one minority class per pass, so iterate until the
    # smallest class count matches the largest.
    while (min(c.values()) < max(c.values())): # check if all classes are balanced, if not balance the first minority class
        smote = SMOTE(ratio="auto", kind='regular')
        smoX, smoY = smote.fit_sample(smoX, smoY)
        c = Counter(smoY)
    print('Balanced data shapes:', smoX.shape, smoY.shape)
    # fit_sample returns plain arrays; rebuild a labelled DataFrame.
    df_balanced = pd.DataFrame(smoX, columns=X.columns)
    df_balanced['label'] = smoY
    return df_balanced
"""
Explanation: Balancing Data
This section defines the data balancing function by over-sampling using the SMOTE algorithm (see SMOTE: Synthetic Minority Over-sampling Technique).
It takes a dataframe where each row contains the label (in column label) and the feature vector corresponding to that label. It returns a new dataframe of the same format, but with added rows resulted from the SMOTE oversampling process.
End of explanation
"""
def t_confidence_interval(an_array, alpha=0.95):
    """Return the (lo, hi) half-widths of a two-sided Student-t confidence
    interval (centred on 0) for the mean of *an_array*.

    Add the result to the sample mean to obtain the actual interval.

    Parameters
    ----------
    an_array : sequence of float
    alpha : float
        Confidence level (default 0.95).
    """
    from scipy import stats  # local import keeps the helper self-contained
    # Use the *sample* standard deviation (ddof=1): the t interval is built
    # from the unbiased variance estimate, not the population one (ddof=0,
    # which the original code used by mistake).
    s = np.std(an_array, ddof=1)
    n = len(an_array)
    # Pass the confidence level and df positionally: the first keyword was
    # renamed alpha -> confidence in SciPy 1.9, so positional arguments
    # work across SciPy versions.
    return stats.t.interval(alpha, n - 1, scale=(s / np.sqrt(n)))
"""
Explanation: The t_confidence_interval method below calculate the 95% confidence interval for a given list of values.
End of explanation
"""
def cv_test(X, Y, n_iterations=1000, test_id=""):
    """Repeated stratified 10-fold cross-validation with a decision tree.

    Collects at least *n_iterations* fold accuracies (in batches of 10,
    one batch per reshuffled split) together with the fitted trees'
    feature-importance vectors, prints a one-line summary with a 95%
    confidence interval, and returns (accuracies, importances).
    """
    fold_scores = []
    fold_importances = []
    while len(fold_scores) < n_iterations:
        splitter = model_selection.StratifiedKFold(n_splits=10, shuffle=True)
        for train_idx, test_idx in splitter.split(X, Y):
            classifier = tree.DecisionTreeClassifier()
            classifier.fit(X.iloc[train_idx], Y.iloc[train_idx])
            fold_scores.append(classifier.score(X.iloc[test_idx], Y.iloc[test_idx]))
            fold_importances.append(classifier.feature_importances_)
    print("Accuracy: %.2f%% ±%.4f <-- %s" % (np.mean(fold_scores) * 100, t_confidence_interval(fold_scores)[1] * 100, test_id))
    return fold_scores, fold_importances
"""
Explanation: Cross Validation Methodology
The following cv_test function carries out the cross validation test over n_iterations times and returns the accuracy scores and importance scores (for each feature). The cross validation steps are as follow:
* Split the input dataset (X, Y) into a training set and a test set using Stratified K-fold method with k = 10
* Train the Decision Tree classifier clf using the training set
* Score the accuracy of the classifier clf on the test set
* (Repeat the above until having done the required number of iterations)
End of explanation
"""
def test_classification(df, n_iterations=1000):
    """Run cv_test on each feature list and gather the results.

    Parameters
    ----------
    df : pandas.DataFrame
        Feature columns plus a 'label' column.
    n_iterations : int
        Number of cross-validation scores to collect per feature list.

    Returns
    -------
    (results, imps) : tuple of pandas.DataFrame
        *results* holds one row per accuracy score tagged with its
        feature-list name; *imps* holds the per-feature importance
        scores from the 'combined' runs.
    """
    result_frames = []
    imps = pd.DataFrame()
    Y = df.label
    for feature_list_name, feature_names in feature_name_lists:
        X = df[list(feature_names)]
        accuracies, importances = cv_test(X, Y, n_iterations, test_id=feature_list_name)
        result_frames.append(pd.DataFrame(
            {
                'Metrics': feature_list_name,
                'Accuracy': accuracies}
        ))
        if feature_list_name == "combined":  # we are interested in the relevance of all features (i.e. 'combined')
            imps = pd.DataFrame(importances, columns=feature_names)
    # DataFrame.append was deprecated (and removed in pandas 2.0) and is
    # quadratic when called in a loop; build the per-list frames first and
    # concatenate once instead.
    results = pd.concat(result_frames, ignore_index=True) if result_frames else pd.DataFrame()
    return results, imps
"""
Explanation: Experiments: Having defined the cross validation method above, we now run it on the dataset (df) using all the features (combined), only the generic network metrics (generic), and only the provenance-specific network metrics (provenance).
End of explanation
"""
|
bspalding/research_public | lectures/linear_regression/Linear Regression.ipynb | apache-2.0 | # Import libraries
import numpy as np
from statsmodels import regression
import statsmodels.api as sm
import matplotlib.pyplot as plt
import math
"""
Explanation: Linear Regression
By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie with example algorithms by David Edwards
Part of the Quantopian Lecture Series:
www.quantopian.com/lectures
github.com/quantopian/research_public
Notebook released under the Creative Commons Attribution 4.0 License.
Linear regression is a technique that measures the relationship between two variables. If we have an independent variable $X$, and a dependent outcome variable $Y$, linear regression allows us to determine which linear model $Y = \alpha + \beta X$ best explains the data. As an example, let's consider TSLA and SPY. We would like to know how TSLA varies as a function of how SPY varies, so we will take the daily returns of each and regress them against each other.
Python's statsmodels library has a built-in linear fit function. Note that this will give a line of best fit; whether or not the relationship it shows is significant is for you to determine. The output will also have some statistics about the model, such as R-squared and the F value, which may help you quantify how good the fit actually is.
End of explanation
"""
def linreg(X,Y):
    """Fit Y = a + b*X by ordinary least squares, plot the data with the
    fitted line, and return the statsmodels regression summary."""
    X = sm.add_constant(X)
    model = regression.linear_model.OLS(Y, X).fit()
    intercept, slope = model.params[0], model.params[1]
    X = X[:, 1]

    # Evaluate the fitted line on an evenly spaced grid for plotting.
    grid = np.linspace(X.min(), X.max(), 100)
    fitted = intercept + slope * grid

    plt.scatter(X, Y, alpha=0.3)            # raw observations
    plt.plot(grid, fitted, 'r', alpha=0.9)  # regression line, in red
    plt.xlabel('X Value')
    plt.ylabel('Y Value')
    return model.summary()
"""
Explanation: First we'll define a function that performs linear regression and plots the results.
End of explanation
"""
# Pricing window for the example regression.
start = '2014-01-01'
end = '2015-01-01'
# get_pricing is provided by the Quantopian research environment.
asset = get_pricing('TSLA', fields='price', start_date=start, end_date=end)
benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)

# We have to take the percent changes to get to returns
# Get rid of the first (0th) element because it is NAN
r_a = asset.pct_change()[1:]
r_b = benchmark.pct_change()[1:]

# Regress TSLA daily returns against SPY daily returns.
linreg(r_b.values, r_a.values)
"""
Explanation: Now we'll get pricing data on TSLA and SPY and perform a regression.
End of explanation
"""
# Two independent uniform samples: any apparent fit is pure chance.
X = np.random.rand(100)
Y = np.random.rand(100)

linreg(X, Y)
"""
Explanation: Each point on the above graph represents a day, with the x-coordinate being the return of SPY, and the y-coordinate being the return of TSLA. As we can see, the line of best fit tells us that for every 1% increased return we see from the SPY, we should see an extra 1.92% from TSLA. This is expressed by the parameter $\beta$, which is 1.9271 as estimated. Of course, for decreased return we will also see about double the loss in TSLA, so we haven't gained anything, we are just more volatile.
Linear Regression vs. Correlation
Linear regression gives us a specific linear model, but is limited to cases of linear dependence.
Correlation is general to linear and non-linear dependencies, but doesn't give us an actual model.
Both are measures of covariance.
Linear regression can give us relationship between Y and many independent variables by making X multidimensional.
Knowing Parameters vs. Estimates
It is very important to keep in mind that all $\alpha$ and $\beta$ parameters estimated by linear regression are just that - estimates. You can never know the underlying true parameters unless you know the physical process producing the data. The parameters you estimate today may not be the same as the same analysis done including tomorrow's data, and the underlying true parameters may be moving. As such it is very important when doing actual analysis to pay attention to the standard error of the parameter estimates. More material on the standard error will be presented in a later lecture. One way to get a sense of how stable your parameter estimates are is to estimate them using a rolling window of data and see how much variance there is in the estimates.
Ordinary Least Squares
Regression works by optimizing the placement of the line of best fit (or plane in higher dimensions). It does so by defining how bad the fit is using an objective function. In ordinary least squares regression, a very common type and what we use here, the objective function is:
$$\sum_{i=1}^n (Y_i - a - bX_i)^2$$
That is, for each point on the line of best fit, compare it with the real point and take the square of the difference. This function will decrease as we get better parameter estimates. Regression is a simple case of numerical optimization that has a closed form solution and does not need any optimizer.
Example case
Now let's see what happens if we regress two purely random variables.
End of explanation
"""
# Generate ys correlated with xs by adding normally-distributed errors
Y = X + 0.2*np.random.randn(100)

linreg(X,Y)
"""
Explanation: The above shows a fairly uniform cloud of points. It is important to note that even with 100 samples, the line has a visible slope due to random chance. This is why it is crucial that you use statistical tests and not visualizations to verify your results.
Now let's make Y dependent on X plus some random noise.
End of explanation
"""
import seaborn

# Same TSLA/SPY window as above, but let seaborn draw the regression line
# together with its 95% confidence band.
start = '2014-01-01'
end = '2015-01-01'
asset = get_pricing('TSLA', fields='price', start_date=start, end_date=end)
benchmark = get_pricing('SPY', fields='price', start_date=start, end_date=end)

# We have to take the percent changes to get to returns
# Get rid of the first (0th) element because it is NAN
r_a = asset.pct_change()[1:]
r_b = benchmark.pct_change()[1:]

seaborn.regplot(r_b.values, r_a.values);
"""
Explanation: In a situation like the above, the line of best fit does indeed model the dependent variable Y quite well (with a high $R^2$ value).
Evaluating and reporting results
The regression model relies on several assumptions:
* The independent variable is not random.
* The variance of the error term is constant across observations. This is important for evaluating the goodness of the fit.
* The errors are not autocorrelated. The Durbin-Watson statistic detects this; if it is close to 2, there is no autocorrelation.
* The errors are normally distributed. If this does not hold, we cannot use some of the statistics, such as the F-test.
If we confirm that the necessary assumptions of the regression model are satisfied, we can safely use the statistics reported to analyze the fit. For example, the $R^2$ value tells us the fraction of the total variation of $Y$ that is explained by the model.
When making a prediction based on the model, it's useful to report not only a single value but a confidence interval. The linear regression reports 95% confidence intervals for the regression parameters, and we can visualize what this means using the seaborn library, which plots the regression line and highlights the 95% (by default) confidence interval for the regression line:
End of explanation
"""
|
GoogleCloudPlatform/ai-platform-samples | notebooks/samples/pytorch/text_classification/text_classification_using_pytorch_and_ai_platform.ipynb | apache-2.0 | import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
if 'google.colab' in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
else:
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: <table align="left">
<td>
<a href="https://colab.research.google.com/github.com/GoogleCloudPlatform/ai-platform-samples/blob/main/notebooks/samples/pytorch/text_classification_using_pytorch_and_ai_platform.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/main/notebooks/samples/pytorch/text_classification_using_pytorch_and_ai_platform.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
Overview
This notebook illustrates the new feature of serving custom model prediction code on AI Platform. It allows us to execute arbitrary python pre-processing code prior to invoking a model, as well as post-processing on the produced predictions. In addition, you can use a model build by your favourite Python-based ML framework!
This is all done server-side so that the client can pass data directly to AI Platform Serving in the unprocessed state.
We will take advantage of this for text classification because it involves pre-processing that is not easily accomplished using native TensorFlow. Instead we will execute the the non TensorFlow pre-processing via python code on the server side.
We will build a text classification model using PyTorch, while performing text preproessing using Keras. PyTorch is an open source deep learning platform that provides a seamless path from research prototyping to production deployment.
Dataset
Hacker News is one of many public datasets available in BigQuery. This dataset includes titles of articles from several data sources. For the following tutorial, we extracted the titles that belong to either GitHub, The New York Times, or TechCrunch, and saved them as CSV files in a publicly shared Cloud Storage bucket at the following location: gs://cloud-training-demos/blogs/CMLE_custom_prediction
Objective
The goal of this tutorial is to:
1. Process the data for text classification.
2. Train a PyTorch Text Classifier (locally).
3. Deploy the PyTorch Text Classifier, along with the preprocessing artifacts, to AI Platform Serving, using the Custom Online Prediction code.
This tutorial focuses more on using this model with AI Platform Serving than on the design of the text classification model itself. For more details about text classification, please refer to Google developer's Guide to Text Classification.
Costs
This tutorial uses billable components of Google Cloud Platform (GCP):
AI Platform
Cloud Storage
Learn about AI Platform
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Authenticate your GCP account
If you are using AI Platform Notebooks, your environment is already
authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the GCP Console, go to the Create service account key
page.
From the Service account drop-down list, select New service account.
In the Service account name field, enter a name.
From the Role drop-down list, select
Machine Learning Engine > AI Platform Admin and
Storage > Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your
local environment.
Enter the path to your service account key as the
GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
# Reload edited modules automatically so local .py changes take effect.
%load_ext autoreload
%autoreload 2

# Install the local training dependencies (preinstalled on AI Platform).
!pip install tensorflow==1.15.2 --user
!pip install torch --user

import tensorflow as tf
import torch
import os

# Confirm the installed framework versions.
print(tf.__version__)
print(torch.__version__)
"""
Explanation: Run the following cell to install Python dependencies needed to train the model locally. When you run the training job in AI Platform,
dependencies are preinstalled based on the runtime
version
you choose.
End of explanation
"""
PROJECT_ID = '[your-project-id]' # TODO (Set up your GCP Project name)

# Point the gcloud CLI at the project used by the rest of the notebook.
!gcloud config set project {PROJECT_ID}
"""
Explanation: Set up your GCP project
The following steps are required, regardless of your notebook environment.
Select or create a GCP project.
Make sure that billing is enabled for your project.
Enable the AI Platform ("Cloud Machine Learning Engine") and Compute Engine APIs.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.
End of explanation
"""
BUCKET_NAME = '[your-bucket-name]' #@param {type:"string"}
REGION = 'us-central1' #@param {type:"string"}

# Cloud Storage layout used throughout the notebook.
ROOT='torch_text_classification'
MODEL_DIR=os.path.join(ROOT,'models')
PACKAGES_DIR=os.path.join(ROOT,'packages')

# Delete any previous artifacts from Google Cloud Storage
!gsutil rm -r gs://{BUCKET_NAME}/{ROOT}
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you submit a training job using the Cloud SDK, you upload a Python package
containing your training code to a Cloud Storage bucket. AI Platform runs
the code from this package. In this tutorial, AI Platform also saves the
trained model that results from your job in the same bucket. You can then
create an AI Platform model version based on this output in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
You may also change the REGION variable, which is used for operations
throughout the rest of this notebook. Make sure to choose a region where Cloud
AI Platform services are
available.
End of explanation
"""
%%bash
# Download the pre-split Hacker News titles dataset.
gsutil cp gs://cloud-training-demos/blogs/CMLE_custom_prediction/keras_text_pre_processing/train.tsv .
gsutil cp gs://cloud-training-demos/blogs/CMLE_custom_prediction/keras_text_pre_processing/eval.tsv .

# Preview a few evaluation rows (label<TAB>title).
!head eval.tsv
"""
Explanation: Download and Explore Data
End of explanation
"""
%%writefile preprocess.py
from tensorflow.python.keras.preprocessing import sequence
from tensorflow.keras.preprocessing import text
class TextPreprocessor(object):
    """Tokenizes raw text and converts it to fixed-length integer sequences.

    The same fitted instance must be used at training and serving time so
    that words map to identical integer ids in both phases.
    """

    def __init__(self, vocab_size, max_sequence_length):
        # Maximum number of distinct tokens kept by the tokenizer.
        # (Fixed typo: was "_vocabb_size".)
        self._vocab_size = vocab_size
        # All output sequences are padded/truncated to this length.
        self._max_sequence_length = max_sequence_length
        self._tokenizer = None

    def fit(self, text_list):
        # Create vocabulary from input corpus.
        tokenizer = text.Tokenizer(num_words=self._vocab_size)
        tokenizer.fit_on_texts(text_list)
        self._tokenizer = tokenizer

    def transform(self, text_list):
        # Transform text to sequence of integers
        text_sequence = self._tokenizer.texts_to_sequences(text_list)

        # Fix sequence length to max value. Sequences shorter than the length are
        # padded in the beginning and sequences longer are truncated
        # at the beginning.
        padded_text_sequence = sequence.pad_sequences(
            text_sequence, maxlen=self._max_sequence_length)
        return padded_text_sequence
"""
Explanation: Preprocessing
Pre-processing class to be used in both training and serving
End of explanation
"""
from preprocess import TextPreprocessor

# Sanity-check: a 5-word vocab and length-5 padding on a single sentence.
processor = TextPreprocessor(5, 5)
processor.fit(['hello machine learning'])
processor.transform(['hello machine learning'])
"""
Explanation: Test Prepocessing Locally
End of explanation
"""
# Dataset/model metadata shared by training and serving.
CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2} # label-to-int mapping
NUM_CLASSES = 3
VOCAB_SIZE = 20000 # Limit on the number vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50 # Sentences will be truncated/padded to this length
"""
Explanation: Model Creation
Metadata
End of explanation
"""
import pandas as pd
import numpy as np
from preprocess import TextPreprocessor
def load_data(train_data_path, eval_data_path):
    """Read the train/eval TSV files and map string labels to class ids.

    Returns ((train_texts, train_labels), (eval_texts, eval_labels)).
    """
    column_names = ('label', 'text')

    # Shuffle the training rows so batches are not ordered by class.
    df_train = pd.read_csv(train_data_path, names=column_names, sep='\t')
    df_train = df_train.sample(frac=1)

    df_eval = pd.read_csv(eval_data_path, names=column_names, sep='\t')

    train = (list(df_train['text']), np.array(df_train['label'].map(CLASSES)))
    evaluation = (list(df_eval['text']), np.array(df_eval['label'].map(CLASSES)))
    return train, evaluation
((train_texts, train_labels), (eval_texts, eval_labels)) = load_data(
    'train.tsv', 'eval.tsv')

# Create vocabulary from training corpus.
processor = TextPreprocessor(VOCAB_SIZE, MAX_SEQUENCE_LENGTH)
processor.fit(train_texts)

# Preprocess the data
# The eval split reuses the processor fitted on the training corpus.
train_texts_vectorized = processor.transform(train_texts)
eval_texts_vectorized = processor.transform(eval_texts)
"""
Explanation: Prepare data for training and evaluation
End of explanation
"""
%%writefile torch_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class TorchTextClassifier(nn.Module):
    """Two-layer 1-D CNN text classifier over embedded token sequences.

    Input: LongTensor of token ids shaped (batch, seq_length).
    Output: per-class probabilities shaped (batch, num_classes).
    """

    def __init__(self, vocab_size, embedding_dim, seq_length, num_classes,
                 num_filters, kernel_size, pool_size, dropout_rate):
        super(TorchTextClassifier, self).__init__()
        # NOTE: Conv1d's in_channels is seq_length here, so the convolutions
        # slide along the embedding dimension of each position.
        self.embeddings = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim)
        self.conv1 = nn.Conv1d(seq_length, num_filters, kernel_size)
        self.max_pool1 = nn.MaxPool1d(pool_size)
        self.conv2 = nn.Conv1d(num_filters, num_filters*2, kernel_size)
        self.dropout = nn.Dropout(dropout_rate)
        self.dense = nn.Linear(num_filters*2, num_classes)

    def forward(self, x):
        embedded = self.dropout(self.embeddings(x))
        features = self.max_pool1(F.relu(self.conv1(embedded)))
        features = F.relu(self.conv2(features))
        # Global max pool over the remaining positions, then drop that axis.
        pooled = F.max_pool1d(features, features.size()[2]).squeeze(2)
        logits = self.dense(self.dropout(pooled))
        return F.softmax(logits, 1)
"""
Explanation: Build the model
End of explanation
"""
import torch
from torch.autograd import Variable
import torch.nn.functional as F

# Training hyperparameters.
LEARNING_RATE = .001
FILTERS = 64
DROPOUT_RATE = 0.2
EMBEDDING_DIM = 200
KERNEL_SIZE = 3
POOL_SIZE = 3
NUM_EPOCH = 1
BATCH_SIZE = 128

train_size = len(train_texts)
# Number of full batches per epoch; any remainder rows are dropped.
steps_per_epoch = int(len(train_labels)/BATCH_SIZE)

print("Train size: {}".format(train_size))
print("Batch size: {}".format(BATCH_SIZE))
print("Number of epochs: {}".format(NUM_EPOCH))
print("Steps per epoch: {}".format(steps_per_epoch))
print("Vocab Size: {}".format(VOCAB_SIZE))
print("Embed Dimensions: {}".format(EMBEDDING_DIM))
print("Sequence Length: {}".format(MAX_SEQUENCE_LENGTH))
print("")


def get_batch(step):
    """Return the (inputs, labels) tensors for batch number *step*."""
    start_index = step*BATCH_SIZE
    end_index = start_index + BATCH_SIZE
    # NOTE: Variable is a no-op wrapper on PyTorch >= 0.4; kept for clarity.
    x = Variable(torch.Tensor(train_texts_vectorized[start_index:end_index]).long())
    y = Variable(torch.Tensor(train_labels[start_index:end_index]).long())
    return x, y


from torch_model import TorchTextClassifier

model = TorchTextClassifier(VOCAB_SIZE,
                            EMBEDDING_DIM,
                            MAX_SEQUENCE_LENGTH,
                            NUM_CLASSES,
                            FILTERS,
                            KERNEL_SIZE,
                            POOL_SIZE,
                            DROPOUT_RATE)
model.train()

# NOTE(review): the model's forward already applies softmax, while
# F.cross_entropy applies log_softmax internally; consider returning raw
# logits from the model (or using NLLLoss on log-probabilities) instead.
loss_metric = F.cross_entropy
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

for epoch in range(NUM_EPOCH):
    for step in range(steps_per_epoch):
        x, y = get_batch(step)
        optimizer.zero_grad()
        y_pred = model(x)
        loss = loss_metric(y_pred, y)
        loss.backward()
        optimizer.step()
        if step % 50 == 0:
            print('Batch [{}/{}] Loss: {}'.format(step+1, steps_per_epoch, round(loss.item(), 5)))
    print('Epoch [{}/{}] Loss: {}'.format(epoch+1, NUM_EPOCH, round(loss.item(), 5)))

# Fixed: the format string has a single placeholder, so the extra
# epoch/NUM_EPOCH arguments caused the epoch number to print as the loss.
print('Final Loss: {}'.format(round(loss.item(), 5)))

torch.save(model, 'torch_saved_model.pt')
"""
Explanation: Train and save the model
End of explanation
"""
import pickle

# Persist the fitted tokenizer so serving uses the exact same word ids.
with open('./processor_state.pkl', 'wb') as f:
    pickle.dump(processor, f)
"""
Explanation: Save pre-processing object
We need to save this so the same tokenizer used at training can be used to pre-process during serving
End of explanation
"""
# Upload the trained model and the fitted preprocessor for deployment.
!gsutil cp torch_saved_model.pt gs://{BUCKET_NAME}/{MODEL_DIR}/
!gsutil cp processor_state.pkl gs://{BUCKET_NAME}/{MODEL_DIR}/
"""
Explanation: Custom Model Prediction Preparation
Copy model and pre-processing object to GCS
End of explanation
"""
%%writefile model.py
import os
import pickle
import numpy as np
import torch
from torch.autograd import Variable
class CustomModelPrediction(object):
    """Serves the PyTorch text classifier behind AI Platform custom prediction.

    Bundles the trained model with the fitted text preprocessor so that raw
    title strings can be sent directly to the prediction service.
    """

    def __init__(self, model, processor):
        self._model = model
        self._processor = processor

    def _postprocess(self, predictions):
        # Map each row of class probabilities to its human-readable label.
        labels = ['github', 'nytimes', 'techcrunch']
        scores = predictions.detach().numpy()
        return [labels[np.argmax(row)] for row in scores]

    def predict(self, instances, **kwargs):
        # Tokenize/pad the raw text, run the model, then decode the labels.
        vectorized = self._processor.transform(instances)
        outputs = self._model(Variable(torch.Tensor(vectorized).long()))
        return self._postprocess(outputs)

    @classmethod
    def from_path(cls, model_dir):
        # torch_model must be imported so pickle can resolve the saved
        # module's class when torch.load deserializes it.
        import torch
        import torch_model

        model = torch.load(os.path.join(model_dir, 'torch_saved_model.pt'))
        model.eval()

        with open(os.path.join(model_dir, 'processor_state.pkl'), 'rb') as f:
            processor = pickle.load(f)

        return cls(model, processor)
"""
Explanation: Define Model Class
End of explanation
"""
# Headlines for Predictions
# Three sample headlines per class, used as raw-text prediction requests.
techcrunch=[
    'Uber shuts down self-driving trucks unit',
    'Grover raises €37M Series A to offer latest tech products as a subscription',
    'Tech companies can now bid on the Pentagon’s $10B cloud contract'
]

nytimes=[
    '‘Lopping,’ ‘Tips’ and the ‘Z-List’: Bias Lawsuit Explores Harvard’s Admissions',
    'A $3B Plan to Turn Hoover Dam into a Giant Battery',
    'A MeToo Reckoning in China’s Workplace Amid Wave of Accusations'
]

github=[
    'Show HN: Moon – 3kb JavaScript UI compiler',
    'Show HN: Hello, a CLI tool for managing social media',
    'Firefox Nightly added support for time-travel debugging'
]

requests = (techcrunch+nytimes+github)

from model import CustomModelPrediction

# Exercise the full custom-prediction path locally before deploying.
local_prediction = CustomModelPrediction.from_path('.')
local_prediction.predict(requests)
"""
Explanation: Test Model Class Locally
End of explanation
"""
%%writefile setup.py

from setuptools import setup

# NOTE(review): preprocess.py imports tensorflow.keras, so the standalone
# 'keras' requirement may be unnecessary — verify before trimming it.
REQUIRED_PACKAGES = ['keras']

setup(
    name="text_classification",
    version="0.1",
    scripts=["preprocess.py", "model.py", "torch_model.py"],
    include_package_data=True,
    install_requires=REQUIRED_PACKAGES
)

# Build the source distribution and stage it on Cloud Storage.
!python setup.py sdist
!gsutil cp ./dist/text_classification-0.1.tar.gz gs://{BUCKET_NAME}/{PACKAGES_DIR}/text_classification-0.1.tar.gz
"""
Explanation: Package up files and copy to GCS
Create a setup.py script to bundle model.py,preprocess.py and torch_model.py in a tarball package. Notice that setup.py does not include the dependencies of model.py in the package. These dependencies are provided to your model version in other ways:
numpy and google-cloud-storage are both included as part of AI Platform Prediction runtime version 1.15.
torch is provided in a separate package, as described in a following section.
End of explanation
"""
# Deployment identifiers; the runtime version matches the local TF install.
MODEL_NAME='torch_text_classification'
MODEL_VERSION='v1'
RUNTIME_VERSION='1.15'
REGION='us-central1'

# Delete model version if any
! gcloud ai-platform versions delete {MODEL_VERSION} --model {MODEL_NAME} --quiet # run if version already created

# Delete model resource
! gcloud ai-platform models delete {MODEL_NAME} --quiet

# Create a fresh model resource with request/response logging enabled.
!gcloud beta ai-platform models create {MODEL_NAME} --regions {REGION} --enable-logging --enable-console-logging
"""
Explanation: Model Deployment to AI Platform
End of explanation
"""
# Create the version: custom prediction class + code package + PyTorch wheel.
!gcloud beta ai-platform versions create {MODEL_VERSION} --model {MODEL_NAME} \
 --origin=gs://{BUCKET_NAME}/{MODEL_DIR}/ \
 --python-version=3.7 \
 --runtime-version={RUNTIME_VERSION} \
 --package-uris=gs://{BUCKET_NAME}/{PACKAGES_DIR}/text_classification-0.1.tar.gz,gs://cloud-ai-pytorch/torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl \
 --machine-type=mls1-c4-m4 \
 --prediction-class=model.CustomModelPrediction
"""
Explanation: Pytorch compatible packages
You need to specify two Python packages when you create your version resource. One of these is the package containing model.py that you uploaded to Cloud Storage in a previous step. The other is a package containing the version of PyTorch that you need.
Google Cloud provides a collection of PyTorch packages in the gs://cloud-ai-pytorch Cloud Storage bucket. These packages are mirrored from the official builds.
For this tutorial, use gs://cloud-ai-pytorch/torch-1.3.1+cpu-cp37-cp37m-linux_x86_64.whl as your PyTorch package. This provides your version resource with PyTorch 1.3.1 for Python 3.7, built to run on a CPU in Linux.
Use the following command to create your version resource:
End of explanation
"""
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import json

# JSON format the requests
request_data = {'instances': requests}

# Authenticate and call CMLE prediction API
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1', credentials=credentials)
parent = 'projects/{}/models/{}/versions/{}'.format(PROJECT_ID, MODEL_NAME, MODEL_VERSION)
print("Model full name: {}".format(parent))
response = api.projects().predict(body=request_data, name=parent).execute()

# Each prediction is one of the three source labels.
print(response['predictions'])
"""
Explanation: Online Predictions from AI Platform Prediction
End of explanation
"""
# Remove the deployed resources to stop incurring charges.
# Delete model version resource
!gcloud ai-platform versions delete {MODEL_VERSION} --model {MODEL_NAME} --quiet

# Delete model resource
! gcloud ai-platform models delete {MODEL_NAME} --quiet
"""
Explanation: Cleaning up
To clean up all GCP resources used in this project, you can delete the GCP
project you used for the tutorial.
Alternatively, you can clean up individual resources by running the following
commands:
End of explanation
"""
|
gobabiertoAR/datasets-portal | estructura-organica-pen/Cleaner estructura organica.ipynb | mit | from __future__ import unicode_literals
from __future__ import print_function
from data_cleaner import DataCleaner
import pandas as pd
input_path = "estructura-organica-raw.csv"
output_path = "estructura-organica-clean.csv"
dc = DataCleaner(input_path)
"""
Explanation: Limpieza de Estructura Organica del PEN
Se utilizan data-cleaner y pandas para codificar la limpieza de los datos de un archivo CSV. Primero se realiza una exploración de la tabla aplicando algunas reglas de limpieza y comprobando el resultado generado. Cuando este resultado es satisfactorio, se agrega la regla de limpieza a la lista codificada que luego se utilizará para generar la versión limpia del archivo.
Inicio
End of explanation
"""
# List the column names of the raw dataset.
for c in dc.df.columns:
    print(c)
"""
Explanation: Exploración y descubrimiento
End of explanation
"""
# Cleaning rules, applied in order by DataCleaner.clean().
rules = [
    # Rename columns: expand abbreviations and fix typos in headers.
    {
        "renombrar_columnas": [
            {"field": "aut_dni", "new_field": "autoridad_dni"},
            {"field": "aut_cuit_cuil", "new_field": "autoridad_cuil_cuit"},
            {"field": "aut_cargo", "new_field": "autoridad_cargo"},
            {"field": "aut_tratamiento", "new_field": "autoridad_tratamiento"},
            {"field": "aut_apellido", "new_field": "autoridad_apellido"},
            {"field": "aut_nombre", "new_field": "autoridad_nombre"},
            {"field": "aut_norma_designacion", "new_field": "autoridad_norma_designacion"},
            {"field": "norma_competenciasobjetivos", "new_field": "norma_competencias_objetivos"},
            {"field": "cordigo_postal", "new_field": "codigo_postal"}
        ]
    },
    # Apply the generic "string" cleanup rule to free-text columns,
    # discarding the original values.
    {
        "string": [
            {"field": "jurisdiccion", "keep_original": False},
            {"field": "unidad", "keep_original": False},
            {"field": "reporta_a", "keep_original": False},
            {"field": "unidad_tipo", "keep_original": False},
            {"field": "autoridad_cargo", "keep_original": False},
            {"field": "autoridad_tratamiento", "keep_original": False},
            {"field": "autoridad_apellido", "keep_original": False},
            {"field": "autoridad_nombre", "keep_original": False},
            {"field": "piso_oficina", "keep_original": False},
            {"field": "codigo_postal", "keep_original": False},
            {"field": "domicilio", "keep_original": False},
            {"field": "localidad", "keep_original": False},
            {"field": "provincia", "keep_original": False},
        ]
    },
    # Regex substitutions: punctuation, province abbreviations, decree
    # wording, trailing whitespace, and URL prefixes.
    {
        "string_regex_substitute": [
            {"field": "norma_competencias_objetivos", "regex_str_match": ";", "regex_str_sub": ",",
             "keep_original": False},
            {"field": "unidad", "regex_str_match": "\(.*\)", "regex_str_sub": "",
             "keep_original": False},
            {"field": "provincia", "regex_str_match": "Bs\. As\.", "regex_str_sub": "Buenos Aires",
             "keep_original": False},
            {"field": "autoridad_tratamiento", "regex_str_match": "\s+$", "regex_str_sub": "",
             "keep_original": False},
            {"field": "autoridad_tratamiento", "regex_str_match": "(.+{^\.})$", "regex_str_sub": "\g<1>.",
             "keep_original": False},
            {"field": "autoridad_norma_designacion", "regex_str_match": "Dto\D*", "regex_str_sub": "Decreto ",
             "keep_original": False},
            {"field": "web", "regex_str_match": "^.+www\.", "regex_str_sub": "http://www.",
             "keep_original": False},
        ]
    },
    # Apply the mail_format rule to the e-mail column.
    {
        "mail_format": [
            {"field": "mail"}
        ]
    },
    # Literal replacements for office/floor descriptions.
    {
        "reemplazar_string": [
            {"field": "piso_oficina", "replacements": {"Oficina": ["Of.icina"]}},
            {"field": "piso_oficina", "replacements": {"Piso": ["Planta"]}}
        ]
    }
]
"""
Explanation: Reglas de limpieza codificadas
End of explanation
"""
# Apply the cleaning rules and spot-check the results before saving.
dc.clean(rules)

# Fixed: map(print, ...) is lazy on Python 3 and would print nothing;
# an explicit loop behaves identically on Python 2 and 3.
for value in dc.df.piso_oficina.unique():
    print(value)

# Compare the cleaned columns against previously downloaded snapshots.
df_actual = pd.read_csv("estructura-organica-actual.csv")
df_20160926 = pd.read_excel("originales/160926 Set de datos Administración Pública Nacional.xlsx")
df_20160927 = pd.read_csv("originales/estructura_autoridades_apn-Descarga_20160927.csv")
df_20160929 = pd.read_csv("originales/estructura_autoridades_apn-Descargado_29-09-2016.csv")

print(len(df_actual.columns), len(df_20160926.columns), len(df_20160927.columns), len(df_20160929.columns))

# New fields present in the cleaned dataset but missing from each snapshot.
print(set(dc.df.columns)-set(df_actual.columns))
print(set(dc.df.columns)-set(df_20160926.columns))
print(set(dc.df.columns)-set(df_20160927.columns))
print(set(dc.df.columns)-set(df_20160929.columns))

for escalafon in dc.df.extraescalafonario.unique():
    print(escalafon)

dc.df.piso_oficina.unique()

# Quick regex sanity check: move a leading "(cargo)" after the name.
import re
re.sub("(?P<cargo>\(.+\))(?P<nombre>.+)","\g<nombre> \g<cargo>","(presidente) Juan Jose Perez.")

for unidad in dc.df.unidad.unique():
    # Fixed: "print unidad" is a syntax error with print_function imported.
    print(unidad)

dc.save(output_path)
dc.df.to_excel("estructura-organica.xlsx", index=False)
"""
Explanation: Limpieza
End of explanation
"""
|
swirlingsand/deep-learning-foundations | seq2seq/sequence_to_sequence_implementation.ipynb | mit | import numpy as np
import time
import helper
# Paths to the input (unsorted) and target (sorted) letter sequences.
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'

source_sentences = helper.load_data(source_path)
target_sentences = helper.load_data(target_path)
"""
Explanation: Character Sequence to Sequence
In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models. This notebook was updated to work with TensorFlow 1.1 and builds on the work of Dave Currie. Check out Dave's post Text Summarization with Amazon Reviews.
<img src="images/sequence-to-sequence.jpg"/>
Dataset
The dataset lives in the /data/ folder. At the moment, it is made up of the following files:
* letters_source.txt: The list of input letter sequences. Each sequence is its own line.
* letters_target.txt: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number.
End of explanation
"""
# Peek at the first few source sequences.
source_sentences[:50].split('\n')
"""
Explanation: Let's start by examining the current state of the dataset. source_sentences contains the entire input sequence file as text delimited by newline symbols.
End of explanation
"""
# Peek at the corresponding sorted target sequences.
target_sentences[:50].split('\n')
"""
Explanation: source_sentences contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from source_sentences. source_sentences contains a sorted characters of the line.
End of explanation
"""
def extract_character_vocab(data):
    """Build character<->id lookup tables for the newline-separated corpus.

    The four special tokens always occupy ids 0-3; the remaining characters
    are sorted before numbering so the vocabulary is deterministic across
    runs (iterating a raw set gives hash-randomized, irreproducible ids).

    Returns (int_to_vocab, vocab_to_int).
    """
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']

    set_words = sorted(set([character for line in data.split('\n') for character in line]))
    int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + set_words)}
    vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}

    return int_to_vocab, vocab_to_int
# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)
# Convert characters to ids
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in line] for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>']) for letter in line] + [target_letter_to_int['<EOS>']] for line in target_sentences.split('\n')]
print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
"""
Explanation: Preprocess
To do anything useful with it, we'll need to turn the each string into a list of characters:
<img src="images/source_and_target_arrays.png"/>
Then convert the characters to their int values as declared in our vocabulary:
End of explanation
"""
from distutils.version import LooseVersion
import tensorflow as tf
# NOTE(review): Dense appears unused in this cell — presumably needed by
# later decoder cells; confirm before removing.
from tensorflow.python.layers.core import Dense

# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
"""
Explanation: This is the final shape we need them to be in. We can now proceed to building the model.
Model
Check the Version of TensorFlow
This will check to make sure you have the correct version of TensorFlow
End of explanation
"""
# --- Training hyperparameters (read by the graph-building cells below) ---
# Number of Epochs
epochs = 60
# Batch Size
batch_size = 128
# RNN Size (units per LSTM layer)
rnn_size = 50
# Number of Layers (stacked LSTM depth, same for encoder and decoder)
num_layers = 2
# Embedding Size
encoding_embedding_size = 15
decoding_embedding_size = 15
# Learning Rate
learning_rate = 0.001
"""
Explanation: Hyperparameters
End of explanation
"""
def get_model_inputs():
    """Create the graph's input placeholders.

    Returns a tuple of:
        input_data  -- int32 [None, None] source-sequence ids
        targets     -- int32 [None, None] target-sequence ids
        lr          -- scalar float32 learning-rate placeholder
        target_sequence_length -- int32 [None] per-example target lengths
        max_target_sequence_length -- scalar, max of the target lengths
        source_sequence_length -- int32 [None] per-example source lengths
    """
    input_data = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')

    target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
    # Derived tensor (not a placeholder): used to cap decoding iterations.
    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
    source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')

    return input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length
"""
Explanation: Input
End of explanation
"""
def encoding_layer(input_data, rnn_size, num_layers,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """Embed the source ids and run them through a stacked-LSTM encoder.

    Returns (enc_output, enc_state); the caller in seq2seq_model keeps only
    enc_state, which is used to initialise the decoder.
    """
    # Encoder embedding: one trainable vector per source-vocabulary entry
    enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size)

    # RNN cell
    def make_cell(rnn_size):
        # Fixed seed keeps the weight initialisation reproducible
        enc_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                           initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return enc_cell

    enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])

    # sequence_length lets dynamic_rnn skip computation past each
    # example's true (unpadded) length
    enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)

    return enc_output, enc_state
"""
Explanation: Sequence to Sequence Model
We can now start defining the functions that will build the seq2seq model. We are building it from the bottom up with the following components:
2.1 Encoder
- Embedding
- Encoder cell
2.2 Decoder
1- Process decoder inputs
2- Set up the decoder
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
2.3 Seq2seq model connecting the encoder and decoder
2.4 Build the training graph hooking up the model with the
optimizer
2.1 Encoder
The first bit of the model we'll build is the encoder. Here, we'll embed the input data, construct our encoder, then pass the embedded data to the encoder.
Embed the input data using tf.contrib.layers.embed_sequence
<img src="images/embed_sequence.png" />
Pass the embedded input into a stack of RNNs. Save the RNN state and ignore the output.
<img src="images/encoder.png" />
End of explanation
"""
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
    '''Drop the last word id from each sequence in the batch and prepend
    the <GO> token, producing the decoder's training-time inputs.'''
    # Take all but the final time step of every row ([:, :-1])
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    # Prepend one <GO> id to every sequence in the batch
    dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)

    return dec_input
"""
Explanation: 2.2 Decoder
The decoder is probably the most involved part of this model. The following steps are needed to create it:
1- Process decoder inputs
2- Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
Process Decoder Input
In the training process, the target sequences will be used in two different places:
Using them to calculate the loss
Feeding them to the decoder during training to make the model more robust.
Now we need to address the second point. Let's assume our targets look like this in their letter/word form (we're doing this for readability. At this point in the code, these sequences would be in int form):
<img src="images/targets_1.png"/>
We need to do a simple transformation on the tensor before feeding it to the decoder:
1- We will feed an item of the sequence to the decoder at each time step. Think about the last timestep -- where the decoder outputs the final word in its output. The input to that step is the item before last from the target sequence. The decoder has no use for the last item in the target sequence in this scenario. So we'll need to remove the last item.
We do that using tensorflow's tf.strided_slice() method. We hand it the tensor, and the index of where to start and where to end the cutting.
<img src="images/strided_slice_1.png"/>
2- The first item in each sequence we feed to the decoder has to be the GO symbol, so we'll add that to the beginning.
<img src="images/targets_add_go.png"/>
Now the tensor is ready to be fed to the decoder. It looks like this (if we convert from ints to letters/symbols):
<img src="images/targets_after_processing_1.png"/>
End of explanation
"""
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size,
                   target_sequence_length, max_target_sequence_length, enc_state, dec_input):
    """Build the training and inference decoders (shared parameters).

    Returns (training_decoder_output, inference_decoder_output), the two
    dynamic_decode results; the caller reads .rnn_output from the former
    and .sample_id from the latter.
    """
    # 1. Decoder Embedding (a plain Variable, unlike the encoder which
    #    uses embed_sequence -- the inference helper needs the matrix)
    target_vocab_size = len(target_letter_to_int)
    dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    # 2. Construct the decoder cell
    def make_cell(rnn_size):
        # Same initialiser/seed as the encoder cells, for reproducibility
        dec_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                           initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return dec_cell

    dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])

    # 3. Dense layer to translate the decoder's output at each time
    # step into a choice from the target vocabulary
    output_layer = Dense(target_vocab_size,
                         kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))

    # 4. Set up a training decoder and an inference decoder
    # Training Decoder
    with tf.variable_scope("decode"):

        # Helper for the training process. Used by BasicDecoder to read inputs.
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
                                                            sequence_length=target_sequence_length,
                                                            time_major=False)

        # Basic decoder
        training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                           training_helper,
                                                           enc_state,
                                                           output_layer)

        # Perform dynamic decoding using the decoder
        training_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder,
                                                                       impute_finished=True,
                                                                       maximum_iterations=max_target_sequence_length)
    # 5. Inference Decoder
    # Reuses the same parameters trained by the training process
    # (reuse=True re-enters the "decode" variable scope)
    with tf.variable_scope("decode", reuse=True):
        # NOTE(review): relies on the module-level `batch_size` global rather
        # than taking it as a parameter -- confirm callers keep them in sync.
        start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens')

        # Helper for the inference process: feeds the decoder's own greedy
        # (argmax) output back in at each step, stopping at <EOS>.
        inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
                                                                    start_tokens,
                                                                    target_letter_to_int['<EOS>'])

        # Basic decoder
        inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                            inference_helper,
                                                            enc_state,
                                                            output_layer)

        # Perform dynamic decoding using the decoder
        inference_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
                                                                        impute_finished=True,
                                                                        maximum_iterations=max_target_sequence_length)

    return training_decoder_output, inference_decoder_output
"""
Explanation: Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
1- Embedding
Now that we have prepared the inputs to the training decoder, we need to embed them so they can be ready to be passed to the decoder.
We'll create an embedding matrix like the following then have tf.nn.embedding_lookup convert our input to its embedded equivalent:
<img src="images/embeddings.png" />
2- Decoder Cell
Then we declare our decoder cell. Just like the encoder, we'll use an tf.contrib.rnn.LSTMCell here as well.
We need to declare a decoder for the training process, and a decoder for the inference/prediction process. These two decoders will share their parameters (so that all the weights and biases that are set during the training phase can be used when we deploy the model).
First, we'll need to define the type of cell we'll be using for our decoder RNNs. We opted for LSTM.
3- Dense output layer
Before we move to declaring our decoders, we'll need to create the output layer, which will be a tensorflow.python.layers.core.Dense layer that translates the outputs of the decoder to logits that tell us which element of the decoder vocabulary the decoder is choosing to output at each time step.
4- Training decoder
Essentially, we'll be creating two decoders which share their parameters. One for training and one for inference. The two are similar in that both are created using tf.contrib.seq2seq.BasicDecoder and tf.contrib.seq2seq.dynamic_decode. They differ, however, in that we feed the target sequences as inputs to the training decoder at each time step to make it more robust.
We can think of the training decoder as looking like this (except that it works with sequences in batches):
<img src="images/sequence-to-sequence-training-decoder.png"/>
The training decoder does not feed the output of each time step to the next. Rather, the inputs to the decoder time steps are the target sequence from the training dataset (the orange letters).
5- Inference decoder
The inference decoder is the one we'll use when we deploy our model to the wild.
<img src="images/sequence-to-sequence-inference-decoder.png"/>
We'll hand our encoder hidden state to both the training and inference decoders and have it process its output. TensorFlow handles most of the logic for us. We just have to use the appropriate methods from tf.contrib.seq2seq and supply them with the appropriate inputs.
End of explanation
"""
def seq2seq_model(input_data, targets, lr, target_sequence_length,
                  max_target_sequence_length, source_sequence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers):
    """Wire the encoder and decoder together into the full seq2seq graph.

    Returns (training_decoder_output, inference_decoder_output) as produced
    by decoding_layer.  `lr` and `target_vocab_size` are accepted for
    interface completeness but not used inside this function (the optimiser
    lives outside, and decoding_layer derives the vocab size itself).
    """
    # Pass the input data through the encoder. We'll ignore the encoder
    # output, but use the final state to seed the decoder.
    # BUG FIX: use the enc_embedding_size argument rather than silently
    # falling back to the module-level `encoding_embedding_size` global.
    _, enc_state = encoding_layer(input_data,
                                  rnn_size,
                                  num_layers,
                                  source_sequence_length,
                                  source_vocab_size,
                                  enc_embedding_size)

    # Prepare the target sequences we'll feed to the decoder in training mode.
    # NOTE(review): still depends on the module-level `target_letter_to_int`
    # and `batch_size`; threading those through as parameters would make the
    # function fully self-contained.
    dec_input = process_decoder_input(targets, target_letter_to_int, batch_size)

    # Pass encoder state and decoder inputs to the decoders.
    # BUG FIX: likewise use dec_embedding_size instead of the global
    # `decoding_embedding_size`.
    training_decoder_output, inference_decoder_output = decoding_layer(target_letter_to_int,
                                                                       dec_embedding_size,
                                                                       num_layers,
                                                                       rnn_size,
                                                                       target_sequence_length,
                                                                       max_target_sequence_length,
                                                                       enc_state,
                                                                       dec_input)

    return training_decoder_output, inference_decoder_output
"""
Explanation: 2.3 Seq2seq model
Let's now go a step above, and hook up the encoder and decoder using the methods we just declared
End of explanation
"""
# Build the graph
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():

    # Load the model inputs
    input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs()

    # Create the training and inference logits
    training_decoder_output, inference_decoder_output = seq2seq_model(input_data,
                                                                      targets,
                                                                      lr,
                                                                      target_sequence_length,
                                                                      max_target_sequence_length,
                                                                      source_sequence_length,
                                                                      len(source_letter_to_int),
                                                                      len(target_letter_to_int),
                                                                      encoding_embedding_size,
                                                                      decoding_embedding_size,
                                                                      rnn_size,
                                                                      num_layers)

    # Create tensors for the training logits and inference logits.
    # tf.identity gives them stable names ('logits', 'predictions') so the
    # prediction cell can fetch them by name after reloading the checkpoint.
    training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
    inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions')

    # Create the weights for sequence_loss: 1.0 over real tokens and 0.0
    # over padding, so padded positions don't contribute to the loss
    masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')

    with tf.name_scope("optimization"):

        # Loss function
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)

        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)

        # Gradient Clipping: clamp every gradient to [-5, 5] to guard
        # against exploding gradients
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Model outputs training_decoder_output and inference_decoder_output both contain a 'rnn_output' logits tensor that looks like this:
<img src="images/logits.png"/>
The logits we get from the training tensor we'll pass to tf.contrib.seq2seq.sequence_loss() to calculate the loss and ultimately the gradient.
End of explanation
"""
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every sentence with ``pad_int`` so that all sentences in
    the batch share the length of the longest one."""
    longest = max(len(sentence) for sentence in sentence_batch)
    padded = []
    for sentence in sentence_batch:
        padded.append(sentence + [pad_int] * (longest - len(sentence)))
    return padded
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Yield padded (targets, sources, target_lengths, source_lengths)
    batches; any trailing partial batch is dropped."""
    num_batches = len(sources) // batch_size
    for batch_idx in range(num_batches):
        start = batch_idx * batch_size
        stop = start + batch_size
        padded_targets = np.array(pad_sentence_batch(targets[start:stop], target_pad_int))
        padded_sources = np.array(pad_sentence_batch(sources[start:stop], source_pad_int))
        # The *_sequence_length placeholders need one length per example;
        # after padding every row has the batch-wide maximum length.
        target_lengths = [len(seq) for seq in padded_targets]
        source_lengths = [len(seq) for seq in padded_sources]
        yield padded_targets, padded_sources, target_lengths, source_lengths
"""
Explanation: Get Batches
There's little processing involved when we retrieve the batches. This is a simple example assuming batch_size = 2
Source sequences (it's actually in int form, we're showing the characters for clarity):
<img src="images/source_batch.png" />
Target sequences (also in int, but showing letters for clarity):
<img src="images/target_batch.png" />
End of explanation
"""
checkpoint = "checkpoints/best_model.ckpt"

# Split data to training and validation sets: the first `batch_size`
# sequences are held out as a single fixed validation batch
train_source = source_letter_ids[batch_size:]
train_target = target_letter_ids[batch_size:]
valid_source = source_letter_ids[:batch_size]
valid_target = target_letter_ids[:batch_size]
(valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
                                                                                                            source_letter_to_int['<PAD>'],
                                                                                                            target_letter_to_int['<PAD>']))

display_step = 20 # Check training loss after every 20 batches

with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())

    for epoch_i in range(1, epochs+1):
        for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
                get_batches(train_target, train_source, batch_size,
                            source_letter_to_int['<PAD>'],
                            target_letter_to_int['<PAD>'])):

            # Training step
            _, loss = sess.run(
                [train_op, cost],
                {input_data: sources_batch,
                 targets: targets_batch,
                 lr: learning_rate,
                 target_sequence_length: targets_lengths,
                 source_sequence_length: sources_lengths})

            # Debug message updating us on the status of the training
            if batch_i % display_step == 0 and batch_i > 0:

                # Calculate validation cost (only `cost` is fetched, not
                # train_op, so no weights are updated here)
                validation_loss = sess.run(
                    [cost],
                    {input_data: valid_sources_batch,
                     targets: valid_targets_batch,
                     lr: learning_rate,
                     target_sequence_length: valid_targets_lengths,
                     source_sequence_length: valid_sources_lengths})

                print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}'
                      .format(epoch_i,
                              epochs,
                              batch_i,
                              len(train_source) // batch_size,
                              loss,
                              validation_loss[0]))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, checkpoint)
    print('Model Trained and Saved')
"""
Explanation: Train
We're now ready to train our model. If you run into OOM (out of memory) issues during training, try to decrease the batch_size.
End of explanation
"""
def source_to_seq(text, sequence_length=7):
    '''Convert *text* to a padded list of source-vocabulary ids for the model.

    Unknown characters map to <UNK>, and the result is right-padded with
    <PAD> ids up to *sequence_length*.  The default of 7 matches the value
    previously hard-coded here; pass a different length for longer inputs.
    '''
    ids = [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text]
    return ids + [source_letter_to_int['<PAD>']] * (sequence_length - len(text))
input_sentence = 'ih'
text = source_to_seq(input_sentence)

loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(checkpoint + '.meta')
    loader.restore(sess, checkpoint)

    # Fetch the tensors by the stable names given to them when the
    # training graph was built ('input', 'predictions', ...)
    input_data = loaded_graph.get_tensor_by_name('input:0')
    logits = loaded_graph.get_tensor_by_name('predictions:0')
    source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
    target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')

    #Multiply by batch_size to match the model's input parameters
    # (the graph was built for fixed-size batches; [0] keeps one answer)
    answer_logits = sess.run(logits, {input_data: [text]*batch_size,
                                      target_sequence_length: [len(text)]*batch_size,
                                      source_sequence_length: [len(text)]*batch_size})[0]

    # <PAD> id, used to drop padding from the printed response
    pad = source_letter_to_int["<PAD>"]

    print('Original Text:', input_sentence)

    print('\nSource')
    print(' Word Ids: {}'.format([i for i in text]))
    print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text])))

    print('\nTarget')
    print(' Word Ids: {}'.format([i for i in answer_logits if i != pad]))
    print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
"""
Explanation: Prediction
End of explanation
"""
|
g-weatherill/catalogue_toolkit | notebooks/Homogenisation.ipynb | agpl-3.0 | parser = ISFReader("inputs/isc_test_catalogue_isf.txt",
selected_origin_agencies=["ISC", "GCMT", "HRVD", "NEIC", "EHB", "BJI"],
selected_magnitude_agencies=["ISC", "GCMT", "HRVD", "NEIC", "BJI"])
catalogue = parser.read_file("ISC_DB1", "ISC Global M >= 5")
print("Catalogue contains: %d events" % catalogue.get_number_events())
"""
Explanation: Load in Catalogue - Limit to ISC, GCMT/HRVD, EHB, NEIC, BJI
End of explanation
"""
# Time-variable origin hierarchy: each entry pairs a date window (a
# "YYYY/MM/DD - YYYY/MM/DD" string) with an ordered agency preference list
origin_rules = [
    ("2005/01/01 - 2005/12/31", ['EHB', 'ISC', 'NEIC', 'GCMT', 'HRVD', 'BJI']),
    ("2006/01/01 - 2007/01/01", ['ISC', 'EHB', 'NEIC', 'BJI', 'GCMT', 'HRVD'])
]
"""
Explanation: Define Rule Sets
The catalogue covers the years 2005/06. To illustrate how to apply time variable hierarchies we consider two set of rules:
For the origin the order of preference is:
(For 2005): EHB, ISC, NEIC, GCMT/HRVD, BJI
(For 2006): ISC, EHB, NEIC, BJI, GCMT/HRVD
End of explanation
"""
def gcmt_hrvd_mw(magnitude):
    """GCMT/HRVD Mw is already moment magnitude, so it is adopted as-is."""
    mw = magnitude
    return mw
def gcmt_hrvd_mw_sigma(magnitude):
    """No extra uncertainty is attached to GCMT/HRVD Mw values."""
    sigma = 0.0
    return sigma
"""
Explanation: Magnitude Rules
GCMT/HRVD
End of explanation
"""
def neic_mw(magnitude):
    """NEIC-reported Mw is adopted without conversion."""
    adopted = magnitude
    return adopted
def neic_mw_sigma(magnitude):
    """Fixed 0.11-unit uncertainty assigned to NEIC Mw."""
    sigma = 0.11
    return sigma
def scordillis_ms(magnitude):
    """Scordilis (2006) Ms conversion; ISC and NEIC Ms are treated as
    (almost) equivalent, with a break in the relation at Ms 6.1."""
    if magnitude >= 6.1:
        return 0.99 * magnitude + 0.08
    return 0.67 * magnitude + 2.07
def scordillis_ms_sigma(magnitude):
    """Magnitude-dependent sigma for the Scordilis (2006) Ms conversion."""
    return 0.17 if magnitude < 6.1 else 0.20
def scordillis_mb(magnitude):
    """Scordilis (2006) mb relation (NEIC and ISC mb treated as nearly
    equivalent)."""
    scaled = 0.85 * magnitude
    return scaled + 1.03
def scordillis_mb_sigma(magnitude):
    """Constant sigma for the Scordilis (2006) mb relation."""
    sigma = 0.29
    return sigma
"""
Explanation: ISC/NEIC
End of explanation
"""
def bji_mb(magnitude):
    """Illustrative BJI mb scaling (0.9 * mb + 0.15); no formal study
    underlies these coefficients."""
    return magnitude * 0.9 + 0.15
def bji_mb_sigma(magnitude):
    """Assumed 0.2-unit sigma for the illustrative BJI mb scaling."""
    sigma = 0.2
    return sigma
def bji_ms(magnitude):
    """Illustrative BJI Ms scaling (0.9 * Ms + 0.15); no formal study
    underlies these coefficients."""
    return magnitude * 0.9 + 0.15
def bji_ms_sigma(magnitude):
    """Assumed 0.2-unit sigma for the illustrative BJI Ms scaling."""
    sigma = 0.2
    return sigma
"""
Explanation: BJI
For BJI - no analysis has been undertaken. We apply a simple scaling of 0.9 M + 0.15 with uncertainty of 0.2. This is for illustrative purposes only
End of explanation
"""
# Magnitude conversion hierarchies.  Each rule pairs an (agency, scale)
# with a conversion function and a sigma function defined above.
# Presumably rules earlier in a list take precedence -- confirm against
# the DynamicHomogenisor documentation.  The 2005 and 2006 sets use the
# same conversion functions; only the position of the BJI rules relative
# to NEIC differs.
rule_set_2005 = [
    MagnitudeConversionRule("GCMT", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
    MagnitudeConversionRule("HRVD", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
    MagnitudeConversionRule("ISC", "Ms", scordillis_ms, scordillis_ms_sigma),
    MagnitudeConversionRule("NEIC", "Ms", scordillis_ms, scordillis_ms_sigma),
    MagnitudeConversionRule("ISC", "mb", scordillis_mb, scordillis_mb_sigma),
    MagnitudeConversionRule("NEIC", "mb", scordillis_mb, scordillis_mb_sigma),
    MagnitudeConversionRule("BJI", "Ms", bji_ms, bji_ms_sigma),
    MagnitudeConversionRule("BJI", "mb", bji_mb, bji_mb_sigma)
]

rule_set_2006 = [
    MagnitudeConversionRule("GCMT", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
    MagnitudeConversionRule("HRVD", "Mw", gcmt_hrvd_mw, gcmt_hrvd_mw_sigma),
    MagnitudeConversionRule("ISC", "Ms", scordillis_ms, scordillis_ms_sigma),
    MagnitudeConversionRule("BJI", "Ms", bji_ms, bji_ms_sigma),
    MagnitudeConversionRule("NEIC", "Ms", scordillis_ms, scordillis_ms_sigma),
    MagnitudeConversionRule("ISC", "mb", scordillis_mb, scordillis_mb_sigma),
    MagnitudeConversionRule("BJI", "mb", bji_mb, bji_mb_sigma),
    MagnitudeConversionRule("NEIC", "mb", scordillis_mb, scordillis_mb_sigma)
]

# Pair each rule set with its date window, mirroring origin_rules
magnitude_rules = [
    ("2005/01/01 - 2005/12/31", rule_set_2005),
    ("2006/01/01 - 2007/01/01", rule_set_2006)
]
"""
Explanation: Define Magnitude Hierarchy
End of explanation
"""
# "time" => select which conversion rule applies to each event by its
# date alone (other selectors: "key", "depth", and their combinations)
preprocessor = HomogenisorPreprocessor("time")
catalogue = preprocessor.execute(catalogue, origin_rules, magnitude_rules)
"""
Explanation: Pre-processing
Before executing the homogenisation it is necessary to run a preprocessing step. This searches through the catalogue and identifies which conversion rule to apply:
The preprocessor is instantiated with a string describing the sort of rules to be applied.
"time" - Applies time only
"key" - Applies key rules only
"depth" - Applies depth rules only
"time|key" - Applies joint time and key rules
"time|depth" - Applied joint time and depth rules
"depth|key" - Applies joint depth and key rules
End of explanation
"""
# logging=True records which rule/hierarchy fired for every event, so the
# decisions can be inspected afterwards via dump_log
harmonisor = DynamicHomogenisor(catalogue, logging=True)
homogenised_catalogue = harmonisor.homogenise(magnitude_rules, origin_rules)
"""
Explanation: Harmonise the Catalogue
End of explanation
"""
log_file = "outputs/homogenisor_log.csv"
# Remove any stale log first so dump_log writes a fresh file
if os.path.exists(log_file):
    os.remove(log_file)
harmonisor.dump_log(log_file)
"""
Explanation: As logging was enabled, we can dump the log to a csv file and explore which rules and which hierarchy was applied for each event
End of explanation
"""
output_catalogue_file = "outputs/homogeneous_catalogue.csv"
# Remove any previous export first so the CSV is written from scratch
if os.path.exists(output_catalogue_file):
    os.remove(output_catalogue_file)
harmonisor.export_homogenised_to_csv(output_catalogue_file)
"""
Explanation: Export the Homogenised Catalogue to CSV
End of explanation
"""
|
jeicher/cobrapy | documentation_builder/getting_started.ipynb | lgpl-2.1 | from __future__ import print_function
import cobra.test
# "ecoli" and "salmonella" are also valid arguments
model = cobra.test.create_test_model("textbook")
"""
Explanation: Getting Started
To begin with, cobrapy comes with bundled models for Salmonella and E. coli, as well as a "textbook" model of E. coli core metabolism. To load a test model, type
End of explanation
"""
# Sizes of the model's three DictList containers
print(len(model.reactions))
print(len(model.metabolites))
print(len(model.genes))
"""
Explanation: The reactions, metabolites, and genes attributes of the cobrapy model are a special type of list called a DictList, and each one is made up of Reaction, Metabolite and Gene objects respectively.
End of explanation
"""
model.reactions[29]
"""
Explanation: Just like a regular list, objects in the DictList can be retrieved by index. For example, to get the 30th reaction in the model (at index 29 because of 0-indexing):
End of explanation
"""
model.metabolites.get_by_id("atp_c")
"""
Explanation: Additionally, items can be retrieved by their id using the get_by_id() function. For example, to get the cytosolic atp metabolite object (the id is "atp_c"), we can do the following:
End of explanation
"""
model.reactions.EX_glc__D_e.lower_bound
"""
Explanation: As an added bonus, users with an interactive shell such as IPython will be able to tab-complete to list elements inside a list. While this is not recommended behavior for most code because of the possibility for characters like "-" inside ids, this is very useful while in an interactive prompt:
End of explanation
"""
pgi = model.reactions.get_by_id("PGI")
pgi
"""
Explanation: Reactions
We will consider the reaction glucose 6-phosphate isomerase, which interconverts glucose 6-phosphate and fructose 6-phosphate. The reaction id for this reaction in our test model is PGI.
End of explanation
"""
print(pgi.name)
print(pgi.reaction)
"""
Explanation: We can view the full name and reaction catalyzed as strings
End of explanation
"""
print(pgi.lower_bound, "< pgi <", pgi.upper_bound)
print(pgi.reversibility)
"""
Explanation: We can also view reaction upper and lower bounds. Because the pgi.lower_bound < 0, and pgi.upper_bound > 0, pgi is reversible
End of explanation
"""
pgi.check_mass_balance()
"""
Explanation: We can also ensure the reaction is mass balanced. This function will return elements which violate mass balance. If it comes back empty, then the reaction is mass balanced.
End of explanation
"""
pgi.add_metabolites({model.metabolites.get_by_id("h_c"): -1})
pgi.reaction
"""
Explanation: In order to add a metabolite, we pass in a dict with the metabolite object and its coefficient
End of explanation
"""
pgi.check_mass_balance()
"""
Explanation: The reaction is no longer mass balanced
End of explanation
"""
pgi.pop(model.metabolites.get_by_id("h_c"))
print(pgi.reaction)
print(pgi.check_mass_balance())
"""
Explanation: We can remove the metabolite, and the reaction will be balanced once again.
End of explanation
"""
pgi.reaction = "g6p_c --> f6p_c + h_c + green_eggs + ham"
pgi.reaction
pgi.reaction = "g6p_c <=> f6p_c"
pgi.reaction
"""
Explanation: It is also possible to build the reaction from a string. However, care must be taken when doing this to ensure reaction id's match those in the model. The direction of the arrow is also used to update the upper and lower bounds.
End of explanation
"""
atp = model.metabolites.get_by_id("atp_c")
atp
"""
Explanation: Metabolites
We will consider cytosolic atp as our metabolite, which has the id atp_c in our test model.
End of explanation
"""
print(atp.name)
print(atp.compartment)
"""
Explanation: We can print out the metabolite name and compartment (cytosol in this case).
End of explanation
"""
atp.charge
"""
Explanation: We can see that ATP is a charged molecule in our model.
End of explanation
"""
print(atp.formula)
"""
Explanation: We can see the chemical formula for the metabolite as well.
End of explanation
"""
len(atp.reactions)
"""
Explanation: The reactions attribute gives a frozenset of all reactions using the given metabolite. We can use this to count the number of reactions which use atp.
End of explanation
"""
model.metabolites.get_by_id("g6p_c").reactions
"""
Explanation: A metabolite like glucose 6-phosphate will participate in fewer reactions.
End of explanation
"""
gpr = pgi.gene_reaction_rule
gpr
"""
Explanation: Genes
The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9):1290-307.
The GPR is stored as the gene_reaction_rule for a Reaction object as a string.
End of explanation
"""
pgi.genes
pgi_gene = model.genes.get_by_id("b4025")
pgi_gene
"""
Explanation: Corresponding gene objects also exist. These objects are tracked by the reactions itself, as well as by the model
End of explanation
"""
pgi_gene.reactions
"""
Explanation: Each gene keeps track of the reactions it catalyzes
End of explanation
"""
pgi.gene_reaction_rule = "(spam or eggs)"
pgi.genes
pgi_gene.reactions
"""
Explanation: Altering the gene_reaction_rule will create new gene objects if necessary and update all relationships.
End of explanation
"""
model.genes.get_by_id("spam")
"""
Explanation: Newly created genes are also added to the model
End of explanation
"""
cobra.manipulation.delete_model_genes(model, ["spam"],
cumulative_deletions=True)
print("after 1 KO: %4d < flux_PGI < %4d" %
(pgi.lower_bound, pgi.upper_bound))
cobra.manipulation.delete_model_genes(model, ["eggs"],
cumulative_deletions=True)
print("after 2 KO: %4d < flux_PGI < %4d" %
(pgi.lower_bound, pgi.upper_bound))
"""
Explanation: The delete_model_genes function will evaluate the gpr and set the upper and lower bounds to 0 if the reaction is knocked out. This function can preserve existing deletions or reset them using the cumulative_deletions flag.
End of explanation
"""
# Reset all gene deletions, restoring the reaction bounds
cobra.manipulation.undelete_model_genes(model)
print(pgi.lower_bound, "< pgi <", pgi.upper_bound)
"""
Explanation: The undelete_model_genes can be used to reset a gene deletion
End of explanation
"""
|
chapmanbe/utah_highschool_airquality | windrose/make_WindRose.ipynb | apache-2.0 | %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from datetime import datetime
import json
from urllib.request import urlopen
# Confirm that `pm25rose.py` is in your directory
from pm25rose import WindroseAxes
import mesowest
"""
Explanation: Python Windrose with MesoWest Data
Introduction
Who are we?
http://meso1.chpc.utah.edu/mesowest_overview/
Introduction to api services:
What are the MesoWest/SynopticLabs api services?
https://synopticlabs.org/api/
How do you find out where particulate concentrations are measured in Utah?
https://api.synopticlabs.org/v2/stations/latest?&token=demotoken&state=UT&vars=PM_25_concentration
http://meso2.chpc.utah.edu/aq/
Learning objectives:
Evaluate data from different sensor types.
Use an api service to access data in real time and retrospectively.
Visualize air quality data relative to wind conditions.
We will use Python to view air quality data from the MesoWest API.
But first...
1. Install the JSON Viewer for your Chrome browser. This will help you look at JSON-formated data in your browser.
2. Make sure you have pm25rose.py in the current directory. That package makes wind roses, you won't change anything in that file. (The original wind rose code is found here).
Import some stuff we'll use
End of explanation
"""
# Global matplotlib defaults shared by every figure in this notebook --
# set once here rather than repeating style kwargs in each plot call
# (see http://matplotlib.org/users/customizing.html)
mpl.rcParams['xtick.labelsize'] = 8
mpl.rcParams['ytick.labelsize'] = 8
mpl.rcParams['axes.labelsize'] = 10
mpl.rcParams['legend.fontsize'] = 10
mpl.rcParams['figure.figsize'] = [5, 10]
mpl.rcParams['grid.linewidth'] = .25
mpl.rcParams['savefig.bbox'] = 'tight'
"""
Explanation: #### Customize matplotlib
It's so much easier to modify matplotlib defaults like this rather than inline with the plot functions.
See more here http://matplotlib.org/users/customizing.html
End of explanation
"""
# Comma-separated MesoWest variable list requested by get_mesowest_ts
# when the caller does not override `variables`
default_vars = 'altimeter,pressure,sea_level_pressure,wind_direction,\
wind_speed,air_temp,relative_humidity,dew_point_temperature,wind_gust'
def get_mesowest_ts(stationID, start_time, end_time, variables=default_vars):
"""
Get MesoWest Time Series data:
Makes a time series query from the MesoWest API for a single station.
Input:
stationID : string of the station ID
start_time : datetime object of the start time in UTC
end_time : datetime object of the end time in UTC
variables : a string of variables available through the MesoWest API
see https://synopticlabs.org/api/mesonet/variables/ for
a list of variables.
Output:
A dictionary of the data.
"""
# Hey! You can get your own token! https://synopticlabs.org/api/guides/?getstarted
token = 'demotoken'
# Convert the start and end time to the string format requried by the API
start = start_time.strftime("%Y%m%d%H%M")
end = end_time.strftime("%Y%m%d%H%M")
tz = 'utc' # Timezone is hard coded for now. Could allow local time later.
# Build the API request URL
URL = 'http://api.mesowest.net/v2/stations/timeseries?&token=' + token \
+ '&stid=' + stationID \
+ '&start=' + start \
+ '&end=' + end \
+ '&vars=' + variables \
+ '&obtimezone=' + tz \
+ '&output=json'
print ("Here is the URL you asked for:", URL)
# Open URL and read JSON content. Convert JSON string to some python
# readable format.
f = urlopen(URL)
data = f.read()
data = json.loads(data)
# Create a new dictionary to store the data in.
return_this = {}
# Get basic station information
return_this['URL'] = URL
return_this['NAME'] = str(data['STATION'][0]['NAME'])
return_this['STID'] = str(data['STATION'][0]['STID'])
return_this['LAT'] = float(data['STATION'][0]['LATITUDE'])
return_this['LON'] = float(data['STATION'][0]['LONGITUDE'])
return_this['ELEVATION'] = float(data['STATION'][0]['ELEVATION'])
# Note: Elevation is in feet, NOT METERS!
# Dynamically create keys in the dictionary for each requested variable
for v in data['STATION'][0]['SENSOR_VARIABLES'].keys():
if v == 'date_time':
# Dates: Convert the strings to a python datetime object.
dates = data["STATION"][0]["OBSERVATIONS"]["date_time"]
DATES = [datetime.strptime(x, '%Y-%m-%dT%H:%M:%SZ') for x in dates]
return_this['DATETIME'] = np.array(DATES)
else:
# v represents all the variables, but each variable may have
# more than one set.
# For now, just return the first set.
key_name = str(v)
set_num = 0
grab_this_set = str(list(data['STATION'][0]['SENSOR_VARIABLES']\
[key_name].keys())[set_num]) # This could be problematic. No guarantee of order
# Always grab the first set (either _1 or _1d)
# ! Should make exceptions to this rule for certain stations and certain
# ! variables (a project for another day :p).
if grab_this_set[-1] != '1' and grab_this_set[-1] != 'd':
grab_this_set = grab_this_set[0:-1]+'1'
if grab_this_set[-1] == 'd':
grab_this_set = grab_this_set[0:-2]+'1d'
variable_data = np.array(data['STATION'][0]['OBSERVATIONS']\
[grab_this_set], dtype=np.float)
return_this[key_name] = variable_data
return return_this
"""
Explanation: Define a few functions
First function will get MesoWest data and return a Python dictionary.
Find a list of all available variables:
https://synopticlabs.org/api/mesonet/variables/
End of explanation
"""
# Make Rose
# A quick way to create new windrose axes...
def new_axes():
    """Create a fresh figure holding a single WindroseAxes and return the axes."""
    figure = plt.figure(facecolor='w', edgecolor='w')
    # [left, bottom, width, height] in figure-fraction coordinates.
    axes = WindroseAxes(figure, [0.1, 0.1, 0.8, 0.8], facecolor='w')
    figure.add_axes(axes)
    return axes
#...and adjust the legend box
def set_legend(ax):
l = ax.legend()
#plt.setp(l.get_texts())
plt.legend(loc='center left', bbox_to_anchor=(1.2, 0.5), prop={'size':10})
"""
Explanation: These two functions set up the windroses axes and legend
End of explanation
"""
# Date range for the data we are interested in (winter 2016-2017).
start = datetime(2016, 12, 1)
end = datetime(2017, 3, 1)

# MesoWest station ID.
stn = 'MTMET'
"""
Explanation: Ok, lets get some data
End of explanation
"""
# Get MesoWest Data: only wind direction and PM 2.5 are needed for the rose.
air_data = get_mesowest_ts(stn, start, end, variables='wind_direction,PM_25_concentration')
"""
Explanation: Find other stations with PM 25 concentrations here:
https://api.synopticlabs.org/v2/stations/metadata?&token=demotoken&state=UT&vars=PM_25_concentration&status=active
End of explanation
"""
# Evaluating the name displays the full dictionary returned by get_mesowest_ts.
air_data
"""
Explanation: What is the variable air_data?
End of explanation
"""
# Available keys: station metadata plus one entry per requested variable.
air_data.keys()
"""
Explanation: air_data is a dictionary. Each key is associated with a value or object.
What data are in the dictionary?
End of explanation
"""
# Access individual values through the dictionary keys.
print ("Station Name:", air_data['NAME'])
print ("Number of Observations:", len(air_data['DATETIME']))
print ("List of dates:", air_data['DATETIME'])
"""
Explanation: You can access the values or objects of each key like so...
End of explanation
"""
# Create a new figure
plt.figure(figsize=[10,5])

# Plot data lines
plt.plot(air_data['DATETIME'], air_data['PM_25_concentration'],
         color='dodgerblue',
         label="PM 2.5")
# Horizontal reference line at 35 ug/m^3, labeled as the EPA standard.
plt.axhline(35,
            linestyle = '--',
            color='r',
            label="EPA Standard")

# Add labels, etc.
plt.legend()
plt.ylabel(r'PM 2.5 Concentration ($\mu$g m$\mathregular{^{-3}}$)')
plt.title('PM 2.5 Concentration at %s (%s)' % (air_data['NAME'], air_data['STID']))
plt.xlim([air_data['DATETIME'][0], air_data['DATETIME'][-1]])
# Upper y-limit: 5 units above the largest non-NaN observation.
plt.ylim([0, np.nanmax(air_data['PM_25_concentration']+5)])
"""
Explanation: Visualize the data
Each datetime object in a['DATETIME'] matches PM 2.5 concentrations in a['PM_25_concentration'] and wind directions in a['wind_direction'].
Plot a time series of PM 2.5 concentration for the time period
End of explanation
"""
# Make the wind rose
ax = new_axes()
ax.bar(air_data['wind_direction'], air_data['PM_25_concentration'],
       nsector=16,
       normed=True,  # displays a normalized wind rose, in percent instead of count.
       bins=[0, 12.1, 35.5, 55.5, 150.5],  # concentration bin edges (ug/m^3); presumably AQI-style breakpoints -- confirm
       colors=('green', 'yellow', 'orange', 'red', 'purple'))

# Create a legend
set_legend(ax)
plt.title("PM2.5 Rose %s \n %s - %s" % (air_data['NAME'], start.strftime('%d %b %Y'), end.strftime('%d %b %Y')))
plt.grid(True)

# Grid at 5% intervals
plt.yticks(np.arange(5, 105, 5))
ax.set_yticklabels(['5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%'])

# Change the plot range: radial max = largest total frequency over all sectors.
ax.set_rmax(np.max(np.sum(ax._info['table'], axis=0)))
#ax.set_rmax(40)
"""
Explanation: Plot a wind rose, to show how PM 2.5 is related to wind direction
ax.bar() is a function that makes wind roses. It requires two inputs:
1. An array of wind directions.
2. An array of some variable related to wind direction, in this case PM 2.5.
The other inputs are not required, but allow us to custimize the figure.
End of explanation
"""
# Values used to create the plot: the windrose frequency table (rows are
# concentration bins, columns are direction sectors).
ax._info["table"]
"""
Explanation: Questions ???
What does nsector do? (Try increasing or decreasing it)
How can you change the color of each bin? Find matplotlib named colors here
How can you change the color range for each bin?
How can you change the number of bins?
What happens if you uncomment the last line ax.set_rmax(40)?
Instead of using the ax.bar() function, try ax.contour(), ax.contourf(), ax.box()
What does this data tell us?
Where do winds typically blow from? Why?
Do you know where MTMET station is?
Can you find the latitude and longitude for MTMET and find it's location in Google maps?
From what direction did MTMET get the highest PM 2.5 pollution?
How does this compare to the same time period last year?
What data is used to make this plot? What did the ax.bar() function do?
End of explanation
"""
# Inspect the frequency table behind the rose (shape, column sums, max sum).
print ('Why does it have this shape?', np.shape(ax._info["table"]))
print ('Why is the last item all zeros?')
print ('The total frequency in each direction:', np.sum(ax._info["table"], axis=0))
print ('Maximum freqency (what we set rmax to)', np.max(np.sum(ax._info["table"], axis=0)))
"""
Explanation: Questions ???
End of explanation
"""
# Find where air_data['PM_25_concentration'] is high (above 35.5 ug/m^3).
high_PM_idx = air_data['PM_25_concentration'] > 35.5
# Note: You'll get a warning because there may be nans in the data

# What did we just do? This variable contains a True/False for every position
high_PM_idx

# Only get the dates and data when high_PM_idx is true.
direction_highPM = air_data['wind_direction'][high_PM_idx]
PM25_highPM = air_data['PM_25_concentration'][high_PM_idx]

# Create a new figure axis
axH = new_axes()
axH.bar(direction_highPM, PM25_highPM,
        nsector=16,
        normed=True,
        bins=[0, 12.1, 35.5, 55.5, 150.5],
        colors=('green', 'yellow', 'orange', 'red', 'purple'))

# Create a legend
set_legend(axH)

# Bug fix: this cell referenced `a['NAME']`, but no variable `a` exists in
# this notebook (NameError) -- the station dictionary is `air_data`.
plt.title("PM2.5 Rose %s \n %s - %s" % (air_data['NAME'], start.strftime('%d %b %Y'), end.strftime('%d %b %Y')))
plt.grid(True)

# Grid at 5% intervals
plt.yticks(np.arange(5, 105, 5))
axH.set_yticklabels(['5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%'])

# Change the plot range
axH.set_rmax(np.max(np.sum(axH._info['table'], axis=0)))
"""
Explanation: What if we only want a wind rose when PM 2.5 was high?
End of explanation
"""
# Request temperature and wind speed (plus direction) for the same period/station.
a1 = get_mesowest_ts(stn, start, end, variables='wind_direction,air_temp,wind_speed')

# These are the available keys
print (a1.keys())

# Make a wind rose for air temperature
ax1 = new_axes()
ax1.bar(a1['wind_direction'], a1['air_temp'],
        nsector=16,
        normed=True,
        bins=range(-10,25,5),  # temperature bins in 5-degree steps
        cmap=cm.Spectral_r)  # For a list of other colormap options type: dir(cm)

# Add a legend and title
set_legend(ax1)
plt.title("Temperature Rose %s \n %s - %s" % (a1['NAME'], start.strftime('%d %b %Y'), end.strftime('%d %b %Y')))

# Add the grid lines
plt.grid(True)

# Grid at 5% intervals, between 5 and 100
plt.yticks(np.arange(5, 105, 5))

# Label each grid with a % sign
ax1.set_yticklabels(['5%', '10%', '15%', '20%', '25%', '30%', '35%', '40%'])

# Change the plot range
#ax.set_rmax(25)
ax1.set_rmax(np.max(np.sum(ax1._info['table'], axis=0)))
"""
Explanation: How would you make a wind rose for another variable?
First, we need to get another variable from the MesoWest API. Lets try air temperature and wind speed.
End of explanation
"""
# Standard wind rose: wind speed (m/s) binned by direction.
ax2 = new_axes()
ax2.bar(a1['wind_direction'], a1['wind_speed'],
        nsector=16,
        normed=True,
        bins=range(0,10))
set_legend(ax2)
ax2.set_title('Wind Rose: bar')

# Same data drawn as a filled-contour rose with finer angular resolution.
ax3 = new_axes()
ax3.contourf(a1['wind_direction'], a1['wind_speed'],
             nsector=180,
             normed=True,
             bins=range(0,8),
             cmap=cm.inferno_r)
ax3.set_title('Wind Rose: contourf')
set_legend(ax3)
"""
Explanation: Question ???
Can you tell where the wind typically blows at night, when it's cold?
Can you make a rose for another time of year? Another station?
Wind Rose, in m/s
End of explanation
"""
|
AhmetHamzaEmra/Deep-Learning-Specialization-Coursera | Sequence Models/Emojify+-+v2.ipynb | mit | import numpy as np
from emo_utils import *
import emoji
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Emojify!
Welcome to the second assignment of Week 2. You are going to use word vector representations to build an Emojifier.
Have you ever wanted to make your text messages more expressive? Your emojifier app will help you do that. So rather than writing "Congratulations on the promotion! Lets get coffee and talk. Love you!" the emojifier can automatically turn this into "Congratulations on the promotion! 👍 Lets get coffee and talk. ☕️ Love you! ❤️"
You will implement a model which inputs a sentence (such as "Let's go see the baseball game tonight!") and finds the most appropriate emoji to be used with this sentence (⚾️). In many emoji interfaces, you need to remember that ❤️ is the "heart" symbol rather than the "love" symbol. But using word vectors, you'll see that even if your training set explicitly relates only a few words to a particular emoji, your algorithm will be able to generalize and associate words in the test set to the same emoji even if those words don't even appear in the training set. This allows you to build an accurate classifier mapping from sentences to emojis, even using a small training set.
In this exercise, you'll start with a baseline model (Emojifier-V1) using word embeddings, then build a more sophisticated model (Emojifier-V2) that further incorporates an LSTM.
Lets get started! Run the following cell to load the package you are going to use.
End of explanation
"""
# Load the training and test splits of the EMOJISET data.
X_train, Y_train = read_csv('data/train_emoji.csv')
X_test, Y_test = read_csv('data/tesss.csv')
# Word count of the longest (by character length) training sentence; used
# later as the padding length for the LSTM model.
maxLen = len(max(X_train, key=len).split())
"""
Explanation: 1 - Baseline model: Emojifier-V1
1.1 - Dataset EMOJISET
Let's start by building a simple baseline classifier.
You have a tiny dataset (X, Y) where:
- X contains 127 sentences (strings)
- Y contains a integer label between 0 and 4 corresponding to an emoji for each sentence
<img src="images/data_set.png" style="width:700px;height:300px;">
<caption><center> Figure 1: EMOJISET - a classification problem with 5 classes. A few examples of sentences are given here. </center></caption>
Let's load the dataset using the code below. We split the dataset between training (127 examples) and testing (56 examples).
End of explanation
"""
# Show one training sentence next to its emoji label.
index = 1
print(X_train[index], label_to_emoji(Y_train[index]))
"""
Explanation: Run the following cell to print sentences from X_train and corresponding labels from Y_train. Change index to see different examples. Because of the font the iPython notebook uses, the heart emoji may be colored black rather than red.
End of explanation
"""
# One-hot encode the integer labels (5 classes) for the softmax classifier.
Y_oh_train = convert_to_one_hot(Y_train, C = 5)
Y_oh_test = convert_to_one_hot(Y_test, C = 5)
"""
Explanation: 1.2 - Overview of the Emojifier-V1
In this part, you are going to implement a baseline model called "Emojifier-v1".
<center>
<img src="images/image_1.png" style="width:900px;height:300px;">
<caption><center> Figure 2: Baseline model (Emojifier-V1).</center></caption>
</center>
The input of the model is a string corresponding to a sentence (e.g. "I love you). In the code, the output will be a probability vector of shape (1,5), that you then pass in an argmax layer to extract the index of the most likely emoji output.
To get our labels into a format suitable for training a softmax classifier, let's convert $Y$ from its current shape $(m, 1)$ into a "one-hot representation" $(m, 5)$, where each row is a one-hot vector giving the label of one example. You can do so using this next code snippet. Here, Y_oh stands for "Y-one-hot" in the variable names Y_oh_train and Y_oh_test:
End of explanation
"""
# Show an integer label and its one-hot equivalent.
index = 50
print(Y_train[index], "is converted into one hot", Y_oh_train[index])
"""
Explanation: Let's see what convert_to_one_hot() did. Feel free to change index to print out different values.
End of explanation
"""
# Load the pretrained 50-dimensional GloVe vectors and the word/index maps.
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
"""
Explanation: All the data is now ready to be fed into the Emojify-V1 model. Let's implement the model!
1.3 - Implementing Emojifier-V1
As shown in Figure (2), the first step is to convert an input sentence into the word vector representation, which then get averaged together. Similar to the previous exercise, we will use pretrained 50-dimensional GloVe embeddings. Run the following cell to load the word_to_vec_map, which contains all the vector representations.
End of explanation
"""
# Sanity-check the two lookup dictionaries in both directions.
word = "cucumber"
index = 289846
print("the index of", word, "in the vocabulary is", word_to_index[word])
print("the", str(index) + "th word in the vocabulary is", index_to_word[index])
"""
Explanation: You've loaded:
- word_to_index: dictionary mapping from words to their indices in the vocabulary (400,001 words, with the valid indices ranging from 0 to 400,000)
- index_to_word: dictionary mapping from indices to their corresponding words in the vocabulary
- word_to_vec_map: dictionary mapping words to their GloVe vector representation.
Run the following cell to check if it works.
End of explanation
"""
# GRADED FUNCTION: sentence_to_avg

def sentence_to_avg(sentence, word_to_vec_map):
    """
    Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word
    and averages its value into a single vector encoding the meaning of the sentence.

    Arguments:
    sentence -- string, one training example from X
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its vector representation

    Returns:
    avg -- average vector encoding information about the sentence, numpy-array
           of the same shape as the word vectors (e.g. (50,) for GloVe-50)

    Raises:
    ValueError -- if the sentence contains no words
    KeyError -- if a word is not present in word_to_vec_map
    """
    # Step 1: Split sentence into a list of lower-case words.
    words = sentence.lower().split()

    # Guard against empty sentences: the original code silently divided by
    # zero here, producing an all-NaN vector.
    if not words:
        raise ValueError("Cannot average an empty sentence.")

    # Generalization: infer the embedding dimensionality from the map instead
    # of hard-coding 50, so any embedding size works.
    any_vector = next(iter(word_to_vec_map.values()))
    avg = np.zeros(any_vector.shape)

    # Step 2: sum the word vectors, then divide by the word count.
    for w in words:
        avg += word_to_vec_map[w]
    avg = avg / len(words)

    return avg
# Quick sanity check of sentence_to_avg on a sample sentence.
avg = sentence_to_avg("Morrocan couscous is my favorite dish", word_to_vec_map)
print("avg = ", avg)
"""
Explanation: Exercise: Implement sentence_to_avg(). You will need to carry out two steps:
1. Convert every sentence to lower-case, then split the sentence into a list of words. X.lower() and X.split() might be useful.
2. For each word in the sentence, access its GloVe representation. Then, average all these values.
End of explanation
"""
# GRADED FUNCTION: model

def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):
    """
    Train a softmax classifier over averaged GloVe sentence vectors using
    plain stochastic gradient descent (numpy only).

    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m, 1)
    Y -- labels, numpy array of integers between 0 and 7, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    learning_rate -- learning_rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations

    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """
    np.random.seed(1)

    m = Y.shape[0]  # number of training examples
    n_y = 5         # number of output classes (emojis)
    n_h = 50        # dimensionality of the GloVe vectors

    # Xavier-style initialization of the softmax parameters.
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))

    # One-hot targets, one row per training example.
    Y_oh = convert_to_one_hot(Y, C = n_y)

    # SGD: one parameter update per training example per epoch.
    for epoch in range(num_iterations):
        for idx in range(m):
            # Forward pass: average embedding -> affine map -> softmax.
            features = sentence_to_avg(X[idx], word_to_vec_map)
            logits = np.dot(W, features) + b
            probs = softmax(logits)

            # Cross-entropy loss for this single example.
            cost = -np.sum(Y_oh[idx] * np.log(probs))

            # Backward pass: gradients of the softmax/cross-entropy pair.
            grad_logits = probs - Y_oh[idx]
            grad_W = np.dot(grad_logits.reshape(n_y, 1), features.reshape(1, n_h))
            grad_b = grad_logits

            # Gradient-descent step.
            W = W - learning_rate * grad_W
            b = b - learning_rate * grad_b

        if epoch % 100 == 0:
            print("Epoch: " + str(epoch) + " --- cost = " + str(cost))
            pred = predict(X, Y, W, b, word_to_vec_map)

    return pred, W, b
# Sanity-check shapes of the training data and the one-hot label encoding.
print(X_train.shape)
print(Y_train.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(X_train[0])
print(type(X_train))
# A small hand-made example dataset (for illustration; not used by training below).
Y = np.asarray([5,0,0,5, 4, 4, 4, 6, 6, 4, 1, 1, 5, 6, 6, 3, 6, 3, 4, 4])
print(Y.shape)
X = np.asarray(['I am going to the bar tonight', 'I love you', 'miss you my dear',
                'Lets go party and drinks','Congrats on the new job','Congratulations',
                'I am so happy for you', 'Why are you feeling bad', 'What is wrong with you',
                'You totally deserve this prize', 'Let us go play football',
                'Are you down for football this afternoon', 'Work hard play harder',
                'It is suprising how people can be dumb sometimes',
                'I am very disappointed','It is the best day in my life',
                'I think I will end up alone','My life is so boring','Good job',
                'Great so awesome'])
print(X.shape)
print(np.eye(5)[Y_train.reshape(-1)].shape)
print(type(X_train))
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**avg= **
</td>
<td>
[-0.008005 0.56370833 -0.50427333 0.258865 0.55131103 0.03104983
-0.21013718 0.16893933 -0.09590267 0.141784 -0.15708967 0.18525867
0.6495785 0.38371117 0.21102167 0.11301667 0.02613967 0.26037767
0.05820667 -0.01578167 -0.12078833 -0.02471267 0.4128455 0.5152061
0.38756167 -0.898661 -0.535145 0.33501167 0.68806933 -0.2156265
1.797155 0.10476933 -0.36775333 0.750785 0.10282583 0.348925
-0.27262833 0.66768 -0.10706167 -0.283635 0.59580117 0.28747333
-0.3366635 0.23393817 0.34349183 0.178405 0.1166155 -0.076433
0.1445417 0.09808667]
</td>
</tr>
</table>
Model
You now have all the pieces to finish implementing the model() function. After using sentence_to_avg() you need to pass the average through forward propagation, compute the cost, and then backpropagate to update the softmax's parameters.
Exercise: Implement the model() function described in Figure (2). Assuming here that $Yoh$ ("Y one hot") is the one-hot encoding of the output labels, the equations you need to implement in the forward pass and to compute the cross-entropy cost are:
$$ z^{(i)} = W . avg^{(i)} + b$$
$$ a^{(i)} = softmax(z^{(i)})$$
$$ \mathcal{L}^{(i)} = - \sum_{k = 0}^{n_y - 1} Yoh^{(i)}_k * log(a^{(i)}_k)$$
It is possible to come up with a more efficient vectorized implementation. But since we are using a for-loop to convert the sentences one at a time into the avg^{(i)} representation anyway, let's not bother this time.
We provided you a function softmax().
End of explanation
"""
# Train Emojifier-V1 and learn the softmax parameters (W, b).
pred, W, b = model(X_train, Y_train, word_to_vec_map)
print(pred)
"""
Explanation: Run the next cell to train your model and learn the softmax parameters (W,b).
End of explanation
"""
# Evaluate on the training and test sets (predict is from emo_utils and,
# per the expected output below, also reports accuracy).
print("Training set:")
pred_train = predict(X_train, Y_train, W, b, word_to_vec_map)
print('Test set:')
pred_test = predict(X_test, Y_test, W, b, word_to_vec_map)
"""
Explanation: Expected Output (on a subset of iterations):
<table>
<tr>
<td>
**Epoch: 0**
</td>
<td>
cost = 1.95204988128
</td>
<td>
Accuracy: 0.348484848485
</td>
</tr>
<tr>
<td>
**Epoch: 100**
</td>
<td>
cost = 0.0797181872601
</td>
<td>
Accuracy: 0.931818181818
</td>
</tr>
<tr>
<td>
**Epoch: 200**
</td>
<td>
cost = 0.0445636924368
</td>
<td>
Accuracy: 0.954545454545
</td>
</tr>
<tr>
<td>
**Epoch: 300**
</td>
<td>
cost = 0.0343226737879
</td>
<td>
Accuracy: 0.969696969697
</td>
</tr>
</table>
Great! Your model has pretty high accuracy on the training set. Lets now see how it does on the test set.
1.4 - Examining test set performance
End of explanation
"""
# Try the trained model on sentences it has never seen.
X_my_sentences = np.array(["i adore you", "i love you", "funny lol", "lets play with a ball", "food is ready", "not feeling happy"])
Y_my_labels = np.array([[0], [0], [2], [1], [4],[3]])

pred = predict(X_my_sentences, Y_my_labels , W, b, word_to_vec_map)
print_predictions(X_my_sentences, pred)
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**Train set accuracy**
</td>
<td>
97.7
</td>
</tr>
<tr>
<td>
**Test set accuracy**
</td>
<td>
85.7
</td>
</tr>
</table>
Random guessing would have had 20% accuracy given that there are 5 classes. This is pretty good performance after training on only 127 examples.
In the training set, the algorithm saw the sentence "I love you" with the label ❤️. You can check however that the word "adore" does not appear in the training set. Nonetheless, lets see what happens if you write "I adore you."
End of explanation
"""
# Cross-tabulate actual vs. predicted labels and plot the confusion matrix.
print(Y_test.shape)
print(' '+ label_to_emoji(0)+ ' ' + label_to_emoji(1) + ' ' + label_to_emoji(2)+ ' ' + label_to_emoji(3)+' ' + label_to_emoji(4))
# NOTE(review): `pd` is never imported in this notebook -- presumably it comes
# in via `from emo_utils import *`; confirm, or add `import pandas as pd`.
print(pd.crosstab(Y_test, pred_test.reshape(56,), rownames=['Actual'], colnames=['Predicted'], margins=True))
plot_confusion_matrix(Y_test, pred_test)
"""
Explanation: Amazing! Because adore has a similar embedding as love, the algorithm has generalized correctly even to a word it has never seen before. Words such as heart, dear, beloved or adore have embedding vectors similar to love, and so might work too---feel free to modify the inputs above and try out a variety of input sentences. How well does it work?
Note though that it doesn't get "not feeling happy" correct. This algorithm ignores word ordering, so is not good at understanding phrases like "not happy."
Printing the confusion matrix can also help understand which classes are more difficult for your model. A confusion matrix shows how often an example whose label is one class ("actual" class) is mislabeled by the algorithm with a different class ("predicted" class).
End of explanation
"""
import numpy as np
np.random.seed(0)  # seed set before the Keras imports, for reproducibility
from keras.models import Model
from keras.layers import Dense, Input, Dropout, LSTM, Activation
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.initializers import glorot_uniform
np.random.seed(1)  # re-seed so the numpy randomness that follows is reproducible
"""
Explanation: <font color='blue'>
What you should remember from this part:
- Even with a 127 training examples, you can get a reasonably good model for Emojifying. This is due to the generalization power word vectors gives you.
- Emojify-V1 will perform poorly on sentences such as "This movie is not good and not enjoyable" because it doesn't understand combinations of words--it just averages all the words' embedding vectors together, without paying attention to the ordering of words. You will build a better algorithm in the next part.
2 - Emojifier-V2: Using LSTMs in Keras:
Let's build an LSTM model that takes as input word sequences. This model will be able to take word ordering into account. Emojifier-V2 will continue to use pre-trained word embeddings to represent words, but will feed them into an LSTM, whose job it is to predict the most appropriate emoji.
Run the following cell to load the Keras packages.
End of explanation
"""
# GRADED FUNCTION: sentences_to_indices

def sentences_to_indices(X, word_to_index, max_len):
    """
    Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.
    The output shape should be such that it can be given to `Embedding()` (described in Figure 4).

    Arguments:
    X -- array of sentences (strings), of shape (m, 1)
    word_to_index -- a dictionary containing the each word mapped to its index
    max_len -- maximum number of words in a sentence. Sentences longer than
               this are truncated (the original raised IndexError instead).

    Returns:
    X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)
    """
    m = X.shape[0]  # number of training examples

    # Zero padding: positions past the end of a sentence keep index 0.
    X_indices = np.zeros((m, max_len))

    for i in range(m):  # loop over training examples
        # Lower-case the i-th sentence and split it into words.
        sentence_words = X[i].lower().split()

        # Fill column j with the vocabulary index of the j-th word.
        # Slicing at max_len truncates over-long sentences instead of
        # writing past the end of the row (IndexError in the original).
        for j, w in enumerate(sentence_words[:max_len]):
            X_indices[i, j] = word_to_index[w]

    return X_indices
"""
Explanation: 2.1 - Overview of the model
Here is the Emojifier-v2 you will implement:
<img src="images/emojifier-v2.png" style="width:700px;height:400px;"> <br>
<caption><center> Figure 3: Emojifier-V2. A 2-layer LSTM sequence classifier. </center></caption>
2.2 Keras and mini-batching
In this exercise, we want to train Keras using mini-batches. However, most deep learning frameworks require that all sequences in the same mini-batch have the same length. This is what allows vectorization to work: If you had a 3-word sentence and a 4-word sentence, then the computations needed for them are different (one takes 3 steps of an LSTM, one takes 4 steps) so it's just not possible to do them both at the same time.
The common solution to this is to use padding. Specifically, set a maximum sequence length, and pad all sequences to the same length. For example, if the maximum sequence length is 20, we could pad every sentence with "0"s so that each input sentence is of length 20. Thus, a sentence "i love you" would be represented as $(e_{i}, e_{love}, e_{you}, \vec{0}, \vec{0}, \ldots, \vec{0})$. In this example, any sentences longer than 20 words would have to be truncated. One simple way to choose the maximum sequence length is to just pick the length of the longest sentence in the training set.
2.3 - The Embedding layer
In Keras, the embedding matrix is represented as a "layer", and maps positive integers (indices corresponding to words) into dense vectors of fixed size (the embedding vectors). It can be trained or initialized with a pretrained embedding. In this part, you will learn how to create an Embedding() layer in Keras, initialize it with the GloVe 50-dimensional vectors loaded earlier in the notebook. Because our training set is quite small, we will not update the word embeddings but will instead leave their values fixed. But in the code below, we'll show you how Keras allows you to either train or leave fixed this layer.
The Embedding() layer takes an integer matrix of size (batch size, max input length) as input. This corresponds to sentences converted into lists of indices (integers), as shown in the figure below.
<img src="images/embedding1.png" style="width:700px;height:250px;">
<caption><center> Figure 4: Embedding layer. This example shows the propagation of two examples through the embedding layer. Both have been zero-padded to a length of max_len=5. The final dimension of the representation is (2,max_len,50) because the word embeddings we are using are 50 dimensional. </center></caption>
The largest integer (i.e. word index) in the input should be no larger than the vocabulary size. The layer outputs an array of shape (batch size, max input length, dimension of word vectors).
The first step is to convert all your training sentences into lists of indices, and then zero-pad all these lists so that their length is the length of the longest sentence.
Exercise: Implement the function below to convert X (array of sentences as strings) into an array of indices corresponding to words in the sentences. The output shape should be such that it can be given to Embedding() (described in Figure 4).
End of explanation
"""
# Check sentences_to_indices on a tiny batch, zero-padded to max_len=5.
# NOTE(review): the "Expected Output" text below still shows 'lets play football'
# while this cell uses 'lets play baseball', so the middle index row will differ.
X1 = np.array(["funny lol", "lets play baseball", "food is ready for you"])
X1_indices = sentences_to_indices(X1,word_to_index, max_len = 5)
print("X1 =", X1)
print("X1_indices =", X1_indices)
"""
Explanation: Run the following cell to check what sentences_to_indices() does, and check your results.
End of explanation
"""
# GRADED FUNCTION: pretrained_embedding_layer

def pretrained_embedding_layer(word_to_vec_map, word_to_index):
    """
    Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors.

    Arguments:
    word_to_vec_map -- dictionary mapping words to their GloVe vector representation.
    word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)

    Returns:
    embedding_layer -- pretrained layer Keras instance
    """
    vocab_len = len(word_to_index) + 1  # adding 1 to fit Keras embedding (requirement)

    # Generalization: infer the embedding dimensionality from any vector in
    # the map instead of hard-coding a lookup of the word "cucumber" (which
    # may be absent from smaller vocabularies).
    emb_dim = next(iter(word_to_vec_map.values())).shape[0]

    # Initialize the embedding matrix: one row of zeros per vocabulary index
    # (row 0 stays zero and serves as the padding vector).
    emb_matrix = np.zeros((vocab_len, emb_dim))

    # Row `index` holds the GloVe vector of the `index`-th vocabulary word.
    for word, index in word_to_index.items():
        emb_matrix[index, :] = word_to_vec_map[word]

    # Non-trainable Keras embedding layer with the correct input/output sizes.
    embedding_layer = Embedding(input_dim=vocab_len, output_dim=emb_dim, trainable=False)

    # Build the embedding layer; it is required before setting the weights of
    # the embedding layer. Do not modify the "None".
    embedding_layer.build((None,))

    # Load the pretrained GloVe weights into the layer.
    embedding_layer.set_weights([emb_matrix])

    return embedding_layer
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
print("weights[0][1][3] =", embedding_layer.get_weights()[0][1][3])
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**X1 =**
</td>
<td>
['funny lol' 'lets play baseball' 'food is ready for you']
</td>
</tr>
<tr>
<td>
**X1_indices =**
</td>
<td>
[[ 155345. 225122. 0. 0. 0.] <br>
[ 220930. 286375. 151266. 0. 0.] <br>
[ 151204. 192973. 302254. 151349. 394475.]]
</td>
</tr>
</table>
Let's build the Embedding() layer in Keras, using pre-trained word vectors. After this layer is built, you will pass the output of sentences_to_indices() to it as an input, and the Embedding() layer will return the word embeddings for a sentence.
Exercise: Implement pretrained_embedding_layer(). You will need to carry out the following steps:
1. Initialize the embedding matrix as a numpy array of zeroes with the correct shape.
2. Fill in the embedding matrix with all the word embeddings extracted from word_to_vec_map.
3. Define Keras embedding layer. Use Embedding(). Be sure to make this layer non-trainable, by setting trainable = False when calling Embedding(). If you were to set trainable = True, then it will allow the optimization algorithm to modify the values of the word embeddings.
4. Set the embedding weights to be equal to the embedding matrix
End of explanation
"""
# GRADED FUNCTION: Emojify_V2
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
    """
    Build the Keras graph for the Emojify-v2 model.

    Arguments:
    input_shape -- shape of the input, usually (max_len,)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)

    Returns:
    model -- a model instance in Keras
    """
    ### START CODE HERE ###
    # Graph input: a batch of sentences encoded as integer word indices.
    sentence_indices = Input(shape=input_shape, dtype='int32')

    # Embedding layer pre-loaded with frozen GloVe vectors; look up each index.
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
    embeddings = embedding_layer(sentence_indices)

    # Two stacked 128-unit LSTMs: the first returns the full hidden-state
    # sequence so the second can consume it; the second returns only its
    # final hidden state. Dropout(0.5) after each for regularization.
    x = LSTM(128, return_sequences=True)(embeddings)
    x = Dropout(0.5)(x)
    x = LSTM(128)(x)
    x = Dropout(0.5)(x)

    # Project onto the 5 emoji classes, then normalize with softmax.
    x = Dense(5)(x)
    x = Activation('softmax')(x)

    # Wrap the graph from sentence_indices to the softmax output.
    model = Model(inputs=sentence_indices, outputs=x)
    ### END CODE HERE ###

    return model
"""
Explanation: Expected Output:
<table>
<tr>
<td>
**weights[0][1][3] =**
</td>
<td>
-0.3403
</td>
</tr>
</table>
2.3 Building the Emojifier-V2
Lets now build the Emojifier-V2 model. You will do so using the embedding layer you have built, and feed its output to an LSTM network.
<img src="images/emojifier-v2.png" style="width:700px;height:400px;"> <br>
<caption><center> Figure 3: Emojifier-v2. A 2-layer LSTM sequence classifier. </center></caption>
Exercise: Implement Emojify_V2(), which builds a Keras graph of the architecture shown in Figure 3. The model takes as input an array of sentences of shape (m, max_len, ) defined by input_shape. It should output a softmax probability vector of shape (m, C = 5). You may need Input(shape = ..., dtype = '...'), LSTM(), Dropout(), Dense(), and Activation().
End of explanation
"""
# Build the Emojifier-V2 model for sentences of length maxLen and inspect the architecture.
model = Emojify_V2((maxLen,), word_to_vec_map, word_to_index)
model.summary()
"""
Explanation: Run the following cell to create your model and check its summary. Because all sentences in the dataset are less than 10 words, we chose max_len = 10. You should see your architecture, it uses "20,223,927" parameters, of which 20,000,050 (the word embeddings) are non-trainable, and the remaining 223,877 are trainable. Because our vocabulary size has 400,001 words (with valid indices from 0 to 400,000) there are 400,001*50 = 20,000,050 non-trainable parameters.
End of explanation
"""
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
"""
Explanation: As usual, after creating your model in Keras, you need to compile it and define what loss, optimizer and metrics you want to use. Compile your model using categorical_crossentropy loss, adam optimizer and ['accuracy'] metrics:
End of explanation
"""
# Convert training sentences to padded index arrays and labels to one-hot vectors.
X_train_indices = sentences_to_indices(X_train, word_to_index, maxLen)
Y_train_oh = convert_to_one_hot(Y_train, C = 5)
"""
Explanation: It's time to train your model. Your Emojifier-V2 model takes as input an array of shape (m, max_len) and outputs probability vectors of shape (m, number of classes). We thus have to convert X_train (array of sentences as strings) to X_train_indices (array of sentences as list of word indices), and Y_train (labels as indices) to Y_train_oh (labels as one-hot vectors).
End of explanation
"""
model.fit(X_train_indices, Y_train_oh, epochs = 50, batch_size = 32, shuffle=True)
"""
Explanation: Fit the Keras model on X_train_indices and Y_train_oh. We will use epochs = 50 and batch_size = 32.
End of explanation
"""
# Evaluate the trained model on the held-out test set.
X_test_indices = sentences_to_indices(X_test, word_to_index, max_len = maxLen)
Y_test_oh = convert_to_one_hot(Y_test, C = 5)
loss, acc = model.evaluate(X_test_indices, Y_test_oh)
print()
print("Test accuracy = ", acc)
"""
Explanation: Your model should perform close to 100% accuracy on the training set. The exact accuracy you get may be a little different. Run the following cell to evaluate your model on the test set.
End of explanation
"""
# This code allows you to see the mislabelled examples: for each test sentence,
# compare the argmax prediction against the true label and print disagreements.
C = 5
y_test_oh = np.eye(C)[Y_test.reshape(-1)]
X_test_indices = sentences_to_indices(X_test, word_to_index, maxLen)
pred = model.predict(X_test_indices)
for i in range(len(X_test)):
    # (Removed an unused `x = X_test_indices` assignment that did nothing.)
    num = np.argmax(pred[i])
    if(num != Y_test[i]):
        print('Expected emoji:'+ label_to_emoji(Y_test[i]) + ' prediction: '+ X_test[i] + label_to_emoji(num).strip())
"""
Explanation: You should get a test accuracy between 80% and 95%. Run the cell below to see the mislabelled examples.
End of explanation
"""
# Change the sentence below to see your prediction. Make sure all the words are in the Glove embeddings.
# Predicts a single custom sentence and prints it next to its predicted emoji.
x_test = np.array(['not feeling happy'])
X_test_indices = sentences_to_indices(x_test, word_to_index, maxLen)
print(x_test[0] +' '+ label_to_emoji(np.argmax(model.predict(X_test_indices))))
"""
Explanation: Now you can try it on your own example. Write your own sentence below.
End of explanation
"""
|
dennys-bd/Udacity-Deep-Learning | 3 - Convolutional Neural Net/Scripts/Simple_Autoencoder.ipynb | mit | %matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
"""
Explanation: A Simple Autoencoder
We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.
In this notebook, we'll build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.
End of explanation
"""
# Display one training digit: each image is a flat 784-vector, reshaped back to 28x28.
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
"""
Explanation: Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
End of explanation
"""
# Build the TF1.x graph for a single-hidden-layer autoencoder:
# 784 inputs -> 32-unit ReLU bottleneck (encoder) -> 784 logits -> sigmoid (decoder).
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
image_size = mnist.train.images.shape[1]
# Input and target placeholders (targets equal inputs for reconstruction)
inputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, image_size), name='targets')
# Output of hidden layer, single fully connected layer here with ReLU activation
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits, fully connected layer with no activation
logits = tf.layers.dense(encoded, image_size, activation=None)
# Sigmoid output from logits (pixel values are in [0, 1])
decoded = tf.nn.sigmoid(logits, name='output')
# Sigmoid cross-entropy loss, computed on the raw logits for numerical stability
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Mean of the loss over pixels and batch
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
"""
Explanation: We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.
Exercise: Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. Feel free to use TensorFlow's higher level API, tf.layers. For instance, you would use tf.layers.dense(inputs, units, activation=tf.nn.relu) to create a fully connected layer with a ReLU activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this tf.nn.sigmoid_cross_entropy_with_logits (documentation). You should note that tf.nn.sigmoid_cross_entropy_with_logits takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.
End of explanation
"""
# Create the session
# (TF1.x graph mode: the ops defined above only execute inside a session.)
sess = tf.Session()
"""
Explanation: Training
End of explanation
"""
# Training loop: 20 passes over the data, 200 examples per gradient step.
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        # Autoencoder training: the target is the input itself.
        batch = mnist.train.next_batch(batch_size)
        feed = {inputs_: batch[0], targets_: batch[0]}
        batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
"""
Explanation: Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Calling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightforward training with TensorFlow. We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed).
End of explanation
"""
# Plot 10 test digits (top row) against their reconstructions (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
"""
Explanation: Checking out the results
Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.
End of explanation
"""
|
probml/pyprobml | notebooks/misc/elegy_intro.ipynb | mit | %%capture
!pip install git+https://github.com/deepmind/dm-haiku
#!pip install -q clu ml-collections git+https://github.com/google/flax
%%capture
! pip install --upgrade pip
! pip install elegy datasets matplotlib
"""
Explanation: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/elegy_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Introduction to Elegy
This is slightly modified from
https://poets-ai.github.io/elegy/getting-started/high-level-api/
and
https://poets-ai.github.io/elegy/getting-started/low-level-api/
In this tutorial we will explore the basic features of Elegy. If you are a Keras user you should feel at home, if you are currently learning JAX things will appear much more streamlined. To get started you will first need to install the following dependencies:
End of explanation
"""
from datasets.load import load_dataset
# Fetch MNIST via the HuggingFace `datasets` hub and expose it as numpy arrays.
dataset = load_dataset("mnist")
dataset.set_format("np")
X_train = dataset["train"]["image"]
y_train = dataset["train"]["label"]
X_test = dataset["test"]["image"]
y_test = dataset["test"]["label"]
print("X_train:", X_train.shape, X_train.dtype)
print("y_train:", y_train.shape, y_train.dtype)
print("X_test:", X_test.shape, X_test.dtype)
print("y_test:", y_test.shape, y_test.dtype)
"""
Explanation: Loading the Data
In this tutorial we will train a Neural Network on the MNIST dataset, for this we will first need to download and load the data into memory. Here we will use the datasets library to load the dataset.
End of explanation
"""
import jax.numpy as jnp
import jax
import elegy as eg
class MLP(eg.Module):
    """Two-hidden-layer multilayer perceptron for MNIST classification.

    Pixel inputs are rescaled from [0, 255] to [0, 1], flattened, passed
    through two ReLU-activated linear layers of widths ``n1`` and ``n2``,
    and projected onto 10 class logits.
    """

    def __init__(self, n1: int, n2: int):
        self.n1 = n1
        self.n2 = n2

    @eg.compact
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        # Normalize pixel intensities and flatten each image to a vector.
        h = eg.nn.Flatten()(x.astype(jnp.float32) / 255.0)
        # Hidden layer 1.
        h = jax.nn.relu(eg.nn.Linear(self.n1)(h))
        # Hidden layer 2.
        h = jax.nn.relu(eg.nn.Linear(self.n2)(h))
        # Logits over the 10 digit classes.
        return eg.nn.Linear(10)(h)
"""
Explanation: Defining the Architecture
The first thing we need to do is define our model's architecture inside a Module, to do this we just create a class that inherites from Module and implement a __call__ method. In this example we will create a simple 2 layer MLP:
End of explanation
"""
import optax
# Bundle the architecture, losses (crossentropy + L2 weight penalty),
# accuracy metric and Adam optimizer into a single Elegy Model.
model = eg.Model(
    module=MLP(n1=300, n2=100),
    loss=[
        eg.losses.Crossentropy(),
        eg.regularizers.L2(l=1e-4),
    ],
    metrics=eg.metrics.Accuracy(),
    optimizer=optax.adam(1e-3),
)
"""
Explanation: This code should feel familiar to most Keras / PyTorch users, the main difference is that we are using the @compact decorator to define submodules inline (e.g. Linear) inline, this tends to produce much shorter and readable code.
Creating the Model
Now that we have this module we can create an Elegy Model which is Elegy's central API:
End of explanation
"""
model.summary(X_train[:64])
"""
Explanation: If you are a Keras user this code should look familiar, main differences are:
You need to pass a module with the architecture.
loss and metrics are a bit more flexible in that they do not need to match the label's structure.
There is no compile step, all its done in the constructor.
For the optimizer you can use any optax optimizer.
As in Keras, you can get a rich description of the model by calling Model.summary with a sample input:
End of explanation
"""
%%time
# Train for 10 epochs of 200 steps each; the ModelCheckpoint callback
# persists the best model (by validation metrics) under models/high-level.
history = model.fit(
    inputs=X_train,
    labels=y_train,
    epochs=10,
    steps_per_epoch=200,
    batch_size=64,
    validation_data=(X_test, y_test),
    shuffle=True,
    verbose=3,
    callbacks=[eg.callbacks.ModelCheckpoint("models/high-level", save_best_only=True)],
)
Explanation: Training the Model
We are now ready to pass our model some data to start training, like in Keras this is done via the fit method which contains more or less the same signature. Elegy support a variety of input data sources like Tensorflow Dataset, Pytorch DataLoader, Elegy DataLoader, and Python Generators, check out the guide on Data Sources for more information.
The following code will train our model for 10 epochs while limiting each epoch to 200 steps and using a batch size of 64:
End of explanation
"""
# Inspect the History object returned by fit: .history maps metric names to per-epoch values.
print(history)
print(history.history)
print(history.history.keys())
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot training vs. validation curves for each metric in *history*.

    Assumes ``history.history`` stores every training metric followed by a
    matching ``val_<metric>`` entry, so the first half of the keys are the
    training metrics.
    """
    # len()/iteration work on the dict directly; the .keys() calls were redundant.
    n_plots = len(history.history) // 2
    plt.figure(figsize=(14, 24))
    for i, key in enumerate(list(history.history)[:n_plots]):
        metric = history.history[key]
        val_metric = history.history[f"val_{key}"]
        plt.subplot(n_plots, 1, i + 1)
        plt.plot(metric, "o-", label=f"Training {key}")
        plt.plot(val_metric, "x-", label=f"Validation {key}")
        plt.legend(loc="lower right")
        plt.ylabel(key)
        plt.title(f"Training and Validation {key}")
    plt.show()
plot_history(history)
# Score the trained model on the held-out test set.
ev = model.evaluate(x=X_test, y=y_test)
print(ev)
"""
Explanation: The ModelCheckpoint callback will periodically save the model in a folder called "models/high-level", we will use it later.
fit returns a History object which of the losses and metrics during training which we can visualize.
Plotting learning curves
End of explanation
"""
import numpy as np
# Draw 9 random test images (sampling with replacement) and predict their class scores.
idxs = np.random.randint(0, len(X_test), size=(9,))
x_sample = X_test[idxs]
y_pred = model.predict(x=x_sample)
print(y_pred.shape)
"""
Explanation: Generating Predictions
Having our trained model we can now get some samples from the test set and generate some predictions. Lets select 9 random images and call .predict:
End of explanation
"""
# Show the 9 sampled digits in a 3x3 grid, each titled with its predicted class.
plt.figure(figsize=(12, 12))
for k in range(9):
    plt.subplot(3, 3, k + 1)
    plt.title(f"{np.argmax(y_pred[k])}")
    plt.imshow(x_sample[k], cmap="gray")
"""
Explanation: Easy right? Finally lets plot the results to see if they are accurate.
End of explanation
"""
model.save("mymodel")
!ls
!ls mymodel
"""
Explanation: Serialization
To serialize the Model you can use the model.save(...), this will create a folder with some files that contain the model's code plus all parameters and states.
End of explanation
"""
!ls models
!ls models/high-level
# Reload the best checkpoint saved by ModelCheckpoint and verify it scores
# the same: the ids confirm we are holding a brand-new model object.
# current model reference
print("current model id:", id(model))
model.evaluate(x=X_test, y=y_test)
model_old = model
# load model from disk
model = eg.load("models/high-level")
# new model reference
print("new model id: ", id(model))
# check that it works!
model.evaluate(x=X_test, y=y_test)
"""
Explanation: However since we had previously used the ModelCheckpoint callback we can load it using elegy.load. Lets get a new model reference containing the same weights and call its evaluate method to verify it loaded correctly:
End of explanation
"""
model.saved_model(x_sample, "saved-models/high-level")
!ls saved-models/high-level
"""
Explanation: You can also serialize your Elegy Model as a TensorFlow SavedModel which is portable to many platforms many platforms and services, to do this you can use the saved_model method. saved_model will convert the function that creates the predictions for your Model (pred_step) in Jax to a TensorFlow version via jax2tf and then serialize it to disk.
The function saved_model accepts a sample to infer the shapes, the path where the model will be saved at, and a list of batch sizes for the different signatures it accepts. Due to some current limitations in Jax it is not possible to create signatures with dynamic dimensions so you must specify a couple which might fit you needs.
End of explanation
"""
# Load the exported SavedModel with TensorFlow and confirm it predicts correctly.
import tensorflow as tf
saved_model = tf.saved_model.load("saved-models/high-level")
y_pred_tf = saved_model(x_sample.astype(np.int32))
plt.figure(figsize=(12, 12))
for i in range(3):
    for j in range(3):
        k = 3 * i + j
        plt.subplot(3, 3, k + 1)
        plt.title(f"{np.argmax(y_pred_tf[k])}")
        plt.imshow(x_sample[k], cmap="gray")
"""
Explanation: We can test our saved model by loading it with TensorFlow and generating a couple of predictions as we did previously:
End of explanation
"""
import jax
import numpy as np
import jax.numpy as jnp
import typing as tp
import elegy as eg
M = tp.TypeVar("M", bound=eg.Model)
class LinearClassifier(eg.Model):
    """Pure-JAX linear (softmax) classifier using Elegy's low-level API.

    Overrides init_step / pred_step / test_step directly instead of wrapping
    a Module; grad_step and train_step come from eg.Model's defaults.
    """

    # Trainable parameters registered as pytree nodes so Elegy/JAX can
    # trace and update them through the optimizer.
    w: jnp.ndarray = eg.Parameter.node()
    b: jnp.ndarray = eg.Parameter.node()

    def __init__(
        self,
        features_out: int,
        loss: tp.Any = None,
        metrics: tp.Any = None,
        optimizer=None,
        seed: int = 42,
        eager: bool = False,
    ):
        # Number of output classes; everything else is forwarded to eg.Model.
        self.features_out = features_out
        super().__init__(
            loss=loss,
            metrics=metrics,
            optimizer=optimizer,
            seed=seed,
            eager=eager,
        )

    def init_step(self: M, key: jnp.ndarray, inputs: jnp.ndarray) -> M:
        # Infer the flattened input size from a sample batch, then create
        # uniformly-initialized weights and zero biases.
        features_in = np.prod(inputs.shape[1:])
        self.w = jax.random.uniform(
            key,
            shape=[
                features_in,
                self.features_out,
            ],
        )
        self.b = jnp.zeros([self.features_out])
        # The optimizer must also be initialized against the parameter pytree.
        assert self.optimizer is not None
        self.optimizer = self.optimizer.init(self)
        return self

    def pred_step(self: M, inputs: tp.Any) -> eg.PredStepOutput[M]:
        # flatten + scale (pixel values assumed in [0, 255])
        inputs = jnp.reshape(inputs, (inputs.shape[0], -1)) / 255
        # linear
        logits = jnp.dot(inputs, self.w) + self.b
        return logits, self

    def test_step(
        self: M,
        inputs,
        labels,
    ) -> eg.TestStepOutput[M]:
        model = self
        # forward
        logits, model = model.pred_step(inputs)
        # crossentropy loss, computed manually from log-softmax of the logits
        target = jax.nn.one_hot(labels["target"], self.features_out)
        loss = jnp.mean(-jnp.sum(target * jax.nn.log_softmax(logits), axis=-1))
        # metrics reported during training/evaluation
        logs = dict(
            acc=jnp.mean(jnp.argmax(logits, axis=-1) == labels["target"]),
            loss=loss,
        )
        return loss, logs, model
"""
Explanation: Distributed training
To parallelize training and inference using pmap on a mulit-core TPU you just need to add
model = model.distributed()
after creating the model.
For an example, try running https://github.com/probml/pyprobml/blob/master/scripts/mnist_elegy_distributed.py on a TPU VM v3-8.
In colab, there will not be any speedup, since there is only 1 GPU. (I have not tried TPU mode in colab.)
Low-level API
Introduction
The low-level API lets you redefine what happens during the various stages of training, evaluation and inference by implementing some methods in a custom class. Here is the list of methods you can define along with the high-level method that uses it:
| Low-level Method | High-level Method |
| :- | :- |
| pred_step | predict |
| test_step | evaluate |
| grad_step | NA |
| train_step | fit |
Check out the guides on the low-level API for more information.
In this tutorial we are going to implement Linear Classifier using pure Jax by overridingpred_step which defines the forward pass and test_step which defines loss and metrics of our model.
pred_step returns a tuple with:
* y_pred: predictions of the model
* states: a elegy.States namedtuple that contains the states for thing like network trainable parameter, network states, metrics states, optimizer states, rng state.
test_step returns a tuple with:
* loss: the scalar loss use to calculate the gradient
* logs: a dictionary with the logs to be reported during training
* states: a elegy.States namedtuple that contains the states for thing like network trainable parameter, network states, metrics states, optimizer states, rng state.
Since Jax is functional you will find that low-level API is very explicit with state management, that is, you always get the currrent state as input and you return the new state as output. Lets define test_step to make things clearer:
Linear classifier
End of explanation
"""
import optax
# Instantiate the custom low-level model; passing an optimizer enables the
# inherited grad_step/train_step, so fit works out of the box.
model = LinearClassifier(features_out=10, optimizer=optax.adam(1e-3))
history = model.fit(
    inputs=X_train,
    labels=y_train,
    epochs=10,
    steps_per_epoch=200,
    batch_size=64,
    validation_data=(X_test, y_test),
    shuffle=True,
    callbacks=[eg.callbacks.ModelCheckpoint("models/low-level", save_best_only=True)],
)
import matplotlib.pyplot as plt
def plot_history(history):
    """Plot training vs. validation curves for each metric in *history*.

    Assumes ``history.history`` stores every training metric followed by a
    matching ``val_<metric>`` entry, so the first half of the keys are the
    training metrics.
    """
    # len()/iteration work on the dict directly; the .keys() calls were redundant.
    n_plots = len(history.history) // 2
    plt.figure(figsize=(14, 24))
    for i, key in enumerate(list(history.history)[:n_plots]):
        metric = history.history[key]
        val_metric = history.history[f"val_{key}"]
        plt.subplot(n_plots, 1, i + 1)
        plt.plot(metric, "o-", label=f"Training {key}")
        plt.plot(val_metric, "x-", label=f"Validation {key}")
        plt.legend(loc="lower right")
        plt.ylabel(key)
        plt.title(f"Training and Validation {key}")
    plt.show()
plot_history(history)
"""
Explanation: Notice the following:
* We define a bunch of arguments with specific names, Elegy uses Dependency Injection so you can just request what you need.
* initializing tells us if we should initialize our parameters or not, here we are directly creating them ourselves but if you use a Module system you can conditionally call its init method here.
* Our model is defined by a simple linear function.
* Defined a simple crossentropy loss and an accuracy metric, we added both the the logs.
* We set the updated States.net_params with the w and b parameters so we get them as an input on the next run after they are initialized.
* States.update offers a clean way inmutably update the states without having to copy all fields to a new States structure.
Remember test_step only defines what happens during evaluate, however, Model's default implementation has a structure where on method is defined in terms of another:
pred_step ⬅ test_step ⬅ grad_step ⬅ train_step
Because of this, we get the train_step / fit for free if we just pass an optimizer to the the constructor as we are going to do next:
Training
End of explanation
"""
|
M0nica/python-foundations-hw | 08/08.ipynb | mit | # workon dataanalysis - my virtual environment
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# df = pd.read_table('34933-0001-Data.tsv')
odf = pd.read_csv('accreditation_2016_03.csv')
odf.head()
odf.columns
odf['Campus_City'].value_counts().head(10)
top_cities = odf['Campus_City'].value_counts().head(10).plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
top_cities.set_title('Top 10 College Cities (By Number of Colleges in State)')
top_cities.set_xlabel('City')
top_cities.set_ylabel('# of Colleges')
plt.savefig('topcollegecities.png')
# Top 10 states by number of campuses (this chart uses the state column, not cities).
top_states = odf['Campus_State'].value_counts().head(10).plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
top_states.set_title('Top 10 College States (By Number of Campuses in State)')
# Bug fixes: the x-axis label said 'City' on a state chart, and savefig wrote
# to 'topcollegecities.png', overwriting the city chart saved just above.
top_states.set_xlabel('State')
top_states.set_ylabel('# of Colleges')
plt.savefig('topcollegestates.png')
odf['Accreditation_Status'].value_counts()
# Load the Stack Overflow 2015 developer survey (mac_roman handles its non-UTF-8 encoding).
df = pd.read_csv('Full Results - Stack Overflow Developer Survey - 2015 2.csv', encoding ='mac_roman')
df.head()
df.columns
df.info()
"""
Explanation: Note: you can find my iPython Notebook for Dataset 1 here -> https://github.com/M0nica/2016-new-coder-survey
End of explanation
"""
df['Age'].value_counts().head(10).plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
"""
Explanation: How old are the programmers that answered this survey?
End of explanation
"""
df['Industry'].value_counts().head(10).plot(kind="barh", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
"""
Explanation: What industries are these individuals working in?
End of explanation
"""
# Top 10 preferred text editors, shown both horizontally and vertically.
df['Preferred text editor'].value_counts().head(10).plot(kind="barh", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
df['Preferred text editor'].value_counts().head(10).plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
# df['Training & Education: BS in CS'].value_counts().head(10).plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
"""
Explanation: What text editor do these individuals prefer?
End of explanation
"""
df['Occupation'].value_counts()
"""
Explanation: What occupation were the people who answered this survey? What is the most popular occupation?
End of explanation
"""
df['Occupation'].value_counts(ascending = 'False')
"""
Explanation: What is the least popular occupation?
End of explanation
"""
df.groupby('Gender')['Occupation'].value_counts().plot(kind="bar", color = ['#599ad3', '#f9a65a']) # too mmuch data to appropriately display
"""
Explanation: Job Distribution by Gender -- woah too much data
End of explanation
"""
# Keep only respondents who answered Male or Female, then count each group.
gender_df = df[(df['Gender'] == 'Male') | (df['Gender'] == 'Female')]
print(gender_df['Gender'].value_counts())
"""
Explanation: How many males answer this survey? How many females answered this survey?
End of explanation
"""
gender_df.groupby('Gender')['Years IT / Programming Experience'].value_counts().sort_values().plot(kind="bar", color = ['#599ad3', '#f9a65a'])
"""
Explanation: Is there a difference in year's of experience based on one's gender?
End of explanation
"""
# Narrow gender_df down to full-stack web developers and compare counts by gender.
gender_df.groupby('Gender')['Occupation'].value_counts()
gender_df = gender_df[gender_df['Occupation'] == "Full-stack web developer"]
gender_df.groupby('Gender')['Occupation'].value_counts().plot(kind="bar", color = ['#599ad3', '#f9a65a'])
#gender_df.groupby('Gender')['Years IT / Programming Experience'].value_counts().plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
df['Age'].value_counts()
"""
Explanation: How many full-stack web developers are male versus female?
End of explanation
"""
gender_df.groupby('Gender')['Age'].value_counts().sort_values().plot(kind="bar", color = ['#624ea7', '#599ad3', '#f9a65a', '#9e66ab', 'purple'])
# Map age brackets onto an ordinal 0-7 scale with one dict-based replace
# (the original chained eight .apply(str).replace(...) calls).
age_map = {"< 20": "0", "20-24": "1", "25-29": "2", "30-34": "3",
           "35-39": "4", "40-50": "5", "51-60": "6", "> 60": "7"}
df["AgeScale"] = df["Age"].apply(str).replace(age_map)
print(df["AgeScale"].head(10))
# Drop respondents who declined to give their age, then make the scale numeric.
years_df = df[df['AgeScale'] != "Prefer not to disclose"]
years_df['AgeScale'] = years_df['AgeScale'].astype(float)
# Bug fix: print(years_df).head() called .head() on None (print's return value)
# and raised AttributeError; the call belongs inside print().
print(years_df.head())
years_df['Years IT / Programming Experience'].value_counts()
# Ordinal-encode experience with one dict-based replace (instead of chained
# .apply(str).replace(...) calls), then cast to float for plotting.
experience_map = {"Less than 1 year": "0", "1 - 2 years": "1", "2 - 5 years": "2",
                  "6 - 10 years": "3", "11+ years": "4"}
years_df['ExperienceRank'] = years_df['Years IT / Programming Experience'].apply(str).replace(experience_map).astype(float)
# years_df.head()
# Inspect the two encoded columns, then scatter experience vs. age.
years_df['ExperienceRank'].value_counts()
years_df['AgeScale'].value_counts()
#years_df['ExperienceRank'] = float(years_df['ExperienceRank'])
# years_df['AgeScale'] = float(years_df['AgeScale'])
# years_df['AgeScale'] = years_df['AgeScale'].apply(int)
#years_df['ExperienceRank'] = parseInt(years_df['ExperienceRank'])
#years_df['ExperienceRank'] = pd.Series(years_df['ExperienceRank'])
#years_df['AgeScale'] = pd.Series(years_df['AgeScale'])
moneyScatter = years_df.plot(kind='scatter', x='ExperienceRank', y='AgeScale', alpha=0.2) # categorical data does not display well on scatter plots
#moneyScatter.set_title('Distribution of Money Spent Amongst Respondents to the Survey by Age')
#moneyScatter.set_xlabel('Months Programming')
#moneyScatter.set_ylabel('Hours Spent Learning Each Week')
#plt.savefig('studyingovertime.png')
years_df['ExperienceRank'].describe()
# Coerce both encoded columns to numeric dtypes in one pass.
years_df[['ExperienceRank','AgeScale']] = years_df[['ExperienceRank','AgeScale']].apply(pd.to_numeric)
# years_df.apply(lambda x: pd.to_numeric(x, errors='ignore'))
years_df['ExperienceRank'].describe()
years_df['ExperienceRank'].head()
years_df['AgeScale'].head()
"""
Explanation: What is the age distribution by gender?
End of explanation
"""
|
bbfamily/abu | abupy_lecture/23-美股UMP决策(ABU量化使用文档).ipynb | gpl-3.0 | # 基础库导入
from __future__ import print_function
from __future__ import division
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import ipywidgets
%matplotlib inline
import os
import sys
# Insert at position 0 so the GitHub checkout of abupy shadows any pip-installed
# copy, avoiding version-mismatch problems between the two.
sys.path.insert(0, os.path.abspath('../'))
import abupy
# Enable the sandbox data so the data environment matches the book's.
abupy.env.enable_example_env_ipython()
from abupy import AbuFactorAtrNStop, AbuFactorPreAtrNStop, AbuFactorCloseAtrNStop, AbuFactorBuyBreak
from abupy import abu, EMarketTargetType, AbuMetricsBase, ABuMarketDrawing, ABuProgress, ABuSymbolPd
from abupy import EMarketTargetType, EDataCacheType, EMarketSourceType, EMarketDataFetchMode, EStoreAbu, AbuUmpMainMul
from abupy import AbuUmpMainDeg, AbuUmpMainJump, AbuUmpMainPrice, AbuUmpMainWave, feature, AbuFeatureDegExtend
from abupy import AbuUmpEdgeDeg, AbuUmpEdgePrice, AbuUmpEdgeWave, AbuUmpEdgeFull, AbuUmpEdgeMul, AbuUmpEegeDegExtend
from abupy import AbuUmpMainDegExtend, ump, Parallel, delayed, AbuMulPidProgress
# Turn the sandbox data back off.
abupy.env.disable_example_env_ipython()
"""
Explanation: ABU量化系统使用文档
<center>
<img src="./image/abu_logo.png" alt="" style="vertical-align:middle;padding:10px 20px;"><font size="6" color="black"><b>第23节 美股UMP决策</b></font>
</center>
作者: 阿布
阿布量化版权所有 未经允许 禁止转载
abu量化系统github地址 (欢迎+star)
本节ipython notebook
上一节通过切割美股市场训练集测试集symbol,分别对切割的训练集和测试集做了回测,本节将示例美股ump主裁,边裁决策。
首先导入abupy中本节使用的模块:
End of explanation
"""
# Work on the US market; read all data from the local cache only (no network).
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
# Load the train/test back-test results stored by the previous notebook.
abu_result_tuple = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                             custom_name='train_us')
abu_result_tuple_test = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                                  custom_name='test_us')
ABuProgress.clear_output()
print('训练集结果:')
metrics_train = AbuMetricsBase.show_general(*abu_result_tuple, returns_cmp=True ,only_info=True)
print('测试集结果:')
metrics_test = AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
"""
Explanation: 下面读取上一节存储的训练集和测试集回测数据,如下所示:
End of explanation
"""
# Globally switch to the US market: ump saves/loads its fitted judges per market type.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
us_ump_deg = None
us_ump_price = None
us_ump_jump = None
us_ump_wave = None
# Train the main judges on the training-set trades.
orders_pd_train_us = abu_result_tuple.orders_pd
def train_main_ump():
    """Fit and persist the four built-in main judges (Deg/Price/Jump/Wave) on the US training trades."""
    print('AbuUmpMainDeg begin...')
    AbuUmpMainDeg.ump_main_clf_dump(orders_pd_train_us, save_order=False, show_order=False)
    print('AbuUmpMainPrice begin...')
    AbuUmpMainPrice.ump_main_clf_dump(orders_pd_train_us, save_order=False, show_order=False)
    print('AbuUmpMainJump begin...')
    AbuUmpMainJump.ump_main_clf_dump(orders_pd_train_us, save_order=False, show_order=False)
    # BUG FIX: the progress message said 'AbuUmpMainDegExtend' but the class being
    # fitted here is AbuUmpMainWave, the fourth built-in main judge.
    print('AbuUmpMainWave begin...')
    AbuUmpMainWave.ump_main_clf_dump(orders_pd_train_us, save_order=False, show_order=False)
    # Reuse load_main_ump afterwards so the multi-process workers below do not
    # have to copy the large training objects.
    load_main_ump()
def load_main_ump():
    """Load the four previously fitted main judges into module-level globals."""
    global us_ump_deg, us_ump_price, us_ump_jump, us_ump_wave
    us_ump_deg = AbuUmpMainDeg(predict=True)
    us_ump_price = AbuUmpMainPrice(predict=True)
    us_ump_jump = AbuUmpMainJump(predict=True)
    us_ump_wave = AbuUmpMainWave(predict=True)
    print('load main ump complete!')
def select(select):
    """Widget callback: either train from scratch or load the persisted judges."""
    if select == 'train main ump':
        train_main_ump()
    else:
        load_main_ump()
_ = ipywidgets.interact_manual(select, select=['train main ump', 'load main ump'])
"""
Explanation: 1. 美股训练集主裁, 边裁训练
上一节A股的主裁训练示例了使用两个内置主裁和两个自定义主裁的组合,本节只先使用四个内置主裁的组合: AbuUmpMainDeg, AbuUmpMainPrice, AbuUmpMainJump, AbuUmpMainWave
下面开始训练主裁,第一次运行select:train main ump,然后点击run select,如果已经训练过可select:load main ump直接读取以训练好的主裁:
End of explanation
"""
# Globally switch to the US market: ump saves/loads its fitted judges per market type.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
print('AbuUmpEdgeDeg begin...')
AbuUmpEdgeDeg.ump_edge_clf_dump(orders_pd_train_us)
us_edge_deg = AbuUmpEdgeDeg(predict=True)
print('AbuUmpEdgePrice begin...')
AbuUmpEdgePrice.ump_edge_clf_dump(orders_pd_train_us)
us_edge_price = AbuUmpEdgePrice(predict=True)
print('AbuUmpEdgeWave begin...')
AbuUmpEdgeWave.ump_edge_clf_dump(orders_pd_train_us)
# BUG FIX: the wave edge judge was instantiated as AbuUmpEdgeMul even though the
# class fitted just above is AbuUmpEdgeWave (the section uses Deg/Price/Wave/Full).
us_edge_wave = AbuUmpEdgeWave(predict=True)
# BUG FIX: the progress message said 'AbuUmpEegeDegExtend' but the class fitted
# here is AbuUmpEdgeFull, the fourth built-in edge judge.
print('AbuUmpEdgeFull begin...')
AbuUmpEdgeFull.ump_edge_clf_dump(orders_pd_train_us)
us_edge_full = AbuUmpEdgeFull(predict=True)
print('fit edge complete!')
"""
Explanation: 上一节A股的边裁训练也示例了使用两个内置主裁和两个自定义主裁的组合,本节只先使用四个内置主裁的组合: AbuUmpEdgeDeg, AbuUmpEdgePrice, AbuUmpEdgeWave, AbuUmpEdgeFull。
如下所示,由于边裁的运行机制,所以边裁的训练非常快,这里直接进行训练:
End of explanation
"""
# Back-tests read the last-split test-set trades of the market set in env.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
# Enable the four built-in main judges.
abupy.env.g_enable_ump_main_deg_block = True
abupy.env.g_enable_ump_main_jump_block = True
abupy.env.g_enable_ump_main_price_block = True
abupy.env.g_enable_ump_main_wave_block = True
# Enable the four built-in edge judges.
abupy.env.g_enable_ump_edge_deg_block = True
abupy.env.g_enable_ump_edge_full_block = True
abupy.env.g_enable_ump_edge_price_block = True
abupy.env.g_enable_ump_edge_wave_block = True
# Feature generation must be on: the enabled judges take the features as input.
abupy.env.g_enable_ml_feature = True
# Re-run on the test-set split produced by the previous section.
abupy.env.g_enable_last_split_test = True
"""
Explanation: 2. 使用美股内置主裁,边裁进行回测
内置边裁的开启很简单,只需要通过env中的相关设置即可完成,如下所示,分别开启主裁和边裁的4个内置裁判:
End of explanation
"""
# Initial capital: 5,000,000.
read_cash = 5000000
# Buy factors: the same upward-breakout factors as before (60- and 42-day windows).
buy_factors = [{'xd': 60, 'class': AbuFactorBuyBreak},
               {'xd': 42, 'class': AbuFactorBuyBreak}]
# Sell factors: identical to the previous section.
sell_factors = [
    {'stop_loss_n': 1.0, 'stop_win_n': 3.0,
     'class': AbuFactorAtrNStop},
    {'class': AbuFactorPreAtrNStop, 'pre_atr_n': 1.5},
    {'class': AbuFactorCloseAtrNStop, 'close_atr_n': 1.5}
]
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
"""
Explanation: 买入因子,卖出因子等依然使用相同的设置,如下所示:
End of explanation
"""
abu_result_tuple_test_ump = None
def run_loop_back_ump():
    """Run the test-set back-test with all judges enabled and persist the result."""
    global abu_result_tuple_test_ump
    abu_result_tuple_test_ump, _ = abu.run_loop_back(read_cash,
                                                     buy_factors,
                                                     sell_factors,
                                                     choice_symbols=None,
                                                     start='2012-08-08', end='2017-08-08')
    # Store the result locally so later analysis can reload it without re-running.
    abu.store_abu_result_tuple(abu_result_tuple_test_ump, n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                               custom_name='test_ump_us')
    ABuProgress.clear_output()
def run_load_ump():
    """Reload a previously stored judged back-test result."""
    global abu_result_tuple_test_ump
    abu_result_tuple_test_ump = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                                          custom_name='test_ump_us')
def select_ump(select):
    """Widget callback: run the back-test or load the cached result."""
    if select == 'run loop back ump':
        run_loop_back_ump()
    else:
        run_load_ump()
_ = ipywidgets.interact_manual(select_ump, select=['run loop back ump', 'load test ump data'])
"""
Explanation: 完成裁判组合的开启,即可开始回测,回测操作流程和之前的操作一样:
下面开始回测,第一次运行select:run loop back ump,然后点击run select_ump,如果已经回测过可select:load test ump data直接从缓存数据读取:
End of explanation
"""
# Compare the judged (ump-filtered) test-set result with the unfiltered one.
AbuMetricsBase.show_general(*abu_result_tuple_test_ump, returns_cmp=True, only_info=True)
AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
"""
Explanation: 下面对比美股市场测试集交易开启主裁,边裁拦截和未开启主裁,边裁,结果:
End of explanation
"""
# Back-tests read the last-split test-set trades of the market set in env.
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
# Enable the four built-in main judges.
abupy.env.g_enable_ump_main_deg_block = True
abupy.env.g_enable_ump_main_jump_block = True
abupy.env.g_enable_ump_main_price_block = True
abupy.env.g_enable_ump_main_wave_block = True
# Enable the four built-in edge judges.
abupy.env.g_enable_ump_edge_deg_block = True
abupy.env.g_enable_ump_edge_full_block = True
abupy.env.g_enable_ump_edge_price_block = True
abupy.env.g_enable_ump_edge_wave_block = True
# Feature generation must be on: the enabled judges take the features as input.
abupy.env.g_enable_ml_feature = True
# Re-run on the test-set split produced by the previous section.
abupy.env.g_enable_last_split_test = True
"""
Explanation: 上面的结果可以看出拦截了接近一半多的交易,但是胜率和盈亏比提高的并不多,下面将想其它的办法提高回测效果,如下。
3. 中美裁判配合决策交易
上一节通过A股市场的训练集交易进行了主裁,边裁训练,下面将使用上一节训练的A股市场的裁判和本节训练的几个美股裁判一同进行决策(中美联合决策),示例如何使用不同市场的裁判进行交易决策。
备注:下面的运行需要在完成运行第20节,21节的基础上
美国方面:还是使用刚刚训练好的四个主裁和四个边裁做为内置主裁进行回测设置,如下所示:
End of explanation
"""
feature.clear_user_feature()
# Register AbuFeatureDegExtend (10/30/50/90/120-day fitted-angle features) so
# the back-test records the extra features the extended judges below require.
feature.append_user_feature(AbuFeatureDegExtend)
# Turn on user-defined judges.
ump.manager.g_enable_user_ump = True
# Clear any previously registered user judges first.
ump.manager.clear_user_ump()
# Load the Chinese-market main judges via the market_name parameter.
cn_ump_deg = AbuUmpMainDeg(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_ump_price = AbuUmpMainPrice(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_ump_deg_extend = AbuUmpMainDegExtend(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_ump_mul = AbuUmpMainMul(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
# Load the Chinese-market edge judges via the market_name parameter.
cn_edge_deg = AbuUmpEdgeDeg(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_edge_price = AbuUmpEdgePrice(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_edge_deg_extend = AbuUmpEegeDegExtend(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
cn_edge_mul = AbuUmpEdgeMul(predict=True, market_name=EMarketTargetType.E_MARKET_TARGET_CN)
# Register the Chinese main judges (note: append_user_ump takes instances here).
ump.manager.append_user_ump(cn_ump_deg)
ump.manager.append_user_ump(cn_ump_price)
ump.manager.append_user_ump(cn_ump_deg_extend)
ump.manager.append_user_ump(cn_ump_mul)
# Register the Chinese edge judges.
ump.manager.append_user_ump(cn_edge_deg)
ump.manager.append_user_ump(cn_edge_price)
ump.manager.append_user_ump(cn_edge_deg_extend)
ump.manager.append_user_ump(cn_edge_mul)
"""
Explanation: 中国方面:把上一节训练的所有A股裁判都做为用户自定义的裁判进行添加到裁判系统中,如下所示:
注意下面主裁或者边裁的读取都使用了market_name参数,来声明读取的是中国的裁判
注意下面使用ump.manager.append_user_ump添加的不再是裁判的类名称,需要直接添加裁判对象,具体为什么请阅读源代码
注意下面还需要把10,30,50,90,120日走势拟合角度特征的AbuFeatureDegExtend,做为回测时的新的视角来录制比赛(记录回测特征),因为裁判里面有AbuUmpEegeDegExtend和AbuUmpMainDegExtend,它们需要生成带有10,30,50,90,120日走势拟合角度特征的回测交易单
End of explanation
"""
abupy.env.g_market_target = EMarketTargetType.E_MARKET_TARGET_US
abupy.env.g_data_fetch_mode = EMarketDataFetchMode.E_DATA_FETCH_FORCE_LOCAL
abu_result_tuple_test_ump_cn_us = None
def run_loop_back_cn_us():
    """Back-test the US test set with both US (built-in) and CN (user) judges active."""
    global abu_result_tuple_test_ump_cn_us
    abu_result_tuple_test_ump_cn_us, _ = abu.run_loop_back(read_cash,
                                                           buy_factors,
                                                           sell_factors,
                                                           choice_symbols=None,
                                                           start='2012-08-08', end='2017-08-08')
    # Store the result locally so later analysis can reload it without re-running.
    abu.store_abu_result_tuple(abu_result_tuple_test_ump_cn_us, n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                               custom_name='test_ump_cn_us')
    ABuProgress.clear_output()
def run_load_cn_us():
    """Reload a previously stored CN+US judged back-test result."""
    global abu_result_tuple_test_ump_cn_us
    abu_result_tuple_test_ump_cn_us = abu.load_abu_result_tuple(n_folds=5, store_type=EStoreAbu.E_STORE_CUSTOM_NAME,
                                                                custom_name='test_ump_cn_us')
def select_cn_us(select):
    """Widget callback: run the combined back-test or load the cached result."""
    if select == 'run loop back us&cn':
        run_loop_back_cn_us()
    else:
        run_load_cn_us()
_ = ipywidgets.interact_manual(select_cn_us, select=['run loop back us&cn', 'load us&cn data'])
# Compare the CN+US judged result against the unfiltered test-set result.
AbuMetricsBase.show_general(*abu_result_tuple_test_ump_cn_us, returns_cmp=True, only_info=True)
AbuMetricsBase.show_general(*abu_result_tuple_test, returns_cmp=True, only_info=True)
"""
Explanation: 完成中美裁判组合的开启,即可开始回测,回测操作流程和之前的操作一样:
下面开始回测,第一次运行select:run loop back us&cn,然后点击select_cn_us,如果已经回测过可select:load us&cn data直接从缓存数据读取:
End of explanation
"""
|
scikit-optimize/scikit-optimize.github.io | 0.8/notebooks/auto_examples/plots/partial-dependence-plot-with-categorical.ipynb | bsd-3-clause | print(__doc__)
import sys
from skopt.plots import plot_objective
from skopt import forest_minimize
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from skopt.space import Integer, Categorical
from skopt import plots, gp_minimize
from skopt.plots import plot_objective
"""
Explanation: Partial Dependence Plots with categorical values
Sigurd Carlsen Feb 2019
Holger Nahrstaedt 2020
.. currentmodule:: skopt
Plot objective now supports optional use of partial dependence as well as
different methods of defining parameter values for dependency plots.
End of explanation
"""
def objective(params):
    """Cross-validated negative accuracy of a DecisionTreeClassifier.

    `params` is the list of values proposed by the optimizer, in the same
    order as SPACE; the dimensions named 'dummy' are ignored.
    """
    clf = DecisionTreeClassifier(
        **{dim.name: val for dim, val in
           zip(SPACE, params) if dim.name != 'dummy'})
    # Pass return_X_y by keyword: positional use was deprecated in
    # scikit-learn 0.23 and removed afterwards.
    return -np.mean(cross_val_score(clf, *load_breast_cancer(return_X_y=True)))
"""
Explanation: objective function
Here we define a function that we evaluate.
End of explanation
"""
# Search space: four integer tree hyper-parameters plus three categorical
# dimensions; the two named 'dummy' are deliberately ignored by the objective.
SPACE = [
    Integer(1, 20, name='max_depth'),
    Integer(2, 100, name='min_samples_split'),
    Integer(5, 30, name='min_samples_leaf'),
    Integer(1, 30, name='max_features'),
    Categorical(list('abc'), name='dummy'),
    Categorical(['gini', 'entropy'], name='criterion'),
    Categorical(list('def'), name='dummy'),
]
result = gp_minimize(objective, SPACE, n_calls=20)
"""
Explanation: Bayesian optimization
End of explanation
"""
# Full partial-dependence plot; slow because each plotted point averages many
# extra surrogate-model evaluations.
_ = plot_objective(result, n_points=10)
"""
Explanation: Partial dependence plot
Here we see an example of using partial dependence. Even when setting
n_points all the way down to 10 from the default of 40, this method is
still very slow. This is because partial dependence calculates 250 extra
predictions for each point on the plots.
End of explanation
"""
# No partial dependence: the other dimensions are fixed at the best observed point.
_ = plot_objective(result, sample_source='result', n_points=10)
"""
Explanation: Plot without partial dependence
Here we plot without partial dependence. We see that it is a lot faster.
Also the values for the other parameters are set to the default "result"
which is the parameter set of the best observed value so far. In the case
of funny_func this is close to 0 for all parameters.
End of explanation
"""
# With categorical dimensions 'expected_minimum' is unavailable, so use the
# random-sampling variant; n_minimum_search is the number of random samples.
_ = plot_objective(result, n_points=10, sample_source='expected_minimum_random',
                   minimum='expected_minimum_random', n_minimum_search=10000)
"""
Explanation: Modify the shown minimum
Here we try with setting the other parameters to something other than
"result". When dealing with categorical dimensions we can't use
'expected_minimum'. Therefore we try with "expected_minimum_random"
which is a naive way of finding the minimum of the surrogate by only
using random sampling. n_minimum_search sets the number of random samples,
which is used to find the minimum
End of explanation
"""
# Fix the "minimum" shown in the plots to an explicit parameter vector.
_ = plot_objective(result, n_points=10, sample_source=[15, 4, 7, 15, 'b', 'entropy', 'e'],
                   minimum=[15, 4, 7, 15, 'b', 'entropy', 'e'])
"""
Explanation: Set a minimum location
Lastly we can also define these parameters ourselfs by
parsing a list as the pars argument:
End of explanation
"""
|
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | doc/notebooks/automaton.eliminate_state.ipynb | gpl-3.0 | import vcsn
"""
Explanation: automaton.eliminate_state(state = -1)
In the Brzozowski-McCluskey procedure, remove one state.
Preconditions:
- The labelset is oneset (i.e., the automaton is spontaneous).
- The weightset is expressionset (i.e., the weights are expressions).
- The _state_ is indeed a state of the automaton, or it is -1, in which case a heuristics is used to select the next state.
See also:
- automaton.expression
- automaton.lift
Examples
End of explanation
"""
# Standard (position) automaton for the expression ab*c over B.
a0 = vcsn.B.expression('ab*c').standard()
a0
"""
Explanation: The following examples will be using this simple automaton as input.
End of explanation
"""
# Lift: a spontaneous automaton whose weights are expressions.
a1 = a0.lift()
a1
"""
Explanation: We first need to convert this automaton into a spontaneous automaton labeled with expressions. That's the purpose of automaton.lift.
End of explanation
"""
# Eliminate state 2; the result is a fresh automaton.
a2 = a1.eliminate_state(2)
a2
"""
Explanation: Explicit state elimination
Let's remove state 2:
End of explanation
"""
# The original lifted automaton is unmodified.
a1
"""
Explanation: Note that the result is a fresh automaton: the original automaton is not modified:
End of explanation
"""
# Eliminate state 1 next.
a3 = a2.eliminate_state(1)
a3
"""
Explanation: Let's eliminate state 1.
End of explanation
"""
# The initial state can be eliminated too.
a4 = a3.eliminate_state(0)
a4
"""
Explanation: We can also remove the initial and final states.
End of explanation
"""
# Removing the last state leaves a "lone transition" bearing the answer.
a5 = a4.eliminate_state(1)
a5
"""
Explanation: Eventually, when all the states have been removed, you get a broken automaton, with no states, but a "lone transition" that bears the answer.
End of explanation
"""
# With no argument (state = -1) a heuristic picks the next state to eliminate.
a1.eliminate_state()
a1.eliminate_state().eliminate_state().eliminate_state().eliminate_state()
"""
Explanation: Rest assured that such automata (no states but with one transition) never occur in the normal course of use of Vcsn.
Using the heuristics
Use -1 (or no argument at all) to leave the choice of the next state to eliminate to Vcsn. This is how automaton.expression works.
End of explanation
"""
from IPython.html import widgets
from IPython.display import display
from IPython.utils import traitlets
from vcsn.ipython import interact_h
def slider_eliminate_state(aut):
    ''' Create the list of automata while applying the eliminate_state algorithm.'''
    # auts[i] holds the automaton after i elimination steps; count is the
    # number of states, i.e. the number of steps until none remain.
    count = aut.state_number()
    auts = {}
    auts[0] = aut
    for i in range(count):
        aut = aut.eliminate_state()
        auts[i + 1] = aut
    return auts, count
def update_svg(name, value, new):
    # Slider callback: redisplay the automaton for the new step value.
    # NOTE(review): reads the module-level `auts` built below, not widget state.
    interact_h(lambda: display(auts[new]))
class SliderWidget(widgets.IntSlider):
    # NOTE(review): `auths` is stored but never read back -- the callbacks use
    # the global `auts`; __init__ also does not call super().__init__().
    def __init__(self, auths, count):
        self.auths = auths
        self.value = 0
        self._widget = widgets.IntSlider(description='Algorithm step(s)', min=0, max=count, step=1, value=0)
        self._widget.on_trait_change(update_svg,'value')
    def show(self):
        # Render the slider and the initial (step 0) automaton.
        display(self._widget)
        interact_h(lambda: display(auts[0]))
# Call on the automaton to show.
auts, count = slider_eliminate_state(a1 ** 2)
slider = SliderWidget(auts, count)
slider.show()
"""
Explanation: Interactive Examples
You may use the following widgets to see, step by step, how state elimination works.
End of explanation
"""
|
atcemgil/notes | HamiltonianDynamics.ipynb | mit | %matplotlib inline
import scipy as sc
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
# Integrate the harmonic-oscillator Hamiltonian flow dz/dt = A z by applying
# the matrix exponential expm(dt*A), the exact one-step propagator, so the
# (x, p) trajectory stays on a circle.
A = np.mat('[0,1;-1,0]')
dt = 0.05
T = 100
z = np.mat(np.zeros((2,T)))
H = la.expm(dt*A)
z[:,0] = np.mat('[2.4;0]')
for i in range(1,T):
    z[:,i] = H*z[:,i-1]
plt.plot(z[0,:], z[1,:],'.-r')
ax = plt.gcf().gca()
ax.set_aspect('equal')
plt.show()
"""
Explanation: Hamiltonian Dynamics
Consider an object with mass $m$ on a 2D space sliding freely on a curve $y = h(x)$. Here $h(x)$ gives the height at horizontal coordinate $x$. As the object will be constraint to the curve, it will always be at some coordinate $(x, h(x))$ so we will refer to its position by $x$.
Remember from high school physics that under constant gravity $g$ with velocity $v$ the object has
Potential Energy $U = m g h(x)$
Kinetic Energy $K = \frac{1}{2} m v^2 = \frac{1}{2m}p^2$
Momentum $p = mv$
Under energy preservation we have
$$
H_{\text{total}} = U + K = m g h(x) + \frac{1}{2m} p^2
$$
As $m$ and $g$ are constants, we can define any motion as some trajectory in the $x,p$ coordinate plane as a function of time. Hamiltonian dynamics is the description of this idea in a more general setting for some arbitrary potential function $U$ and kinetic energy $K$.
We define position variables $x$, a potential function $U(x)$, momentum variables $p$ and kinetic energy $K(p)$.
$$
H(x, p) = U(x) + K(p)
$$
The change of position in time is described by the change in the Kinetic energy
$$
\frac{d x_i}{d t} = \frac{\partial H}{\partial p_i}
$$
The change in momentum is in the opposite direction of the change in potential energy
$$
\frac{d p_i}{d t} = -\frac{\partial H}{\partial x_i}
$$
We accelerate if we fall down and deccelerate if we jump up (under the gravity field that defines the potential at height $h(x)$).
Example: Take $g=1$ and $m=1$
$$U(x) = \frac{1}{2} x^2$$
$$K(p) = \frac{1}{2} p^2 $$
$$H(x, p) = \frac{1}{2} x^2 + \frac{1}{2} p^2$$
$$
\frac{\partial H}{\partial p} = p
$$
$$
\frac{\partial H}{\partial x} = x
$$
Hence
$$
\frac{d x}{d t} = p
$$
$$
\frac{d p}{d t} = -x
$$
\begin{eqnarray}
\left( \begin{array}{c} \dot{x} \ \dot{p} \end{array} \right) & = & \left( \begin{array}{cc} 0 & 1 \ -1 & 0 \end{array} \right) \left( \begin{array}{c} x \ p \end{array} \right) \
\dot{z} & = & A z \
z(t) & = & \exp(At)z(0)
\end{eqnarray}
End of explanation
"""
# Explicit (forward) Euler integration of the same system. Euler is not
# symplectic, so the numerical orbit drifts away from the true circle.
epsilon = 0.05
T = 1000
z_euler = np.mat(np.zeros((2,T)))
def dHdx(x):
    # dH/dx for U(x) = x^2/2
    return x
def dHdp(p):
    # dH/dp for K(p) = p^2/2
    return p
z_euler[:,0] = np.mat('[2.4;0]')
for i in range(1,T):
    #z_euler[:,i] = z_euler[:,i-1] + epsilon*dH(z_euler[:,i-1])
    z_euler[0,i] = z_euler[0,i-1] + epsilon*dHdp(z_euler[1,i-1])
    z_euler[1,i] = z_euler[1,i-1] - epsilon*dHdx(z_euler[0,i-1])
plt.plot(z_euler[0,:], z_euler[1,:],'.-r')
ax = plt.gcf().gca()
ax.set_aspect('equal')
plt.show()
"""
Explanation: Euler's method
Position Evolution
\begin{eqnarray}
\frac{x_i(t + \epsilon) - x_i(t)}{\epsilon} & = & \frac{d x_i}{d t}(t) = \frac{\partial K}{\partial p_i}(p_i(t)) \
x_i(t + \epsilon) & = & x_i(t) + \epsilon \frac{\partial K}{\partial p_i}(p_i(t))
\end{eqnarray}
Momentum Evolution
\begin{eqnarray}
\frac{p_i(t + \epsilon) - p_i(t)}{\epsilon} & = & \frac{d p_i}{d t}(t) = - \frac{\partial U}{\partial x_i}(x_i(t)) \
p_i(t + \epsilon) & = & p_i(t) - \epsilon \frac{\partial U}{\partial x_i}(x_i(t)) \
\end{eqnarray}
End of explanation
"""
# Modified Euler: the freshly updated position z_euler2[0,i] is used
# immediately in the momentum update, keeping the orbit closed.
epsilon = 0.01
T = 600
z_euler2 = np.mat(np.zeros((2,T)))
def dHdx(x):
    # dH/dx for U(x) = x^2/2
    return x
def dHdp(p):
    # dH/dp for K(p) = p^2/2
    return p
z_euler2[:,0] = np.mat('[2.4;0]')
for i in range(1,T):
    z_euler2[0,i] = z_euler2[0,i-1] + epsilon*dHdp(z_euler2[1,i-1])
    z_euler2[1,i] = z_euler2[1,i-1] - epsilon*dHdx(z_euler2[0,i])
plt.plot(z_euler2[0,:], z_euler2[1,:],'.-r')
ax = plt.gcf().gca()
ax.set_aspect('equal')
plt.show()
"""
Explanation: Modified Euler
Use the intermediate solution immediately
End of explanation
"""
# Leapfrog integration: half-step momentum, full-step position using the
# mid-point momentum, then the second half-step momentum update.
# (A dead "T = 50" assignment, immediately overwritten by T = 100 below, was removed.)
epsilon = 0.5
def dHdx(x):
    # dH/dx for U(x) = x^2/2
    return x
def dHdp(p):
    # dH/dp for K(p) = p^2/2
    return p
# Optional alternative potential gradient (log-barrier), kept for experiments:
#def dHdx(x):
#    A = np.mat('[1;-1]')
#    b = np.mat('[1;3]')
#    u = (b - A*x)
#    if np.all(u > 0):
#        g = A.T*(1/u)
#    else:
#        g = np.inf*u
#    return g[0,0]
T = 100
z_lf = np.mat(np.zeros((2,T)))
z_lf[:,0] = np.mat('[0.1;0]')
for i in range(1,T):
    p_mid = z_lf[1,i-1] - (epsilon/2)*dHdx(z_lf[0,i-1])
    z_lf[0,i] = z_lf[0,i-1] + epsilon*dHdp(p_mid)
    z_lf[1,i] = p_mid - (epsilon/2)*dHdx(z_lf[0,i])
plt.plot(z_lf[0,:].T, z_lf[1,:].T,'or-')
ax = plt.gcf().gca()
ax.set_aspect('equal')
plt.show()
"""
Explanation: The Leapfrog Method
Approximate the momentum at the middle of the time interval.
\begin{eqnarray}
\frac{p_i(t + \epsilon/2) - p_i(t)}{\epsilon/2} & = & \frac{d p_i}{d t}(t) = - \frac{\partial U}{\partial x_i}(x_i(t)) \
p_i(t + \epsilon/2) & = & p_i(t) - (\epsilon/2) \frac{\partial U}{\partial x_i}(x_i(t))
\end{eqnarray}
Use the half-way momentum to approximate the position
\begin{eqnarray}
\frac{x_i(t + \epsilon) - x_i(t)}{\epsilon} & = & \frac{d x_i}{d t}(t + \epsilon/2) = \frac{\partial K}{\partial p_i}(p_i(t+\epsilon/2)) \
x_i(t + \epsilon) &=& x_i(t) + \epsilon \frac{\partial K}{\partial p_i}(p_i(t+\epsilon/2))
\end{eqnarray}
Update the momentum at time $t+\epsilon$
\begin{eqnarray}
\frac{p_i(t + \epsilon) - p_i(t + \epsilon/2)}{\epsilon/2} & = & \frac{d p_i}{d t}(t+\epsilon) = - \frac{\partial U}{\partial x_i}(x_i(t + \epsilon)) \
p_i(t + \epsilon) & = & p_i(t+\epsilon/2) - (\epsilon/2) \frac{\partial U}{\partial x_i}(x_i(t+\epsilon))
\end{eqnarray}
End of explanation
"""
|
tanmay987/deepLearning | seq2seq/sequence_to_sequence_implementation.ipynb | mit | import helper
# Paths to the input sequences and their sorted targets (one sequence per line).
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'
source_sentences = helper.load_data(source_path)
target_sentences = helper.load_data(target_path)
"""
Explanation: Character Sequence to Sequence
In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models.
<img src="images/sequence-to-sequence.jpg"/>
Dataset
The dataset lives in the /data/ folder. At the moment, it is made up of the following files:
* letters_source.txt: The list of input letter sequences. Each sequence is its own line.
* letters_target.txt: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number.
End of explanation
"""
# Peek at the first input sequences.
source_sentences[:50].split('\n')
"""
Explanation: Let's start by examining the current state of the dataset. source_sentences contains the entire input sequence file as text delimited by newline symbols.
End of explanation
"""
# Peek at the corresponding sorted target sequences.
target_sentences[:50].split('\n')
"""
Explanation: target_sentences contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line with the same number in source_sentences, and contains the sorted characters of that line.
End of explanation
"""
def extract_character_vocab(data):
    """Build id<->character lookup tables for newline-delimited sequences.

    The four special tokens always occupy ids 0-3 (pad, unk, go, eos); every
    distinct character found in `data` follows.

    Returns a tuple (int_to_vocab, vocab_to_int).
    """
    # '<\\s>' was originally written '<\s>' -- an invalid escape sequence that
    # raises a SyntaxWarning on modern Python; the string value is unchanged.
    special_words = ['<pad>', '<unk>', '<s>', '<\\s>']
    set_words = set([character for line in data.split('\n') for character in line])
    int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
    vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}
    return int_to_vocab, vocab_to_int
# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)
# Convert characters to ids; anything outside the vocabulary maps to <unk>.
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<unk>']) for letter in line] for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<unk>']) for letter in line] for line in target_sentences.split('\n')]
print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
"""
Explanation: Preprocess
To do anything useful with it, we'll need to turn the characters into a list of integers:
End of explanation
"""
def pad_id_sequences(source_ids, source_letter_to_int, target_ids, target_letter_to_int, sequence_length):
    """Right-pad every id sequence with the <pad> id up to sequence_length.

    Sequences already at (or beyond) sequence_length are left untouched.
    Returns the padded (source, target) lists.
    """
    def _pad(seq, pad_id):
        return seq + [pad_id] * (sequence_length - len(seq))
    src_pad = source_letter_to_int['<pad>']
    tgt_pad = target_letter_to_int['<pad>']
    padded_sources = [_pad(seq, src_pad) for seq in source_ids]
    padded_targets = [_pad(seq, tgt_pad) for seq in target_ids]
    return padded_sources, padded_targets
# Use the longest sequence as sequence length
sequence_length = max(
    [len(sentence) for sentence in source_letter_ids] + [len(sentence) for sentence in target_letter_ids])
# Pad all sequences up to sequence length so every batch row has equal width.
source_ids, target_ids = pad_id_sequences(source_letter_ids, source_letter_to_int,
                                          target_letter_ids, target_letter_to_int, sequence_length)
print("Sequence Length")
print(sequence_length)
print("\n")
print("Input sequence example")
print(source_ids[:3])
print("\n")
print("Target sequence example")
print(target_ids[:3])
"""
Explanation: The last step in the preprocessing stage is to determine the the longest sequence size in the dataset we'll be using, then pad all the sequences to that length.
End of explanation
"""
from distutils.version import LooseVersion
import tensorflow as tf
# Check TensorFlow Version
# NOTE(review): distutils is deprecated (removed in Python 3.12); this guard
# targets the TensorFlow 1.x era this notebook was written for.
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
"""
Explanation: This is the final shape we need them to be in. We can now proceed to building the model.
Model
Check the Version of TensorFlow
This will check to make sure you have the correct version of TensorFlow
End of explanation
"""
# Training hyper-parameters for the sorting seq2seq model.
# Number of Epochs
epochs = 60
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 50
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 13
decoding_embedding_size = 13
# Learning Rate
learning_rate = 0.001
"""
Explanation: Hyperparameters
End of explanation
"""
# Graph inputs: batches of fixed-length id sequences plus a learning-rate feed.
input_data = tf.placeholder(tf.int32, [batch_size, sequence_length])
targets = tf.placeholder(tf.int32, [batch_size, sequence_length])
lr = tf.placeholder(tf.float32)
"""
Explanation: Input
End of explanation
"""
source_vocab_size = len(source_letter_to_int)
# Encoder embedding
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size)
# Encoder: stacked LSTM. BUG FIX: `[cell] * num_layers` put the *same* cell
# object in every layer, which shares weights across layers and raises a
# variable-scope reuse error on TensorFlow >= 1.1; build a fresh cell per layer.
enc_cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
_, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, dtype=tf.float32)
"""
Explanation: Sequence to Sequence
The decoder is probably the most complex part of this model. We need to declare a decoder for the training phase, and a decoder for the inference/prediction phase. These two decoders will share their parameters (so that all the weights and biases that are set during the training phase can be used when we deploy the model).
First, we'll need to define the type of cell we'll be using for our decoder RNNs. We opted for LSTM.
Then, we'll need to hookup a fully connected layer to the output of decoder. The output of this layer tells us which word the RNN is choosing to output at each time step.
Let's first look at the inference/prediction decoder. It is the one we'll use when we deploy our chatbot to the wild (even though it comes second in the actual code).
<img src="images/sequence-to-sequence-inference-decoder.png"/>
We'll hand our encoder hidden state to the inference decoder and have it process its output. TensorFlow handles most of the logic for us. We just have to use tf.contrib.seq2seq.simple_decoder_fn_inference and tf.contrib.seq2seq.dynamic_rnn_decoder and supply them with the appropriate inputs.
Notice that the inference decoder feeds the output of each time step as an input to the next.
As for the training decoder, we can think of it as looking like this:
<img src="images/sequence-to-sequence-training-decoder.png"/>
The training decoder does not feed the output of each time step to the next. Rather, the inputs to the decoder time steps are the target sequence from the training dataset (the orange letters).
Encoding
Embed the input data using tf.contrib.layers.embed_sequence
Pass the embedded input into a stack of RNNs. Save the RNN state and ignore the output.
End of explanation
"""
import numpy as np
# Process the input we'll feed to the decoder:
# drop the last token of every target sequence (it is never fed as input) ...
ending = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
# ... and prepend the go token '<s>' so the decoder has a first input.
dec_input = tf.concat([tf.fill([batch_size, 1], target_letter_to_int['<s>']), ending], 1)
# Demonstration matrix of fake target ids, purely to visualise the transformation.
demonstration_outputs = np.reshape(range(batch_size * sequence_length), (batch_size, sequence_length))
sess = tf.InteractiveSession()
print("Targets")
print(demonstration_outputs[:3])
print("\n")
print("Processed Decoding Input")
print(sess.run(dec_input, {targets: demonstration_outputs})[:3])
"""
Explanation: Process Decoding Input
End of explanation
"""
target_vocab_size = len(target_letter_to_int)
# Decoder Embedding: a trainable lookup table mapping target-token ids to vectors.
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# Decoder RNNs (same shared-cell caveat as the encoder above).
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
with tf.variable_scope("decoding") as decoding_scope:
    # Output Layer: projects RNN outputs to vocabulary logits.  Defined inside
    # the "decoding" variable scope so the training and inference decoders
    # share its weights and biases.
    output_fn = lambda x: tf.contrib.layers.fully_connected(x, target_vocab_size, None, scope=decoding_scope)
"""
Explanation: Decoding
Embed the decoding input
Build the decoding RNNs
Build the output layer in the decoding scope, so the weight and bias can be shared between the training and inference decoders.
End of explanation
"""
with tf.variable_scope("decoding") as decoding_scope:
    # Training Decoder: teacher forcing — the ground-truth target tokens are
    # fed at every step, starting from the encoder's final state.
    train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(enc_state)
    train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
        dec_cell, train_decoder_fn, dec_embed_input, sequence_length, scope=decoding_scope)
    # Apply output function to turn raw decoder outputs into vocabulary logits.
    train_logits = output_fn(train_pred)
"""
Explanation: Decoder During Training
Build the training decoder using tf.contrib.seq2seq.simple_decoder_fn_train and tf.contrib.seq2seq.dynamic_rnn_decoder.
Apply the output layer to the output of the training decoder
End of explanation
"""
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
    # Inference Decoder: reuse=True shares weights/biases with the training
    # decoder.  Each step's predicted token is fed back as the next input,
    # starting at '<s>' and stopping at '<\s>' or after sequence_length - 1 steps.
    infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
        output_fn, enc_state, dec_embeddings, target_letter_to_int['<s>'], target_letter_to_int['<\s>'],
        sequence_length - 1, target_vocab_size)
    inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope)
"""
Explanation: Decoder During Inference
Reuse the weights and biases from the training decoder using tf.variable_scope("decoding", reuse=True)
Build the inference decoder using tf.contrib.seq2seq.simple_decoder_fn_inference and tf.contrib.seq2seq.dynamic_rnn_decoder.
The output function is applied to the output in this step
End of explanation
"""
# Loss function: weighted cross-entropy over every time step; the all-ones
# weight tensor gives every position equal importance.
cost = tf.contrib.seq2seq.sequence_loss(
    train_logits,
    targets,
    tf.ones([batch_size, sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping: clamp each gradient to [-1, 1] to guard against
# exploding gradients in the recurrent layers (None gradients are skipped).
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Optimization
Our loss function is tf.contrib.seq2seq.sequence_loss provided by the tensor flow seq2seq module. It calculates a weighted cross-entropy loss for the output logits.
End of explanation
"""
import numpy as np
# Hold out the first batch as a fixed validation set; train on the rest.
train_source = source_ids[batch_size:]
train_target = target_ids[batch_size:]
valid_source = source_ids[:batch_size]
valid_target = target_ids[:batch_size]
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
    for batch_i, (source_batch, target_batch) in enumerate(
            helper.batch_data(train_source, train_target, batch_size)):
        # One optimisation step on this batch.
        _, loss = sess.run(
            [train_op, cost],
            {input_data: source_batch, targets: target_batch, lr: learning_rate})
        # Re-run the inference decoder to score accuracy on train and validation data.
        batch_train_logits = sess.run(
            inference_logits,
            {input_data: source_batch})
        batch_valid_logits = sess.run(
            inference_logits,
            {input_data: valid_source})
        # Token-level accuracy: fraction of positions where argmax(logits) == target id.
        train_acc = np.mean(np.equal(target_batch, np.argmax(batch_train_logits, 2)))
        valid_acc = np.mean(np.equal(valid_target, np.argmax(batch_valid_logits, 2)))
        print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
              .format(epoch_i, batch_i, len(source_ids) // batch_size, train_acc, valid_acc, loss))
"""
Explanation: Train
We're now ready to train our model. If you run into OOM (out of memory) issues during training, try to decrease the batch_size.
End of explanation
"""
input_sentence = 'hello'
# Map each character to its id; unknown characters fall back to '<unk>'.
input_sentence = [source_letter_to_int.get(word, source_letter_to_int['<unk>']) for word in input_sentence.lower()]
# Pad with id 0 up to the fixed sequence length the graph expects.
input_sentence = input_sentence + [0] * (sequence_length - len(input_sentence))
# The graph runs on full batches, so embed the single sentence in a zero batch.
batch_shell = np.zeros((batch_size, sequence_length))
batch_shell[0] = input_sentence
chatbot_logits = sess.run(inference_logits, {input_data: batch_shell})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in input_sentence]))
print(' Input Words: {}'.format([source_int_to_letter[i] for i in input_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(chatbot_logits, 1)]))
print(' Chatbot Answer Words: {}'.format([target_int_to_letter[i] for i in np.argmax(chatbot_logits, 1)]))
"""
Explanation: Prediction
End of explanation
"""
|
nathawkins/PHY451_FS_2017 | Diode Laser Spectroscopy/20171003_morning/Interference with SAS no Dopple/Interferometer with SAS No Doppler Analysis.ipynb | gpl-3.0 | get_peak_data(ch2, [0.025, 0.030]);
get_peak_data(ch2, [0.030, 0.035]);
get_peak_data(ch2, [0.0350,0.045]);
get_peak_data(ch2, [0.049, 0.0517]);
maximum_time_positions = [0.028124, 0.03266, 0.042744, 0.05052]
maximum_voltage_positions = [0.738, 0.53, 0.716, 0.48]
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
f.set_figheight(10)
f.set_figwidth(15)
ax1.set_title("Maximum Values on SAS, No Doppler")
ax1.set_ylabel("Voltage (V)")
ax1.set_xlabel("Time (s)")
ax1.plot(maximum_time_positions, maximum_voltage_positions, 'rp')
ax1.plot(time, ch2)
ax2.set_title("Maximum Values on Interferometer Spikes")
ax2.set_xlabel("Time (s)")
ax2.plot(maximum_time_positions, maximum_voltage_positions, 'rp')
ax2.plot(time, ch3, 'y-')
"""
Explanation: Isolating the Maximum Values
End of explanation
"""
# Locate two successive interferometer peaks on channel 3.
get_peak_data(ch3, [0.019, 0.020]);
get_peak_data(ch3, [0.019, 0.022]);
# Their time separation (one interferometer spacing) in seconds:
0.020856 - 0.019276
"""
Explanation: The distance between the interferometer peaks should be constant, so finding the time separating two maximum values should tell me how much time elapses between the two peaks, which we can use as a measure of separation of frequencies.
End of explanation
"""
# Time gaps between consecutive peaks; the leading 0 keeps the list aligned
# with maximum_time_positions (the first peak has no predecessor).
differences = [0]
differences += [later - earlier
                for earlier, later in zip(maximum_time_positions,
                                          maximum_time_positions[1:])]
differences
from prettytable import PrettyTable

# Summary table: each peak's time/voltage, the gap to the previous peak,
# that gap in interferometer-spacing units (0.00158 s per spacing), and the
# corresponding frequency separation (379 MHz per spacing).
# (Fixed: the table was instantiated twice; the first instance was discarded.)
x = PrettyTable()
x.add_column("Time (s)", maximum_time_positions)
x.add_column("Voltage (V)", maximum_voltage_positions)
x.add_column("Difference (s)", [round(i, 5) for i in differences])
x.add_column("Number of Interferometer Distances Apart", [round(i, 5)/0.00158 for i in differences])
x.add_column("Separation of Features (MHz)", [round(i, 5)/0.00158 *379 for i in differences])
print(x)

# Persist the rendered table for the lab report; the context manager closes
# the file even on error and avoids shadowing the builtin name `file`.
with open('SummaryTable.txt', 'w') as out_file:
    out_file.write(str(x))
"""
Explanation: The time difference between peaks is 0.00158 seconds.
End of explanation
"""
|
OSGeo-live/CesiumWidget | Examples/CesiumWidget Interact-Example.ipynb | apache-2.0 | from CesiumWidget import CesiumWidget
from IPython import display
from czml_example import simple_czml, complex_czml
"""
Explanation: Cesium Widget Example
This is an example notebook to sow how to bind the Cesiumjs with the IPython interactive widget system.
End of explanation
"""
cesiumExample = CesiumWidget(width="100%", enable_lighting=True)
"""
Explanation: The code:
from czml_example import simple_czml, complex_czml
Simply import some CZML data for the viewer to display.
Create widget object
End of explanation
"""
cesiumExample
"""
Explanation: Display the widget:
End of explanation
"""
cesiumExample.czml = simple_czml
"""
Explanation: Add some data to the viewer
A simple czml
End of explanation
"""
cesiumExample.czml = complex_czml
"""
Explanation: A more complex CZML example
End of explanation
"""
from __future__ import print_function
from ipywidgets import interact, interactive, fixed
from ipywidgets import widgets
"""
Explanation: Now let's make some interactive widget:
End of explanation
"""
myczml = {'simple_czml':simple_czml, 'complex_czml':complex_czml}
myplace = {'Eboli, IT':'', 'Woods Hole, MA':'', 'Durham, NH':''}
import geocoder
import time
for i in myplace.keys():
g = geocoder.google(i)
print(g.latlng)
myplace[i]=g.latlng
myplace
# Interactive camera controls: altitude z (metres), orientation angles
# (degrees), and a preset location chosen from the geocoded `myplace` dict.
@interact(z=(0,20000000), heading=(-180,180), pitch=(-90,90), roll=(-360,360),
          Location=('Eboli, IT','Woods Hole, MA','Durham, NH'))
def f(z=1000000, heading=0, pitch=-90, roll=0, Location='Eboli, IT'):
    # myplace stores [lat, lng]; zoom_to takes longitude first, hence the swap.
    cesiumExample.zoom_to(myplace[Location][1], myplace[Location][0], z, heading, pitch, roll)
# Dropdown to switch the viewer between the two preloaded CZML documents.
@interact(CZML=('simple_czml','complex_czml'))
def c(CZML=None):
    cesiumExample.czml = myczml[CZML]
cesiumExample
"""
Explanation: store the CZML objet in a dictionary and use their name as keys
define a function to switch between CZML
bind the IPython interact class to the function
End of explanation
"""
|
ComputationalModeling/spring-2017-danielak | past-semesters/spring_2016/homework_assignments/function_tutorial.ipynb | agpl-3.0 | def print_hello():
print("hello!")
# call the function and store its output. You don't really have to have the output= part if you don't want to.
output=print_hello()
print("output of print_hello() is:", output)
"""
Explanation: Python functions - some examples
This notebook demonstrates how to work with python functions, including some complex examples.
Some useful links:
Python tutorial - defining functions
A different tutorial on Python functions
A very simple function
This function takes no parameters and doesn't explicitly return a value. Note that a function with no return actually returns "None".
End of explanation
"""
def squareme(thisval):
    """Return *thisval* raised to the second power."""
    squared = thisval ** 2
    return squared
output = squareme(4)
print("output of squareme() is:", output)
"""
Explanation: A function with one argument and one returned quantity
This function takes in one parameter, thisval, and returns the square of it. We'll store the output in a variable and print it.
End of explanation
"""
def sum_me(a, b, c):
    """Return the sum of the three arguments.

    (The original stored the result in a local named ``sum``, shadowing the
    built-in; returning the expression directly avoids that.)
    """
    return a + b + c
output = sum_me(1,2,3)
print("output of sum_me() is:", output)
"""
Explanation: A function with several arguments and one returned quantity
This function takes three arguments and returns their sum.
End of explanation
"""
def more_math(a, b, c):
    """Demonstrate multiple return values.

    Returns a 3-tuple: the sum of the inputs, a list containing the inputs,
    and the concatenation of their string representations.
    """
    total = a + b + c                                  # a number
    as_list = [a, b, c]                                # a list of the three inputs
    as_string = "".join(str(v) for v in (a, b, c))     # their string forms glued together
    return total, as_list, as_string
# put all returns in one variable
val1 = more_math(2,4,6)
print("printing out all returns in one variable:", val1)
print("") # adds an extra line
print("getting the second returned quantity:",val1[1])
print("")
print("second quantity is a list. Getting one value out of that list:",val1[1][2])
# put returns in separate variables
val2, val3, val4 = more_math(8,10,12)
print("first return:", val2)
print("second return:", val3)
print("third return:", val4)
"""
Explanation: A function that returns multiple quantities
Python allows functions to return multiple quantities. The quantities returned can be of any type: numbers, strings, lists, arrays, and so on. You can either put all of the returns into a single variable (and can then extract the various returns through indexes) or put the returned quantities into one variable per returned quantity.
Note: For the purposes of the function below, we're assuming that the input values a, b, and c are all integers - however, think about what might happen if they were some combination of numbers and other quantities (such as strings or lists). What do you think the function would do?
End of explanation
"""
val5, val6 = more_math(14,16,18)
"""
Explanation: What do you think happens if you have more than one variable, but not the right number of variables to accept all of the returns?
End of explanation
"""
# this version has one default value
def print_stuff(first, second='dog', third='monkey'):
    """Print three space-separated values; *second* and *third* have defaults."""
    values = (first, second, third)
    print(*values)
print_stuff('cat')
print_stuff('cat','elephant')
print_stuff('donkey','wolf','mouse')
# this version has no default values!
def print_stuff_2(first='fish', second='bird', third='dinosaur'):
    """Print three space-separated values; every argument carries a default."""
    values = [first, second, third]
    print(*values)
print_stuff_2()
print_stuff_2('newt')
print_stuff_2('vole','koala')
print_stuff_2('lion','armadillo','cow')
"""
Explanation: A function that has default arguments
Python allows you to have functions that have a default value for one or more arguments. This allows you to call a function with fewer arguments than it is defined to allow (i.e., fewer than the maximum number of possible arguments). In function definitions of this sort, the mandatory arguments (ones that do not have a default value) must come before the optional arguments (ones that have default values). This can be extremely helpful in many circumstances, and can be called in several ways:
Giving only the mandatory argument
Giving the mandatory argument plus one of the optional arguments
Giving the mandatory argument plus two or more of the optional arguments
There does not need to be a mandatory argument - all arguments can have defaults! Note that if you are going to give more than just the mandatory argument, the optional arguments need to be given in the order they are defined. In other words, you can't just give the mandatory argument and the second optional argument in a function with one mandatory argument and multiple optional arguments - you have to give all arguments up to the one that you want.
End of explanation
"""
# function with one mandatory argument, two keyword arguments
def orbit(mass1, mass2=1.0, distance=17.0):
    """Report the parameters of a two-body orbit.

    *mass1* is mandatory; *mass2* and *distance* are keyword arguments with
    defaults.
    """
    for label, value in (("mass1: ", mass1),
                         ("mass2: ", mass2),
                         ("distance:", distance)):
        print(label, value)
# just the mandatory argument
orbit(3.0)
# two arguments, given in the order they appear in the function call.
orbit(3.0,9.0)
# the mandatory argument and one keyword argument
orbit(3.0,distance=8.0)
# the mandatory argument and the two keyword arguments given out of order
orbit(5.0,distance=3.0,mass2=17.0)
"""
Explanation: A function that has keyword arguments
Finally, functions can also be called with "keyword arguments" that have the form keyword=value. For example, the function below has one mandatory argument and two keyword arguments. The mandatory arguments must always be called first, and in the same order that they appear in the function call. The keyword arguments have to be after that, and can be called in any order. Note that you don't have to take advantage of the keywords - it's perfectly acceptable to just treat the keyword arguments as arguments with default values (because that's actually what they are!) and just give them in the order they appear in the function call. For example:
End of explanation
"""
# function with all keyword arguments
def orbit_new(mass1=3.0, mass2=1.0, distance=17.0):
    """Report orbit parameters; every argument is a keyword with a default."""
    rows = [("mass1: ", mass1), ("mass2: ", mass2), ("distance:", distance)]
    for prefix, value in rows:
        print(prefix, value)
orbit_new()
orbit_new(distance=9.0)
orbit_new(distance=5.0,mass1=81.0)
orbit_new(distance=5.0,mass2=0.7,mass1=81.0)
"""
Explanation: A note about functions with keyword arguments
A function with keyword arguments does not need to have any default arguments at all. For example:
End of explanation
"""
|
trungdong/datasets-provanalytics-dmkd | Extra 2.1 - Unbalanced Data - Application 1.ipynb | mit | import pandas as pd
# Each row: the 22 provenance network metrics of one document plus its owner label.
df = pd.read_csv("provstore/data.csv")
df.head()
df.describe()
# The number of documents per owner label; the classes are unbalanced.
df.label.value_counts()
"""
Explanation: Extra 2.1 - Unbalanced Data - Application 1: ProvStore Documents
Identifying owners of provenance documents from their provenance network metrics.
In this notebook, we compared the classification accuracy on unbalanced (original) ProvStore dataset vs that on a balanced ProvStore dataset.
Goal: To determine if the provenance network analytics method can identify the owner of a provenance document from its provenance network metrics.
Training data: In order to ensure that there are sufficient samples to represent a user's provenance documents the Training phase, we limit our experiment to users who have at least 20 documents. There are fourteen such users (the authors were excluded to avoid bias), who we named $u_{1}, u_{2}, \ldots, u_{14}$. Their numbers of documents range between 21 and 6,745, with the total number of documents in the data set is 13,870.
Classification labels: $\mathcal{L} = \left{ u_1, u_2, \ldots, u_{14} \right} $, where $l_{x} = u_i$ if the provenance document $x$ belongs to user $u_i$. Hence, there are 14 labels in total.
Reading data
For each provenance document, we calculate the 22 provenance network metrics. The dataset provided contains those metrics values for 13,870 provenance documents along with the owner identifier (i.e. $u_{1}, u_{2}, \ldots, u_{14}$).
End of explanation
"""
from analytics import test_classification
"""
Explanation: Classification on unbalanced (original) data
End of explanation
"""
results, importances = test_classification(df)
"""
Explanation: Cross Validation tests: We now run the cross validation tests on the dataset (df) using all the features (combined), only the generic network metrics (generic), and only the provenance-specific network metrics (provenance). Please refer to Cross Validation Code.ipynb for the detailed description of the cross validation code.
End of explanation
"""
from analytics import balance_smote
"""
Explanation: ## Classification on balanced data
End of explanation
"""
df = balance_smote(df)
results_bal, importances_bal = test_classification(df)
"""
Explanation: Balancing the data
With an unbalanced like the above, the resulted trained classifier will typically be skewed towards the majority labels. In order to mitigate this, we balance the dataset using the SMOTE Oversampling Method.
End of explanation
"""
|
tpin3694/tpin3694.github.io | neural-networks/mnist_nn.ipynb | mit | %matplotlib inline
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
# Load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape)
print(y_train.shape)
"""
Explanation: Title: Artificial Neural Network in Keras
Slug: ann-mnist
Summary: A Feed Forward Artificial Nerual Netowrk in Keras using the MNIST Digits Dataset
Date: 2017-01-3 19:23
Category: Neural Networks
Tags: Basics
Authors: Thomas Pinder
Introduction
The Keras library is a very popular library for implementing a range of different types of neural networks. In this guide we'll focus on a fully connected neural network using the popular MNIST dataset. We'll also be validating our results using k-fold cross validation: the gold standard for assessing a model's performance. By the end of this guide you should be comfortable doing the following:
Implementing a fully connected neural network
Applying different activation functions
Adding and removing layers from your network
Interpreting your cross validated results
If you're not familiar with activation functions already you may wish to brush up on these here.
Preliminaries
Before we can do any modeling we must first load in the necessary libraries and the MNIST data. Fortunately, the MNIST data is provided as part of the Keras library and is around 15mb. Installation details for Keras can be found here, however the command pip install keras should work provided you do not want any of the dependencies related to Keras.
End of explanation
"""
plt.imshow(X_train[123], cmap=plt.get_cmap('gray'))
"""
Explanation: For those of you unfamiliar with the MNIST dataset, it is a set of 70000, 28x28 pixel images depicting handwritten digits from 0-9. It is a commonly used dataset within image classification tutorials due to to its clean nature and uniform structure meaning that very little time must be spent wrangling the data.
Preprocessing
To get a feel for the data, we can first plot an image to physically see what we're modeling:
End of explanation
"""
def array_reshape(X, output_size):
    """Reshape array *X* to the dimensions listed in *output_size*.

    Generalised: *output_size* may contain any number of dimensions, not just
    the two the original hard-coded.  The total element count must match.

    Parameters
    ----------
    X : numpy array to reshape.
    output_size : sequence of ints, the target shape.

    Returns
    -------
    The reshaped array (a view when possible, per numpy's reshape semantics).
    """
    return X.reshape(*output_size)
X_train = array_reshape(X_train, [60000, 28**2])
X_test = array_reshape(X_test, [10000, 28**2])
"""
Explanation: This may actually be a harder observation for our network to classify as it is likely a 7, however, it could in fact be a 2 or 3 with the lower segment cropped. In any case, this gives us a feel for the type of images that we are modeling.
As can be seen from the dimension of the X_train dataset, we currently have a 3-dimensional array. This array needs to be flattened prior to passing the data through our neural network. This can be achieved through the NumPy function reshape() which does what it says on the tin - takes an array and reshapes it whilst preserving the original data.
End of explanation
"""
def standardise_array(X, multiplier=1):
    """Standardise *X*: subtract the mean and divide by multiplier * std.

    With the default multiplier of 1 this is the classic z-score; a
    multiplier of 2 divides by twice the standard deviation instead.
    """
    centred = X - np.mean(X)
    scale = multiplier * np.std(X)
    return centred / scale
X_train = standardise_array(X_train)
X_test = standardise_array(X_test)
"""
Explanation: With the data reshaped, the only remaining steps are to standardise the data with mean 0 and 1 standard deviation and encode the output variable. Whilst not entirely necessary as each columns holds values within the same range (0 and 255), it will nonetheless make backpropogation more efficient and avoid saturation. If you're interested in reading more on this then I recommend these two articles here and here. Whilst there is a preprocessing() function available in sklearn, we'll implement our own here just demonstrate the point ore verbosely.
End of explanation
"""
y_train_enc = to_categorical(y_train)
y_test_enc = to_categorical(y_test)
"""
Explanation: The argument for a multiplier has been included here as you may wish to divide through by twice the standard deviation, as is the recommendation here. As for encoding the labels, we're going to apply one-hot encoding to them. What this means is the a k-leveled variable is transformed into k individual boolean columns, in our case k=10, so 10 new columns created, the first indicating if the image is a zero or not, the second if it is a one and so forth. You may be wondering why we must do this additional step and the reason is simply that if we do not, the model will assume the outputs are ordered, so 1 is greater than 0 and so forth, often leading to poor predictions from the model. This one-hot encoding can be achieved through the Keras function to_categorical().
End of explanation
"""
model = Sequential()
model.add(Dense(units = 32, activation = "relu", input_dim = 28**2, kernel_initializer = "normal"))
model.add(Dense(units = 12, activation = "relu", kernel_initializer='normal'))
model.add(Dense(units = 10, activation = "softmax", kernel_initializer='normal'))
model.compile(loss = "categorical_crossentropy",
optimizer = "adam",
metrics = ["accuracy"])
"""
Explanation: Building The Model
With the data now correctly preprocessed, we can begin building and training our neural network. In its simplest form, Keras works by initially by defining your network type and from there building layers into the network. With this defined, the model can be compiled with the addition of a loss and optimisation function. In our case we're going to be build a simple fully-connected model with two hidden layers. The term fully-connected means that every pair of nodes in adjacent layers is connected. The network type being used here is a Sequential network, meaning that the entire network is a linear stack of layers. The two hidden layers utilise the ReLU activation function (discussed in greater depth [here])(runningthenumbers.co.uk/neural-networks/activation-functions.html)) along with the softmax function in the final layer. With all that said, get into building our network.
End of explanation
"""
np.random.seed(123)
model.fit(X_train, y_train_enc, epochs=50, batch_size=32, verbose=0)
"""
Explanation: The kernel_initializer argument initialises the sets of weights between each layer to be random draws from the normal distribution. When you build your own model you should not be too prescriptive in your choices of units and activation functions in your first two layers, instead trying out different sizes and function. The input_dimension and units arguments in the firs and final layer respectively should be kept constant however as these are dictated by the shape of the data.
Training
With a model compiled, it should be trained. In Keras this is a very intuitive process, with the user only needing to define the training data, epochs (the number of forward and backward propagation passes) and the batch size (the number of samples to pass through the network). Generally the more epochs the better, although there will be diminishing returns at a point. I generally start with 50 epochs and increase the number if necessary. As for batch size, 32 is generally considered a good starting point, though a larger batch size will result in a faster training time but converge slower, with the converse being true for smaller batch sizes. It is important to train the model using batches as failure to do so can result in excessive memory usage.
End of explanation
"""
accuracy = model.evaluate(X_test, y_test_enc, verbose=0)
print("The model has {}% accuracy on unseen testing data".format(np.round(accuracy[1]*100, 1)))
"""
Explanation: You'll notice in the above code snippet that verbose=0, I have done this to keep the tutorial clean, however you may want to set verbose=1 when running yours as you'll get useful output regarding the model's accuracy. Should you re-run this exact guide, you'll notice that the model's accuracy on the training data begins to plateau around 40 epochs.
Testing
With a model trained, the final step is to make some predictions on our testing data and assess the true accuracy of the model by running it on unseen data.
End of explanation
"""
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier
# Set the seed again for reproducabilty
np.random.seed(123)
# Create our entire arrays
X = np.vstack((X_train, X_test))
y = np.vstack((y_train_enc, y_test_enc))
# Wrap our model inside a function
# Zero-argument model factory: the shape KerasClassifier(build_fn=...) needs
# so scikit-learn can rebuild a fresh network for every cross-validation fold.
def mnist_nn():
    """Build and compile the MNIST classifier: 784 -> 32 (ReLU) -> 12 (ReLU) -> 10 (softmax)."""
    model = Sequential()
    model.add(Dense(units = 32, activation = "relu", input_dim = 28**2, kernel_initializer = "normal"))
    model.add(Dense(units = 12, activation = "relu", kernel_initializer='normal'))
    model.add(Dense(units = 10, activation = "softmax", kernel_initializer='normal'))
    # Categorical cross-entropy matches the one-hot encoded labels; same
    # compile settings as the standalone model built earlier in the notebook.
    model.compile(loss = "categorical_crossentropy",
                  optimizer = "adam",
                  metrics = ["accuracy"])
    return model
# Place an Sklearn wrapper around our Keras Network
clf = KerasClassifier(build_fn=mnist_nn, epochs = 50, batch_size=32, verbose=0)
# Define the number of folds to be made
folds = KFold(n_splits=10, random_state=123)
# Run Cross-validation
accuracies = cross_val_score(clf, X, y, cv=folds)
# Obtain our final model's metrics
final_accuracy = np.mean(accuracies)
s_error = np.std(accuracies*100)/np.sqrt(10)
print("Final Accuracy: {}, with standard error: {}".format(np.round(final_accuracy*100,1), np.round(s_error, 1)))
"""
Explanation: An accuracy as high as this is by all standards very good, however some tuning of our networks can increase this further, although that is beyond the scope of this guide.
K-Fold Cross-Validation
As discussed in the introduction to this guide, the true accuracy of this network will be assessed through cross-validation. To implement this, we can make use of the SKFold function in the sklearn library. K-fold cross validation works by dividing the data up into k partitions and using k-1 of the partitions as training data and the final partition as testing data. This process is repeated k times, with each of the partitions being used once as the testing data. It has been found that when k=10, the best balance of variance and bias is found in cross validation, so it will therefore be used here. To do this in Keras we must present our model in a function. We will also have to stack the data as cross-validation is run on the entire dataset. Cross-validation can be quite time consuming as for each data partition, a new model will have to be defined and trained before being fitted, however, this should give you a chunk of code with everything needed to run your own neural network later.
End of explanation
"""
|
msampathkumar/datadriven_pumpit | pumpit/save/BenchMarkSeed_0.8118.ipynb | apache-2.0 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
np.random.seed(69572)
%matplotlib inline
%load_ext writeandexecute
# plt.figure(figsize=(120,10))
small = (4,3)
mid = (10, 8)
large = (12, 8)
"""
Explanation: PUMP IT
Using data from Taarifa and the Tanzanian Ministry of Water, can you predict which pumps are functional, which need some repairs, and which don't work at all? This is an intermediate-level practice competition. Predict one of these three classes based on a number of variables about what kind of pump is operating, when it was installed, and how it is managed. A smart understanding of which waterpoints will fail can improve maintenance operations and ensure that clean, potable water is available to communities across Tanzania.
An interactive course exploring this dataset is currently offered by DataCamp.com!
Competition End Date: Jan. 28, 2017, 11:59 p.m.
This competition is for learning and exploring, so the deadline may be extended in the future.
Git Hub Repo
Git Hub Report
Features Details
Import Libraries
End of explanation
"""
from __future__ import absolute_import
from IPython.core.getipython import get_ipython
from IPython.core.magic import (Magics, magics_class, cell_magic)
import sys
from StringIO import StringIO
from markdown import markdown
from IPython.core.display import HTML
@magics_class
class MarkdownMagics(Magics):
    """IPython cell magic that renders a cell's stdout as Markdown."""

    @cell_magic
    def asmarkdown(self, line, cell):
        """Execute *cell*, capture its stdout, and return it rendered as HTML.

        stdout is temporarily redirected into an in-memory buffer; the
        ``finally`` clause guarantees it is restored even when the executed
        cell raises.  (An unreachable debug ``return ... + 'test'`` line that
        followed the real return has been removed.)
        """
        buffer = StringIO()
        stdout = sys.stdout
        sys.stdout = buffer
        try:
            exec(cell, locals(), self.shell.user_ns)
        finally:
            # Always restore the real stdout, success or failure.
            sys.stdout = stdout
        return HTML("<p>{}</p>".format(markdown(buffer.getvalue(), extensions=['markdown.extensions.extra'])))
get_ipython().register_magics(MarkdownMagics)
"""
Explanation: Custom Functions
MarkUP Fns
End of explanation
"""
def raw_markup_value_counts(dataframe, max_print_value_counts=30, show_plots=False):
    '''
    Build a Markdown table (as a list of row strings) summarising the value
    counts of every column in *dataframe*.

    Columns with fewer than *max_print_value_counts* distinct values get one
    row per value (the first row carries the column metadata, the rest are
    continuation rows); other columns get a single summary row.  When
    *show_plots* is True a horizontal bar chart is drawn for each detailed
    column (requires matplotlib's ``plt`` to be in scope).

    Returns the list of Markdown row strings.

    Fix: the inner loop used the Python-2-only ``dict.iteritems()``, which
    raises AttributeError on Python 3; ``items()`` works on both.
    '''
    mydf = pd.DataFrame.copy(dataframe)
    raw_markup_data = []
    pp = raw_markup_data.append
    pp('''|Col ID|Col Name|UniqCount|Col Values|UniqValCount|''')
    pp('''|------|--------|---------|----------|------------|''')
    for i, col in enumerate(mydf.dtypes.index, start=1):
        sam = mydf[col]
        n_unique = len(sam.value_counts())
        if n_unique < max_print_value_counts:
            first = True
            for key, val in dict(sam.value_counts()).items():
                if first:
                    # First value row carries the column id/name/unique count.
                    pp('|%i|%s|%i|%s|%s|' % (i, col, n_unique, key, val))
                    first = False
                else:
                    pp('||-|-|%s|%s|' % (key, val))
            if show_plots:
                plt.figure(i)
                ax = sam.value_counts().plot(kind='barh', figsize=(12, 5))
                _ = plt.title(col.upper())
                _ = plt.xlabel('counts')
        else:
            # Too many distinct values to enumerate: summary row only.
            pp('|%i|%s|%i|||' % (i, col, n_unique))
    return raw_markup_data
"""
Explanation: DataFrame Value Counts
End of explanation
"""
from __future__ import division
import itertools
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix ``cm`` with class tick labels.

    Normalization (per true-class row) can be applied by setting
    `normalize=True`.
    """
    if normalize:
        # Bug fix: normalize *before* drawing.  The original normalized
        # after plt.imshow(), so the heatmap and colorbar showed raw counts
        # while the per-cell labels showed rates.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Label every cell; use white text on dark (high-valued) cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def confusion_maxtrix_stuff(y_test, y_pred, class_names):
    '''
    Plot both the raw and the row-normalized confusion matrix for a set of
    predictions.  (The name keeps the original "maxtrix" typo so existing
    callers keep working.)

    Example
    -------
    >>> confusion_maxtrix_stuff(y_test, y_pred,
    ...                         class_names=RAW_y.status_group.value_counts().keys())
    '''
    # Compute confusion matrix
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
    # Plot non-normalized confusion matrix
    plt.figure(figsize=(8,8))
    plot_confusion_matrix(cnf_matrix, classes=class_names,
                          title='Confusion matrix, without normalization')
    # Plot normalized confusion matrix
    plt.figure(figsize=(8,8))
    plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                          title='Normalized confusion matrix')
    plt.show()
"""
Explanation: Confusion Matrix
End of explanation
"""
# Load the competition tables, keyed by the pump id.
# NOTE(review): the training-values filename is spelled 'traning_set_values.csv'
# (sic) -- presumably the file on disk carries the same typo; confirm before
# renaming either.
RAW_X = pd.read_csv('traning_set_values.csv', index_col='id')
RAW_y = pd.read_csv('training_set_labels.csv', index_col='id')
RAW_TEST_X = pd.read_csv('test_set_values.csv', index_col='id')
"""
Explanation: Import & Explore Data
End of explanation
"""
from datetime import datetime
strptime = datetime.strptime
DATE_FORMAT = "%Y-%m-%d"
# All dates are converted to "days since 2000-01-01".
REFERENCE_DATE_POINT = strptime('2000-01-01', DATE_FORMAT)
# Reducing geo location precision to roughly 0.001 degree (~111 m at the
# equator; the original comment said 11 meters -- TODO confirm intent).
LONG_LAT_PRECISION = 0.001
def sam_datetime_to_number(x):
    # Parse an ISO date string and return the integer day offset from the
    # reference point.
    return (strptime(str(x), DATE_FORMAT) - REFERENCE_DATE_POINT).days
# Transforming Date to Int (only once: dtype 'O' means it is still a string).
if RAW_X.date_recorded.dtype == 'O':
    RAW_X.date_recorded = RAW_X.date_recorded.map(sam_datetime_to_number)
    RAW_TEST_X.date_recorded = RAW_TEST_X.date_recorded.map(sam_datetime_to_number)
# Filling missing/outlier latitudes: values >= -1.0 are treated as invalid
# (Tanzania lies south of -1 degree) and replaced by the mean of valid ones.
_ = np.mean(RAW_X[u'latitude'][RAW_X.latitude < -1.0].values)
if not RAW_X.loc[RAW_X.latitude >= -1.0, u'latitude'].empty:
    RAW_X.loc[RAW_X.latitude >= -1.0, u'latitude'] = _
    RAW_TEST_X.loc[RAW_TEST_X.latitude >= -1.0, u'latitude'] = _
# Filling missing/outlier longitudes analogously (valid values are > 1.0).
_ = np.mean(RAW_X[u'longitude'][RAW_X[u'longitude'] > 1.0].values)
if not RAW_X.loc[RAW_X[u'longitude'] <= 1.0, u'longitude'].empty:
    RAW_X.loc[RAW_X[u'longitude'] <= 1.0, u'longitude'] = _
    RAW_TEST_X.loc[RAW_TEST_X[u'longitude'] <= 1.0, u'longitude'] = _
# Reduce coordinate precision via floor division (guarded so it runs once:
# after the division the mean longitude is far above 50).
if RAW_X.longitude.mean() < 50:
    RAW_X.longitude = RAW_X.longitude // LONG_LAT_PRECISION
    RAW_X.latitude = RAW_X.latitude // LONG_LAT_PRECISION
    RAW_TEST_X.longitude = RAW_TEST_X.longitude // LONG_LAT_PRECISION
    RAW_TEST_X.latitude = RAW_TEST_X.latitude // LONG_LAT_PRECISION
# Coerce the two boolean-ish columns to real booleans (NaN becomes False).
if RAW_X.public_meeting.dtype != 'bool':
    RAW_X.public_meeting = RAW_X.public_meeting == True
    RAW_TEST_X.public_meeting = RAW_TEST_X.public_meeting == True
if RAW_X.permit.dtype != 'bool':
    RAW_X.permit = RAW_X.permit == True
    RAW_TEST_X.permit = RAW_TEST_X.permit == True
# Sanity check: train and test frames must end up with identical dtypes.
if list(RAW_TEST_X.dtypes[RAW_TEST_X.dtypes != RAW_X.dtypes]):
    raise Exception('RAW_X.dtypes and RAW_TEST_X.dtypes are not in Sync')
"""
Explanation: Pre Processing
Log_Lat_Help: Link
Num Data Tranformation
date_recorded --> Int
longitude --> Float(less precision)
latitude --> Float(less precision)
public_meeting --> Bool
permit --> Bool
End of explanation
"""
def text_transformation(name):
    """Normalise a free-text value for label encoding.

    Lower-cases the input, keeps only characters with ord in (96, 128)
    (everything else becomes a space), removes the substring 'and'
    (anywhere, including inside words -- preserved from the original) and
    '/' separators, collapses repeated spaces, and strips the ends.
    Returns None for empty/falsy input.
    """
    if not name:
        return None
    text = name.lower().strip()
    text = ''.join(ch if 96 < ord(ch) < 128 else ' ' for ch in text)
    text = text.replace('and', ' ').replace('/', ' ')
    while '  ' in text:
        text = text.replace('  ', ' ')
    return text.strip()
# Apply the text normalisation to every object (string) column of both the
# training and the test frames, and report (Python 2 print) every column
# whose number of unique values changed as a result.
for col in RAW_X.dtypes[RAW_X.dtypes == object].index:
    aa = len(RAW_X[col].unique())
    RAW_X[col] = RAW_X[col].fillna('').apply(text_transformation)
    RAW_TEST_X[col] = RAW_TEST_X[col].fillna('').apply(text_transformation)
    bb = len(RAW_X[col].unique())
    if aa != bb:
        print col, aa, bb
"""
Explanation: Text Data Tranformations
End of explanation
"""
from collections import defaultdict
from sklearn import preprocessing
# http://stackoverflow.com/questions/24458645/label-encoding-across-multiple-columns-in-scikit-learn
# One LabelEncoder per column, created lazily on first access.
d = defaultdict(preprocessing.LabelEncoder)
# Labels Fit: fit each column's encoder on train+test together so labels
# that only occur in the test set cannot crash the transform step.
sam = pd.concat([RAW_X, RAW_TEST_X]).apply(lambda x: d[x.name].fit(x))
# Labels Transform - Training Data
X = RAW_X.apply(lambda x: d[x.name].transform(x))
TEST_X = RAW_TEST_X.apply(lambda x: d[x.name].transform(x))
# Encode the target labels as integers; `le` is kept for inverse_transform
# when writing the submission later.
le = preprocessing.LabelEncoder().fit(RAW_y[u'status_group'])
y = le.transform(RAW_y[u'status_group'])
# g = sns.PairGrid(X[:1000])
# g.map(plt.scatter);
"""
Explanation: Cols vs Uniq distribution
Data Distribution
Vector Transformation
Feature Selection:
http://machinelearningmastery.com/feature-selection-machine-learning-python/
End of explanation
"""
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Univariate feature selection: keep the 30 columns with the highest
# chi-squared score against the target.
test = SelectKBest(score_func=chi2, k=30)
fit = test.fit(X, y)
cols_names = RAW_X.columns
np.set_printoptions(precision=2)
print(fit.scores_), len(fit.scores_)
col_importances = list(zip(fit.scores_, cols_names))
col_importances.sort(reverse=True)
# Column names sorted by descending chi2 score.
selected_cols = [_[-1] for _ in col_importances[:30] ]
# NOTE(review): SelectKBest.transform returns the kept columns in their
# ORIGINAL frame order, while `selected_cols` is score-sorted -- assigning
# these names below may mislabel the columns; verify against fit.get_support().
features = pd.DataFrame(fit.transform(X))
features.columns = selected_cols
print len(X.columns), features.shape, len(y)
X = pd.DataFrame(fit.transform(X))
TEST_X = pd.DataFrame(fit.transform(TEST_X))
X.columns = selected_cols
TEST_X.columns = selected_cols
"""
Explanation: UniVariate Analysis
End of explanation
"""
from sklearn.decomposition import PCA
# Feature extraction: project the 30 selected features onto 18 principal
# components; the scatter shows the cumulative explained-variance ratio.
pca = PCA(n_components=18)
fit = pca.fit(X)
plt.scatter (range(len(fit.explained_variance_ratio_)), fit.explained_variance_ratio_.cumsum())
X = pca.transform(X)
TEST_X = pca.transform(TEST_X)
"""
Explanation: PCA
End of explanation
"""
from sklearn.model_selection import train_test_split
# Stratified 80/20 split so class proportions are preserved in both halves.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)
# X_train, X_test, y_train, y_test = train_test_split(features, y, test_size=0.25, random_state=42, stratify=y)
"""
Explanation: Test-Train Split
End of explanation
"""
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, class_weight="balanced_subsample", n_jobs=-1)
# class_weight="balanced_subsample"/"balanced"
# criterion="gini"/"entropy"
clf
clf = clf.fit(X_train, y_train)
pred = clf.predict_proba(X_test)
# Held-out accuracies from earlier runs are kept as a log in the comments.
clf.score(X_test, y_test) # 0.79303132333435367 # 0.80252525252525253 # 0.80303030303030298 # 0.80345117845117842
# 0.79814814814814816
# (n_estimators=100, class_weight="balanced_subsample", n_jobs=-1) 0.80782828282828278
# (n_estimators=100, class_weight="balanced_subsample", n_jobs=-1) 0.81186868686868685
# IPython help popup for the fitted classifier (not valid plain Python).
clf?
plt.figure(figsize=(12, 3))
# making importance relative
# NOTE(review): min-max scaling should divide by (b - a), not b; with this
# formula the values are only approximately in [0, 1].
a, b = min(clf.feature_importances_), max(clf.feature_importances_)
cols_imp = (clf.feature_importances_ - a) /b
_ = plt.scatter(range(30), cols_imp)
_ = plt.plot((0, 29), (0.05,0.05), '-r')
_ = plt.xlabel('Columns')
_ = plt.ylabel('Relative Col Importance')
"""
Explanation: Model Training
Random Forest
End of explanation
"""
from sklearn import metrics
# Python 2 prints: sizes of the held-out split, then the gap between local
# accuracies and the public leaderboard score (0.8285).
print map(lambda x: len(x), [X_test, y_test])
clf.score(X_test, y_test) # 0.79303132333435367 # 0.80252525252525253 # 0.80303030303030298 # 0.80345117845117842
print .79303132333435367 - 0.80345117845117842
print .8285 - 0.80345117845117842, .8285 - .79303132333435367
"""
Explanation: Scoring
Random Forest Score
End of explanation
"""
# Build the submission file: predict labels for the test set, map the
# integer predictions back to their string names, and write id + label CSV.
test_ids = RAW_TEST_X.index
predictions = clf.predict(TEST_X)
print (predictions.shape)
predictions_labels = le.inverse_transform(predictions)
# sub = pd.DataFrame(predictions, columns=list(le.classes_))
sub = pd.DataFrame(predictions_labels, columns=['status_group'])
sub.head()
sub.insert(0, 'id', test_ids)
# NOTE(review): reset_index() returns a new frame that is discarded here --
# this line is a no-op as written.
sub.reset_index()
sub.to_csv('submit.csv', index = False)
sub.head()
X.shape
"""
Explanation: XGBOOST
Submission
End of explanation
"""
|
jorisvandenbossche/DS-python-data-analysis | notebooks/pandas_08_reshaping_data.ipynb | bsd-3-clause | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
"""
Explanation: <p><font size="6"><b>07 - Pandas: Tidy data and reshaping</b></font></p>
© 2021, Joris Van den Bossche and Stijn Van Hoey (jorisvandenbossche@gmail.com, stijnvanhoey@gmail.com). Licensed under CC BY 4.0 Creative Commons
End of explanation
"""
# Wide-format example: one row per plant (WWTP), one pH column per treatment.
data = pd.DataFrame({
   'WWTP': ['Destelbergen', 'Landegem', 'Dendermonde', 'Eeklo'],
   'Treatment A': [8.0, 7.5, 8.3, 6.5],
   'Treatment B': [6.3, 5.2, 6.2, 7.2]
})
data
"""
Explanation: Tidy data
`melt` can be used to make a dataframe longer, i.e. to make a tidy version of your data. In a tidy dataset (also sometimes called 'long-form' data or 'denormalized' data) each observation is stored in its own row and each column contains a single variable:
Consider the following example with measurements in different Waste Water Treatment Plants (WWTP):
End of explanation
"""
# Without id_vars every column gets melted, which is rarely what you want.
pd.melt(data) #, id_vars=["WWTP"])
# Keep 'WWTP' as the identifier; the former column headers become the
# 'Treatment' variable and the cell values become the 'pH' column.
data_long = pd.melt(data, id_vars=["WWTP"],
                    value_name="pH", var_name="Treatment")
data_long
"""
Explanation: This data representation is not "tidy":
Each row contains two observations of pH (each from a different treatment)
'Treatment' (A or B) is a variable not in its own column, but used as column headers
Melt - from wide to long/tidy format
We can melt the data set to tidy the data:
End of explanation
"""
# Tidy data works directly with groupby ...
data_long.groupby("Treatment")["pH"].mean() # switch to `WWTP`
# ... and with seaborn: the wide frame has no single pH column to map to y ...
sns.catplot(data=data, x="WWTP", y="...", hue="...", kind="bar") # this doesn't work that easily
# ... while the tidy frame does.
sns.catplot(data=data_long, x="WWTP", y="pH",
            hue="Treatment", kind="bar") # switch `WWTP` and `Treatment`
"""
Explanation: The usage of the tidy data representation has some important benefits when working with groupby or data visualization libraries such as Seaborn:
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data1.py
# %load _solutions/pandas_08_reshaping_data2.py
"""
Explanation: Exercise with energy consumption data
To practice the "melt" operation, we are going to use a dataset from Fluvius (who operates and manages the gas and elektricity networks in Flanders) about the monthly consumption of elektricity and gas in 2021 (https://www.fluvius.be/sites/fluvius/files/2021-10/verbruiksgegevens-per-maand.xlsx).
This data is available as an Excel file.
<div class="alert alert-success">
**EXERCISE 1**:
* Read the "verbruiksgegevens-per-maand.xlsx" file (in the "data/" directory) into a DataFrame `df`.
* Drop the "Regio" column (this column has a constant value "Regio 1" and thus is not that interesting).
<details><summary>Hints</summary>
- Reading Excel files can be done with the `pd.read_excel()` function, passing the path to the file as first argument.
- To drop a column, use the `columns` keyword in the `drop()` method.
</details>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data3.py
"""
Explanation: <div class="alert alert-success">
**EXERCISE 2**:
The actual data (consumption numbers) is spread over multiple columns: one column per month. Make a tidy version of this dataset with a single "consumption" column, and an additional "time" column.
Make sure to keep the "Hoofdgemeente", "Energie" and "SLP" columns in the data set. The "SLP" column contains additional categories about the type of elektricity or gas consumption (eg household vs non-household consumption).
Use `pd.melt()` to create a long or tidy version of the dataset, and call the result `df_tidy`.
<details><summary>Hints</summary>
- If there are columns in the original dataset that you want to keep (with repeated values), pass those names to the `id_vars` keyword of `pd.melt()`.
- You can use the `var_name` and `value_name` keywords to directly specify the column names to use for the new variable and value columns.
</details>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data4.py
"""
Explanation: <div class="alert alert-success">
**EXERCISE 3**:
Convert the "time" column to a column with a datetime data type using `pd.to_datetime`.
<details><summary>Hints</summary>
* When using `pd.to_datetime`, remember to specify a `format`.
</details>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data5.py
# %load _solutions/pandas_08_reshaping_data6.py
"""
Explanation: <div class="alert alert-success">
**EXERCISE 4**:
* Calculate the total consumption of elektricity and gas over all municipalities ("Hoofdgemeente") for each month. Assign the result to a dataframe called `df_overall`.
* Using `df_overall`, make a line plot of the consumption of elektricity vs gas over time.
* Create a separate subplot for elektricity and for gas, putting them next to each other.
* Ensure that the y-limit starts at 0 for both subplots.
<details><summary>Hints</summary>
* If we want to sum the consumption over all municipalities that means we should _not_ include this variable in the groupby keys. On the other hand, we want to calculate the sum *for each* month ("time") and *for each* category of elektricity/gas ("Energie").
* Creating a line plot with seaborn can be done with `sns.relplot(..., kind="line")`.
* If you want to split the plot into multiple subplots based on a variable, check the `row` or `col` keyword.
* The `sns.relplot` returns a "facet grid" object, and you can change an element of each of the subplots of this object using the `set()` method of this object. To set the y-limits, you can use the `ylim` keyword.
</details>
</div>
End of explanation
"""
# Excel-style example: monthly spending per category in long format.
excelample = pd.DataFrame({'Month': ["January", "January", "January", "January",
                                     "February", "February", "February", "February",
                                     "March", "March", "March", "March"],
                           'Category': ["Transportation", "Grocery", "Household", "Entertainment",
                                        "Transportation", "Grocery", "Household", "Entertainment",
                                        "Transportation", "Grocery", "Household", "Entertainment"],
                           'Amount': [74., 235., 175., 100., 115., 240., 225., 125., 90., 260., 200., 120.]})
excelample
# Pivot to the familiar spreadsheet layout: categories as rows, months as columns.
excelample_pivot = excelample.pivot(index="Category", columns="Month", values="Amount")
excelample_pivot
"""
Explanation: Pivoting data
Cfr. excel
People who know Excel, probably know the Pivot functionality:
The data of the table:
End of explanation
"""
# Total per category: sum over the month columns (axis=1).
excelample_pivot.sum(axis=1)
# Total per month: sum over the category rows (axis=0).
excelample_pivot.sum(axis=0)
"""
Explanation: Interested in Grand totals?
End of explanation
"""
# Six-passenger subsample: each (Pclass, Sex) pair occurs at most once,
# so a plain (non-aggregating) pivot works.
df = pd.DataFrame({'Fare': [7.25, 71.2833, 51.8625, 30.0708, 7.8542, 13.0],
                   'Pclass': [3, 1, 1, 2, 3, 2],
                   'Sex': ['male', 'female', 'male', 'female', 'female', 'male'],
                   'Survived': [0, 1, 0, 1, 0, 1]})
df
df.pivot(index='Pclass', columns='Sex', values='Fare')
df.pivot(index='Pclass', columns='Sex', values='Survived')
"""
Explanation: Pivot is just reordering your data:
Small subsample of the titanic dataset:
End of explanation
"""
# Full titanic dataset (one row per passenger).
df = pd.read_csv("data/titanic.csv")
df.head()
"""
Explanation: So far, so good...
Let's now use the full titanic dataset:
End of explanation
"""
# Plain pivot fails on the full dataset: the (Sex, Pclass) pairs are
# duplicated, so one output cell would need to hold several Fare values.
try:
    df.pivot(index='Sex', columns='Pclass', values='Fare')
except Exception as e:
    print("Exception!", e)
"""
Explanation: And try the same pivot (no worries about the try-except, this is here just used to catch a loooong error):
End of explanation
"""
df.loc[[1, 3], ["Sex", 'Pclass', 'Fare']]
"""
Explanation: This does not work, because we would end up with multiple values for one cell of the resulting frame, as the error says: duplicated values for the columns in the selection. As an example, consider the following rows of our three columns of interest:
End of explanation
"""
# pivot_table aggregates duplicates (mean by default), so this succeeds
# where plain pivot raised above.
df = pd.read_csv("data/titanic.csv")
df.pivot_table(index='Sex', columns='Pclass', values='Fare')
"""
Explanation: Since pivot is just restructering data, where would both values of Fare for the same combination of Sex and Pclass need to go?
Well, they need to be combined, according to an aggregation functionality, which is supported by the function `pivot_table`
<div class="alert alert-danger">
<b>NOTE</b>:
<ul>
<li><b>Pivot</b> is purely restructering: a single value for each index/column combination is required.</li>
</ul>
</div>
Pivot tables - aggregating while pivoting
End of explanation
"""
# Other aggregations: maximum fare and passenger counts per (Sex, Pclass).
df.pivot_table(index='Sex', columns='Pclass',
               values='Fare', aggfunc='max')
df.pivot_table(index='Sex', columns='Pclass',
               values='Fare', aggfunc='count')
"""
Explanation: <div class="alert alert-info">
<b>REMEMBER</b>:
* By default, `pivot_table` takes the **mean** of all values that would end up into one cell. However, you can also specify other aggregation functions using the `aggfunc` keyword.
</div>
End of explanation
"""
pd.crosstab(index=df['Sex'], columns=df['Pclass'])
"""
Explanation: <div class="alert alert-info">
<b>REMEMBER</b>:
<ul>
<li>There is a shortcut function for a <code>pivot_table</code> with a <code>aggfunc='count'</code> as aggregation: <code>crosstab</code></li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data7.py
# %load _solutions/pandas_08_reshaping_data8.py
"""
Explanation: Exercises
<div class="alert alert-success">
<b>EXERCISE 5</b>:
<ul>
<li>Make a pivot table with the survival rates for Pclass vs Sex.</li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data9.py
# %load _solutions/pandas_08_reshaping_data10.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 6</b>:
<ul>
<li>Make a table of the median Fare payed by aged/underaged vs Sex.</li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data11.py
# %load _solutions/pandas_08_reshaping_data12.py
"""
Explanation: <div class="alert alert-success">
**EXERCISE 7**:
A pivot table aggregates values for each combination of the new row index and column values. That reminds of the "groupby" operation.
Can you mimick the pivot table of the first exercise (a pivot table with the survival rates for Pclass vs Sex) using `groupby()`?
</div>
End of explanation
"""
# Small dummy frame: every (A, B) combination appears exactly once.
df = pd.DataFrame({'A':['one', 'one', 'two', 'two'],
                   'B':['a', 'b', 'a', 'b'],
                   'C':range(4)})
df
"""
Explanation: Reshaping with stack and unstack
The docs say:
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
Indeed...
<img src="../img/pandas/schema-stack.svg" width=50%>
Before we speak about hierarchical index, first check it in practice on the following dummy example:
End of explanation
"""
# Move A and B into a hierarchical MultiIndex so stack/unstack can pivot them.
df = df.set_index(['A', 'B']) # Indeed, you can combine two indices
df
# unstack: the innermost index level (B) becomes columns -> wider frame.
result = df['C'].unstack()
result
# stack: columns go back into the index; reset_index restores the flat frame.
df = result.stack().reset_index(name='C')
df
"""
Explanation: To use stack/unstack, we need the values we want to shift from rows to columns or the other way around as the index:
End of explanation
"""
# Reload the full titanic data for the pivot-table mimicking exercise.
df = pd.read_csv("data/titanic.csv")
df.head()
"""
Explanation: <div class="alert alert-info">
<b>REMEMBER</b>:
<ul>
<li><b>stack</b>: make your data <i>longer</i> and <i>smaller</i> </li>
<li><b>unstack</b>: make your data <i>shorter</i> and <i>wider</i> </li>
</ul>
</div>
Mimick pivot table
To better understand and reason about pivot tables, we can express this method as a combination of more basic steps. In short, the pivot is a convenient way of expressing the combination of a groupby and stack/unstack.
End of explanation
"""
# pivot_table = groupby + unstack in one call: mean survival per (Pclass, Sex).
df.pivot_table(index='Pclass', columns='Sex',
               values='Survived', aggfunc='mean')
"""
Explanation: Exercises
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data13.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 8</b>:
<ul>
<li>Get the same result as above based on a combination of `groupby` and `unstack`</li>
<li>First use `groupby` to calculate the survival ratio for all groups</li>
<li>Then, use `unstack` to reshape the output of the groupby operation</li>
</ul>
</div>
End of explanation
"""
# Movie datasets prepared by Brandon Rhodes for his PyCon pandas tutorial.
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
"""
Explanation: [OPTIONAL] Exercises: use the reshaping methods with the movie data
These exercises are based on the PyCon tutorial of Brandon Rhodes (so credit to him!) and the datasets he prepared for that. You can download these data from here: titles.csv and cast.csv and put them in the /notebooks/data folder.
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data14.py
# %load _solutions/pandas_08_reshaping_data15.py
# %load _solutions/pandas_08_reshaping_data16.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 9</b>:
<ul>
<li>Plot the number of actor roles each year and the number of actress roles each year over the whole period of available movie data.</li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data17.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 10</b>:
<ul>
<li>Plot the number of actor roles each year and the number of actress roles each year. Use kind='area' as plot type</li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data18.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 11</b>:
<ul>
<li>Plot the fraction of roles that have been 'actor' roles each year over the whole period of available movie data.</li>
</ul>
</div>
End of explanation
"""
# %load _solutions/pandas_08_reshaping_data19.py
# %load _solutions/pandas_08_reshaping_data20.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE 12</b>:
<ul>
<li>Define a year as a "Superman year" when films of that year feature more Superman characters than Batman characters. How many years in film history have been Superman years?</li>
</ul>
</div>
End of explanation
"""
|
char-lie/physical-informatics | Lab1/lab1.ipynb | mit | MU = 3.9
N = int(10E4)
INITIAL = 0.5
MIN_SIZE = 2
MAX_SIZE = 26
BITS_RANGE = array(list(range(MIN_SIZE, MAX_SIZE + 1)))
def generate(x, mu, n):
    """Yield n iterates of the logistic map x_{k+1} = mu * x_k * (1 - x_k),
    starting from (and including) the seed x."""
    step = 0
    value = x
    while step < n:
        yield value
        value = mu * value * (1 - value)
        step += 1
def bin_to_dec(sequence, bits):
    """Pack consecutive groups of `bits` binary digits into integers.

    Bit i of a group carries weight 2**i (little-endian).  Trailing digits
    that do not fill a whole group are discarded.
    """
    usable = sequence.size - sequence.size % bits
    blocks = sequence.flatten()[:usable].reshape((sequence.size // bits, bits))
    weights = array([2 ** i for i in range(bits)])
    return (blocks * weights).sum(axis=1)
def R(probabilities, beta):
    """Renyi entropy of order `beta` in nats.

    Zero-probability entries are dropped before the power sum.
    """
    positive = probabilities[probabilities > 0]
    return log((positive ** beta).sum()) / (1 - beta)
def H(probabilities):
    """Shannon entropy in nats; zero-probability entries contribute nothing."""
    nonzero = probabilities[probabilities > 0]
    return -(log(nonzero) * nonzero).sum()
def show_entropies(entropies, errors=None, count=None, labels=None):
    """Plot each entropy curve against the block size (global BITS_RANGE).

    entropies : list of sequences, one curve per entry, aligned with BITS_RANGE
    errors    : optional dict of kwargs forwarded verbatim to plt.errorbar
    count     : optional number of leading block sizes to display
    labels    : legend labels, one per curve (must not be None)
    """
    for entropy, label in zip(entropies, labels):
        if count is None:
            plt.plot(BITS_RANGE, entropy, '-o', label=label)
        else:
            plt.plot(BITS_RANGE[:count], entropy, '-o', label=label)
    if errors is not None:
        plt.errorbar(**errors)
    plt.legend()
    plt.show()
"""
Explanation: Initial conditions
End of explanation
"""
# Iterate the logistic map, threshold at 0.5 to get bits, then pack the bits
# into integers for every block size in BITS_RANGE.
source_sequence = array(list(generate(INITIAL, MU, N)))
bin_sequence = (source_sequence > 0.5).astype('i')
sequences = array([bin_to_dec(bin_sequence, bits_count).astype('i') for bits_count in BITS_RANGE])
# Empirical symbol probabilities for each block size.
probabilities = array([bincount(sequence)/sequence.size for sequence in sequences])
probabilities[1]
plt.plot(source_sequence[:100])
plt.show()
"""
Explanation: Generate a sequence
Generate long sequence of real numbers
For further analysis it's handy to convert it to binary array
Then we can get different sequences of integers by splitting binary array into blocks
End of explanation
"""
# Shannon and Renyi (order 2 and 3) entropies for every block size.
shannon_entropies = [H(p) for p in probabilities]
renyi_entropies2 = [R(p, 2) for p in probabilities]
renyi_entropies3 = [R(p, 3) for p in probabilities]
"""
Explanation: Entropies
We need to calculate different entropies for each sequence
End of explanation
"""
show_entropies([shannon_entropies, renyi_entropies2, renyi_entropies3], labels=['Shannon', 'Renyi (2)', 'Renyi (3)'])
"""
Explanation: Now we have mappings from binary block size to entropies of sequences
End of explanation
"""
# Average of the three entropy estimates per block size.
means = array([shannon_entropies, renyi_entropies2, renyi_entropies3]).mean(axis=0)
show_entropies([shannon_entropies, renyi_entropies2, renyi_entropies3, means], labels=['Shannon', 'Renyi (2)', 'Renyi (3)', 'Mean'])
"""
Explanation: Now we will calculate average entropy for each block size
End of explanation
"""
# Reference entropies of a uniform (maximally random) n-bit source.  All
# three expressions simplify to n*log(2): for the uniform distribution the
# Shannon and Renyi entropies coincide.
shannon_entropies_chaos = [n*log(2) for n in BITS_RANGE]
renyi_entropies2_chaos = [-(n*log(2)-2*n*log(2)) for n in BITS_RANGE]
renyi_entropies3_chaos = [-(n*log(2)-3*n*log(2))/2 for n in BITS_RANGE]
show_entropies([shannon_entropies, renyi_entropies2, renyi_entropies3, means, shannon_entropies_chaos], labels=['Shannon', 'Renyi (2)', 'Renyi (3)', 'Mean', 'Uniform entropy'])
"""
Explanation: Get same entropies for chaotic sequence
Sequences of uniformly distributed numbers with same binary sizes as existent sequences have the biggest possible entropies
End of explanation
"""
# Spread of the three estimators around their mean, per block size.
standard_deviation = array([shannon_entropies, renyi_entropies2, renyi_entropies3]).std(axis=0)
show_entropies([shannon_entropies, renyi_entropies2, renyi_entropies3], labels=['Shannon', 'Renyi (2)', 'Renyi (3)'], errors={'x': BITS_RANGE, 'y': means, 'yerr': standard_deviation, 'label': 'Mean'})
"""
Explanation: To get only needed numbers we need to calculate standard deviations
It will show us how good the average value estimates entropy of the source
End of explanation
"""
# Least-squares fit H_n ~ a*n + b on the block sizes before the standard
# deviation peaks; the slope `a` is the entropy-rate estimate.
count = standard_deviation.argmax()
a, b = lstsq(vstack([BITS_RANGE[:count], ones(count)]).T, means[:count])[0]
approximation_error = array([means[:count], a*BITS_RANGE[:count]+b]).std(axis=0)
show_entropies([shannon_entropies[:count], renyi_entropies2[:count], renyi_entropies3[:count], means[:count]], labels=['Shannon', 'Renyi (2)', 'Renyi (3)', 'Mean'], errors={'x': BITS_RANGE[:count], 'y': a*BITS_RANGE[:count]+b, 'yerr': approximation_error, 'label': 'Linear approximation'}, count=count)
show_entropies([shannon_entropies_chaos[:count], means[:count]], labels=['Uniform entropy', 'Mean'], errors={'x': BITS_RANGE[:count], 'y': a*BITS_RANGE[:count]+b, 'yerr': approximation_error, 'label': 'Linear approximation'}, count=count)
"""
Explanation: Analysis
We can see from the chart that standard deviation grows to some moment and then falls down
Experiments with other source sequences have shown that the point with maximal standard deviation moves to right with sequence size growth
This can mean that before that moment we can gather data which is good for entropy estimate and after that we have not enough numbers for estimate
For example, when block size is equal to sequence length, entropy will be zero but source generates different pseudorandom numbers
Estimate
Entropy of the source should be calculated as
H = lim(n to infinity) Hn/n
where n is a block size and Hn is an entropy of the sequence with size of block equal to n
Here are following conserns:
- we cannot get n big enough without sequence size increase;
- it's impossible to get very big sequence;
- estimate based on single block size will have undefined error
We can use least squares method to estimate the mean entropies (will be denoted as Hn') using linear model
Hn' = k * n + c
Here k will be an estimate of H because following equality will take a place when n is big enough
Hn'/n = k + c/n -> k = H
End of explanation
"""
# The fitted slope `a` is the entropy-rate estimate; log(2) per bit is the
# upper bound attained by a fully random (uniform) binary source.
print('Source entropy estimate is', a)
print('Chaotic entropy is', log(2))
# First few successive differences of the Shannon entropies.
(array(shannon_entropies[1:]) - shannon_entropies[:-1])[:count][:6]
"""
Explanation: Results
Here are estimate of the source entropy and value of chaotic source
End of explanation
"""
|
probml/pyprobml | notebooks/book2/17/gp_spectral_mixture.ipynb | mit | %%capture
import jax
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
try:
import tinygp
except ModuleNotFoundError:
%pip install -qq tinygp
import tinygp
try:
import optax
except ModuleNotFoundError:
%pip install -qq optax
import optax
try:
import probml_utils as pml
except ModuleNotFoundError:
%pip install git+https://github.com/probml/probml-utils.git
import probml_utils as pml
import seaborn as sns
# Use smaller fonts/markers when figures are being exported for LaTeX.
pml.latexify(width_scale_factor=2)
plt.rcParams["legend.fontsize"] = 8 if pml.is_latexify_enabled() else "medium"
scattersize = 3 if pml.is_latexify_enabled() else 6
class SpectralMixture(tinygp.kernels.Kernel):
    """Spectral mixture kernel (Wilson & Adams, 2013) for tinygp.

    Each of the Q mixture components contributes one term to the sum;
    ``weight``, ``scale`` and ``freq`` hold the per-component amplitude,
    length scale and central frequency.
    """

    def __init__(self, weight, scale, freq):
        # atleast_1d lets callers pass scalars for a single-component mixture.
        self.weight = jnp.atleast_1d(weight)
        self.scale = jnp.atleast_1d(scale)
        self.freq = jnp.atleast_1d(freq)

    def evaluate(self, X1, X2):
        # Stationary kernel: it depends only on the lag tau = |x1 - x2|.
        # The trailing axis broadcasts tau against the Q mixture components.
        tau = jnp.atleast_1d(jnp.abs(X1 - X2))[..., None]
        # k(tau) = sum_q w_q * prod_d exp(-2 pi^2 tau^2 / s_q^2) * cos(2 pi f_q tau)
        # NOTE(review): `scale` enters as a length scale in the denominator,
        # matching the tinygp tutorial's parameterization -- confirm against
        # the paper's (variance-based) form before reusing elsewhere.
        return jnp.sum(
            self.weight
            * jnp.prod(
                jnp.exp(-2 * jnp.pi**2 * tau**2 / self.scale**2) * jnp.cos(2 * jnp.pi * self.freq * tau),
                axis=-1,
            )
        )
"""
Explanation: Spectral mixture kernel in 1d for GP
https://tinygp.readthedocs.io/en/latest/tutorials/kernels.html#example-spectral-mixture-kernel
In this section, we will implement the "spectral mixture kernel" proposed by Gordon Wilson & Adams (2013).
End of explanation
"""
def build_gp(theta):
    """Build a GP over the global ``train_input`` grid from the parameter
    dict ``theta`` (unconstrained: log weights/scales/freqs, log noise, mean)."""
    kernel = SpectralMixture(
        jnp.exp(theta["log_weight"]),
        jnp.exp(theta["log_scale"]),
        jnp.exp(theta["log_freq"]),
    )
    # exp() maps the log-parameters back to positive values; `diag` is the
    # observation-noise variance added to the kernel diagonal.
    return tinygp.GaussianProcess(kernel, train_input, diag=jnp.exp(theta["log_diag"]), mean=theta["mean"])
# Ground-truth hyperparameters (stored on the log scale) for a
# two-component spectral mixture.
params = {
    "log_weight": np.log([1.0, 1.0]),
    "log_scale": np.log([10.0, 20.0]),
    "log_freq": np.log([1.0, 1.0 / 3.0]),
    "log_diag": np.log(0.1),
    "mean": 0.0,
}
random = np.random.default_rng(546)
# 50 uniform inputs on [0, 10], sorted so line plots look sensible.
train_input = np.sort(random.uniform(0, 10, 50))
true_gp = build_gp(params)
# Draw one noisy realisation from the true GP as training data.
train_output = true_gp.sample(jax.random.PRNGKey(123))
plt.figure()
plt.plot(train_input, train_output, ".k", markersize=scattersize)
plt.ylim(-4.5, 4.5)
plt.title("Simulated dataset")
plt.xlabel(r"$x$")
plt.ylabel(r"$y$");
"""
Explanation: Now let's implement the simulate some data from this model:
End of explanation
"""
@jax.jit
@jax.value_and_grad
def loss(theta):
    # Negative log marginal likelihood of the training targets under theta;
    # value_and_grad makes the call return (loss, gradients).
    return -build_gp(theta).log_probability(train_output)
# Plain SGD for 1000 steps on the negative log marginal likelihood.
opt = optax.sgd(learning_rate=3e-4)
opt_state = opt.init(params)
for i in range(1000):
    loss_val, grads = loss(params)
    updates, opt_state = opt.update(grads, opt_state)
    params = optax.apply_updates(params, updates)
opt_gp = build_gp(params)
# Compare the learned kernel profile k(tau) against the ground truth.
tau = np.linspace(0, 5, 500)
plt.figure()
plt.plot(tau, true_gp.kernel(tau[:1], tau)[0], "--k", lw=1, label="true kernel")
plt.plot(tau, opt_gp.kernel(tau[:1], tau)[0], lw=1, label="inferred kernel")
plt.xlabel(r"$\tau$")
plt.ylabel(r"$k(\tau)$")
plt.legend()
sns.despine()
pml.savefig("gp-spectral-mixture-learned-kernel")
"""
Explanation: One thing to note here is that we've used named parameters in a dictionary, instead of an array of parameters as in some of the other examples.
This would be awkward (but not impossible) to fit using scipy, so instead we'll use optax for optimization:
End of explanation
"""
# Posterior predictive on a dense grid that extends beyond the data range.
test_input = np.linspace(-2, 12, 500)
pred_gp = opt_gp.condition(train_output, test_input).gp
mu, variance = pred_gp.loc, pred_gp.variance
plt.figure()
plt.plot(train_input, train_output, ".k", label="data", markersize=scattersize)
# Shade +/- one predictive standard deviation around the mean.
plt.fill_between(
    test_input,
    mu + np.sqrt(variance),
    mu - np.sqrt(variance),
    color="C0",
    alpha=0.5,
    label="prediction",
)
plt.plot(test_input, mu, color="C0", lw=1)
plt.xlabel(r"$x$")
plt.ylabel(r"$y$")
plt.legend(loc="lower left")
sns.despine()
pml.savefig("gp-spectral-mixture-pred")
"""
Explanation: Using our optimized model, over-plot the conditional predictions:
End of explanation
"""
|
Danghor/Algorithms | Python/Chapter-07/.ipynb_checkpoints/Heap-checkpoint.ipynb | gpl-2.0 | class Heap:
sNodeCount = 0
def __init__(self):
Heap.sNodeCount += 1
self.mID = str(Heap.sNodeCount)
def getID(self):
return self.mID # used only by graphviz
"""
Explanation: Implementing Priority Queues as Heaps
Ths notebook presents <em style="color:blue">heaps</em>. We define the set $\mathcal{H}$ of heaps by induction:
$\texttt{Nil} \in \mathcal{H}$.
$\texttt{Node}(p,v,l,r) \in \mathcal{H}$ if and only if the following is true:
$p \leq l \;\wedge\; p \leq r$
The priority stored at the root is less than or equal to every other priority stored in
the heap. This condition is known as the <em style="color:blue">heap condition</em>.
It is important to remember that we associate <em style="color:blue">high</em> priorities
with <em style="color:blue">small</em> numbers.
- $\mid l.\texttt{count}() - r.\texttt{count}() \mid \;\leq\, 1$
The number of elements in the left subtree differs from the number of elements stored in
the right subtree by at most one.
This condition is known as the <em style="color:blue;">balancing condition</em>.
- $l \in \mathcal{H} \;\wedge\; r \in \mathcal{H}$
Both the left and the right subtree of a heap are heaps.
The class Heap is a superclass for constructing heaps. We will later define the classes Nil and Node that represent heaps of the form $\texttt{Nil}$ and $\texttt{Node}(p, v, l, r)$ respectively. The class Heap has one static variable sNodeCount which is needed to assign unique identifiers to different nodes. Every object of class Heap has a uniques identifier mID that is stored as a member variable. This identifier is used by graphviz. In order to generate new identifiers we use the static variable sNodeCount as a counter.
End of explanation
"""
def _make_string(self, attributes):
    """Render ``self`` as ``'ClassName(attr1, attr2, ...)'``.

    ``attributes`` is the list of member-variable names whose values
    are stringified (in order) between the parentheses.
    """
    cls_name = type(self).__name__
    rendered = [str(getattr(self, attr)) for attr in attributes]
    return cls_name + '(' + ', '.join(rendered) + ')'

Heap._make_string = _make_string
"""
Explanation: The function make_string is a helper function that is used to simplify the implementation of the method __str__.
- self is the object that is to be rendered as a string
- attributes is a list of the names of those member variables of the object self that are used to create the string that is returned.
End of explanation
"""
import graphviz as gv
"""
Explanation: Graphical Representation
End of explanation
"""
def toDot(self):
    """Return a graphviz Digraph depicting this heap.

    Nil leaves render as points; Node objects render as ``priority`` or
    as a ``{priority|value}`` record when a value is present.
    """
    dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
    nodeDict = {}
    # map each node's unique id to the node object itself
    self._collectIDs(nodeDict)
    for n, t in nodeDict.items():
        if isinstance(t, Nil):
            dot.node(n, label='', shape='point')
        elif isinstance(t, Node):
            # 'is not None' (identity), not '!= None': the value could be a
            # type whose __ne__ misbehaves, and identity is the idiomatic test
            if t.mValue is not None:
                dot.node(n, label='{' + str(t.mPriority) + '|' + str(t.mValue) + '}')
            else:
                dot.node(n, label=str(t.mPriority))
        else:
            assert False, f'Unknown node {t}'
    # second pass: draw the parent -> child edges
    for n, t in nodeDict.items():
        if isinstance(t, Node):
            dot.edge(n, t.mLeft.getID())
            dot.edge(n, t.mRight.getID())
    return dot

Heap.toDot = toDot
"""
Explanation: The method $t.\texttt{toDot}()$ takes a binary trie $t$ and returns a graph that depicts the tree $t$.
End of explanation
"""
def _collectIDs(self, nodeDict):
    """Record every node of this heap in ``nodeDict`` keyed by its unique id.

    After the call, ``nodeDict[n.getID()] == n`` for every node n in the heap.
    """
    nodeDict[self.getID()] = self
    if isinstance(self, Node):
        for child in (self.mLeft, self.mRight):
            child._collectIDs(nodeDict)

Heap._collectIDs = _collectIDs
"""
Explanation: The method $t.\texttt{collectIDs}(d)$ takes a binary trie $t$ and a dictionary $d$ and updates the dictionary so that the following holds:
$$ d[\texttt{id}] = n \quad \mbox{for every node $n$ in $t$.} $$
Here, $\texttt{id}$ is the unique identifier of the node $n$, i.e. $d$ associates the identifiers with the corresponding nodes.
End of explanation
"""
class Nil(Heap):
    """The empty heap."""
    def _count(self):
        """An empty heap stores no (priority, value) pairs."""
        return 0
    def __str__(self):
        return 'Nil()'
"""
Explanation: Defining $\texttt{Nil}$ and $\texttt{Node}(p, v, l, r)$ as Classes
The class Nil represents an empty heap.
End of explanation
"""
class Node(Heap):
    """A non-empty heap: a (priority, value) pair plus two subheaps."""
    def __init__(self, priority, value, left, right):
        Heap.__init__(self)
        self.mPriority = priority  # smallest priority in this heap (heap condition)
        self.mValue    = value     # payload associated with mPriority
        self.mLeft     = left      # left subheap
        self.mRight    = right     # right subheap
        # cache the size so _count() is O(1) rather than O(n)
        self.mCount = left._count() + right._count() + 1
    def _extract(self):
        """Return the four components (priority, value, left, right)."""
        return self.mPriority, self.mValue, self.mLeft, self.mRight
    def _count(self):
        """Number of (priority, value) pairs stored in this heap."""
        return self.mCount
    def __str__(self):
        # call the helper attached to Heap (self._make_string) instead of the
        # module-level name: the notebook's convention is to 'del' module-level
        # helpers after attaching them, which would break the old direct call
        return self._make_string(['mPriority', 'mValue', 'mLeft', 'mRight'])
"""
Explanation: The class Node represents a heap of the form $\texttt{Node}(p,v,l,r)$ where
- $p$ is the <em style="color:blue">priority</em> stored as mPriority,
- $v$ is the <em style="color:blue">value</em> stored as mValue,
- $l$ is the <em style="color:blue">left subtree</em> stored as mLeft,
- $r$ is the <em style="color:blue">right subtree</em> stored as mRight,
- The number of nodes is stored in the member variable mCount.
End of explanation
"""
def top(self):
    """Nil.top() = Ω: the empty heap has no topmost pair, so return None."""
    return None
Nil.top = top
"""
Explanation: Implementing the Method top
For the class Nil, the function topis specified via a single equation:
- $\texttt{Nil}.\texttt{top}() = \Omega$
End of explanation
"""
def top(self):
    """Return the pair (priority, value) stored at the root of the heap."""
    return self.mPriority, self.mValue
Node.top = top
del top
"""
Explanation: For the class Node, the function top is specified via the following equation:
- $\texttt{Node}(p,v,l,r).\texttt{top}() = (p,v)$
End of explanation
"""
def insert(self, p, v):
    """Inserting into the empty heap yields a singleton heap."""
    return Node(p, v, Nil(), Nil())
Nil.insert = insert
"""
Explanation: Implementing the method insert
$\texttt{Nil}.\texttt{insert}(p,v) = \texttt{Node}(p,v,\texttt{Nil}, \texttt{Nil})$
End of explanation
"""
def insert(self, p, v):
    """Return a new heap with (p, v) inserted.

    The pair with the smaller priority stays at the root (heap condition);
    the other pair sinks into the smaller subtree (balancing condition).
    Ties keep the existing root and prefer the left subtree.
    """
    old_p, old_v, left, right = self._extract()
    if old_p <= p:
        root, sink = (old_p, old_v), (p, v)
    else:
        root, sink = (p, v), (old_p, old_v)
    if left._count() <= right._count():
        return Node(root[0], root[1], left.insert(*sink), right)
    return Node(root[0], root[1], left, right.insert(*sink))

Node.insert = insert
del insert
"""
Explanation: $p_{\mathrm{top}} \leq p \;\wedge\; l.\texttt{count}() \leq r.\texttt{count}() \;\rightarrow\;
\texttt{Node}(p_{\mathrm{top}},v_\mathrm{top},l,r).\texttt{insert}(p,v) =
\texttt{Node}\bigl(p_\mathrm{top},v_\mathrm{top},l.\texttt{insert}(p,v), r\bigr)$
$p_{\mathrm{top}} \leq p \;\wedge\; l.\texttt{count}() > r.\texttt{count}() \;\rightarrow
\texttt{Node}(p_{\mathrm{top}},v_\mathrm{top},l,r).\texttt{insert}(p,v) =
\texttt{Node}\bigl(p_\mathrm{top},v_\mathrm{top},l,r.\texttt{insert}(p,v)\bigr)$
$p_{\mathrm{top}} > p \;\wedge\; l.\texttt{count}() \leq r.\texttt{count}() \;\rightarrow
\texttt{Node}(p_{\mathrm{top}},v_\mathrm{top},l,r).\texttt{insert}(p,v) =
\texttt{Node}\bigl(p,v,l.\texttt{insert}(p_\mathrm{top},v_\mathrm{top}), r\bigr)$
$p_{\mathrm{top}} > p \;\wedge\; l.\texttt{count}() > r.\texttt{count}() \;\rightarrow
\texttt{Node}(p_{\mathrm{top}},v_\mathrm{top},l,r).\texttt{insert}(p,v) =
\texttt{Node}\bigl(p,v,l,r.\texttt{insert}(p_\mathrm{top},v_\mathrm{top})\bigr)$
End of explanation
"""
def remove(self):
    """Removing from the empty heap leaves it unchanged."""
    return self
Nil.remove = remove
"""
Explanation: Implementing the Method remove
$\texttt{Nil}.\texttt{remove}() = \texttt{Nil}$
End of explanation
"""
def remove(self):
    """Return a new heap with the root pair removed.

    If one subtree is empty the other becomes the result; otherwise the
    child with the smaller root priority is promoted and the removal
    recurses into that child.
    """
    _, _, left, right = self._extract()
    if isinstance(left, Nil):
        return right
    if isinstance(right, Nil):
        return left
    left_p, left_v = left.top()
    right_p, right_v = right.top()
    if left_p <= right_p:
        return Node(left_p, left_v, left.remove(), right)
    return Node(right_p, right_v, left, right.remove())

Node.remove = remove
del remove
"""
Explanation: $\texttt{Node}(p,v,\texttt{Nil},r).\texttt{remove}() = r$
$\texttt{Node}(p,v,l,\texttt{Nil}).\texttt{remove}() = l$
$l = \texttt{Node}(p_1,v_1,l_1,r_1) \;\wedge\; r = \texttt{Node}(p_2,v_2,l_2,r_2) \;\wedge\; p_1 \leq p_2 \;\rightarrow
\texttt{Node}(p,v,l,r).\texttt{remove}() = \texttt{Node}(p_1,v_1,l.\texttt{remove}(),r)$
$l = \texttt{Node}(p_1,v_1,l_1,r_1) \;\wedge\; r = \texttt{Node}(p_2,v_2,l_2,r_2) \;\wedge\; p_1 > p_2 \rightarrow
\texttt{Node}(p,v,l,r).\texttt{remove}() = \texttt{Node}(p_2,v_2,l,r.\texttt{remove}())$
End of explanation
"""
# Exercise the heap: insert seven labelled pairs, then remove them one by one,
# rendering the tree after every step.
h = Nil()
h.toDot()
h = h.insert(2, 'a')
h.toDot()
h = h.insert(1, 'b')
h.toDot()
h = h.insert(7, 'c')
h.toDot()
h = h.insert(0, 'd')
h.toDot()
h = h.insert(8, 'e')
h.toDot()
h = h.insert(3, 'f')
h.toDot()
h = h.insert(4, 'g')
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
# fill the (now empty) heap with the priorities 1..31 to show the balanced shape
for i in range(1, 31+1):
    h = h.insert(i, None)
h.toDot()
"""
Explanation: Testing
End of explanation
"""
def heap_sort(L):
    """Return a new, ascendingly sorted list containing the elements of L.

    Phase 1 inserts every element of L into an initially empty heap H;
    phase 2 repeatedly takes the minimum off H and appends it to S.
    """
    H = Nil()
    for p in L:
        H = H.insert(p, None)
    S = []
    # IPython rich display of the fully built heap — a visual debugging aid
    display(H.toDot())
    while isinstance(H, Node):
        p, _ = H.top()
        S.append(p)
        H = H.remove()
    return S
heap_sort([77, 54, 68, 7, 13, 1, 4, 5, 6, 3, 12, 67, 12, 14, 23, 54, 67])
"""
Explanation: Heapsort
Given a list L, the function heap_sort(L) returns a sorted version of L. The algorithm works in two phases.
- In the first phase, all elements of the list L are inserted into the empty heap H, which is initially empty.
- In the second phase, the elements of H are extracted one by one beginning with the smallest elements. These elements
are appended to the list S, which is initially empty. When the function returns, S contains all the elements of L
and is sorted ascendingly.
End of explanation
"""
|
deeplearningsp/5_meetup | src/perceptron.ipynb | mit | %matplotlib inline
import numpy as np
import pandas as pd
import inspect
import matplotlib.pyplot as plt
from perceptron import Perceptron
plt.style.use('ggplot')
print inspect.getsource(Perceptron)
inputs = np.array([0.2, 12.2,0.98])
pc = Perceptron(len(inputs), 0.5)
"""
Explanation: Modeling a Perceptron
To show the behavior of a perceptron, we will use a very simple one: a perceptron with a Heavyside activation function.
End of explanation
"""
pc
"""
Explanation: The perceptron was initialized with random weights to start.
End of explanation
"""
def f(x):
    """The reference line y = -2x + 10 used to separate the training points."""
    return 10. - 2.*x
# Build a toy training set: four x positions, the line f(x), and random y
# targets; label a point 1 when it lies above the line.
x = np.array([1,3,7,4])
y = f(x)  # BUG FIX: was f(x_arr) — x_arr is undefined (NameError)
plt.plot(x,y)
y_test = np.random.randint(1,10,4)
y_test
fig, axs = plt.subplots()
axs.plot(*zip(x, y_test), marker='o', ls='')
axs.plot(x,y)
# hide the plot frame for a cleaner figure
axs.spines["top"].set_visible(False)
axs.spines["right"].set_visible(False)
axs.spines["left"].set_visible(False)
axs.spines["bottom"].set_visible(False)
# target is 1 where the random point lies above the line, else 0
training_data = (y < y_test).astype(int)
training_data
x, y_test, training_data
df = pd.DataFrame({'x':x, 'y':y_test, 'target':training_data})
df
# train the perceptron on [bias=1, x, y] input vectors; repeat the epoch
# twice to show the weights converging
pc.train(np.array([1.0, x[0], y_test[0]]), training_data[0])
pc
for i in xrange(4):
    pc.train(np.array([1.0, x[i], y_test[i]]), training_data[i])
pc
for i in xrange(4):
    pc.train(np.array([1.0, x[i], y_test[i]]), training_data[i])
pc
"""
Explanation: The training data
Let's assume that we want to use our perceptron to check if a pair (x,y) is above a line. Let's define the line as
f(x) = -2*x+10
End of explanation
"""
|
atulsingh0/MachineLearning | python_DC/Learning_pandas_DataFrame_#2.ipynb | gpl-3.0 | state = ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada']
year = [2000, 2001, 2002, 2001, 2002]
pop = [1.5, 1.7, 3.6, 2.4, 2.9]
print(type(state), type(year), type(pop))
# creating dataframe
df = pd.DataFrame({'state':state, 'year':year, 'pop':pop})
print(df.info())
print(df)
sdata = {'state':state, 'year':year, 'pop':pop}
print(sdata,"\n",type(sdata))
df = pd.DataFrame(sdata, columns=['pop1', 'state1', 'year1']) # we can not rename columns like this, but create column names
# if doesn't exists
print(df)
df = pd.DataFrame(sdata, columns=['pop1', 'state', 'year']) # this will pick those columns from sdata which matched
print(df)
df = pd.DataFrame(sdata)
print(df.columns)
# renaming columns and index
df.columns = ['pop1', 'state1', 'year1']
df.index = ['one', 'two', 'three', 'four', 'five']
print(df)
# stats about dataframe
print(df.index, "\n", df.shape, "\n", df.columns)
df['pop1'] = 1.5
print(df)
df['pop1'] = range(5)
print(df)
# can access the data as
print(df['state1'])
print(df.state1)
# for deleting any columns
del df['pop1']
print(df)
# transpose the dataframe
dft = df.T
print(dft)
# using columns as an index
df.index = df['year1']
del df['year1']
print(df)
df.columns.name, df.index.name
df.columns
# printing values
df.values
"""
Explanation: Possible data inputs to DataFrame constructor
2D ndarray A matrix of data, passing optional row and column labels
dict of arrays, lists, or tuples Each sequence becomes a column in the DataFrame. All sequences must be the same length.
NumPy structured/record array Treated as the “dict of arrays” case
dict of Series Each value becomes a column. Indexes from each Series are unioned together to form the result’s row index if no explicit index is passed.
dict of dicts Each inner dict becomes a column. Keys are unioned to form the row index as in the “dict of Series” case.
list of dicts or Series Each item becomes a row in the DataFrame. Union of dict keys or Series indexes become the DataFrame’s column labels
List of lists or tuples Treated as the “2D ndarray” case
Another DataFrame The DataFrame’s indexes are used unless different ones are passed
NumPy MaskedArray Like the “2D ndarray” case except masked values become NA/missing in the DataFrame result
End of explanation
"""
# Index objects of Series and DataFrames are IMMUTABLE:
# element assignment raises, as the commented line below demonstrates
df.index
#df.index[2]=2009 # this will throw a error
"""
Explanation: Index methods and properties
append Concatenate with additional Index objects, producing a new Index
diff Compute set difference as an Index
intersection Compute set intersection
union Compute set union
isin Compute boolean array indicating whether each value is contained in the passed collection
delete Compute new Index with element at index i deleted
drop Compute new index by deleting passed values
insert Compute new Index by inserting element at index i
is_monotonic Returns True if each element is greater than or equal to the previous element
is_unique Returns True if the Index has no duplicate values
unique Compute the array of unique values in the Index
End of explanation
"""
print(df)
df.index
# df2 = df.reindex([2000, 2001, 2002, 2001, 2002, 2009])
# this will throw an value error, as index should be unique
frame = pd.DataFrame(np.arange(9).reshape((3, 3)), index=['a', 'c', 'd'],columns=['Ohio', 'Texas', 'California'])
print(frame)
# missing labels ('b') are introduced as NaN rows
frame2 = frame.reindex(['a', 'b', 'c', 'd'])
print(frame2)
# likewise let's revert the df
df['year'] = df.index
df.index = [0,1,2,3,4]
print(df)
# now we can reindex this df
df2 = df.reindex([1,2,3,4,5,6,7]) # again, reindex will first look into the df and then create the new
print(df2) # as here, it will keep 1,2,3,4 and drop 0 and create new 5,6,7 index
# better and faster way to do that is -
# NOTE(review): .ix was deprecated in pandas 0.20 and removed later; use .loc/.iloc in modern code
df3=df2.ix[[1,2,3,4,6]]
print(df3)
# CAN ALter the columns as well
new_columns = ['state1', 'year', 'population']
df4 = df3.ix[[1,2,3,4,6], new_columns]
print(df4)
df4.columns
# renaming columns
df4.columns = ['state', 'year', 'pop']
print(df4)
# dropping index or columns
df5=df4.drop([3])
print(df5)
df5 = df5.drop(['pop'], axis=1)
print(df5)
"""
Explanation: Reindex Series or DataFrme
index New sequence to use as index. Can be Index instance or any other sequence-like Python data structure. An Index will be used exactly as is without any copying
method Interpolation (fill) method, see Table 5-4 for options.
fill_value Substitute value to use when introducing missing data by reindexing
limit When forward- or backfilling, maximum size gap to fill
level Match simple Index on level of MultiIndex, otherwise select subset of
copy Do not copy underlying data if new index is equivalent to old index. True by default (i.e. always copy data).
End of explanation
"""
df4
# boolean-mask row selection
df4[df4['state']=='Ohio']
# column subset selection
df4[['state', 'year']]
# NOTE(review): chained assignment — prefer df4.loc[df4['state']=='Ohio', 'year'] = 2004,
# which is guaranteed to modify df4 rather than a possible copy
df4['year'][df4['state']=='Ohio']=2004
df4
# ix enables you to select a subset of the rows and columns from a DataFrame with NumPy like notation plus axis labels
df4.ix[[1,2],['state']]
df4.ix[[3,6],[0,2]]
df4.ix[df4['year']<2003,[0,2]]
"""
Explanation: Indexing, selection, and filtering
End of explanation
"""
s1 = pd.Series([7.3, -2.5, 3.4, 1.5], index=['a', 'c', 'd', 'e'])
s2 = pd.Series([-2.1, 3.6, -1.5, 4, 3.1], index=['a', 'c', 'e', 'f', 'g'])
s1 + s2 #assigned NaN for those index which is not found in another series
# DataFrame addition aligns on BOTH row and column labels
df1 = pd.DataFrame(np.arange(9.).reshape((3, 3)), columns=list('bcd'), index=['Ohio', 'Texas', 'Colorado'])
df2 = pd.DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
df1 + df2
"""
Explanation: Indexing options with DataFrame
obj[val] Select single column or sequence of columns from the DataFrame. Special case con-veniences: boolean array (filter rows), slice (slice rows), or boolean DataFrame (set values based on some criterion).
obj.ix[val] Selects single row of subset of rows from the DataFrame.
obj.ix[:, val] Selects single column of subset of columns.
obj.ix[val1, val2] Select both rows and columns.
reindex method Conform one or more axes to new indexes.
xs method Select single row or column as a Series by label.
icol, irow methods Select single column or row, respectively, as a Series by integer location.
get_value, set_value methods Select single value by row and column label.
Arithmetic and data alignment
End of explanation
"""
# fill_value substitutes 0 for labels present in only one operand
df1.add(df2, fill_value=0)
# when reindexing a Series or DataFrame, you can also specify a different fill value
df1.reindex(columns=df2.columns, fill_value=0)
"""
Explanation: Arithmetic methods with fill values
End of explanation
"""
frame = pd.DataFrame(np.arange(12.).reshape((4, 3)), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
frame
series = frame.ix[0] # picking first row
series
frame * series
# By default, arithmetic between DataFrame and Series matches the index of the Series on the DataFrame's columns,
# broadcasting down the rows:
frame - series
# labels missing on either side ('b' vs 'f') become NaN columns
series2 = pd.Series(range(3), index=['b', 'e', 'f'])
frame * series2
"""
Explanation: Flexible arithmetic methods
add Method for addition (+)
sub Method for subtraction (-)
div Method for division (/)
mul Method for multiplication (*)
Operations between DataFrame and Series
End of explanation
"""
# column-wise (axis=0) / row-wise (axis=1) range via apply
f = lambda x : x.max() - x.min()
frame = pd.DataFrame(np.random.randn(4, 3), columns=list('bde'), index=['Utah', 'Ohio', 'Texas', 'Oregon'])
print(frame)
frame.apply(f)
frame.apply(f, axis=1)
# defining a func
def f(x):
    """Return max and min of the Series x as a labelled Series."""
    return pd.Series([x.max(), x.min()], index=['max', 'min'])
frame.apply(f)
frame.apply(f, axis=1)
# NOTE(review): 'format' shadows the builtin of the same name
format = lambda x: '%.2f' % x
frame.applymap(format)
"""
Explanation: Function application and mapping
End of explanation
"""
obj = pd.Series(range(4), index=['d', 'a', 'b', 'c'])
obj
# sorting on index
obj.sort_index()
frame = pd.DataFrame(np.arange(8).reshape((2, 4)), index=['three', 'one'], columns=['d', 'a', 'b', 'c'])
frame
frame.sort_index()
# axis=1 sorts the columns instead of the rows
frame.sort_index(axis=1)
frame.sort_index(axis=1).sort_index()
frame.sort_index(axis=1, ascending=False)
# To sort a Series by its values, use its order method
sr = pd.Series(['2', np.nan, '-3', '5'])
sr
# sorting by value
sr.sort_values()
frame = pd.DataFrame({'b': [4, 7, -3, 2], 'a': [0, 1, 0, 1]})
frame
frame.sort_values(by='b')
# multi-key sort: primary key 'a', secondary key 'b'
frame.sort_values(by=['a', 'b'])
# ranking # Explore more
obj = pd.Series([7, -5, 7, 4, 2, 0, 4])
obj
obj.rank()
"""
Explanation: Sorting and ranking
End of explanation
"""
# Indexes may contain duplicate labels; selecting such a label returns all matches.
obj = pd.Series(range(5), index=['a', 'a', 'b', 'b', 'c'])
obj
obj.index.unique() # get unique index
obj.index.is_unique # check if index are unique
df = pd.DataFrame(np.random.randn(4, 3), index=['a', 'a', 'b', 'b'])
df
df.index.is_unique
df.ix['a'] # ix is used to select rows by index
df.ix[0]
"""
Explanation: Axis indexes with duplicate values
End of explanation
"""
|
JAmarel/Phys202 | Integration/IntegrationEx02.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import integrate
"""
Explanation: Integration Exercise 2
Imports
End of explanation
"""
def integrand(x, a):
    """Integrand of the example integral: 1 / (x^2 + a^2)."""
    return 1.0/(a**2 + x**2)

def integral_approx(a):
    """Numerically integrate the integrand over [0, inf) with scipy's quad."""
    # Use the args keyword argument to feed extra arguments to your integrand
    value, _error = integrate.quad(integrand, 0, np.inf, args=(a,))
    return value

def integral_exact(a):
    """Closed form of the integral: pi / (2 a)."""
    return 0.5*np.pi/a

print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True  # leave this cell to grade the above integral
"""
Explanation: Indefinite integrals
Here is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc.
Find five of these integrals and perform the following steps:
Typeset the integral using LateX in a Markdown cell.
Define an integrand function that computes the value of the integrand.
Define an integral_approx funciton that uses scipy.integrate.quad to peform the integral.
Define an integral_exact function that computes the exact value of the integral.
Call and print the return value of integral_approx and integral_exact for one set of parameters.
Here is an example to show what your solutions should look like:
Example
Here is the integral I am performing:
$$ I_1 = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$
End of explanation
"""
def integrand(x, a):
    """Integrand of I1: sqrt(a^2 - x^2), a quarter circle of radius a."""
    return np.sqrt(a**2 - x**2)

def integral_approx(a):
    """Numerically integrate sqrt(a^2 - x^2) over [0, a] with scipy's quad."""
    # Use the args keyword argument to feed extra arguments to your integrand
    I, e = integrate.quad(integrand, 0, a, args=(a,))
    return I

def integral_exact(a):
    """Closed form: pi * a^2 / 4.

    BUG FIX: previously returned 0.25*np.pi regardless of a, which is only
    correct for a == 1; the stated closed form is pi*a^2/4.
    """
    return 0.25*np.pi*a**2

print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True  # leave this cell to grade the above integral
"""
Explanation: Integral 1
$$ I_1 = \int_0^a {\sqrt{a^2-x^2} dx} = \frac{\pi a^2}{4} $$
End of explanation
"""
def integrand(x):
    """Integrand of I2: sin^2(x)."""
    return np.sin(x)**2

def integral_approx():
    """Numerical value of the integral of sin^2 over [0, pi/2]."""
    value, _error = integrate.quad(integrand, 0, np.pi/2)
    return value

def integral_exact():
    """Closed form: pi / 4."""
    return 0.25*np.pi

print("Numerical: ", integral_approx())
print("Exact : ", integral_exact())
assert True  # leave this cell to grade the above integral
"""
Explanation: Integral 2
$$ I_2 = \int_0^{\frac{\pi}{2}} {\sin^2{x}}{ } {dx} = \frac{\pi}{4} $$
End of explanation
"""
def integrand(x, a, b):
    """Integrand of I3: 1 / (a + b sin x)."""
    return 1.0 / (a + b*np.sin(x))

def integral_approx(a, b):
    """Numerically integrate over one full period [0, 2*pi]."""
    value, _error = integrate.quad(integrand, 0, 2*np.pi, args=(a, b))
    return value

def integral_exact(a, b):
    """Closed form: 2*pi / sqrt(a^2 - b^2); requires |a| > |b|."""
    return 2*np.pi/np.sqrt(a**2-b**2)

print("Numerical: ", integral_approx(10,0))
print("Exact : ", integral_exact(10,0))
assert True  # leave this cell to grade the above integral
"""
Explanation: Integral 3
$$ I_3 = \int_0^{2\pi} \frac{dx}{a+b\sin{x}} = {\frac{2\pi}{\sqrt{a^2-b^2}}} $$
End of explanation
"""
def integrand(x):
    """Integrand of I4: x / (e^x + 1)."""
    return x/(np.exp(x)+1)

def integral_approx():
    """Numerical integral over [0, inf)."""
    value, _error = integrate.quad(integrand, 0, np.inf)
    return value

def integral_exact():
    """Closed form: pi^2 / 12."""
    return (1/12)*np.pi**2

print("Numerical: ", integral_approx())
print("Exact : ", integral_exact())
assert True  # leave this cell to grade the above integral
"""
Explanation: Integral 4
$$ I_4 = \int_0^{\infty} \frac{x}{e^{x}+1} = {\frac{\pi^2}{12}} $$
End of explanation
"""
def integrand(x):
    """Integrand of I5: x / (e^x - 1)."""
    return x/(np.exp(x)-1)

def integral_approx():
    """Numerical integral over [0, inf); the x=0 endpoint is a removable singularity."""
    value, _error = integrate.quad(integrand, 0, np.inf)
    return value

def integral_exact():
    """Closed form: pi^2 / 6."""
    return (1/6)*np.pi**2

print("Numerical: ", integral_approx())
print("Exact : ", integral_exact())
assert True  # leave this cell to grade the above integral
"""
Explanation: Integral 5
$$ I_5 = \int_0^{\infty} \frac{x}{e^{x}-1} = {\frac{\pi^2}{6}} $$
End of explanation
"""
|
kowey/attelo | doc/tut_parser.ipynb | gpl-3.0 | from __future__ import print_function
from os import path as fp
from attelo.io import (load_multipack)
CORPUS_DIR = 'example-corpus'
PREFIX = fp.join(CORPUS_DIR, 'tiny')
# load the data into a multipack
mpack = load_multipack(PREFIX + '.edus',
PREFIX + '.pairings',
PREFIX + '.features.sparse',
PREFIX + '.features.sparse.vocab',
verbose=True)
"""
Explanation: Parsers
An attelo parser converts “documents” (here: EDUs with some metadata) into graphs (with EDUs as nodes and relation labels between them). In API terms, a parser is something that enriches datapacks, progressively adding or stripping away information until we get a full graph.
Parsers follow the scikit-learn estimator and transformer conventions, ie. with a fit function to learn some model from training data and a transform function to convert (in our case) datapacks to enriched datapacks.
Preliminaries
To begin our exploration of attelo parsers, let's load up a tiny multipack of sample data.
End of explanation
"""
# Hold out the first datapack for testing, keep the rest for training.
# NOTE(review): indexing dict.values()/keys() is Python 2 only — confirm
# this notebook targets py2, or wrap in list(...) for py3.
test_dpack = mpack.values()[0]
train_mpack = {k: mpack[k] for k in mpack.keys()[1:]}
print('multipack entries:', len(mpack))
print('train entries:', len(train_mpack))
"""
Explanation: We'll set aside one of the datapacks to test with, leaving the other two for training. We do this by hand for this simple example, but you may prefer to use the helper functions in attelo.fold when working with real data
End of explanation
"""
def print_results(dpack):
    """Print one line per EDU pair: index, ids, predicted and gold labels."""
    predictions = dpack.graph.prediction
    for idx, pair in enumerate(dpack.pairings):
        src, tgt = pair
        gold = dpack.get_label(dpack.target[idx])
        predicted = dpack.get_label(predictions[idx])
        print(idx, src.id, tgt.id, '\t|', predicted, '\twanted:', gold)
"""
Explanation: Trying a parser out 1 (attach)
Now that we have our training and test data, we can try feeding them to a simple parser. Before doing this, we'll take a quick detour to define a helper function to visualise our parse results.
End of explanation
"""
from attelo.decoding.baseline import (LastBaseline)
from attelo.learning import (SklearnAttachClassifier)
from attelo.parser.attach import (AttachPipeline)
from sklearn.linear_model import (LogisticRegression)

# attachment pipeline = a learner (logistic regression) + a decoder (attach-to-last)
learner = SklearnAttachClassifier(LogisticRegression())
decoder = LastBaseline()
parser1 = AttachPipeline(learner=learner,
                         decoder=decoder)

# train the parser
train_dpacks = train_mpack.values()
train_targets = [x.target for x in train_dpacks]
parser1.fit(train_dpacks, train_targets)

# now run on a test pack
dpack = parser1.transform(test_dpack)
print_results(dpack)
"""
Explanation: As for parsing, we'll start with the attachment pipeline. It combines a learner with a decoder
End of explanation
"""
from attelo.learning import (SklearnLabelClassifier)
from attelo.parser.label import (SimpleLabeller)
from sklearn.linear_model import (LogisticRegression)

# a labeller on its own labels every unknown edge — no UNRELATED edges here
learner = SklearnLabelClassifier(LogisticRegression())
parser2 = SimpleLabeller(learner=learner)

# train the parser
parser2.fit(train_dpacks, train_targets)

# now run on a test pack
dpack = parser2.transform(test_dpack)
print_results(dpack)
"""
Explanation: Trying a parser out 2 (label)
In the output above, our predictions for every edge are either __UNK__ or UNRELATED. The attachment pipeline only predicts if edges will be attached or not. What we need is to be able to predict their labels.
End of explanation
"""
from attelo.parser.pipeline import (Pipeline)

# combine attachment then labelling in sequence over the same datapack;
# this is actually attelo.parser.full.PostlabelPipeline
parser3 = Pipeline(steps=[('attach', parser1),
                          ('label', parser2)])
parser3.fit(train_dpacks, train_targets)
dpack = parser3.transform(test_dpack)
print_results(dpack)
"""
Explanation: That doesn't quite look right. Now we have labels, but none of our edges are UNRELATED. But this is because the simple labeller will apply labels on all unknown edges. What we need is to be able to combine the attach and label parsers in a parsing pipeline
Parsing pipeline
A parsing pipeline is a parser that combines other parsers in sequence. For purposes of learning/fitting, the individual steps can be thought of as being run in parallel (in practice, they are fitted in sequnce). For transforming though, they are run in order. A pipeline thus refines a datapack over the course of multiple parsers.
End of explanation
"""
|
muratcemkose/cy-rest-python | cytoscape-js/CytoscapeJs_and_igraph.ipynb | mit | from py2cytoscape.cytoscapejs import viewer as cyjs
from py2cytoscape import util
import json
import igraph as ig
"""
Explanation: Network analysis and visualization with py2cytoscape and igraph
What is Cytoscape?
- An open source platform for graph analysis and visualization
- Free! (for both academic and commercial use)
- De-facto standard platform in biotech community (11k+ publications)
Cytoscape Ecosystem
Cytoscape - A Java desktop application with plugin support
Cytoscape App Store - Central repository of all applications build on top of Cytoscape API
Cytoscape.js - JavaScript library for graph data visualization
End of explanation
"""
import matplotlib.pyplot as plt
# generate a 200-node Barabasi-Albert graph with igraph, lay it out, and
# render it through Cytoscape.js
g = ig.Graph.Barabasi(200)
positions = g.layout_kamada_kawai()
g_cyjs = util.from_igraph(g, layout=positions, scale=120)
cyjs.render(g_cyjs, style='default2')
%matplotlib inline
import networkx as nx
from py2cytoscape.util.util_networkx import *
def nx_layout(graph):
    """Compute a graphviz 'dot' layout and attach a Cytoscape.js-style
    position dict {'x': ..., 'y': ...} to every node of ``graph``.

    Assumes node ids are the integers 0..n-1 (as produced by the nx graph
    generators), since positions are indexed by node id.
    """
    pos = nx.graphviz_layout(graph, prog='dot')
    # BUG FIX: materialise as a list — on Python 3, map() returns a lazy
    # iterator which cannot be indexed by pos2[node_idx] below
    pos2 = [{'x': position[0], 'y': position[1]} for position in pos.values()]
    for node_idx in graph.nodes():
        graph.node[node_idx]['position'] = pos2[node_idx]
# scale-free graph demo: lay out with graphviz, convert, and render
sf100 = nx.barabasi_albert_graph(200, 4, 0.8)
nx_layout(sf100)
g3 = from_networkx(sf100)
cyjs.render(g3, style='default2')
# load a real protein-protein interaction network from disk
yeast_network = json.load(open('../basic/sample_data/yeast.json'))
yeast_nx = to_networkx(yeast_network)
yeast_nx.nodes()
yeast_nx.nodes()
def nx_layout2(graph):
    """Like nx_layout, but works for arbitrary (non-integer) node ids:
    positions are matched to graph.nodes() by enumeration order."""
    pos = nx.graphviz_layout(graph, prog='dot')
    # BUG FIX: materialise as a list — on Python 3, map() returns a lazy
    # iterator which cannot be indexed by pos2[i] below
    pos2 = [{'x': position[0], 'y': position[1]} for position in pos.values()]
    nodes = graph.nodes()
    for i, nodeid in enumerate(nodes):
        graph.node[nodeid]['position'] = pos2[i]
# lay out the yeast network (string node ids, hence nx_layout2) and render it
nx_layout2(yeast_nx)
g4 = from_networkx(yeast_nx)
cyjs.render(g4, style='Directed')
"""
Explanation: Quick Introduction: Graph libraries and py2cytoscape
Visualize networks generated by igraph
End of explanation
"""
# NOTE(review): 'cy' is defined in an earlier (hidden) cell — presumably a
# py2cytoscape client object; confirm against the full notebook.
networks = {}
layouts = cy.get_layouts()
# Load local network files
yeast_network = json.load(open('yeast2.cyjs'))
networks['Yeast PPI Network'] = yeast_network
kegg_pathway = json.load(open('kegg_tca.cyjs'))
networks['KEGG: TCA Cycle Human'] = kegg_pathway
# Load Visual Style file: build a title -> style-settings mapping
vs_collection = json.load(open('kegg_style.json'))
styles = {}
for style in vs_collection:
    style_settings = style['style']
    title = style['title']
    styles[title] = style_settings
print(styles['default'])
"""
Explanation: Static image
Interaction is hard (or no interaction)
Limited visual properties
1. Visualize network data created in Cytoscape desktop
Load network JSON files generated in Cytoscape desktop
End of explanation
"""
def render_graph(Network, Style, Layout):
    """Render one network with the chosen style and layout.

    Parameter names deliberately match the keyword labels supplied by the
    interact() calls below.
    """
    cy.render(Network, Style, Layout)
# Interactive dropdowns.  NOTE(review): DropdownWidget / values= is the
# legacy IPython 2.x widget API (modern ipywidgets uses Dropdown / options=)
# -- confirm the notebook's target IPython version.
interact(render_graph,
Network = DropdownWidget(values=networks, value=yeast_network),
Style = DropdownWidget(values=styles, value=styles['default']),
Layout = DropdownWidget(values=layouts, value=layouts['Preset'])
)
"""
Explanation: Visualization with Cytoscape.js module
End of explanation
"""
# Same widget set, defaulting to the KEGG pathway with its KEGG style and
# the 'Preset' (pre-computed coordinate) layout.
interact(render_graph,
Network = DropdownWidget(values=networks, value=kegg_pathway),
Style = DropdownWidget(values=styles, value=styles['KEGG Style']),
Layout = DropdownWidget(values=layouts, value=layouts['Preset'])
)
# Dump the raw Cytoscape.js JSON for inspection.
print(json.dumps(kegg_pathway, indent=2))
"""
Explanation: Render KEGG Pathway as an interactive visualization
End of explanation
"""
# Barabasi-Albert graph: 100 nodes, 3 edges attached per new node.
ba1 = nx.barabasi_albert_graph(100, 3)
clustering = nx.clustering(ba1)
# dict(...) normalises the result across networkx versions: nx >= 2 returns
# a DegreeView, which has no .values() method.
degrees = dict(ba1.degree())
# networkx 2.x swapped set_node_attributes to (G, values, name); try the
# modern order first and fall back to the legacy (G, name, values) order.
try:
    nx.set_node_attributes(ba1, degrees, 'degree')
    nx.set_node_attributes(ba1, clustering, 'clustering')
except (TypeError, AttributeError):
    nx.set_node_attributes(ba1, 'degree', degrees)
    nx.set_node_attributes(ba1, 'clustering', clustering)
# Value ranges used below for the Cytoscape.js mapData visual mappings.
min_degree = min(degrees.values())
max_degree = max(degrees.values())
min_cl = min(clustering.values())
max_cl = max(clustering.values())
"""
Explanation: 2. Generate graph data with NetworkX
Use BA model to generate graph and calculate some metrics
End of explanation
"""
# Convert to Cytoscape.js compatible format
ba1_cyjs = cy.from_networkx(ba1)
# Create a custom Visual Style programmatically.
# NOTE: new_directed aliases (does not copy) styles['Directed'], so the
# selectors appended below also appear when styles['Directed'] is passed to
# interact() at the bottom -- the code relies on that aliasing.
new_directed = styles['Directed']
new_directed.append({
    'selector':'node',
    'css':{
        # Map node size and label size linearly over the observed degree range.
        'width': 'mapData(degree,' + str(min_degree) + ',' + str(max_degree) + ', 20, 80)',
        'height': 'mapData(degree,' + str(min_degree) + ',' + str(max_degree) + ', 20, 80)',
        'font-size': 'mapData(degree,' + str(min_degree) + ',' + str(max_degree) + ', 10, 50)',
        # The original dict literal listed 'border-width' twice (1 then 0);
        # only the last entry survives, so keep the effective value once.
        'border-width': 0,
        'opacity': 0.9,
        'color': '#222222',
        # Colour nodes white -> red by clustering coefficient.
        'background-color': 'mapData(clustering,' + str(min_cl) + ',' + str(max_cl) + ', white, red)'
    }
})
new_directed.append({
    'selector':'edge',
    'css':{
        'width':0.5,
        'opacity': 0.5,
        'line-color': '#aaaaaa'
    }
})
networks['BA Graph 1'] = ba1_cyjs
interact(render_graph,
Network = DropdownWidget(values=networks, value=ba1_cyjs),
Style = DropdownWidget(values=styles, value=styles['Directed']),
Layout = DropdownWidget(values=layouts, value=layouts['Breadthfirst'])
)
"""
Explanation: Customize the visualization based on network statistics (Visual Mapping)
End of explanation
"""
# Generate with igraph
generated1 = Graph.Watts_Strogatz(1, 600, 4, 0.15)
layout = generated1.layout("lgl")
# Community detection
communities = generated1.community_label_propagation()
com_count = len(communities)
print('Communities = ' + str(com_count))
rainbow = RainbowPalette(n=com_count)
generated1.vs['community'] = communities.membership
print(generated1.vs[100]['community'])
# Assign one colour per community.  igraph palettes yield float components
# in [0, 1].  The original emitted an 'rgba(...)' string with only three
# components, which is malformed CSS -- emit a valid 'rgb(...)' instead.
for node in generated1.vs:
    r, g, b = rainbow[node['community']][:3]
    node['color'] = 'rgb(' + str(r * 255) + ',' + str(g * 255) + ',' + str(b * 255) + ')'
print(generated1.vs.attributes())
generated1_cyjs = cy.from_igraph(generated1, layout, 20)
# Override the existing style
# NOTE: new_style aliases styles['default'] (no copy), so these selectors
# are appended to the shared style object used elsewhere in the notebook.
new_style = styles['default']
new_style.append({
    'selector':'node',
    'css':{
        'width':30,
        'height':30,
        'border-width': 0,
        # Label each node with its community id.
        'content': 'data(community)',
        'font-size': 22,
        # Per-node colour assigned during community detection above.
        'background-color': 'data(color)'
    }
})
new_style.append({
    'selector':'edge',
    'css':{
        'width':2,
        'opacity': 0.4
    }
})
networks['WS200'] = generated1_cyjs
interact(render_graph,
Network = DropdownWidget(values=networks, value=generated1_cyjs),
Style = DropdownWidget(values=styles, value=styles['default']),
Layout = DropdownWidget(values=layouts, value=layouts['Preset'])
)
# Pick two endpoints and enumerate every shortest path between them.
start = generated1.vs[1]
end = generated1.vs[156]
paths = generated1.get_all_shortest_paths(start, end)
# NOTE(review): communities were already computed above; label propagation
# is stochastic, so this recomputation may assign different memberships.
communities = generated1.community_label_propagation()
print(len(communities))
generated1.vs['community'] = communities.membership
print(generated1.vs['community'])
# Flatten the vertex paths into a list of (source, target) edge tuples.
edges = []
for path in paths:
    for index, v in enumerate(path):
        if index < len(path)-1:
            edge = (v, path[index+1])
            edges.append(edge)
"""
Explanation: 3. Generate & layout data with igraph, and visualize it in Cytoscape.js
End of explanation
"""
|
dadavidson/Python_Lab | Complete-Python-Bootcamp/Print Formatting.ipynb | mit | print 'This is a string'
"""
Explanation: Print Formatting
In this lecture we will briefly cover the various ways to format your print statements. As you code more and more, you will probably want to have print statements that can take in a variable into a printed string statement.
The most basic example of a print statement is:
End of explanation
"""
s = 'STRING'
# %s interpolates any object into the string via str(); Python 3 print call.
print('Place another string with a mod and s: %s' % (s))
"""
Explanation: Strings
You can use the %s to format strings into your print statements.
End of explanation
"""
# %n1.n2f: n1 is the minimum field width, n2 the number of decimal places.
# Converted to Python 3 print calls; formatted output is unchanged.
print('Floating point numbers: %1.2f' % (13.144))
print('Floating point numbers: %1.0f' % (13.144))
print('Floating point numbers: %1.5f' % (13.144))
print('Floating point numbers: %10.2f' % (13.144))
print('Floating point numbers: %25.2f' % (13.144))
"""
Explanation: Floating Point Numbers
Floating point numbers use the format %n1.n2f where the n1 is the total minimum number of digits the string should contain (these may be filled with whitespace if the entire number does not have this many digits. The n2 placeholder stands for how many numbers to show past the decimal point. Lets see some examples:
End of explanation
"""
# %s converts via str(), %r via repr() (note the quotes repr adds to strings).
print('Here is a number: %s. Here is a string: %s' % (123.1, 'hi'))
print('Here is a number: %r. Here is a string: %r' % (123.1, 'hi'))
"""
Explanation: Conversion Format methods.
It should be noted that two methods %s and %r actually convert any python object to a string using two separate methods: str() and repr(). We will learn more about these functions later on in the course, but you should note you can actually pass almost any Python object with these two methods and it will work:
End of explanation
"""
# A tuple after % supplies multiple conversions; Python 3 print call.
print('First: %s, Second: %1.2f, Third: %r' % ('hi!', 3.14, 22))
"""
Explanation: Multiple Formatting
Pass a tuple to the modulo symbol to place multiple formats in your print statements:
End of explanation
"""
# str.format() with named placeholders; Python 3 print calls.
print('This is a string with an {p}'.format(p='insert'))
# Multiple times:
print('One: {p}, Two: {p}, Three: {p}'.format(p='Hi!'))
# Several Objects:
print('Object 1: {a}, Object 2: {b}, Object 3: {c}'.format(a=1, b='two', c=12.3))
"""
Explanation: Using the string .format() method
The best way to format objects into your strings for print statements is using the format method. The syntax is:
'String here {var1} then also {var2}'.format(var1='something1',var2='something2')
Lets see some examples:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/nerc/cmip6/models/ukesm1-0-mmh/ocnbgchem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'ukesm1-0-mmh', 'ocnbgchem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: NERC
Source ID: UKESM1-0-MMH
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:27
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
"""
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnotic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Decribe transport scheme if different than that of ocean model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
"""
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from explicit sediment model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
"""
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
"""
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particules
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
"""
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
"""
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation
"""
|
hetaodie/hetaodie.github.io | assets/media/uda-ml/deep/azjc/卷积神经网络的例子/dog/dog_app_zh.ipynb | mit | from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path):
    """Return (file_paths, one_hot_targets) for the image folder at ``path``.

    Targets are one-hot encoded over the 133 dog-breed classes.
    """
    dataset = load_files(path)
    file_paths = np.array(dataset['filenames'])
    targets = np_utils.to_categorical(np.array(dataset['target']), 133)
    return file_paths, targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('/data/dog_images/train')
valid_files, valid_targets = load_dataset('/data/dog_images/valid')
test_files, test_targets = load_dataset('/data/dog_images/test')
# load list of dog names
dog_names = [item[20:-1] for item in sorted(glob("/data/dog_images/train/*/"))]
# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
"""
Explanation: 卷积神经网络(Convolutional Neural Network, CNN)
项目:实现一个狗品种识别算法App
在这个notebook文件中,有些模板代码已经提供给你,但你还需要实现更多的功能来完成这个项目。除非有明确要求,你无须修改任何已给出的代码。以'(练习)'开始的标题表示接下来的代码部分中有你需要实现的功能。这些部分都配有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示。
除了实现代码外,你还需要回答一些与项目及代码相关的问题。每个需要回答的问题都会以 '问题 X' 标记。请仔细阅读每个问题,并且在问题后的 '回答' 部分写出完整的答案。我们将根据 你对问题的回答 和 撰写代码实现的功能 来对你提交的项目进行评分。
提示:Code 和 Markdown 区域可通过 Shift + Enter 快捷键运行。此外,Markdown可以通过双击进入编辑模式。
项目中显示为_选做_的部分可以帮助你的项目脱颖而出,而不是仅仅达到通过的最低要求。如果你决定追求更高的挑战,请在此 notebook 中完成_选做_部分的代码。
让我们开始吧
在这个notebook中,你将迈出第一步,来开发可以作为移动端或 Web应用程序一部分的算法。在这个项目的最后,你的程序将能够把用户提供的任何一个图像作为输入。如果可以从图像中检测到一只狗,它会输出对狗品种的预测。如果图像中是一个人脸,它会预测一个与其最相似的狗的种类。下面这张图展示了完成项目后可能的输出结果。(……实际上我们希望每个学生的输出结果不相同!)
在现实世界中,你需要拼凑一系列的模型来完成不同的任务;举个例子,用来预测狗种类的算法会与预测人类的算法不同。在做项目的过程中,你可能会遇到不少失败的预测,因为并不存在完美的算法和模型。你最终提交的不完美的解决方案也一定会给你带来一个有趣的学习经验!
项目内容
我们将这个notebook分为不同的步骤,你可以使用下面的链接来浏览此notebook。
Step 0: 导入数据集
Step 1: 检测人脸
Step 2: 检测狗狗
Step 3: 从头创建一个CNN来分类狗品种
Step 4: 使用一个CNN来区分狗的品种(使用迁移学习)
Step 5: 建立一个CNN来分类狗的品种(使用迁移学习)
Step 6: 完成你的算法
Step 7: 测试你的算法
在该项目中包含了如下的问题:
问题 1
问题 2
问题 3
问题 4
问题 5
问题 6
问题 7
问题 8
问题 9
问题 10
问题 11
<a id='step0'></a>
步骤 0: 导入数据集
导入狗数据集
在下方的代码单元(cell)中,我们导入了一个狗图像的数据集。我们使用 scikit-learn 库中的 load_files 函数来获取一些变量:
- train_files, valid_files, test_files - 包含图像的文件路径的numpy数组
- train_targets, valid_targets, test_targets - 包含独热编码分类标签的numpy数组
- dog_names - 由字符串构成的与标签相对应的狗的种类
End of explanation
"""
import random
random.seed(8675309)

# Load the file names of the LFW human-face dataset, then shuffle them so
# that the "short" evaluation subsets taken later are a random sample.
human_files = np.array(glob("/data/lfw/*/*"))
random.shuffle(human_files)

# Print how many images are in the dataset.
print('There are %d total human images.' % len(human_files))
"""
Explanation: 导入人脸数据集
在下方的代码单元中,我们导入人脸图像数据集,文件所在路径存储在名为 human_files 的 numpy 数组。
End of explanation
"""
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# 提取预训练的人脸检测模型
# Load the pre-trained Haar-cascade face detection model shipped with OpenCV.
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# Read a color image (OpenCV loads channels in BGR order).
img = cv2.imread(human_files[3])

# Convert the BGR image to grayscale, as required by the cascade classifier.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Find faces in the image; each detection is (x, y, w, h) in pixels.
faces = face_cascade.detectMultiScale(gray)

# Print the number of faces detected in the image.
print('Number of faces detected:', len(faces))

# Draw a bounding box around every detected face.
for (x,y,w,h) in faces:
    # Draw the rectangle directly on the (BGR) image.
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# Convert BGR to RGB so matplotlib displays the colors correctly.
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Show the image with the detection boxes.
plt.imshow(cv_rgb)
plt.show()
"""
Explanation: <a id='step1'></a>
步骤1:检测人脸
我们将使用 OpenCV 中的 Haar feature-based cascade classifiers 来检测图像中的人脸。OpenCV 提供了很多预训练的人脸检测模型,它们以XML文件保存在 github。我们已经下载了其中一个检测模型,并且把它存储在 haarcascades 的目录中。
在如下代码单元中,我们将演示如何使用这个检测模型在样本图像中找到人脸。
End of explanation
"""
# 如果img_path路径表示的图像检测到了脸,返回"True"
def face_detector(img_path):
    """Return True if the Haar cascade finds at least one face in the image."""
    grayscale = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
"""
Explanation: 在使用任何一个检测模型之前,将图像转换为灰度图是常用过程。detectMultiScale 函数使用储存在 face_cascade 中的的数据,对输入的灰度图像进行分类。
在上方的代码中,faces 以 numpy 数组的形式,保存了识别到的面部信息。它其中每一行表示一个被检测到的脸,该数据包括如下四个信息:前两个元素 x、y 代表识别框左上角的 x 和 y 坐标(参照上图,注意 y 坐标的方向和我们默认的方向不同);后两个元素代表识别框在 x 和 y 轴两个方向延伸的长度 w 和 d。
写一个人脸识别器
我们可以将这个程序封装为一个函数。该函数的输入为人脸图像的路径,当图像中包含人脸时,该函数返回 True,反之返回 False。该函数定义如下所示。
End of explanation
"""
human_files_short = human_files[:100]
dog_files_short = train_files[:100]
## 请不要修改上方代码
## TODO: 基于human_files_short和dog_files_short
## 中的图像测试face_detector的表现
# Count how often the face detector fires on each 100-image sample
# (sum of booleans == number of True results).
human_files_short_detect = sum(face_detector(path) for path in human_files_short)
dog_files_short_detect = sum(face_detector(path) for path in dog_files_short)

print("The percentage of detecting human faces in human files is:", human_files_short_detect/human_files_short.size)
print("The percentage of detecting human faces in dog files is:", dog_files_short_detect/dog_files_short.size)
"""
Explanation: 【练习】 评估人脸检测模型
<a id='question1'></a>
问题 1:
在下方的代码块中,使用 face_detector 函数,计算:
human_files 的前100张图像中,能够检测到人脸的图像占比多少?
dog_files 的前100张图像中,能够检测到人脸的图像占比多少?
理想情况下,人图像中检测到人脸的概率应当为100%,而狗图像中检测到人脸的概率应该为0%。你会发现我们的算法并非完美,但结果仍然是可以接受的。我们从每个数据集中提取前100个图像的文件路径,并将它们存储在human_files_short和dog_files_short中。
End of explanation
"""
## (选做) TODO: 报告另一个面部检测算法在LFW数据集上的表现
### 你可以随意使用所需的代码单元数
"""
Explanation: <a id='question2'></a>
问题 2:
就算法而言,该算法成功与否的关键在于,用户能否提供含有清晰面部特征的人脸图像。
那么你认为,这样的要求在实际使用中对用户合理吗?如果你觉得不合理,你能否想到一个方法,即使图像中并没有清晰的面部特征,也能够检测到人脸?
回答:
不太合理,因为图片的来源不同,不能保证所有的图片的脸部都是清晰的。 如果脸部特征不太清晰,应对图片进行前期的预处理。
<a id='Selection1'></a>
选做:
我们建议在你的算法中使用opencv的人脸检测模型去检测人类图像,不过你可以自由地探索其他的方法,尤其是尝试使用深度学习来解决它:)。请用下方的代码单元来设计和测试你的面部监测算法。如果你决定完成这个_选做_任务,你需要报告算法在每一个数据集上的表现。
End of explanation
"""
from keras.applications.resnet50 import ResNet50
# 定义ResNet50模型
ResNet50_model = ResNet50(weights='imagenet')
"""
Explanation: <a id='step2'></a>
步骤 2: 检测狗狗
在这个部分中,我们使用预训练的 ResNet-50 模型去检测图像中的狗。下方的第一行代码就是下载了 ResNet-50 模型的网络结构参数,以及基于 ImageNet 数据集的预训练权重。
ImageNet 这目前一个非常流行的数据集,常被用来测试图像分类等计算机视觉任务相关的算法。它包含超过一千万个 URL,每一个都链接到 1000 categories 中所对应的一个物体的图像。任给输入一个图像,该 ResNet-50 模型会返回一个对图像中物体的预测结果。
End of explanation
"""
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load the image at ``img_path`` as a (1, 224, 224, 3) tensor.

    The leading singleton axis makes the result directly usable as a
    one-sample batch for a Keras CNN.
    """
    # Load the RGB image with PIL, resized to 224x224.
    img = image.load_img(img_path, target_size=(224, 224))
    # Convert the PIL image to a 3-D tensor with shape (224, 224, 3).
    x = image.img_to_array(img)
    # Prepend a batch axis, yielding a 4-D tensor of shape (1, 224, 224, 3).
    return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
    """Stack the per-image 4-D tensors into one (n, 224, 224, 3) array."""
    return np.vstack([path_to_tensor(p) for p in tqdm(img_paths)])
"""
Explanation: 数据预处理
在使用 TensorFlow 作为后端的时候,在 Keras 中,CNN 的输入是一个4维数组(也被称作4维张量),它的各维度尺寸为 (nb_samples, rows, columns, channels)。其中 nb_samples 表示图像(或者样本)的总数,rows, columns, 和 channels 分别表示图像的行数、列数和通道数。
下方的 path_to_tensor 函数实现如下将彩色图像的字符串型的文件路径作为输入,返回一个4维张量,作为 Keras CNN 输入。因为我们的输入图像是彩色图像,因此它们具有三个通道( channels 为 3)。
该函数首先读取一张图像,然后将其缩放为 224×224 的图像。
随后,该图像被调整为具有4个维度的张量。
对于任一输入图像,最后返回的张量的维度是:(1, 224, 224, 3)。
paths_to_tensor 函数将图像路径的字符串组成的 numpy 数组作为输入,并返回一个4维张量,各维度尺寸为 (nb_samples, 224, 224, 3)。 在这里,nb_samples是提供的图像路径的数据中的样本数量或图像数量。你也可以将 nb_samples 理解为数据集中3维张量的个数(每个3维张量表示一个不同的图像。
End of explanation
"""
from keras.applications.resnet50 import preprocess_input, decode_predictions
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index that ResNet-50 predicts for the image."""
    # preprocess_input reorders channels to BGR and subtracts the ImageNet
    # per-channel means; argmax picks the most probable of the 1000 classes.
    img = preprocess_input(path_to_tensor(img_path))
    return np.argmax(ResNet50_model.predict(img))
"""
Explanation: 基于 ResNet-50 架构进行预测
对于通过上述步骤得到的四维张量,在把它们输入到 ResNet-50 网络、或 Keras 中其他类似的预训练模型之前,还需要进行一些额外的处理:
1. 首先,这些图像的通道顺序为 RGB,我们需要重排他们的通道顺序为 BGR。
2. 其次,预训练模型的输入都进行了额外的归一化过程。因此我们在这里也要对这些张量进行归一化,即对所有图像所有像素都减去像素均值 [103.939, 116.779, 123.68](以 RGB 模式表示,根据所有的 ImageNet 图像算出)。
导入的 preprocess_input 函数实现了这些功能。如果你对此很感兴趣,可以在 这里 查看 preprocess_input的代码。
在实现了图像处理的部分之后,我们就可以使用模型来进行预测。这一步通过 predict 方法来实现,它返回一个向量,向量的第 i 个元素表示该图像属于第 i 个 ImageNet 类别的概率。这通过如下的 ResNet50_predict_labels 函数实现。
通过对预测出的向量取用 argmax 函数(找到有最大概率值的下标序号),我们可以得到一个整数,即模型预测到的物体的类别。进而根据这个 清单,我们能够知道这具体是哪个品种的狗狗。
End of explanation
"""
def dog_detector(img_path):
    """Return True if ResNet-50 predicts an ImageNet dog class for the image.

    ImageNet class indices 151-268 (inclusive) are all dog breeds, so a
    prediction in that range means a dog was detected.
    """
    prediction = ResNet50_predict_labels(img_path)
    # Chained comparison instead of bitwise `&` on booleans: clearer and
    # short-circuiting, with the same truth value.
    return 151 <= prediction <= 268
"""
Explanation: 完成狗检测模型
在研究该 清单 的时候,你会注意到,狗类别对应的序号为151-268。因此,在检查预训练模型判断图像是否包含狗的时候,我们只需要检查如上的 ResNet50_predict_labels 函数是否返回一个介于151和268之间(包含区间端点)的值。
我们通过这些想法来完成下方的 dog_detector 函数,如果从图像中检测到狗就返回 True,否则返回 False。
End of explanation
"""
### TODO: 测试dog_detector函数在human_files_short和dog_files_short的表现
# Count how often the dog detector fires on each 100-image sample
# (sum of booleans == number of True results).
human_files_short_detect = sum(dog_detector(path) for path in human_files_short)
dog_files_short_detect = sum(dog_detector(path) for path in dog_files_short)

print("The percentage of detecting dogs in human files is:", human_files_short_detect/human_files_short.size)
print("The percentage of detecting dogs in dog files is:", dog_files_short_detect/dog_files_short.size)
"""
Explanation: 【作业】评估狗狗检测模型
<a id='question3'></a>
问题 3:
在下方的代码块中,使用 dog_detector 函数,计算:
human_files_short中图像检测到狗狗的百分比?
dog_files_short中图像检测到狗狗的百分比?
End of explanation
"""
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Keras data preprocessing: convert every image to a 4-D tensor and rescale
# pixel values from [0, 255] to [0, 1].
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
"""
Explanation: <a id='step3'></a>
步骤 3: 从头开始创建一个CNN来分类狗品种
现在我们已经实现了一个函数,能够在图像中识别人类及狗狗。但我们需要更进一步的方法,来对狗的类别进行识别。在这一步中,你需要实现一个卷积神经网络来对狗的品种进行分类。你需要__从头实现__你的卷积神经网络(在这一阶段,你还不能使用迁移学习),并且你需要达到超过1%的测试集准确率。在本项目的步骤五种,你还有机会使用迁移学习来实现一个准确率大大提高的模型。
在添加卷积层的时候,注意不要加上太多的(可训练的)层。更多的参数意味着更长的训练时间,也就是说你更可能需要一个 GPU 来加速训练过程。万幸的是,Keras 提供了能够轻松预测每次迭代(epoch)花费时间所需的函数。你可以据此推断你算法所需的训练时间。
值得注意的是,对狗的图像进行分类是一项极具挑战性的任务。因为即便是一个正常人,也很难区分布列塔尼犬和威尔士史宾格犬。
布列塔尼犬(Brittany) | 威尔士史宾格犬(Welsh Springer Spaniel)
- | -
<img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
不难发现其他的狗品种会有很小的类间差别(比如卷毛寻回犬和美国水猎犬)。
卷毛寻回犬(Curly-Coated Retriever) | 美国水猎犬(American Water Spaniel)
- | -
<img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
同样,拉布拉多犬(labradors)有黄色、棕色和黑色这三种。那么你设计的基于视觉的算法将不得不克服这种较高的类间差别,以达到能够将这些不同颜色的同类狗分到同一个品种中。
黄色拉布拉多犬(Yellow Labrador) | 棕色拉布拉多犬(Chocolate Labrador) | 黑色拉布拉多犬(Black Labrador)
- | -
<img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
我们也提到了随机分类将得到一个非常低的结果:不考虑品种略有失衡的影响,随机猜测到正确品种的概率是1/133,相对应的准确率是低于1%的。
请记住,在深度学习领域,实践远远高于理论。大量尝试不同的框架吧,相信你的直觉!当然,玩得开心!
数据预处理
通过对每张图像的像素值除以255,我们对图像实现了归一化处理。
End of explanation
"""
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
# A small CNN trained from scratch: three conv/pool/dropout stages followed
# by global average pooling and a 133-way softmax (one node per dog breed).
model = Sequential()

### TODO: define your network architecture
# Stage 1: 16 filters of size 2x2 over the 224x224x3 input; max pooling
# halves the spatial size; dropout regularizes.
model.add(Conv2D(filters=16, kernel_size=2, input_shape=(224, 224, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
# Stage 2: 32 filters.
model.add(Conv2D(filters=32, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
# Stage 3: 64 filters.
model.add(Conv2D(filters=64, kernel_size=2, activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.2))
# Collapse each feature map to one value, then classify over 133 breeds.
model.add(GlobalAveragePooling2D())
model.add(Dense(133, activation='softmax'))

model.summary()

## Compile the model.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
"""
Explanation: 【练习】模型架构
创建一个卷积神经网络来对狗品种进行分类。在你代码块的最后,执行 model.summary() 来输出你模型的总结信息。
我们已经帮你导入了一些所需的 Python 库,如有需要你可以自行导入。如果你在过程中遇到了困难,如下是给你的一点小提示——该模型能够在5个 epoch 内取得超过1%的测试准确率,并且能在CPU上很快地训练。
<a id='question4'></a>
问题 4:
在下方的代码块中尝试使用 Keras 搭建卷积网络的架构,并回答相关的问题。
你可以尝试自己搭建一个卷积网络的模型,那么你需要回答你搭建卷积网络的具体步骤(用了哪些层)以及为什么这样搭建。
你也可以根据上图提示的步骤搭建卷积网络,那么请说明为何如上的架构能够在该问题上取得很好的表现。
回答:
我选择根据上图提示搭建卷积神经网络。首先,搭建三层卷积层可以检测更高级的特征,以达到狗狗品种分类的目的。同时,两个卷积层之间的池化层有效降低了数据的复杂度,使得训练效率得到有效提升
End of explanation
"""
from keras.callbacks import ModelCheckpoint
### TODO: set the number of training epochs
epochs = 5

### Do not modify the code below.
# Checkpoint only the weights with the best (lowest) validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
                               verbose=1, save_best_only=True)

model.fit(train_tensors, train_targets,
          validation_data=(valid_tensors, valid_targets),
          epochs=epochs, batch_size=20, callbacks=[checkpointer], verbose=1)

## Load the weights that achieved the best validation loss.
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
"""
Explanation: 【练习】训练模型
<a id='question5'></a>
问题 5:
在下方代码单元训练模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
可选题:你也可以对训练集进行 数据增强,来优化模型的表现。
End of explanation
"""
# Predicted breed index for every test image (one forward pass per image).
dog_breed_predictions = [
    np.argmax(model.predict(np.expand_dims(tensor, axis=0)))
    for tensor in test_tensors]

# Report test accuracy as the mean of the per-image hit indicator.
test_accuracy = 100 * np.mean(
    np.array(dog_breed_predictions) == np.argmax(test_targets, axis=1))
print('Test accuracy: %.4f%%' % test_accuracy)
"""
Explanation: 测试模型
在狗图像的测试数据集上试用你的模型。确保测试准确率大于1%。
End of explanation
"""
# Load the pre-computed VGG-16 bottleneck features for all three splits.
bottleneck_features = np.load('/data/bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
"""
Explanation: <a id='step4'></a>
步骤 4: 使用一个CNN来区分狗的品种
使用 迁移学习(Transfer Learning)的方法,能帮助我们在不损失准确率的情况下大大减少训练时间。在以下步骤中,你可以尝试使用迁移学习来训练你自己的CNN。
得到从图像中提取的特征向量(Bottleneck Features)
End of explanation
"""
# Transfer-learning head on top of frozen VGG-16 bottleneck features:
# global average pooling + a 133-way softmax classifier.
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))

VGG16_model.summary()

## Compile the model.
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

## Train the model, checkpointing the weights with the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
                               verbose=1, save_best_only=True)

VGG16_model.fit(train_VGG16, train_targets,
          validation_data=(valid_VGG16, valid_targets),
          epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)

## Load the weights that achieved the best validation loss.
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
"""
Explanation: 模型架构
该模型使用预训练的 VGG-16 模型作为固定的图像特征提取器,其中 VGG-16 最后一层卷积层的输出被直接输入到我们的模型。我们只需要添加一个全局平均池化层以及一个全连接层,其中全连接层使用 softmax 激活函数,对每一个狗的种类都包含一个节点。
End of explanation
"""
# Predicted breed index for every test image's bottleneck feature vector.
VGG16_predictions = [
    np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0)))
    for feature in test_VGG16]

# Report test accuracy as the mean of the per-image hit indicator.
test_accuracy = 100 * np.mean(
    np.array(VGG16_predictions) == np.argmax(test_targets, axis=1))
print('Test accuracy: %.4f%%' % test_accuracy)
"""
Explanation: 测试模型
现在,我们可以测试此CNN在狗图像测试数据集中识别品种的效果如何。我们在下方打印出测试准确率。
End of explanation
"""
from extract_bottleneck_features import *
def VGG16_predict_breed(img_path):
    """Predict the dog breed for an image using the VGG-16 transfer model."""
    # Extract the bottleneck features for this single image.
    features = extract_VGG16(path_to_tensor(img_path))
    # Run the classifier head and look up the breed name for the argmax.
    probabilities = VGG16_model.predict(features)
    return dog_names[np.argmax(probabilities)]
"""
Explanation: 使用模型预测狗的品种
End of explanation
"""
### TODO: 从另一个预训练的CNN获取bottleneck特征
# Load the pre-computed Xception bottleneck features for all three splits.
bottleneck_features = np.load('/data/bottleneck_features/DogXceptionData.npz')
train_Xception = bottleneck_features['train']
valid_Xception = bottleneck_features['valid']
test_Xception = bottleneck_features['test']
"""
Explanation: <a id='step5'></a>
步骤 5: 建立一个CNN来分类狗的品种(使用迁移学习)
现在你将使用迁移学习来建立一个CNN,从而可以从图像中识别狗的品种。你的 CNN 在测试集上的准确率必须至少达到60%。
在步骤4中,我们使用了迁移学习来创建一个使用基于 VGG-16 提取的特征向量来搭建一个 CNN。在本部分内容中,你必须使用另一个预训练模型来搭建一个 CNN。为了让这个任务更易实现,我们已经预先对目前 keras 中可用的几种网络进行了预训练:
VGG-19 bottleneck features
ResNet-50 bottleneck features
Inception bottleneck features
Xception bottleneck features
这些文件被命名为为:
Dog{network}Data.npz
其中 {network} 可以是 VGG19、Resnet50、InceptionV3 或 Xception 中的一个。选择上方网络架构中的一个,他们已经保存在目录 /data/bottleneck_features/ 中。
【练习】获取模型的特征向量
在下方代码块中,通过运行下方代码提取训练、测试与验证集相对应的bottleneck特征。
bottleneck_features = np.load('/data/bottleneck_features/Dog{network}Data.npz')
train_{network} = bottleneck_features['train']
valid_{network} = bottleneck_features['valid']
test_{network} = bottleneck_features['test']
End of explanation
"""
### TODO: 定义你的框架
# Classifier head on top of the pre-trained Xception bottleneck features.
Xception_model = Sequential()
# Global average pooling collapses each feature map to one value,
# which also helps against overfitting.
Xception_model.add(GlobalAveragePooling2D(input_shape=train_Xception.shape[1:]))
# Dropout layer for additional regularization.
Xception_model.add(Dropout(0.2))
# Fully connected layer with 133 nodes; softmax outputs a probability
# per dog breed.
Xception_model.add(Dense(133, activation='softmax'))

Xception_model.summary()

### TODO: compile the model
Xception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
"""
Explanation: 【练习】模型架构
建立一个CNN来分类狗品种。在你的代码单元块的最后,通过运行如下代码输出网络的结构:
<your model's name>.summary()
<a id='question6'></a>
问题 6:
在下方的代码块中尝试使用 Keras 搭建最终的网络架构,并回答你实现最终 CNN 架构的步骤与每一步的作用,并描述你在迁移学习过程中,使用该网络架构的原因。
回答:
Xception_model = Sequential()
这一步是调用Xception的预训练模型
Xception_model.add(GlobalAveragePooling2D(input_shape=train_Resnet50.shape[1:]))
这一步添加一个全局平均池化层避免过拟合
Xception_model.add(Dropout(0.2))
这一步是添加Dropout层避免过拟合
Xception_model.add(Dense(133, activation='softmax'))
这一步添加133个节点的全连接层,使用softmax激活函数输出每个狗狗品种的概率
使用该网络架构的原因是由于Xception具有如下优点:
1.相比传统的卷积神经网络如VGG复杂度降低,需要的参数数量下降。
2.可以做到更深,不会出现梯度消失的问题。
3.优化简单,分类准确度加深由于使用更深的网络。
4.Xception在众多图像识别领域中拔得头筹。
因此,选取Xception网络可以比之前的VGG网络取得更好的预测效果。
为什么这一架构会在这一分类任务中成功?
这四个架构都是经过反复多次实验确定的,非常有效果的架构。以Inception net为例,inception net是多层特征提取器,通过分别多次同时提取特征,然后叠加,就可以学到不同层次的特征,所以效果非常好。
为什么早期(第三步 )的尝试不成功?
第三步中,第一,使用的网络在架构上,非常浅,学到的特征非常少,其次学习库非常小,上面四个网络是在Imagenet上经过大量训练在不同种类的训练集上得来的,这是这个小库无法比拟的。
End of explanation
"""
### TODO: train the model
# Checkpoint only the weights with the best (lowest) validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.Xception1.hdf5',
                               verbose=1, save_best_only=True)

history = Xception_model.fit(train_Xception, train_targets,
          validation_data=(valid_Xception, valid_targets),
          epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)

### TODO: load the weights with the best validation loss
Xception_model.load_weights('saved_models/weights.best.Xception1.hdf5')
"""
Explanation: 【练习】训练模型
<a id='question7'></a>
问题 7:
在下方代码单元中训练你的模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
当然,你也可以对训练集进行 数据增强 以优化模型的表现,不过这不是必须的步骤。
End of explanation
"""
### TODO: compute the classification accuracy on the test set
# Predicted breed index for every test image's bottleneck feature vector.
Xception_predictions = [
    np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0)))
    for feature in test_Xception]

# Report test accuracy as the mean of the per-image hit indicator.
test_accuracy = 100 * np.mean(
    np.array(Xception_predictions) == np.argmax(test_targets, axis=1))
print('Test accuracy: %.4f%%' % test_accuracy)
"""
Explanation: 【练习】测试模型
<a id='question8'></a>
问题 8:
在狗图像的测试数据集上试用你的模型。确保测试准确率大于60%。
End of explanation
"""
### TODO: 写一个函数,该函数将图像的路径作为输入
### 然后返回此模型所预测的狗的品种
def Xception_predict_breed(img_path):
    """Predict the dog breed for an image using the Xception transfer model."""
    # Extract the bottleneck features for this single image.
    features = extract_Xception(path_to_tensor(img_path))
    # Run the classifier head and look up the breed name for the argmax.
    probabilities = Xception_model.predict(features)
    return dog_names[np.argmax(probabilities)]
"""
Explanation: 【练习】使用模型测试狗的品种
实现一个函数,它的输入为图像路径,功能为预测对应图像的类别,输出为你模型预测出的狗类别(Affenpinscher, Afghan_hound 等)。
与步骤5中的模拟函数类似,你的函数应当包含如下三个步骤:
根据选定的模型载入图像特征(bottleneck features)
将图像特征输输入到你的模型中,并返回预测向量。注意,在该向量上使用 argmax 函数可以返回狗种类的序号。
使用在步骤0中定义的 dog_names 数组来返回对应的狗种类名称。
提取图像特征过程中使用到的函数可以在 extract_bottleneck_features.py 中找到。同时,他们应已在之前的代码块中被导入。根据你选定的 CNN 网络,你可以使用 extract_{network} 函数来获得对应的图像特征,其中 {network} 代表 VGG19, Resnet50, InceptionV3, 或 Xception 中的一个。
<a id='question9'></a>
问题 9:
End of explanation
"""
### TODO: 设计你的算法
### 自由地使用所需的代码单元数吧
from IPython.core.display import Image, display
def dog_breed_algorithm(img_path):
    """Show the image at ``img_path`` and report a breed prediction.

    If a dog is detected, its predicted breed is printed; if a human face is
    detected, the most resembling breed is printed; otherwise an error
    message is printed.  Always returns None (all output goes to
    stdout/display).
    """
    # Both detectors return booleans, so test truthiness directly
    # instead of comparing against 1.
    if dog_detector(img_path):
        print("hello, dog!")
        display(Image(img_path, width=200, height=200))
        print("Your predicted breed is ... ")
        print(Xception_predict_breed(img_path))
    elif face_detector(img_path):
        print("hello, human!")
        display(Image(img_path, width=200, height=200))
        print("You look like a ... ")
        print(Xception_predict_breed(img_path))
    else:
        display(Image(img_path, width=200, height=200))
        print("Could not identify a human or dog in the chosen image. Please try again.")
"""
Explanation: <a id='step6'></a>
步骤 6: 完成你的算法
实现一个算法,它的输入为图像的路径,它能够区分图像是否包含一个人、狗或两者都不包含,然后:
如果从图像中检测到一只__狗__,返回被预测的品种。
如果从图像中检测到__人__,返回最相像的狗品种。
如果两者都不能在图像中检测到,输出错误提示。
我们非常欢迎你来自己编写检测图像中人类与狗的函数,你可以随意地使用上方完成的 face_detector 和 dog_detector 函数。你__需要__在步骤5使用你的CNN来预测狗品种。
下面提供了算法的示例输出,但你可以自由地设计自己的模型!
<a id='question10'></a>
问题 10:
在下方代码块中完成你的代码。
End of explanation
"""
## TODO: 在你的电脑上,在步骤6中,至少在6张图片上运行你的算法。
## 自由地使用所需的代码单元数吧
# Run the full pipeline on six sample images (images/1.jpg ... images/6.jpg).
for sample_index in range(1, 7):
    sample_path = 'images/{}.jpg'.format(sample_index)
    print('filename = ' + sample_path)
    dog_breed_algorithm(sample_path)
    print('\n')
"""
Explanation: <a id='step7'></a>
步骤 7: 测试你的算法
在这个部分中,你将尝试一下你的新算法!算法认为__你__看起来像什么类型的狗?如果你有一只狗,它可以准确地预测你的狗的品种吗?如果你有一只猫,它会将你的猫误判为一只狗吗?
上传方式:点击左上角的Jupyter回到上级菜单,你可以看到Jupyter Notebook的右上方会有Upload按钮。
<a id='question11'></a>
问题 11:
在下方编写代码,用至少6张现实中的图片来测试你的算法。你可以使用任意照片,不过请至少使用两张人类图片(要征得当事人同意哦)和两张狗的图片。
同时请回答如下问题:
输出结果比你预想的要好吗 :) ?或者更糟 :( ?
提出至少三点改进你的模型的想法。
1.结果比我预想的好。该算法可以准确识别出图片中是否含有狗或者人
2. 1)对训练集进行数据增强以优化模型的表现
2)优化神经网络结构
3)增大数据集数据
End of explanation
"""
|
khalibartan/pgmpy | examples/Gaussian Bayesian Networks (GBNs).ipynb | mit | # from pgmpy.factors.continuous import LinearGaussianCPD
import sys
import numpy as np
import pgmpy
sys.path.insert(0, "../pgmpy/")
from pgmpy.factors.continuous import LinearGaussianCPD
# Mean vector and covariance matrix of the jointly Gaussian evidence
# variables (X1, X2) for the linear Gaussian CPD P(Y | X1, X2).
mu = np.array([7, 13])
sigma = np.array([[4 , 3],
                  [3 , 6]])

cpd = LinearGaussianCPD('Y', evidence_mean = mu, evidence_variance=sigma, evidence=['X1', 'X2'])
# The CPD's target variable and its parents (evidence variables).
cpd.variable, cpd.evidence
#### import numpy as np
%matplotlib inline
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import multivariate_normal
from matplotlib import pyplot
# Obtain the X and Y which are jointly gaussian from the distribution
mu_x = np.array([7, 13])
sigma_x = np.array([[4 , 3],
[3 , 6]])
# Variables
states = ['X1', 'X2']
# Generate samples from the distribution
X_Norm = multivariate_normal(mean=mu_x, cov=sigma_x)
X_samples = X_Norm.rvs(size=10000)
X_df = pd.DataFrame(X_samples, columns=states)
# Generate
X_df['P_X'] = X_df.apply(X_Norm.pdf, axis=1)
X_df.head()
g = sns.jointplot(X_df['X1'], X_df['X2'], kind="kde", height=10, space=0)
"""
Explanation: <img src="images/mcg.jpg", style="width: 100px">
Linear Gaussian Bayesian Networks (GBNs)
Generate $x_1$ $x_2$ and $Y$ from a Multivariate Gaussian Distribution with a Mean and a Variance.
What if the inputs to the linear regression were correlated? This often happens in linear dynamical systems. Linear Gaussian Models are useful for modeling probabilistic PCA, factor analysis and linear dynamical systems. Linear Dynamical Systems have variety of uses such as tracking of moving objects. This is an area where Signal Processing methods have a high overlap with Machine Learning methods. When the problem is treated as a state-space problem with added stochasticity, then the future samples depend on the past. The latent parameters, $\beta_i$ where $i \in [1,...,k]$ provide a linear combination of the univariate gaussian distributions as shown in the figure.
<img src="images/gbn.png", style="width: 400px">
The observed variable, $y_{jx}$ can be described as a sample that is drawn from the conditional distribution:
$$\mathcal{N}(y_{jx} | \sum_{i=1}^k \beta_i^T x_i + \beta_0; \sigma^2)$$
The latent parameters $\beta_is$ and $\sigma^2$ need to be determined.
End of explanation
"""
# Ground-truth latent parameters of the linear Gaussian model
# Y | X ~ N(beta_vec . X + beta_0, sigma_c); the CPD fit below should
# recover these from the samples.
beta_vec = np.array([.7, .3])
beta_0 = 2
sigma_c = 4  # NOTE(review): np.random.normal treats this as the std-dev, not variance

def genYX(x):
    """Draw one sample of Y given the DataFrame row ``x`` ('X1', 'X2')."""
    x = [x['X1'], x['X2']]
    # Conditional mean is the linear combination beta^T x plus the offset.
    var_mean = np.dot(beta_vec.transpose(), x) + beta_0
    Yx_sample = np.random.normal(var_mean, sigma_c, 1)
    return Yx_sample[0]
# Sample one conditional Y value per row and attach it as a new column.
X_df['(Y|X)'] = X_df.apply(genYX, axis=1)
X_df.head()

# Visualize the marginal distribution of the sampled Y values.
sns.distplot(X_df['(Y|X)'])

# X_df.to_csv('gbn_values.csv', index=False)

# Estimate the CPD's latent parameters (betas, variance) by MLE.
cpd.fit(X_df, states=['(Y|X)', 'X1', 'X2'], estimator='MLE')
"""
Explanation: Linear Gaussian Models - The Process
The linear gaussian model in supervised learning scheme is nothing but a linear regression where inputs are drawn from a jointly gaussian distribution.
Determining the Latent Parameters via Maximum Likelihood Estimation (MLE)
The samples drawn from the conditional linear gaussian distributions are observed as:
$$ p(Y|X) = \cfrac{1}{\sqrt(2\pi\sigma_c^2} \times exp(\cfrac{(\sum_{i=1}^k \beta_i^T x_i + \beta_0 - x[m])^2}{2\sigma^2})$$
Taking log,
$$ log(p(Y|X)) = (\sum_{i=1}^k[-\cfrac{1}{2}log(2\pi\sigma^2) - \cfrac{1}{2\sigma^2}( \beta_i^T x_i + \beta_0 - x[m])^2)]$$
Differentiating w.r.t $\beta_i$, we can get k+1 linear equations as shown below:
The Condtional Distribution p(Y|X)
<img src="images/lgm.png", style="width: 700px">
The betas can easily be estimated by inverting the coefficient matrix and multiplying it to the right-hand side.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.20/_downloads/6684371ec2bc8e72513b3bdbec0d3a9f/plot_20_events_from_raw.ipynb | bsd-3-clause | import os
import numpy as np
import mne
# Download (if needed) and locate the MNE sample dataset.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# Crop to the first 60 seconds before loading into RAM, to save memory.
raw.crop(tmax=60).load_data()
"""
Explanation: Parsing events from raw data
This tutorial describes how to read experimental events from raw recordings,
and how to convert between the two different representations of events within
MNE-Python (Events arrays and Annotations objects).
:depth: 1
In the introductory tutorial <overview-tut-events-section> we saw an
example of reading experimental events from a :term:"STIM" channel <stim
channel>; here we'll discuss :term:events and :term:annotations more
broadly, give more detailed information about reading from STIM channels, and
give an example of reading events that are in a marker file or included in the
data file as an embedded array. The tutorials tut-event-arrays and
tut-annotate-raw discuss how to plot, combine, load, save, and
export :term:events and :class:~mne.Annotations (respectively), and the
latter tutorial also covers interactive annotation of :class:~mne.io.Raw
objects.
We'll begin by loading the Python modules we need, and loading the same
example data <sample-dataset> we used in the introductory tutorial
<tut-overview>, but to save memory we'll crop the :class:~mne.io.Raw object
to just 60 seconds before loading it into RAM:
End of explanation
"""
# Plot only the STIM channels (work on a copy so `raw` keeps all channels).
raw.copy().pick_types(meg=False, stim=True).plot(start=3, duration=6)
"""
Explanation: The Events and Annotations data structures
Generally speaking, both the Events and :class:~mne.Annotations data
structures serve the same purpose: they provide a mapping between times
during an EEG/MEG recording and a description of what happened at those
times. In other words, they associate a when with a what. The main
differences are:
Units: the Events data structure represents the when in terms of
samples, whereas the :class:~mne.Annotations data structure represents
the when in seconds.
Limits on the description: the Events data structure represents the
what as an integer "Event ID" code, whereas the
:class:~mne.Annotations data structure represents the what as a
string.
How duration is encoded: Events in an Event array do not have a
duration (though it is possible to represent duration with pairs of
onset/offset events within an Events array), whereas each element of an
:class:~mne.Annotations object necessarily includes a duration (though
the duration can be zero if an instantaneous event is desired).
Internal representation: Events are stored as an ordinary
:class:NumPy array <numpy.ndarray>, whereas :class:~mne.Annotations is
a :class:list-like class defined in MNE-Python.
What is a STIM channel?
A :term:stim channel (short for "stimulus channel") is a channel that does
not receive signals from an EEG, MEG, or other sensor. Instead, STIM channels
record voltages (usually short, rectangular DC pulses of fixed magnitudes
sent from the experiment-controlling computer) that are time-locked to
experimental events, such as the onset of a stimulus or a button-press
response by the subject (those pulses are sometimes called TTL_ pulses,
event pulses, trigger signals, or just "triggers"). In other cases, these
pulses may not be strictly time-locked to an experimental event, but instead
may occur in between trials to indicate the type of stimulus (or experimental
condition) that is about to occur on the upcoming trial.
The DC pulses may be all on one STIM channel (in which case different
experimental events or trial types are encoded as different voltage
magnitudes), or they may be spread across several channels, in which case the
channel(s) on which the pulse(s) occur can be used to encode different events
or conditions. Even on systems with multiple STIM channels, there is often
one channel that records a weighted sum of the other STIM channels, in such a
way that voltage levels on that channel can be unambiguously decoded as
particular event types. On older Neuromag systems (such as that used to
record the sample data) this "summation channel" was typically STI 014;
on newer systems it is more commonly STI101. You can see the STIM
channels in the raw data file here:
End of explanation
"""
# Parse the summation STIM channel into an events array; each row is
# (sample, previous value, event code).
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])  # show the first 5
"""
Explanation: You can see that STI 014 (the summation channel) contains pulses of
different magnitudes whereas pulses on other channels have consistent
magnitudes. You can also see that every time there is a pulse on one of the
other STIM channels, there is a corresponding pulse on STI 014.
.. TODO: somewhere in prev. section, link out to a table of which systems
have STIM channels vs. which have marker files or embedded event arrays
(once such a table has been created).
Converting a STIM channel signal to an Events array
If your data has events recorded on a STIM channel, you can convert them into
an events array using :func:mne.find_events. The sample number of the onset
(or offset) of each pulse is recorded as the event time, the pulse magnitudes
are converted into integers, and these pairs of sample numbers plus integer
codes are stored in :class:NumPy arrays <numpy.ndarray> (usually called
"the events array" or just "the events"). In its simplest form, the function
requires only the :class:~mne.io.Raw object, and the name of the channel(s)
from which to read events:
End of explanation
"""
# Load an EEGLAB .set file from the MNE testing dataset; its embedded event
# array is converted automatically into raw.annotations on read.
# NOTE(review): relies on `os` having been imported earlier in the notebook —
# confirm; os is not imported in this visible section.
testing_data_folder = mne.datasets.testing.data_path()
eeglab_raw_file = os.path.join(testing_data_folder, 'EEGLAB', 'test_raw.set')
eeglab_raw = mne.io.read_raw_eeglab(eeglab_raw_file)
print(eeglab_raw.annotations)
"""
Explanation: .. sidebar:: The middle column of the Events array
MNE-Python events are actually *three* values: in between the sample
number and the integer event code is a value indicating what the event
code was on the immediately preceding sample. In practice, that value is
almost always `0`, but it can be used to detect the *endpoint* of an
event whose duration is longer than one sample. See the documentation of
:func:`mne.find_events` for more details.
If you don't provide the name of a STIM channel, :func:~mne.find_events
will first look for MNE-Python config variables <tut-configure-mne>
for variables MNE_STIM_CHANNEL, MNE_STIM_CHANNEL_1, etc. If those are
not found, channels STI 014 and STI101 are tried, followed by the
first channel with type "STIM" present in raw.ch_names. If you regularly
work with data from several different MEG systems with different STIM channel
names, setting the MNE_STIM_CHANNEL config variable may not be very
useful, but for researchers whose data is all from a single system it can be
a time-saver to configure that variable once and then forget about it.
:func:~mne.find_events has several options, including options for aligning
events to the onset or offset of the STIM channel pulses, setting the minimum
pulse duration, and handling of consecutive pulses (with no return to zero
between them). For example, you can effectively encode event duration by
passing output='step' to :func:mne.find_events; see the documentation
of :func:~mne.find_events for details. More information on working with
events arrays (including how to plot, combine, load, and save event arrays)
can be found in the tutorial tut-event-arrays.
Reading embedded events as Annotations
Some EEG/MEG systems generate files where events are stored in a separate
data array rather than as pulses on one or more STIM channels. For example,
the EEGLAB format stores events as a collection of arrays in the :file:.set
file. When reading those files, MNE-Python will automatically convert the
stored events into an :class:~mne.Annotations object and store it as the
:attr:~mne.io.Raw.annotations attribute of the :class:~mne.io.Raw object:
End of explanation
"""
# Summarize the annotations read from the EEGLAB file.
print(len(eeglab_raw.annotations))  # number of annotated events
print(set(eeglab_raw.annotations.duration))  # unique durations (all zero here)
print(set(eeglab_raw.annotations.description))  # unique event labels
print(eeglab_raw.annotations.onset[0])  # onset of the first event (seconds)
"""
Explanation: The core data within an :class:~mne.Annotations object is accessible
through three of its attributes: onset, duration, and
description. Here we can see that there were 154 events stored in the
EEGLAB file, they all had a duration of zero seconds, there were two
different types of events, and the first event occurred about 1 second after
the recording began:
End of explanation
"""
# Convert the annotations into an Events array; event_dict maps each unique
# description string to the auto-assigned integer event ID.
events_from_annot, event_dict = mne.events_from_annotations(eeglab_raw)
print(event_dict)
print(events_from_annot[:5])
"""
Explanation: More information on working with :class:~mne.Annotations objects, including
how to add annotations to :class:~mne.io.Raw objects interactively, and how
to plot, concatenate, load, save, and export :class:~mne.Annotations
objects can be found in the tutorial tut-annotate-raw.
Converting between Events arrays and Annotations objects
Once your experimental events are read into MNE-Python (as either an Events
array or an :class:~mne.Annotations object), you can easily convert between
the two formats as needed. You might do this because, e.g., an Events array
is needed for epoching continuous data, or because you want to take advantage
of the "annotation-aware" capability of some functions, which automatically
omit spans of data if they overlap with certain annotations.
To convert an :class:~mne.Annotations object to an Events array, use the
function :func:mne.events_from_annotations on the :class:~mne.io.Raw file
containing the annotations. This function will assign an integer Event ID to
each unique element of raw.annotations.description, and will return the
mapping of descriptions to integer Event IDs along with the derived Event
array. By default, one event will be created at the onset of each annotation;
this can be modified via the chunk_duration parameter of
:func:~mne.events_from_annotations to create equally spaced events within
each annotation span (see chunk-duration, below, or see
fixed-length-events for direct creation of an Events array of
equally-spaced events).
End of explanation
"""
# Choose the integer event codes ourselves instead of letting
# events_from_annotations auto-assign them; the mapping we pass in is
# returned unchanged as event_dict.
desired_codes = {'rt': 77, 'square': 42}
events_from_annot, event_dict = mne.events_from_annotations(
    eeglab_raw, event_id=desired_codes)
print(event_dict)
print(events_from_annot[:5])
"""
Explanation: If you want to control which integers are mapped to each unique description
value, you can pass a :class:dict specifying the mapping as the
event_id parameter of :func:~mne.events_from_annotations; this
:class:dict will be returned unmodified as the event_dict.
.. TODO add this when the other tutorial is nailed down:
Note that this event_dict can be used when creating
:class:~mne.Epochs from :class:~mne.io.Raw objects, as demonstrated
in :doc:epoching_tutorial_whatever_its_name_is.
End of explanation
"""
# Go the other way: build an Annotations object from the Events array.
# mapping: integer event code -> human-readable description.
mapping = {1: 'auditory/left', 2: 'auditory/right', 3: 'visual/left',
           4: 'visual/right', 5: 'smiley', 32: 'buttonpress'}
# Convert event sample numbers to onset times in seconds.
onsets = events[:, 0] / raw.info['sfreq']
durations = np.zeros_like(onsets)  # assumes instantaneous events
descriptions = [mapping[event_id] for event_id in events[:, 2]]
# orig_time aligns onsets with the recording start (Neuromag sample
# numbering begins at acquisition start, not recording start).
annot_from_events = mne.Annotations(onset=onsets, duration=durations,
                                    description=descriptions,
                                    orig_time=raw.info['meas_date'])
raw.set_annotations(annot_from_events)
"""
Explanation: To make the opposite conversion (from Events array to
:class:~mne.Annotations object), you can create a mapping from integer
Event ID to string descriptions, and use the :class:~mne.Annotations
constructor to create the :class:~mne.Annotations object, and use the
:meth:~mne.io.Raw.set_annotations method to add the annotations to the
:class:~mne.io.Raw object. Because the sample data <sample-dataset>
was recorded on a Neuromag system (where sample numbering starts when the
acquisition system is initiated, not when the recording is initiated), we
also need to pass in the orig_time parameter so that the onsets are
properly aligned relative to the start of recording:
End of explanation
"""
# Plot 5 seconds of raw data starting at t=5 s; annotations appear color-coded.
raw.plot(start=5, duration=5)
"""
Explanation: Now, the annotations will appear automatically when plotting the raw data,
and will be color-coded by their label value:
End of explanation
"""
# Annotate two REM-sleep spans (5-21 s and 41-52 s), then chop each span
# into a series of events spaced 1.5 s apart via chunk_duration.
rem_annot = mne.Annotations(onset=[5, 41], duration=[16, 11],
                            description=['REM', 'REM'])
raw.set_annotations(rem_annot)
rem_events, rem_event_dict = mne.events_from_annotations(
    raw, chunk_duration=1.5)
"""
Explanation: Making multiple events per annotation
As mentioned above, you can generate equally-spaced events from an
:class:~mne.Annotations object using the chunk_duration parameter of
:func:~mne.events_from_annotations. For example, suppose we have an
annotation in our :class:~mne.io.Raw object indicating when the subject was
in REM sleep, and we want to perform a resting-state analysis on those spans
of data. We can create an Events array with a series of equally-spaced events
within each "REM" span, and then use those events to generate (potentially
overlapping) epochs that we can analyze further.
End of explanation
"""
# Event times in seconds relative to recording start, rounded to ~1 ms.
print(np.round((rem_events[:, 0] - raw.first_samp) / raw.info['sfreq'], 3))
"""
Explanation: Now we can check that our events indeed fall in the ranges 5-21 seconds and
41-52 seconds, and are ~1.5 seconds apart (modulo some jitter due to the
sampling frequency). Here are the event times rounded to the nearest
millisecond:
End of explanation
"""
|
jhconning/Dev-II | notebooks/lognormal.ipynb | bsd-3-clause | mystring = 'economics'
"""
Explanation: Equilibrium Size Distribution of Farms
Like many of these notebooks this one was written quickly.
Indeterminacy of size distribution with constant returns to scale technology
In an earlier analysis we described the optimal consumption and production allocations of a farm household that took product and factor prices as given. We argued that if the production function $F(T,L)$ was linear homogenous in land and labor than efficiency in allocation could be achieved when both land and labor markets were competitive and even when one of the two factor markets is shutdown. That's because by the definition of what it means to be linear homogenous we have:
$$F(\tau T,\tau L) = \tau F(T,L) $$
which means that
$$P_A F_L(\tau T,\tau L) = P_A F_L(T,L) =w $$
so any farm that operates using the same land-labor ratio will have the same marginal value product, so for any given market wage and rental the optimal scale of production cannot be pinned down, all we can determine is the optimal land-to-labor ratio. The size distribution of farms is indeterminate (but also irrelevant since there is an infinite number of efficient ways to distribute the efficient output among farms that all have access to the same technology -- and every farm makes zero profits).
If in a scenario like this we shut down the land market then the size distribution of farms becomes determined simply by however much land each household has in its endowment. Efficiency in production allocation will still be achieved however if there is a competitive labor market as households hire in or hire out labor to bring the land to labor ratio to a level that efficiently equalizes marginal value products across farms.
Non-traded skills, diseconomies of scale and determinate size distribution
Suppose that we instead had a production function $\hat F(S,T,L)$ which is linear homogenous in its three arguments $S, T$, and $L$ where $S$ is refers to a non-traded farming skill or ability, $T$ is tradable land and $L$ is tradable labor. Consider for example Cobb-Douglas function of the form:
$$\hat F(S,T,L) = S^{1-\gamma} \left [ T^\alpha L^{1-\alpha} \right ]^\gamma$$
where $\gamma$ and $\alpha$ are both numbers greater than 0 and less than or equal to 1. This production function is clearly linear homogenous in its three arguments. Suppose now however that farming skill $S$ is non-traded and each household had an endowmment of exactly $S=1$ units of farming skill. The conditional production
$$F(T,L) =\hat F(S=1,T,L)$$
$$F(T,L) = \left [ T^\alpha L^{1-\alpha} \right ]^\gamma$$
is clearly linear homogenous of degree $\gamma$ and if $\gamma$ is strictly less than one then it's a production function subject to diseconomies of scale. In contrast to a productino function subject to constant returns to scale where the marginal cost and average cost of production to a price taking firm are constant, when production is subject to diseconomies of scale the firm's marginal cost and average cost curves will both be upward sloping: as the firm doubles its inputs output less than doubles so average (and marginal) cost increases with output. This will then determine an optimal scale to the firm (where marginal cost equals the market price of output).
Note also that if there are two firms that are otherwise identical but where one firm has a larger endowment of the non-traded farming skill input, then it will be optimal for this more skilled farmer to operate at a larger scale. This can be seen very simply by noting that the marginal product of labor and capital are both augmented by farming skill.
This then means that if there is some initial distribution of farming skill $S$ across households then the equilibrium size distribution of operational farm sizes will be in some way proportional to that distribution -- farm households with higher farming skills will operate larger farms that use both more land and labor. Since the production function is homogenous in land and labor it's also homothetic and though different farms will operate at different scale they will still all operate using the same land-labor ratio in an efficient competitive allocation.
Lucas (1978) calls a model very similar to this a 'span of control model. The basic idea is that because farm management/supervision ability cannot be hired on the market due perhaps to moral hazard considerations. As a farmer attempts to use their fixed farming skill to supervise a larger and larger farm they face diseconomies of scale ($F_{ST} <0$ and $F_{SL} <0$) or rising costs. This will mean that a household that starts with a large endowment of land but only medium farming ability would find it optimal to operate a farm up to a certain scale and then lease out remaining land to the market (because the shadow return to employing land on a yet larger farm would fall below the land rental rate they can get from the market.
Lucas' (1978) model complicates things by assuming that the household has to choose between allocating its time endowment between being a pure farm manager/supervisor or being a laborer. This then leads to a partition of households: those above some threshold level of farming skill become full time farm operators who hire in labor and possibly land; all those below this threshold do not operate farms and derive income only from selling labor and possibly land to the market.
It turns out that simplifying Lucas' model yields a richer model of the farm economy. In the model that follows every household has an endowment of farming skill, household labor and land, all of which will be supplied inelastically. Farming skill is independent of labor use, so the household is not forced to choose between farm supervision and work in the labor market, they can do both.
Competitive Factor Markets with no distortions
Let's first study the benchmark cases where though there is no market for the non-traded farming skill, the other two factors land and labor can be costlessly hired in or out on competitive markets, resulting in competitive efficient equilibria.
Equal distribution of farming skill across households
Consider the simplest version of the model. Every household has access to the same Cobb Douglas production technology, has $S=1$ units of farming skill and there is some distribution of the total endowment of land $\bar T$ and labor $\bar L$ across households. So tradable endowments $(\bar T_i, \bar L_i)$ follow some distribution $\Gamma$.
To fix ideas suppose that there is an integer number of farm households $\bar L$. Since every farm household has the same technology and farming skill and face the same equilibrium market factor prices $w$ and $v$ they'll choose the same optimal factor demands call them $(T^D(w,r),L^D(w,r))$. Equilibrium in the factor markets requires:
$$\bar L L^D(w,r) = \bar L$$
$$\bar L T^D(w,r) = \bar T$$
it follows that the optimal farm size will employ $L^* = 1$ worker, $T^* =\tau^* = \frac{\bar T}{\bar L}$ units of land and will produce $F \left ( 1,\tau^*, 1 \right)$ units of output. Equilibrium factor prices are also then simply determined as $w^* = F_L \left ( 1,\tau^*, 1 \right)$ and $r^* = F_T \left ( 1,\tau^*, 1 \right)$. If the household has a land endowment $\bar T_i$ larger than (less than) this optimal farm scale $\tau^*$ the household leases out (leases in) land and likewise any household has a labor endowment $\bar L_i$ larger than (less than) this optimal farm scale $L^*$ hires out (hires in) labor.
Unequal distribution of farming skill across households
This simple model is easily adapted to the situation where households have access to the same technology but differ in their initial level of farm management/supervision skill. Conceptually farmers with larger non-traded farming skill will operate proportionately larger farms but the land-to-labor ratio is equalized across farms. It's quite easy to demonstrate that if the initial distribution of skills is distributed as a log-normal then so too will the distribution of optimal farm sizes. Once again the farm household will lease out (lease in) land and hire out (hire in) labor depending on whether their initial holding of the traded land endowment exceeds (falls short of) the optimal land size for their farming skill and whether their initial holding of the traded labor endowment exceeds (falls short of) the optimal labor demand for their farming skill.
If we add in a fixed cost to operating a farm then farmers of very low skill will find it unprofitable to operate a farm and will become pure laborers and lease out all land.
Depending on the initial allocation of non-traded skills and tradable inputs households the model delivers an endogenous fourfold classification of the types of engagement in this farm labor economy (the labels I use here are similar to Eswaran and Kotwal's (1986) paper):
* pure laborers: low skill households do not operate farms.
* laborer-cultivators: low-medium skill households operate small farms and also seek outside employment
* capitalist: higher skill households operate larger farms with hired labor.
In the economy described thus far, farm household decisions are separable and the initial distribution of tradable land and labor does not matter for efficiency in production.
Note that because our production function was linear homogenous in the three inputs we could shut down one of the markets -- the market for farming skill -- and still achieve efficiency in production.
We can think of the Eswaran and Kotwal (1986) model in these terms. They describe a model with land, labor and labor supervision. We've shut down the market for supervisory labor whereas they tell a more complicated story about moral hazard which at the end of the day introduces a similar type of diminishing returns to supervision. That market imperfection by itself is not enough to matter for efficiency in allocation but they then go ahead and introduce a second distortion: they impose the requirement that farms must pay for their land and labor hiring in advance of the crop but then make a farm household's access to capital proportional to it's initial holding of land (they also impose a fixed cost of operation similar to that described above).
The working capital constraint means that some farms will not be able to achieve their optimum scale of operation and the initial distribution of land will now determine both efficiency in allocation and the endogenous agrarian structure: the proportion of different types of farm operators in the economy.
Let's see a simple simulation of this model after a brief note on the code we will use to solve for equilibria.
A note on python implementation and object-oriented programming
Most of the code to run the model below is contained in a separate module geqfarm.py outside of this notebook that will be imported as any other library.
It's often said that in python 'everything is an object.' Objects have attributes and methods. 'Attributes' can be thought of as data or variables that describe the object and 'methods' can be thought of as functions that operate on object.
Take a very simple example. When in python you declare a string variable like so:
End of explanation
"""
# return the string capitalized (note: in a notebook only the last bare
# expression of a cell is displayed, so only endswith's result is shown)
mystring.upper()
# count the number of occurrences of the letter 'o'
mystring.count('o')
# tell me if the string ends with the letter 'M'
mystring.endswith('M')
"""
Explanation: python treats mystring as an instance of a string object. One then has access to a long list of attributes and methods associated with this object. In a jupyter notebook if you type the variable name mystring followed by a period and then hit the tab key you will see a list of available attributes and methods. Here are a few:
End of explanation
"""
import numpy as np
# Wildcard import supplies Economy, scene_print, factor_plot, TLratio_plot
# (and presumably LaborLandRat, used further below — TODO confirm).
from geqfarm import *
"""
Explanation: Class statements to create new objects
Let's import the geqfarm.py libary:
End of explanation
"""
# Instantiate an Economy of 20 households; attributes get default values.
myeconomy= Economy(20)
"""
Explanation: If you take a look at the code you will see how I have used class statements to create a new prototype `Economy` object. An object of type Economy has attributes such as the number of households in the economy, parameters of the production function, and arrays that summarize the initial distribution of skill, land and labor across households. Once this class of object is defined one can make assignments such as the following:
End of explanation
"""
# Display the homogeneity (returns-to-scale) parameter gamma.
myeconomy.GAMMA
"""
Explanation: This creates myeconomy as an instance of an Economy object. Several attributes are set to default values. We can easily find out what these are. For instance this is an economy with a production function with the $\gamma$ paramter which measures the extent of homogeneity or diseconomies of scale. To find out what value it's set to we just type:
End of explanation
"""
# Attributes can be reassigned directly.
myeconomy.GAMMA = 0.9
"""
Explanation: And we can easily change it to another value:
End of explanation
"""
# Convenience method: summary of all important economy parameters.
myeconomy.print_params()
"""
Explanation: I've written a method to get a convenient summary of all important parameters:
End of explanation
"""
# A smaller economy of 5 identical households for the worked example.
N = 5
E = Economy(N)
E.ALPHA = 0.5   # land share in the Cobb-Douglas technology
E.GAMMA = 0.90  # degree of homogeneity (<1 => diseconomies of scale)
"""
Explanation: For example, the number of households is $N=20$, total land endowment and labor force are both set to 100 and $s$ is an array of skills across each of those 20 households. For the moment every household has the same skill level.
Let's work with a different simpler economy of just $N=5$ identical households. We'll call this new economy just E.
End of explanation
"""
# Competitive equilibrium for the whole economy's endowments and skills;
# returns a named tuple with factor prices (.w) and land/labor allocations (.X).
eqnE = E.smallhold_eq([E.TBAR, E.LBAR], E.s)
"""
Explanation: I've written a simple object method which calculates a competitive equilibrium allocation for the economy. I've set it up to take as required inputs a vector of economywide endownments and an array summarizing the skill distribution. It returns a 'named-tuple' with the vector of equilibrium factor prices, and the optimal land and labor allocations on each farm.
End of explanation
"""
# Equilibrium factor prices (land rent and labor wage).
eqnE.w
"""
Explanation: The competitive equilibrium land rent and labor wage that clears this market are given by:
End of explanation
"""
# Optimal land and labor allocation for each of the five farms.
eqnE.X
"""
Explanation: The optimal allocation of land and labor to each of the five farm skill groups is given by:
End of explanation
"""
# Raise only household zero's farming skill, then display the skill vector.
E.s[0] = 1.2
E.s
"""
Explanation: Since every one of the 5 farm household has the same skill and there are 100 units of land and labor the optimal allocation is for every farm to operate with 20 units of land and 20 units of labor.
Suppose however we now increase the skill of only the very first household, household zero.
End of explanation
"""
# Recompute the competitive equilibrium with the new skill distribution.
eqnE = E.smallhold_eq([E.TBAR, E.LBAR], E.s)
"""
Explanation: Let's recalculate the competive equilibrium:
End of explanation
"""
# Factor prices rise now that the economy has more skill.
eqnE.w
"""
Explanation: Since there is more skill in this economy we would expect real wages and rents to rise, as indeed they do:
End of explanation
"""
# Household zero's farm expands; the other farms shrink.
eqnE.X
"""
Explanation: Since farm household zero now has a considerably higher level of skill compared to other households, its optimal scale of operations increases and, since it bids on the market for the same factors, driving up the costs to other farms, the optimal scale of operations of the other farms shrinks.
End of explanation
"""
# Jupyter setup: auto-reload edited modules so changes to geqfarm.py take
# effect without restarting the kernel.
%load_ext autoreload
%autoreload 2
import numpy as np
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (10, 8)  # default figure size in inches
np.set_printoptions(precision=4)  # compact array printing
%matplotlib inline
"""
Explanation: Equilibria with credit market distortions
We can easily modify the model above to get a simplified version of the model used in Eswaran and Kotwal's 1986 Economic Journal paper "Access to Capital and Agrarian Production Organization."
The model is as above except that now we add in a farm-specific working capital constraint, or the requirement that farms must be able to hire their inputs up-front prior to sale of their crop.
Latifundia Economics: factor market-power distortions
End of explanation
"""
# Five farmers, each with skill normalized to 1 (set below).
N = 5
"""
Explanation: What follows is a simple description of my (2007) 'Latifundia Economics' paper, which I used to try to explain the Latifundia-Minifundia complex of bimodal distribution of farm sizes that existed for long periods of time in many parts of historical Latin America and other parts of the world.
To understand the main arguments, consider the simplest case of a single landlord (or a cartel of landlords) surrounded by a fringe of small landowning or landless agricultural households. If the lanlord owns a large fraction of the land endowment a standard partial-equilibrium analysis of non-price discriminating monopoly suggests the landlord would drive up the rental price of land by withholding land from the lease market. In a general equilibrium setting however there is another effect: by restricting other farmers' access to land landlords also lower the marginal product of labor on those farms. This increases the supply of labor to landlord estates at any given wage increasing landlords' potential income from monopsony rents. This can lead to equilibria where landlords increase the size of their production estates scale well above efficient scale in a competitive economy. A Latifundia-Minifundia type economy can emerge in which landlords operate large estates employing overly land-intensive production techniques while a large mass of farmers operate inefficiently and labor-intensive small parcels of land and sell labor to the landlord estate(s).
Let's start with a simplified statement of the problem with just two types of households -- landlords and 'peasants' and a very simple discrete distribution of farming skills. We'll later extend this to a much more general setting.
The economy has $\overline{T}$ units of cultivable land and $\overline{L}$ households with one unit of labor each. The economy-wide land to labor ratio is therefore $\overline{t}=\overline{T}/\overline{L}$.
Household are indexed $i=1..$ $\overline{L}$ and each household has a non-traded farming skill level $s_{i}$ drawn from a known distribution $Z$. \ There is also an initial distribution of property rights over land. We'll make more flexible assumptions later but for now suffice to say that there is a group of "landlord" households (fraction $\lambda $ of the total) who together own fraction $\theta $ of the land endowment. In some scenarios below they will collude to coordinate their factor market demands and supplies as if they were a single landlord `cartel'. \
As the $\lambda \overline{L}$ landlord households own $\theta \overline{T}$ units of land, peasant households own the remaining $(1-\theta )\overline{T}$ units. The average non-landlord household therefore owns $\frac{(1-\theta )\overline{t}}{(1-\lambda )}$ units and, for the moment, all peasant households have the same initial land endowment. Under these assumptions it can be shown that the land Gini coefficient is exactly $\left[ \theta -\lambda \right] .$
A single tradable good such as corn is produced and consumed in the economy at a unity price fixed by trade with the world market. \ Households maximize utility from consumption subject to household income from farm production plus net factor sales.
All peasant households have access to the same production technology represented by a standard concave production function $\widehat{F}(T,L,s)$ assumed for now to be linearly homogenous in its three arguments: land $T,$labor $L$, and a third factor which we label $s.$This last factor is assumed to be a non-traded factor that captures farming skill or labor supervision ability$.$
In the illustrative simulations below we assume a Cobb-Douglas form
$$\widehat{F}(T,L,s)=s^{1-\gamma }\cdot \left[ T^{\alpha }L^{1-\alpha }\right] ^{\gamma }$$
Latifundia Economics (factor market power distorted equilibria)
The following contrived example helps to starkly highlight the mechanisms behind the factor-market power distorted equilibrium.
TO start simple assume that there just N=5 farmers each with a skill level normalized to 1.
End of explanation
"""
E = Economy(N) # an instance takes N length as parameter
s = np.ones(N) # skill distribution in the economy
E.ALPHA = 0.5   # land share in production
# gamma deliberately close to 1: near-constant returns, but kept below 1
# so the size distribution stays determinate.
E.GAMMA = 0.90
"""
Explanation: Now create an economy and change a few parameters from their default. The $\gamma$ parameter which measures the degree of homogeneity in production is purposefully set very high.. We are very close to assuming constant returns to scale (but setting it just below 1 is needed to make sure the size-distribution remains determinate).
End of explanation
"""
# Display the economy's total land and labor endowments.
E.TBAR,E.LBAR
"""
Explanation: The Economy has default resource endowment:
End of explanation
"""
# Competitive (efficient) allocation for the whole economy.
Xc = E.smallhold_eq([E.TBAR,E.LBAR],s)
Xc
# Solve and print competitive vs market-power equilibria across 10 levels
# of the landlord's ownership share theta.
(Xrc,Xr,wc,wr) = scene_print(E,10, detail=True)
"""
Explanation: So as expected the efficient (competitive) resource allocation has every farmer operating a farm of equal unit size. The smallhold_eq method gives the competitive (efficient) allocation for a subeconomy with XE = (TE,LE) endowments and skill distribution s. It returns a named tuple with equilibrium factor prices and X=[T,L] allocations.
If XE and s are those of the entire economy then we've found the economywide competitive allocation.
End of explanation
"""
# Plot competitive (Xrc) vs distorted (Xr) factor allocations against theta.
factor_plot(E,Xrc,Xr)
"""
Explanation: Thus far we've said nothing of the ownership of land or labor. Let's assume every household has one unit of labor but that the 'landlord' class (which WLOG we index to be the last skill group s[-1]) owns fraction $\theta$ of the land. Assuming a uniform distribution of households across skills every skill group has Lbar/N households, and so there are that many landlords who act as a single cartel.
The following code is useful for printing and plotting out equilibria as a function of initial landlords' land ownership share.
We pass our economy instance to the scenarios function and it solves for competitive and market-power equilibria (at different levels of theta) and prints out the results.
Let's plot the results. This is the classic diagram from Latifundia Economics.
End of explanation
"""
# Plot land-to-labor ratios, then make the landlord (last skill group,
# index -1) 10% more skilled than the rest.
TLratio_plot(E,Xrc,Xr)
E.s[-1]=1.10
"""
Explanation: In the example above the 'landlord' farmer was in every way the same as the other farmers, the only difference being he had more land ownership (fraction $\theta$ of the total). He had the same skill parameter as every other farmer. In an efficient equilibrium his operational farm size should therefore be the same size as every other farmer. The plot above shows how monopoly power (which rises with $\theta$) allows the monopolist to distort the economy -- he withholds land from the lease market to drive up the land rental rate and, since this deprives the 'fringe' of farmers of land, lowers the marginal product of labor on each smallholder farm, increasing the smallholder labor supply to the market which pushes down the labor wage. Hence we see how at higher levels of $\theta$ the landlord expands the size of his estate and establishes monopsony power over wages.
A key force keeping the landlord from becoming too large is the fact that there are diseconomies of scale in production. The landlord is expanding the scale of his operation (raising the land to labor ratio on his farm in this example) to earn more via distorted factor prices, but he balances off the increase in extraction from distorted wages against the cost of operating an inefficiently large farm (i.e. the cost of being too big).
Now let's see the effect of making the landlord just a little bit more 'skilled' than the others. This lowers the cost of being big. But note that it also makes him bigger at lower theta and makes what I call the 'size monopsony' or 'Feenstra' effect matter more.
So let's raise the landlord farmer's productivity 10% relative to the rest of the farmers.
End of explanation
"""
# Recompute and replot the scenarios with the more-skilled landlord.
(Xrc,Xr,wc,wr) = scene_print(E,10,detail=True)
factor_plot(E,Xrc,Xr)
"""
Explanation: Let's recalculate the new equilibria under the different scenarios.
End of explanation
"""
TLratio_plot(E,Xrc,Xr)
# scipy's lognormal distribution, used for the skill-density plots below.
from scipy.stats import lognorm
def intLabor(s, mu, sigma, al, ak, phi, rho, gam):
    """Integrand for aggregate labor demand: the labor-to-land ratio at
    skill level *s*, weighted by the lognormal skill density.

    NOTE(review): ``lognorm.pdf(s, sigma, mu)`` passes ``mu`` as scipy's
    *loc* (shift) parameter, not as a log-scale mean -- confirm intended.
    """
    ratio = LaborLandRat(s, al, ak, phi, rho, gam)
    density = lognorm.pdf(s, sigma, mu)
    return ratio * density
# Exploratory plots of candidate lognormal skill distributions.
fig, ax = plt.subplots(1, 1)
s = 0.954
# mean/var/skew/kurt are computed here but not used below.
mean, var, skew, kurt = lognorm.stats(s, moments='mvsk')
# Plot over the central 98% of the distribution's support.
x = np.linspace(lognorm.ppf(0.01, s), lognorm.ppf(0.99, s), 100)
ax.plot(x, lognorm.pdf(x, s),'r-', lw=5, alpha=0.6, label='lognorm pdf')
s = 0.1 # shape parameter
x = np.linspace(lognorm.ppf(0.01, s), lognorm.ppf(0.99, s), 100)
#x = np.linspace(0.1,5, 100)
loc = 0
scale = 1
# Second figure: a "frozen" distribution with shape/loc/scale fixed.
fig, ax = plt.subplots(1, 1)
rv = lognorm(s, loc, scale)
ax.plot(x, rv.pdf(x), 'ko', lw=2, label='frozen pdf')
plt.show()
# Upper 99th-percentile skill level for the tighter (s=0.1) distribution.
lognorm.ppf(0.99, s)
"""
Explanation: Given that he is more skilled than before the landlord's efficient scale of production has increased. This lowers the cost of being big. Interestingly at low $\theta$ this leads the landlord to hire less land and labor ...
End of explanation
"""
|
vzg100/Post-Translational-Modification-Prediction | .ipynb_checkpoints/Phosphorylation Sequence Tests -MLP -dbptm+ELM-filterBenchmark-checkpoint.ipynb | mit | from pred import Predictor
from pred import sequence_vector
from pred import chemical_vector
"""
Explanation: Template for test
End of explanation
"""
# Sweep the class-imbalance strategies on serine (S) phosphorylation data:
# for each strategy, train once without random negatives ("y") and once
# with random negative examples drawn from the benchmark set ("x").
balancers = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for balancer in balancers:
    print("y", balancer)
    plain = Predictor()
    plain.load_data(file="Data/Training/clean_s_filtered.csv")
    plain.process_data(vector_function="sequence", amino_acid="S", imbalance_function=balancer)
    plain.supervised_training("mlp_adam")
    plain.benchmark("Data/Benchmarks/phos.csv", "S")
    del plain
    print("x", balancer)
    randomized = Predictor()
    randomized.load_data(file="Data/Training/clean_s_filtered.csv")
    randomized.process_data(vector_function="sequence", amino_acid="S", imbalance_function=balancer, random_data="Data/Benchmarks/phos.csv")
    randomized.supervised_training("mlp_adam")
    randomized.benchmark("Data/Benchmarks/phos.csv", "S")
    del randomized
"""
Explanation: Controlling for Random Negatve vs Sans Random in Imbalanced Techniques using S, T, and Y Phosphorylation.
Included is N Phosphorylation however no benchmarks are available, yet.
Training data is from phospho.elm and benchmarks are from dbptm.
End of explanation
"""
# Tyrosine (Y): same imbalance-strategy sweep as for S above;
# "y" = trained without random negatives, "x" = with random negatives
# drawn from the benchmark set.
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
    print("y", i)
    y = Predictor()
    y.load_data(file="Data/Training/clean_Y_filtered.csv")
    y.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i)
    y.supervised_training("mlp_adam")
    y.benchmark("Data/Benchmarks/phos.csv", "Y")
    del y
    print("x", i)
    x = Predictor()
    x.load_data(file="Data/Training/clean_Y_filtered.csv")
    x.process_data(vector_function="sequence", amino_acid="Y", imbalance_function=i, random_data="Data/Benchmarks/phos.csv")
    x.supervised_training("mlp_adam")
    x.benchmark("Data/Benchmarks/phos.csv", "Y")
    del x
"""
Explanation: Y Phosphorylation
End of explanation
"""
# Threonine (T): same imbalance-strategy sweep as for S and Y above;
# "y" = trained without random negatives, "x" = with random negatives
# drawn from the benchmark set.
par = ["pass", "ADASYN", "SMOTEENN", "random_under_sample", "ncl", "near_miss"]
for i in par:
    print("y", i)
    y = Predictor()
    y.load_data(file="Data/Training/clean_t_filtered.csv")
    y.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i)
    y.supervised_training("mlp_adam")
    y.benchmark("Data/Benchmarks/phos.csv", "T")
    del y
    print("x", i)
    x = Predictor()
    x.load_data(file="Data/Training/clean_t_filtered.csv")
    x.process_data(vector_function="sequence", amino_acid="T", imbalance_function=i, random_data="Data/Benchmarks/phos.csv")
    x.supervised_training("mlp_adam")
    x.benchmark("Data/Benchmarks/phos.csv", "T")
    del x
"""
Explanation: T Phosphorylation
End of explanation
"""
|
GoogleCloudPlatform/gcp-getting-started-lab-jp | machine_learning/cloud_ai_platform/bigquery_ml.ipynb | apache-2.0 | from google.colab import auth
# Authenticate the Colab user so the BigQuery cells below run with
# the user's GCP credentials.
auth.authenticate_user()
print('認証されました。')
"""
Explanation: <a href="https://colab.research.google.com/github/GoogleCloudPlatform/gcp-getting-started-lab-jp/blob/master/machine_learning/cloud_ai_platform/bigquery_ml.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
```
BigQuery ML で出生児の体重を予測
BigQueryの Natality データセットを使用して出生児の体重を予測します。
このデータセットには1969年から2008年までの米国の出生に関する詳細が含まれています。
BigQueryの詳細は BigQuery ドキュメント および ライブラリリファレンスドキュメントを参照してください。
事前準備
まだ作成していない場合は Google Cloud Platform プロジェクトを作成します。
課金設定 を有効にします。
BigQuery API を有効にします。
Google アカウントの認証を実行します。
下記のコードを実行すると、認証コードを取得するための画面へのリンクが表示されるので、そのリンク先へアクセスし、BigQuery への権限を持つアカウントで認証します。その後の画面で表示される認証コードをコピーして Colaboratory のテキストエリアへ入力します。
End of explanation
"""
project_id = 'your-project-id' #@param {type:"string"}
"""
Explanation: プロジェクト ID を設定します
End of explanation
"""
%%bigquery --project {project_id} data
-- Pull 500 natality rows (post-2000, all key fields valid) into the
-- local pandas DataFrame `data` for exploration.
SELECT *
FROM
  publicdata.samples.natality
WHERE
  year > 2000
  AND gestation_weeks > 0
  AND mother_age > 0
  AND plurality > 0
  AND weight_pounds > 0
LIMIT 500
import seaborn
from matplotlib import pyplot as plt
# Birth weight vs. mother's age, colored by plurality (number of babies).
fg = seaborn.FacetGrid(data=data, hue='plurality', size=6,aspect=1.67)
fg = fg.map(plt.scatter, 'mother_age' ,'weight_pounds').add_legend()
fg = fg.set_axis_labels(x_var="Mother's age", y_var="Weight pounds")
# Histograms of birth weight split by infant sex.
_ = data.hist(column='weight_pounds',by='is_male', layout=(1,2),
             sharey=True, sharex=True)
import numpy as np
# Scatter of weight vs. gestation weeks with a degree-1 polynomial fit.
x = data.gestation_weeks
y = data.weight_pounds
data.plot(kind="scatter",x="gestation_weeks",y="weight_pounds",
          figsize=[10,6], ylim=0, xlim=20)
z = np.polyfit(x, y, 1)
p = np.poly1d(z)
plt.plot(x,p(x),"r")
plt.title("Weight pounds by Gestation Weeks.")
plt.show()
"""
Explanation: データセットの確認
モデルを作成する前に、 Natality データセットの内容を確認していきましょう。
End of explanation
"""
%%bigquery --project {project_id} _df
SELECT
  weight_pounds, -- birth weight (target variable)
  is_male, -- sex of the child
  mother_age, -- mother's age
  plurality, -- number of children delivered together
  gestation_weeks -- weeks of gestation
FROM
  publicdata.samples.natality
WHERE
  year > 2000
  AND gestation_weeks > 0
  AND mother_age > 0
  AND plurality > 0
  AND weight_pounds > 0
LIMIT 10
"""
Explanation: 説明変数を決定する
データセットを見てみると、出生時体重を適切に予測するために活用できそうな、いくつかの列があります。BQMLでは、すべての文字列はカテゴリ値と見なされ、すべての数値型は連続値と見なされます。
End of explanation
"""
%%bash -s "$project_id"
# Point gcloud at the project and create a US-located BigQuery dataset
# named `demo` to hold the trained model.
gcloud config set project $1
bq --location=US mk -d demo
"""
Explanation: モデルのトレーニング
予測に使用する列を選択することで、BigQueryでモデルを作成(トレーニング)することが可能になります。まず、モデルを保存するためのデータセットが必要になります。 (エラーが発生した場合は、BigQueryコンソールからデータセットを作成してください)。
End of explanation
"""
%%bigquery --project {project_id}
-- Create (or replace) a linear regression model
CREATE or REPLACE MODEL demo.babyweight_model
OPTIONS
  (
    model_type='linear_reg', -- model type
    input_label_cols=['weight_pounds'], -- target column
    data_split_method='AUTO_SPLIT' -- automatic train/eval split
  ) AS
SELECT
  weight_pounds, -- birth weight (target variable)
  is_male, -- sex of the child
  mother_age, -- mother's age
  plurality, -- number of children delivered together
  gestation_weeks -- weeks of gestation
FROM
  publicdata.samples.natality -- the natality source dataset
WHERE
  year > 2000
  AND gestation_weeks > 0
  AND mother_age > 0
  AND plurality > 0
  AND weight_pounds > 0;
"""
Explanation: デモデータセットの準備が整ったら、線形回帰モデルを作成してモデルを訓練することができます。
実行には約 3分 かかります。
End of explanation
"""
%%bigquery --project {project_id}
-- Per-iteration training statistics (loss, learning rate, duration).
SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model)
"""
Explanation: 学習済みモデルの学習結果を確認する
End of explanation
"""
%%bigquery --project {project_id}
-- Evaluation metrics (MAE, MSE, R^2, ...) on the held-out split.
select * from ML.EVALUATE(MODEL demo.babyweight_model);
"""
Explanation: 学習済みモデルの性能を確認する
End of explanation
"""
%%bigquery --project {project_id}
-- Learned coefficients of the linear model, one row per feature.
SELECT * FROM ML.WEIGHTS(MODEL demo.babyweight_model)
"""
Explanation: 学習済みモデルのパラメータを確認する
End of explanation
"""
%%bigquery --project {project_id}
-- Predict birth weight for 10 sample rows; ML.PREDICT adds a
-- predicted_weight_pounds column alongside the input features.
WITH temp_data AS (
 SELECT
  weight_pounds,
  is_male,
  mother_age,
  plurality AS plurality,
  gestation_weeks
 FROM
   publicdata.samples.natality
 WHERE
   year > 2000
   AND gestation_weeks > 0
   AND mother_age > 0
   AND plurality > 0
   AND weight_pounds > 0
 LIMIT 10
)
SELECT * FROM ML.PREDICT(MODEL demo.babyweight_model,
    (SELECT * FROM temp_data))
"""
Explanation: BQML モデルで予測を実行
訓練されたモデルで値を予測することが可能になりました。
ml.predict関数を利用すると、モデルの出力予測列名はpredicted_ <label_column_name>になります。
End of explanation
"""
%%bigquery --project {project_id}
-- Predict the weight for one hypothetical case: a male singleton born
-- at 38 gestation weeks to a 28-year-old mother.
SELECT
  *
FROM
  ml.PREDICT(MODEL demo.babyweight_model,
      (SELECT
          TRUE AS is_male,
          28 AS mother_age,
          1 AS plurality,
          38 AS gestation_weeks))
"""
Explanation: 28歳の母親から38週で生まれた出生児の体重を以下のように予測してみます。
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.23/_downloads/e23ed246a9a354f899dfb3ce3b06e194/10_overview.ipynb | bsd-3-clause | import os
import numpy as np
import mne
"""
Explanation: Overview of MEG/EEG analysis with MNE-Python
This tutorial covers the basic EEG/MEG pipeline for event-related analysis:
loading data, epoching, averaging, plotting, and estimating cortical activity
from sensor data. It introduces the core MNE-Python data structures
~mne.io.Raw, ~mne.Epochs, ~mne.Evoked, and ~mne.SourceEstimate, and
covers a lot of ground fairly quickly (at the expense of depth). Subsequent
tutorials address each of these topics in greater detail.
We begin by importing the necessary Python modules:
End of explanation
"""
# Download (if needed) and locate the MNE "sample" dataset, then open the
# filtered raw recording lazily (data stay on disk until accessed).
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
"""
Explanation: Loading data
MNE-Python data structures are based around the FIF file format from
Neuromag, but there are reader functions for a wide variety of other
data formats <data-formats>. MNE-Python also has interfaces to a
variety of publicly available datasets <datasets>,
which MNE-Python can download and manage for you.
We'll start this tutorial by loading one of the example datasets (called
"sample-dataset"), which contains EEG and MEG data from one subject
performing an audiovisual experiment, along with structural MRI scans for
that subject. The mne.datasets.sample.data_path function will automatically
download the dataset if it isn't found in one of the expected locations, then
return the directory path to the dataset (see the documentation of
~mne.datasets.sample.data_path for a list of places it checks before
downloading). Note also that for this tutorial to run smoothly on our
servers, we're using a filtered and downsampled version of the data
(:file:sample_audvis_filt-0-40_raw.fif), but an unfiltered version
(:file:sample_audvis_raw.fif) is also included in the sample dataset and
could be substituted here when running the tutorial locally.
End of explanation
"""
# Short summary of the Raw object, then the full measurement metadata
# (channels, sampling rate, filters, projectors) stored in raw.info.
print(raw)
print(raw.info)
"""
Explanation: By default, ~mne.io.read_raw_fif displays some information about the file
it's loading; for example, here it tells us that there are four "projection
items" in the file along with the recorded data; those are :term:SSP
projectors <projector> calculated to remove environmental noise from the MEG
signals, plus a projector to mean-reference the EEG channels; these are
discussed in the tutorial tut-projectors-background. In addition to
the information displayed during loading, you can get a glimpse of the basic
details of a ~mne.io.Raw object by printing it; even more is available by
printing its info attribute (a dictionary-like object <mne.Info> that
is preserved across ~mne.io.Raw, ~mne.Epochs, and ~mne.Evoked objects).
The info data structure keeps track of channel locations, applied
filters, projectors, etc. Notice especially the chs entry, showing that
MNE-Python detects different sensor types and handles each appropriately. See
tut-info-class for more on the ~mne.Info class.
End of explanation
"""
# Power spectral density per sensor type (data are low-passed at 40 Hz),
# then an interactive browse of the first 30 channels of raw traces.
raw.plot_psd(fmax=50)
raw.plot(duration=5, n_channels=30)
"""
Explanation: ~mne.io.Raw objects also have several built-in plotting methods; here we
show the power spectral density (PSD) for each sensor type with
~mne.io.Raw.plot_psd, as well as a plot of the raw sensor traces with
~mne.io.Raw.plot. In the PSD plot, we'll only plot frequencies below 50 Hz
(since our data are low-pass filtered at 40 Hz). In interactive Python
sessions, ~mne.io.Raw.plot is interactive and allows scrolling, scaling,
bad channel marking, annotation, projector toggling, etc.
End of explanation
"""
# set up and fit the ICA
# (20 components; fixed random_state makes the decomposition reproducible)
ica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2]  # details on how we picked these are omitted here
ica.plot_properties(raw, picks=ica.exclude)
"""
Explanation: Preprocessing
MNE-Python supports a variety of preprocessing approaches and techniques
(maxwell filtering, signal-space projection, independent components analysis,
filtering, downsampling, etc); see the full list of capabilities in the
:mod:mne.preprocessing and :mod:mne.filter submodules. Here we'll clean
up our data by performing independent components analysis
(~mne.preprocessing.ICA); for brevity we'll skip the steps that helped us
determined which components best capture the artifacts (see
tut-artifact-ica for a detailed walk-through of that process).
End of explanation
"""
# Keep an unmodified copy so we can compare before/after artifact removal;
# ICA.apply requires the data in memory, hence load_data() first.
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
# show some frontal channels to clearly illustrate the artifact removal
chs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',
       'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',
       'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',
       'EEG 007', 'EEG 008']
chan_idxs = [raw.ch_names.index(ch) for ch in chs]
orig_raw.plot(order=chan_idxs, start=12, duration=4)
raw.plot(order=chan_idxs, start=12, duration=4)
"""
Explanation: Once we're confident about which component(s) we want to remove, we pass them
as the exclude parameter and then apply the ICA to the raw signal. The
~mne.preprocessing.ICA.apply method requires the raw data to be loaded into
memory (by default it's only read from disk as-needed), so we'll use
~mne.io.Raw.load_data first. We'll also make a copy of the ~mne.io.Raw
object so we can compare the signal before and after artifact removal
side-by-side:
End of explanation
"""
# Decode stimulus triggers from the summation STIM channel; each row of
# `events` is (sample index, previous value, event ID).
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])  # show the first 5
"""
Explanation: Detecting experimental events
The sample dataset includes several :term:"STIM" channels <stim channel>
that recorded electrical signals sent from the stimulus delivery computer (as
brief DC shifts / squarewave pulses). These pulses (often called "triggers")
are used in this dataset to mark experimental events: stimulus onset,
stimulus type, and participant response (button press). The individual STIM
channels are combined onto a single channel, in such a way that voltage
levels on that channel can be unambiguously decoded as a particular event
type. On older Neuromag systems (such as that used to record the sample data)
this summation channel was called STI 014, so we can pass that channel
name to the mne.find_events function to recover the timing and identity of
the stimulus events.
End of explanation
"""
# Map integer trigger codes to condition labels; the '/' in the keys lets
# partial selectors pool conditions (e.g. epochs['auditory'] -> IDs 1 and 2).
event_dict = {
    'auditory/left': 1,
    'auditory/right': 2,
    'visual/left': 3,
    'visual/right': 4,
    'smiley': 5,
    'buttonpress': 32,
}
"""
Explanation: The resulting events array is an ordinary 3-column :class:NumPy array
<numpy.ndarray>, with sample number in the first column and integer event ID
in the last column; the middle column is usually ignored. Rather than keeping
track of integer event IDs, we can provide an event dictionary that maps
the integer IDs to experimental conditions or events. In this dataset, the
mapping looks like this:
+----------+----------------------------------------------------------+
| Event ID | Condition |
+==========+==========================================================+
| 1 | auditory stimulus (tone) to the left ear |
+----------+----------------------------------------------------------+
| 2 | auditory stimulus (tone) to the right ear |
+----------+----------------------------------------------------------+
| 3 | visual stimulus (checkerboard) to the left visual field |
+----------+----------------------------------------------------------+
| 4 | visual stimulus (checkerboard) to the right visual field |
+----------+----------------------------------------------------------+
| 5 | smiley face (catch trial) |
+----------+----------------------------------------------------------+
| 32 | subject button press |
+----------+----------------------------------------------------------+
End of explanation
"""
# Visualize when each event type occurred across the recording
# (x-axis in seconds thanks to sfreq/first_samp).
fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw.info['sfreq'],
                          first_samp=raw.first_samp)
"""
Explanation: Event dictionaries like this one are used when extracting epochs from
continuous data; the / character in the dictionary keys allows pooling
across conditions by requesting partial condition descriptors (i.e.,
requesting 'auditory' will select all epochs with Event IDs 1 and 2;
requesting 'left' will select all epochs with Event IDs 1 and 3). An
example of this is shown in the next section. There is also a convenient
~mne.viz.plot_events function for visualizing the distribution of events
across the duration of the recording (to make sure event detection worked as
expected). Here we'll also make use of the ~mne.Info attribute to get the
sampling frequency of the recording (so our x-axis will be in seconds instead
of in samples).
End of explanation
"""
# Peak-to-peak rejection thresholds per channel type; any epoch exceeding
# one of these amplitudes is dropped during epoching.
reject_criteria = {
    'mag': 4000e-15,   # 4000 fT
    'grad': 4000e-13,  # 4000 fT/cm
    'eeg': 150e-6,     # 150 µV
    'eog': 250e-6,     # 250 µV
}
"""
Explanation: For paradigms that are not event-related (e.g., analysis of resting-state
data), you can extract regularly spaced (possibly overlapping) spans of data
by creating events using mne.make_fixed_length_events and then proceeding
with epoching as described in the next section.
Epoching continuous data
The ~mne.io.Raw object and the events array are the bare minimum needed to
create an ~mne.Epochs object, which we create with the ~mne.Epochs class
constructor. Here we'll also specify some data quality constraints: we'll
reject any epoch where peak-to-peak signal amplitude is beyond reasonable
limits for that channel type. This is done with a rejection dictionary; you
may include or omit thresholds for any of the channel types present in your
data. The values given here are reasonable for this particular dataset, but
may need to be adapted for different hardware or recording conditions. For a
more automated approach, consider using the autoreject package_.
End of explanation
"""
# Epoch -0.2..0.5 s around each event; preload=True loads the data now so
# the peak-to-peak rejection criteria are applied immediately.
epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.2, tmax=0.5,
                    reject=reject_criteria, preload=True)
"""
Explanation: We'll also pass the event dictionary as the event_id parameter (so we can
work with easy-to-pool event labels instead of the integer event IDs), and
specify tmin and tmax (the time relative to each event at which to
start and end each epoch). As mentioned above, by default ~mne.io.Raw and
~mne.Epochs data aren't loaded into memory (they're accessed from disk only
when needed), but here we'll force loading into memory using the
preload=True parameter so that we can see the results of the rejection
criteria being applied:
End of explanation
"""
# Equalize epoch counts across the four conditions (random subsampling),
# then pool left/right into auditory vs. visual via partial selectors.
conds_we_care_about = ['auditory/left', 'auditory/right',
                       'visual/left', 'visual/right']
epochs.equalize_event_counts(conds_we_care_about)  # this operates in-place
aud_epochs = epochs['auditory']
vis_epochs = epochs['visual']
del raw, epochs  # free up memory
"""
Explanation: Next we'll pool across left/right stimulus presentations so we can compare
auditory versus visual responses. To avoid biasing our signals to the left or
right, we'll use ~mne.Epochs.equalize_event_counts first to randomly sample
epochs from each condition to match the number of epochs present in the
condition with the fewest good epochs.
End of explanation
"""
aud_epochs.plot_image(picks=['MEG 1332', 'EEG 021'])
"""
Explanation: Like ~mne.io.Raw objects, ~mne.Epochs objects also have a number of
built-in plotting methods. One is ~mne.Epochs.plot_image, which shows each
epoch as one row of an image map, with color representing signal magnitude;
the average evoked response and the sensor location are shown below the
image:
End of explanation
"""
# Induced power via Morlet wavelets for 7-28 Hz in 3 Hz steps over the
# auditory epochs; decim=3 downsamples the TFR in time to save memory.
frequencies = np.arange(7, 30, 3)
power = mne.time_frequency.tfr_morlet(aud_epochs, n_cycles=2, return_itc=False,
                                      freqs=frequencies, decim=3)
power.plot(['MEG 1332'])
"""
Explanation: <div class="alert alert-info"><h4>Note</h4><p>Both `~mne.io.Raw` and `~mne.Epochs` objects have `~mne.Epochs.get_data`
methods that return the underlying data as a
:class:`NumPy array <numpy.ndarray>`. Both methods have a ``picks``
parameter for subselecting which channel(s) to return; ``raw.get_data()``
has additional parameters for restricting the time domain. The resulting
matrices have dimension ``(n_channels, n_times)`` for `~mne.io.Raw` and
``(n_epochs, n_channels, n_times)`` for `~mne.Epochs`.</p></div>
Time-frequency analysis
The :mod:mne.time_frequency submodule provides implementations of several
algorithms to compute time-frequency representations, power spectral density,
and cross-spectral density. Here, for example, we'll compute for the auditory
epochs the induced power at different frequencies and times, using Morlet
wavelets. On this dataset the result is not especially informative (it just
shows the evoked "auditory N100" response); see here
<inter-trial-coherence> for a more extended example on a dataset with richer
frequency content.
End of explanation
"""
# Average epochs within each condition and compare the evoked responses.
aud_evoked = aud_epochs.average()
vis_evoked = vis_epochs.average()
mne.viz.plot_compare_evokeds(dict(auditory=aud_evoked, visual=vis_evoked),
                             legend='upper left', show_sensors='upper right')
"""
Explanation: Estimating evoked responses
Now that we have our conditions in aud_epochs and vis_epochs, we can
get an estimate of evoked responses to auditory versus visual stimuli by
averaging together the epochs in each condition. This is as simple as calling
the ~mne.Epochs.average method on the ~mne.Epochs object, and then using
a function from the :mod:mne.viz module to compare the global field power
for each sensor type of the two ~mne.Evoked objects:
End of explanation
"""
# EEG-only views of the auditory response: butterfly + topomaps (plot_joint),
# then scalp topographies at a few selected latencies.
aud_evoked.plot_joint(picks='eeg')
aud_evoked.plot_topomap(times=[0., 0.08, 0.1, 0.12, 0.2], ch_type='eeg')
"""
Explanation: We can also get a more detailed view of each ~mne.Evoked object using other
plotting methods such as ~mne.Evoked.plot_joint or
~mne.Evoked.plot_topomap. Here we'll examine just the EEG channels, and see
the classic auditory evoked N100-P200 pattern over dorso-frontal electrodes,
then plot scalp topographies at some additional arbitrary times:
End of explanation
"""
# Auditory-minus-visual difference wave, plotted at every magnetometer.
evoked_diff = mne.combine_evoked([aud_evoked, vis_evoked], weights=[1, -1])
evoked_diff.pick_types(meg='mag').plot_topo(color='r', legend=False)
"""
Explanation: Evoked objects can also be combined to show contrasts between conditions,
using the mne.combine_evoked function. A simple difference can be
generated by passing weights=[1, -1]. We'll then plot the difference wave
at each sensor using ~mne.Evoked.plot_topo:
End of explanation
"""
# Minimum-norm source estimate from a precomputed inverse operator.
# load inverse operator
inverse_operator_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                     'sample_audvis-meg-oct-6-meg-inv.fif')
inv_operator = mne.minimum_norm.read_inverse_operator(inverse_operator_file)
# set signal-to-noise ratio (SNR) to compute regularization parameter (λ²)
snr = 3.
lambda2 = 1. / snr ** 2
# generate the source time course (STC)
stc = mne.minimum_norm.apply_inverse(vis_evoked, inv_operator,
                                     lambda2=lambda2,
                                     method='MNE')  # or dSPM, sLORETA, eLORETA
"""
Explanation: Inverse modeling
Finally, we can estimate the origins of the evoked activity by projecting the
sensor data into this subject's :term:source space (a set of points either
on the cortical surface or within the cortical volume of that subject, as
estimated by structural MRI scans). MNE-Python supports lots of ways of doing
this (dynamic statistical parametric mapping, dipole fitting, beamformers,
etc.); here we'll use minimum-norm estimation (MNE) to generate a continuous
map of activation constrained to the cortical surface. MNE uses a linear
:term:inverse operator to project EEG+MEG sensor measurements into the
source space. The inverse operator is computed from the
:term:forward solution for this subject and an estimate of the
covariance of sensor measurements <tut_compute_covariance>. For this
tutorial we'll skip those computational steps and load a pre-computed inverse
operator from disk (it's included with the sample data
<sample-dataset>). Because this "inverse problem" is underdetermined (there
is no unique solution), here we further constrain the solution by providing a
regularization parameter specifying the relative smoothness of the current
estimates in terms of a signal-to-noise ratio (where "noise" here is akin to
baseline activity level across all of cortex).
End of explanation
"""
# path to subjects' MRI files
subjects_dir = os.path.join(sample_data_folder, 'subjects')
# plot the source estimate on both hemispheres, lateral + medial views
stc.plot(initial_time=0.1, hemi='split', views=['lat', 'med'],
         subjects_dir=subjects_dir)
"""
Explanation: Finally, in order to plot the source estimate on the subject's cortical
surface we'll also need the path to the sample subject's structural MRI files
(the subjects_dir):
End of explanation
"""
|
aufziehvogel/kaggle | quora-question-pairs/notebooks/1.0-sk-initial-overview.ipynb | mit | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# Training pairs: id, qid1/qid2, the two question texts, and the
# is_duplicate label.
df = pd.read_csv('../data/raw/train.csv')
df.head()
"""
Explanation: Initial Overview
First we want to have a look at the data.
End of explanation
"""
# Stack both question columns into one Series and measure text lengths.
questions = pd.concat([df['question1'], df['question2']])
df_combined = pd.DataFrame({'question': questions})
# There seems to be some error in the loaded data, we should investigate later (some value seems to be float)
# NOTE(review): the "float" values are likely NaN from missing questions;
# apply(str) turns them into the literal string 'nan' -- confirm.
df_combined['question'] = df_combined['question'].apply(str)
df_combined['text_length'] = df_combined['question'].apply(len)
# Show the single longest question text.
df_combined.sort_values(by='text_length', ascending=False).iloc[0]['question']
"""
Explanation: Ok, so we're getting a pretty simple input format: Row-ID, Question-ID 1 and 2, the titles for question 1 and 2 and a marker if this question is a duplicate. According to the Kaggle competition question1 and question2 are the full text of the question. So let's see if this is really full text or just the title by looking at the longest sample we have.
I am wondering whether this list is fully connected between all questions or just paired randomly, and whether some of those questions appear in the data multiple times.
End of explanation
"""
# How often does each question ID appear across all pairs?
question_ids = pd.concat([df['qid1'], df['qid2']])
df_combined = pd.Series(question_ids)
df_combined.value_counts().sort_values(ascending=False).head()
"""
Explanation: According to my knowledge with quora, this is indeed a full text question. I am just wondering, if there is no short title for this question. As far as I know, each question has a short title (and some additionally have a long description like this).
End of explanation
"""
# Same frequency count as above, but keyed by question text instead of ID.
questions = pd.concat([df['question1'], df['question2']])
df_combined = pd.Series(questions)
df_combined.value_counts().sort_values(ascending=False).head()
"""
Explanation: Yes, some of them are there multiple times, but not too often. Let's see if the IDs really match the texts.
End of explanation
"""
# Pairs where both sides carry this exact title -- a self-duplicate check.
question_title = 'What are the best ways to lose weight?'
df[(df['question1'] == question_title) & (df['question2'] == question_title)]
"""
Explanation: We see that there seem to be some question with different ID, but the same title. If we're lucky, there is a match of those and they are in the data set as duplicates.
End of explanation
"""
# Total occurrences per qid carrying this title, summed over both
# question1 and question2 positions (fill_value=0 keeps one-sided qids).
ids1 = df[(df['question1'] == question_title)]['qid1'].value_counts()
ids2 = df[(df['question2'] == question_title)]['qid2'].value_counts()
ids1.add(ids2, fill_value=0)
"""
Explanation: Unfortunately, they are not. So let's at least verify the ID of the second question with this title to make sure that there is nothing wrong with our counting code.
End of explanation
"""
# Count distinct question IDs versus the number of labelled pairs.
questions = len(pd.concat([df['qid1'], df['qid2']]).unique())
samples = len(df)
print('%d questions and %d samples' % (questions, samples))
"""
Explanation: The question IDs are fine, there are four questions with the same title, but only one of them occurs in a lot of matches in this duplicate list.
Let's finally check how many unique questions and how many samples we got.
End of explanation
"""
sns.countplot(df['is_duplicate']);
"""
Explanation: There are two questions per sample, so there will be a lot of questions which only occur a single time in the whole data set.
Number of Duplicates
Let's see how often they decided that two questions are duplicates in this dataset and how often not. This is important to make sure that our model will not be biased by the data it has seen (e.g. get an accuracy score of 99% by just betting "no", just because 99% of the training data is "no").
End of explanation
"""
|
ioannispartalas/Kaggle | WhatsCooking/whats_cooking.ipynb | gpl-3.0 | train = pd.read_json("train.json")
# Horizontal bar chart of recipe counts per cuisine, smallest first.
matplotlib.style.use('ggplot')
cuisine_group = train.groupby('cuisine')
cuisine_group.size().sort_values(ascending=True).plot.barh()
plt.show()
"""
Explanation: Let's do a quick inspection of the data by plotting the distribution of the different types of cuisines in the dataset.
End of explanation
"""
# Build the training matrix: join each recipe's ingredients into one string
# (cleaned by `preprocess`, defined elsewhere, and lemmatized per word),
# then tf-idf vectorize with unigrams+bigrams and sublinear tf scaling.
lemmatizer = WordNetLemmatizer()
train = pd.read_json("train.json")
train['ing'] = [' '.join([lemmatizer.lemmatize(preprocess(ingr)) for ingr in recette]).strip() for recette in train['ingredients']]
tfidf = TfidfVectorizer(sublinear_tf=True,max_df=0.5,ngram_range=(1,2),stop_words='english',norm='l2',binary=False)
tfidf.fit(train['ing'])
X_train = tfidf.transform(train['ing'])
y_train = train['cuisine']
# encode string labels
lenc = LabelEncoder()
lenc.fit(y_train)
y_train_enc = lenc.transform(y_train)
#power normalization
# Square-root scaling dampens frequent terms further; then re-normalize to l2.
X_train.data**=0.5
normalize(X_train,copy=False)
"""
Explanation: Italian and mexican categories dominate the recipes dataset. We may want later to take this into account in order to make the problem more balanced.
We start by performing basic preprocessing and lemmatizing the words in the indredients part. Then we vectorize by using the <a href="https://en.wikipedia.org/wiki/Tf%E2%80%93idf">$td-idf$</a> representation. Note, that we use as features unigrams and bigrams.
End of explanation
"""
# Apply the identical preprocessing + power scaling to the test set, then
# cross-validate a linear SVM on the training matrix.
test = pd.read_json("test.json")
test['ing'] = [' '.join([lemmatizer.lemmatize(preprocess(ingr)) for ingr in recette]).strip() for recette in test['ingredients']]
X_test = tfidf.transform(test['ing'])
X_test.data**=0.5
normalize(X_test,copy=False)
categories = train['cuisine'].unique()
clf = LinearSVC(C=0.5,multi_class='ovr',dual=True)
crossValidateClassifier(X_train,y_train,clf)
"""
Explanation: Note that here we user power scaling which reduces further the effect of frequent terms. After the scaling we re-normalize the data. We use the square root as default value, but one should optimize this value through random search.
In the following we apply the same transformation on the test data.
End of explanation
"""
# Baseline comparison: cross-validate a logistic regression model.
clf = LogisticRegression(C=10.0)
crossValidateClassifier(X_train,y_train,clf)
"""
Explanation: We choose Support Vector Machines in order to train the model, as they provide state-of-the-art results in text classification problems. The cross-validation gives an average of 79.19% in terms of accuracy. Let's try a logistic regression model.
End of explanation
"""
# Train the selected model on the full training set, predict the test set,
# and write the Kaggle submission file.
# NOTE(review): the filename prefix 'lr' suggests logistic regression, but
# the classifier used here is LinearSVC -- confirm which was intended.
clf = LinearSVC(C=0.5,multi_class='ovr',dual=True)
test['cuisine']=train_and_test(clf,X_train,y_train,X_test)
test[['id','cuisine']].to_csv("lr_c0.5_power_norm.csv",index=False)
"""
Explanation: Accuracy is slightly smaller than SVM's. One should normally try a search (grid/random) in the parameters space for each classifier in order to select the best one.
Great, now we are ready to train the model selected and make predictions for the test set. This will give a descent score of 79.31% in the leaderboard. For my final solution I used Vowpal Wabbit with SGD as a base classifier and quadratic features which was sufficient for getting 14th place.
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/51cca4c9f4bd40623cb6bfa890e2eb4b/20_erp_stats.ipynb | bsd-3-clause | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
import mne
from mne.channels import find_ch_adjacency, make_1020_channel_selections
from mne.stats import spatio_temporal_cluster_test
np.random.seed(0)
# Load the data
path = mne.datasets.kiloword.data_path() / 'kword_metadata-epo.fif'
epochs = mne.read_epochs(path)
# These data are quite smooth, so to speed up processing we'll (unsafely!) just
# decimate them
epochs.decimate(4, verbose='error')
name = "NumberOfLetters"
# Split up the data by the median length in letters via the attached metadata.
# Note the strict inequalities: words of exactly median length fall in
# neither group.
median_value = str(epochs.metadata[name].median())
long_words = epochs[name + " > " + median_value]
short_words = epochs[name + " < " + median_value]
"""
Explanation: Visualising statistical significance thresholds on EEG data
MNE-Python provides a range of tools for statistical hypothesis testing
and the visualisation of the results. Here, we show a few options for
exploratory and confirmatory tests - e.g., targeted t-tests, cluster-based
permutation approaches (here with Threshold-Free Cluster Enhancement);
and how to visualise the results.
The underlying data comes from :footcite:DufauEtAl2015; we contrast long vs.
short words. TFCE is described in :footcite:SmithNichols2009.
End of explanation
"""
# A priori regions/windows of interest: two time windows and three
# midline electrodes.
time_windows = ((.2, .25), (.35, .45))
elecs = ["Fz", "Cz", "Pz"]
index = ['condition', 'epoch', 'time']
# display the EEG data in Pandas format (first 5 rows)
print(epochs.to_data_frame(index=index)[elecs].head())
report = "{elec}, time: {tmin}-{tmax} s; t({df})={t_val:.3f}, p={p:.3f}"
print("\nTargeted statistical test results:")
for (tmin, tmax) in time_windows:
    long_df = long_words.copy().crop(tmin, tmax).to_data_frame(index=index)
    short_df = short_words.copy().crop(tmin, tmax).to_data_frame(index=index)
    for elec in elecs:
        # extract data: mean amplitude within the window, one value per word
        A = long_df[elec].groupby("condition").mean()
        B = short_df[elec].groupby("condition").mean()
        # conduct an independent-samples t test (long vs. short words)
        t, p = ttest_ind(A, B)
        # display results
        format_dict = dict(elec=elec, tmin=tmin, tmax=tmax,
                           df=len(epochs.events) - 2, t_val=t, p=p)
        print(report.format(**format_dict))
"""
Explanation: If we have a specific point in space and time we wish to test, it can be
convenient to convert the data into Pandas Dataframe format. In this case,
the :class:mne.Epochs object has a convenient
:meth:mne.Epochs.to_data_frame method, which returns a dataframe.
This dataframe can then be queried for specific time windows and sensors.
The extracted data can be submitted to standard statistical tests. Here,
we conduct t-tests on the difference between long and short words.
End of explanation
"""
# Calculate adjacency matrix between sensors from their locations
adjacency, _ = find_ch_adjacency(epochs.info, "eeg")
# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [long_words.get_data().transpose(0, 2, 1),
     short_words.get_data().transpose(0, 2, 1)]
tfce = dict(start=.4, step=.4) # ideally start and step would be smaller
# Calculate statistical thresholds
t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
    X, tfce, adjacency=adjacency,
    n_permutations=100) # a more standard number would be 1000+
# Boolean mask of points with p < .05; reshaped to t_obs's layout and
# transposed (presumably to channels-by-times for plotting -- confirm).
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")
"""
Explanation: Absent specific hypotheses, we can also conduct an exploratory
mass-univariate analysis at all sensors and time points. This requires
correcting for multiple tests.
MNE offers various methods for this; amongst them, cluster-based permutation
methods allow deriving power from the spatio-temporal correlation structure
of the data. Here, we use TFCE.
End of explanation
"""
# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked([long_words.average(), short_words.average()],
                            weights=[1, -1]) # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
                  topomap_args=time_unit) # show difference wave
# Create ROIs by checking channel labels
selections = make_1020_channel_selections(evoked.info, midline="12z")
# Visualize the results: one image per ROI, significant points unmasked
fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
evoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,
                  mask=significant_points, show_names="all", titles=None,
                  **time_unit)
# One shared colorbar for all three ROI images.
plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=.3,
             label="µV")
plt.show()
"""
Explanation: The results of these mass univariate analyses can be visualised by plotting
:class:mne.Evoked objects as images (via :class:mne.Evoked.plot_image)
and masking points for significance.
Here, we group channels by Regions of Interest to facilitate localising
effects on the head.
End of explanation
"""
|
wrightaprilm/squamates | ExploratoryNotebooks/heatmap.ipynb | mit | import pandas as pd
from pandas import *
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: This script generates a heatmap from data indicating the probability of oviparity as the root state of squamates as a function of model parameters.
End of explanation
"""
# Read the raw heatmap values: columns A and B are the two model
# parameters, C is the probability to plot.
# NOTE(review): modern pandas expects header=None rather than
# header=False -- confirm the pandas version this was written for.
data = pd.read_csv("../Data/Heatmap/TO.csv", index_col = False, header = False)
data.columns = ['A','B','C']
data
"""
Explanation: First, let's get some data.
End of explanation
"""
# Pivot the long-format (A, B, C) rows into a grid with A down the rows
# and B across the columns -- the layout pcolor expects.
h_data = data.pivot(index='A', columns='B', values='C')
# Patch in one missing cell by label.
# NOTE(review): .ix was removed in pandas 1.0; use .loc on current versions.
h_data.ix[0.1,0.1] = .036
h_data
plt.pcolor(h_data,cmap=plt.cm.RdBu,edgecolors='k')
# Offset tick labels by half a cell so they sit centered on the cells.
plt.xticks(np.arange(0.5, len(h_data.columns), 1), h_data.columns)
plt.yticks(np.arange(0.5, len(h_data.index), 1), h_data.index)
cbar = plt.colorbar()
plt.tight_layout()
plt.savefig('TO.svg', bbox_inches='tight', dpi=300)
"""
Explanation: If you're like me, generally, you store data one variable per column. This isn't ideal for heatmaps. Matplotlib's heatmap assumes data have one variable down the x-axis of the spreadsheet and one along the top, or y-axis. Pandas can melt our data to be in this format.
End of explanation
"""
|
jaimefrio/pydatabcn2017 | taking_numpy_in_stride/Taking NumPy In Stride - Student Version.ipynb | unlicense | a = np.arange(3)
type(a)
"""
Explanation: Array views and slicing
A NumPy array is an object of numpy.ndarray type:
End of explanation
"""
# An array that owns its memory has .base None; a slice is a view whose
# .base refers back to the array it was taken from.
a = np.arange(3)
a.base is None
a[:].base is None
"""
Explanation: All ndarrays have a .base attribute.
If this attribute is not None, then the array is a view of some other object's memory, typically another ndarray.
This is a very powerful tool, because allocating memory and copying memory contents are expensive operations, but updating metadata on how to interpret some already allocated memory is cheap!
The simplest way of creating an array's view is by slicing it:
End of explanation
"""
# Dump the array's low-level metadata (shape, strides, data pointer, ...).
np.info(a)
"""
Explanation: Let's look more closely at what an array's metadata looks like. NumPy provides the np.info function, which can list for us some low level attributes of an array:
End of explanation
"""
def info_for_two(one_array, another_array):
    """Print the np.info reports of two arrays side by side, one field per line."""
    def parse_info(array):
        """Run np.info on *array* and parse its report into an ordered mapping."""
        import collections
        import io
        sink = io.StringIO()
        np.info(array, output=sink)
        parsed = collections.OrderedDict()
        for line in sink.getvalue().strip().split('\n'):
            # Each report line looks like "field:  value".
            field, value = line.split(':')
            parsed[field] = value.strip()
        return parsed

    left = parse_info(one_array)
    right = parse_info(another_array)
    # Column widths: longest field name, longest value on each side.
    field_w = max(len(field) for field in left.keys())
    left_w = max(len(text) for text in left.values())
    right_w = max(len(text) for text in right.values())
    rows = [
        f'{field:<{field_w}} : {a:>{left_w}} : {b:>{right_w}}'
        for field, a, b in zip(left.keys(), left.values(), right.values())
    ]
    print('\n'.join(rows))
"""
Explanation: By the end of the workshop you will understand what most of these mean.
But rather than listen through a lesson, you get to try and figure what they mean yourself.
To help you with that, here's a function that prints the information from two arrays side by side:
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 1.
Create a one dimensional NumPy array with a few items (consider using np.arange).
Compare the printout of np.info on your array and on slices of it (use the [start:stop:step] indexing syntax, and make sure to try steps other than one).
Do you see any patterns?
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 1 debrief
Every array has an underlying block of memory assigned to it.
When we slice an array, rather than making a copy of it, NumPy makes a view, reusing the memory block, but interpreting it differently.
Lets take a look at what NumPy did for us in the above examples, and make sense of some of the changes to info.
shape: for a one dimensional array shape is a single item tuple, equal to the total number of items in the array. You can get the shape of an array as its .shape attribute.
strides: is also a single item tuple for one-dimensional arrays, its value being the number of bytes to skip in memory to get to the next item. And yes, strides can be negative. You can get this as the .strides attribute of any array.
data pointer: this is the address in memory of the first byte of the first item of the array. Note that this doesn't have to be the same as the first byte of the underlying memory block! You rarely need to know the exact address of the data pointer, but it's part of the string representation of the arrays .data attribute.
itemsize: this isn't properly an attribute of the array, but of it's data type. It is the number of bytes that an array item takes up in memory. You can get this value from an array as the .itemsize attribute of its .dtype attribute, i.e. array.dtype.itemsize.
type: this lets us know how each array item should be interpreted e.g. for calculations. We'll talk more about this later, but you can get an array's type object through its .dtype attribute.
contiguous: this is one of several boolean flags of an array. Its meaning is a little more specific, but for now lets say it tells us whether the array items use the memory block efficiently, without leaving unused spaces between items. It's value can be checked as the .contiguous attribute of the arrays .flags attribute
Exercise 2
Take a couple or minutes to familiarize yourself with the NumPy array's attributes discussed above:
Create a small one dimensional array of your choosing.
Look at its .shape, .strides, .dtype, .flags and .data attributes.
For .dtype and .flags, store them into a separate variable, and use tab completion on those to explore their subattributes.
End of explanation
"""
# Your code goes here
"""
Explanation: A look at data types
Similarly to how we can change the shape, strides and data pointer of an array through slicing, we can change how it's items are interpreted by changing it's data type.
This is done by calling the array's .view() method, and passing it the new data type.
But before we go there, lets look a little closer at dtypes. You are hopefully familiar with the basic NumPy numerical data types:
| Type Family | NumPy Defined Types | Character Codes |
| :---: |
| boolean | np.bool | '?' |
| unsigned integers | np.uint8 - np.uint64 | 'u1', 'u2', 'u4', 'u8' |
| signed integers | np.int8 - np.int64 | 'i1', 'i2', 'i4', 'i8' |
| floating point | np.float16 - np.float128 | 'f2', 'f4', 'f8', 'f16' |
| complex | np.complex64, np.complex128 | 'c8', 'c16' |
You can create a new data type by calling its constructor, np.dtype(), with either a NumPy defined type, or the character code.
Character codes can have '<' or '>' prepended, to indicate whether the type is little or big endian. If unspecified, native encoding is used, which for all practical purposes is going to be little endian.
Exercise 3
Let's play a little with dtype views:
Create a simple array of a type you feel comfortable you understand, e.g. np.arange(4, dtype=np.uint16).
Take a view of type np.uint8 of your array. This will give you the raw byte contents of your array. Is this what you were expecting?
Take a few views of your array, with dtypes of larger itemsize, or changing the endianess of the data type. Try to predict what the output will be before running the examples.
Take a look at the wikipedia page on single precision floating point numbers, more specifically its examples of encodings. Create arrays of four np.uint8 values which, when viewed as a np.float32 give the values 1, -2, and 1/3.
End of explanation
"""
# Your code goes here
"""
Explanation: The Constructor They Don't Want You To Know About.
You typically construct your NumPy arrays using one of the many factory fuctions provided, np.array() being the most popular.
But it is also possible to call the np.ndarray object constructor directly.
You will typically not want to do this, because there are probably simpler alternatives.
But it is a great way of putting your understanding of views of arrays to the test!
You can check the full documentation, but the np.ndarray constructor takes the following arguments that we care about:
shape: the shape of the returned array,
dtype: the data type of the returned array,
buffer: an object to reuse the underlying memory from, e.g. an existing array or its .data attribute,
offset: by how many bytes to move the starting data pointer of the returned array relative to the passed buffer,
strides: the strides of the returned array.
Exercise 4
Write a function, using the np.ndarray constructor, that takes a one dimensional array and returns a reversed view of it.
End of explanation
"""
# Your code goes here
"""
Explanation: Reshaping Into Higher Dimensions
So far we have sticked to one dimensional arrays. Things get substantially more interesting when we move into higher dimensions.
One way of getting views with a different number of dimensions is by using the .reshape() method of NumPy arrays, or the equivalent np.reshape() function.
The first argument to any of the reshape functions is the new shape of the array. When providing it, keep in mind:
the total size of the array must stay unchanged, i.e. the product of the values of the new shape tuple must be equal to the product of the values of the old shape tuple.
by entering -1 for one of the new dimensions, you can have NumPy compute its value for you, but the other dimensions must be compatible with the calculated one being an integer.
.reshape() can also take an order= kwarg, which can be set to 'C' (as the programming language) or 'F' (for the Fortran programming language). This correspond to row and column major orders, respectively.
Exercise 5
Let's look at how multidimensional arrays are represented in NumPy with an exercise.
Create a small linear array with a total length that is a multiple of two different small primes, e.g. 6 = 2 * 3.
Reshape the array into a two dimensional one, starting with the default order='C'. Try both possible combinations of rows and columns, e.g. (2, 3) and (3, 2). Look at the resulting arrays, and compare their metadata. Do you understand what's going on?
Try the same reshaping with order='F'. Can you see what the differences are?
If you feel confident with these, give a higher dimensional array a try.
End of explanation
"""
# Group-wise aggregation via reshape: each row of the (4, 3) view holds
# three consecutive items, so summing over the last axis gives the sum
# of every group of three.
a = np.arange(12, dtype=float)
a
a.reshape(4, 3).sum(axis=-1)
"""
Explanation: Exercise 5 debrief
As the examples show, an n-dimensional array will have an n item tuple .shape and .strides. The number of dimensions can be directly queried from the .ndim attribute.
The shape tells us how large the array is along each dimension, the strides tell us how many bytes to skip in memory to get to the next item along each dimension.
When we reshape an array using C order, a.k.a. row major order, items along higher dimensions are closer in memory. When we use Fortran order, a.k.a. column major order, it is items along smaller dimensions that are closer.
Reshaping with a purpose
One typical use of reshaping is to apply some aggregation function to equal subdivision of an array.
Say you have, e.g. a 12 item 1D array, and would like to compute the sum of every three items. This is how this is typically accomplished:
End of explanation
"""
# Same reshape trick with a different aggregation: per-group variance.
a.reshape(4, 3).var(axis=-1)
"""
Explanation: You can apply fancier functions than .sum(), e.g. let's compute the variance of each group:
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 6
Your turn to do a fancier reshaping: we will compute the average of a 2D array over non-overlapping rectangular patches:
Choose to small numbers m and n, e.g. 3 and 4.
Create a 2D array, with number of rows a multiple of one of those numbers, and number of columns a multiple of the other, e.g. 15 x 24.
Reshape and aggregate to create a 2D array holding the sums over non overlapping m x n tiles, e.g. a 5 x 6 array.
Hint: .sum() can take a tuple of integers as axis=, so you can do the whole thing in a single reshape from 2D to 4D, then aggregate back to 2D. If you find this confusing, doing two aggregations will also work.
End of explanation
"""
# Your code goes here
"""
Explanation: Rearranging dimensions
Once we have a multidimensional array, rearranging the order of its dimensions is as simple as rearranging its .shape and .tuple attributes. You could do this with np.ndarray, but it would be a pain. NumPy has a bunch of functions for doing that, but they are all watered down versions of np.transpose, which takes a tuple with the desired permutation of the array dimensions.
Exercise 7
Write a function roll_axis_to_end that takes an array and an axis, and makes that axis the last dimension of the array.
For extra credit, rewrite your function using np.ndarray.
End of explanation
"""
# Your code goes here
"""
Explanation: Playing with strides
For the rest of the workshop we are going to dome some fancy tricks with strides, to create interesting views of an existing array.
Exercise 8
Create a function to extract the diagonal of a 2-D array, using the np.ndarray constructor.
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 9
Something very interesting happens when we set a stride to zero. Give that idea some thought and then:
Create two functions, stacked_column_vector and stacked_row_vector, that take a 1D array (the vector), and an integer n, and create a 2D view of the array that stack n copies of the vector, either as columns or rows of the view.
Use this functions to create an outer_product function that takes two 1D vectors and computes their outer product.
End of explanation
"""
# Your code goes here
"""
Explanation: Exercise 10
In the last exercise we used zero strides to reuse an item more than once in the resulting view. Let's try to build on that idea:
Write a function that takes a 1D array and a window integer value, and creates a 2D view of the array, each row a view through a sliding window of size window into the original array.
Hint: There are len(array) - window + 1 such "views through a window".
Another hint: Here's a small example expected run:
>>> sliding_window(np.arange(4), 2)
[[0, 1],
[1, 2],
[2, 3]]
End of explanation
"""
from numpy.lib.stride_tricks import as_strided
# Show as_strided's signature and docstring.
np.info(as_strided)
"""
Explanation: Parting pro tip
NumPy's worst kept secret is the existence of a mostly undocumented, mostly hidden, as_strided function, that makes creating views with funny strides much easier (and also much more dangerous!) than using np.ndarray. Here's the available documentation:
End of explanation
"""
|
the-deep-learners/study-group | nn-from-scratch/MNIST-nn-SGD-flex_arch.ipynb | mit | # Import libraries
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.metrics import accuracy_score
import pickle
import sys
"""
Explanation: A neural network from first principles
The code below was adapted from the code supplied in Andrew Ng's Coursera course on machine learning. The original code was written in Matlab/Octave and in order to further my understanding and enhance my Python skills, I ported it over to Python.
To start, various libraries are imported.
End of explanation
"""
# Load the pre-pickled MNIST train/test/validation splits produced by the
# accompanying MNIST-loader notebook.
# NOTE: pickle.load is only safe on files you created yourself.
with open('./data/pickled/xtrain.pickle', 'rb') as f:
    xtrain = pickle.load(f)
with open('./data/pickled/ytrain.pickle', 'rb') as f:
    ytrain = pickle.load(f)
with open('./data/pickled/xtest.pickle', 'rb') as f:
    xtest = pickle.load(f)
with open('./data/pickled/ytest.pickle', 'rb') as f:
    ytest = pickle.load(f)
with open('./data/pickled/xval.pickle', 'rb') as f:
    xval = pickle.load(f)
with open('./data/pickled/yval.pickle', 'rb') as f:
    yval = pickle.load(f)
"""
Explanation: First, we load the data. For details, please see the accompanying notebook MNIST-loader.ipynb for details.
End of explanation
"""
# Sigmoid function
def sigmoid(z):
    """Logistic activation function 1 / (1 + e^-z), applied elementwise."""
    neg_exp = np.exp(-z)
    return np.reciprocal(1.0 + neg_exp)
"""
Explanation: Now let's define some useful functions for the neural network to use. First is the sigmoid activation function:
End of explanation
"""
def nn(weights, x, y):
    """Run one gradient-descent step on a mini-batch.

    Performs forward propagation, backpropagation and an in-place update
    of every weight matrix, using the module-level hyper-parameters
    ``n_layers``/``layers`` (architecture), ``rate`` (learning rate) and
    ``Lambda`` (L2 regularization strength).

    Parameters
    ----------
    weights : list of ndarray
        One (L_out, L_in + 1) matrix per layer transition; column 0
        holds the bias weights.
    x : ndarray, shape (n, features)
        Mini-batch inputs, one example per row.
    y : ndarray, shape (n, classes)
        One-hot encoded targets.

    Returns
    -------
    list of ndarray
        The same ``weights`` list, updated in place.
    """
    ### Initialization
    n = len(x)
    activations = [np.array(0) for _ in range(n_layers)]
    activations[0] = x
    deltas = [np.array(0) for _ in range(n_layers - 1)]
    bias = np.ones((n, 1))
    ### Forward propagation: a_l = sigmoid([1, a_{l-1}] . W_l^T)
    for w, l in zip(weights, range(1, n_layers)):
        inputs = np.concatenate((bias, activations[l - 1]), axis=1)
        activations[l] = sigmoid(np.dot(inputs, w.T))
    ### Output error (cross-entropy + sigmoid output gives a - y directly)
    deltas[-1] = activations[-1] - y
    ### Back propagation through the hidden layers. Column 0 of each
    ### weight matrix (the bias weights) is excluded: the bias unit has
    ### no incoming connections, so no error flows back through it.
    for l in range(2, n_layers):
        deltas[-l] = (np.dot(deltas[-(l - 1)], weights[-(l - 1)][:, 1:])
                      * activations[-l] * (1 - activations[-l]))
    # Update the weights / biases in place. Non-bias weights get L2
    # weight decay; biases are not regularized.
    for w, l in zip(weights, range(len(layers) - 1, 0, -1)):
        w[:, 1:] = (w[:, 1:] * (1 - rate * Lambda / n)
                    - np.dot(deltas[-l].T, activations[-(l + 1)]) * rate / n)
        # BUGFIX: each bias must be updated with the sum of *its own*
        # unit's deltas over the mini-batch. The previous code summed
        # the entire delta matrix to one scalar and applied the same
        # update to every bias in the layer.
        w[:, :1] -= np.sum(deltas[-l], axis=0)[:, np.newaxis] * rate / n
    return weights
"""
Explanation: The neural network
Below is the real meat of this notebook: the neural network function. The function, as defined here, takes three arguments: weights (the weights as they are known in the moment when the function is called) plus the mini-batch inputs x and targets y. At the start, the weights will be randomly initialized, however as the network learns these values will be changed and improved in order to minimize the cost function.
Let's walk through the basics. Firstly, the weights are provided in a "rolled" format, in other words instead of two matrices of weights, we have a single long list of weights. The first job is to "unroll" the weights into two matrices that can be efficiently used by the numpy matrix multiplication methods. To do this, we take the first (n+1)hidden (i.e. 40120 = 820) values, and reshape them into a 20X401 matrix. Next, we take the remaining 210 values (i.e. classes(hidden+1)) and reshape them into a 10X21 matrix. The n+1 and hidden+1* take into account the bias term, which I'll discuss below.
Forward propagation
Next, we perform forward propagation. This is relatively simple: we multiply the input values in the layer by the weights for that layer, sum the total and add the bias term, and then and apply the sigmoid function. Recall from the reading in week one:
$$
z = w.x+b
$$
$$
a = \sigma(z) = \frac{1}{(1+e^{-z})}
$$
Since w.x is the dot product, it implies the sum. Basic matrix multiplication says that when multiplying two matrices that have the same internal dimension (ie number of columns in matrix one is the same as number of rows in matrix two), each element in row i of matrix one is multiplied by each element in column i of matrix two, and all of those products are summed. This value goes into the first row of the resulting matrix. Subsequently, the same is repeated for the remaining columns of the second matrix, and the first row of the output matrix is completed. This process goes on for all remaining rows in the first matrix.
In our example, the first matrix contains MNIST images in a row-wise format. The second matrix contains the weights for each connection between the input layers and the hidden layers. So following the logic from above, the first row of the input matrix (i.e. the first image in the data set) is multiplied by each of 10 sets of weights (10 columns in the weights matrix), one for each hidden layer. Because it's matrix mulitplication, all of these products are automatically summed.
A quick note about bias
If you look at the code below (and elsewhere in this notebook) you'll find a number of n+1's and hidden+1's, etc. These account for bias, the b term in the equation above. Every time forward propagation is run, an extra column of ones is appended onto the end of the matrix (these are not a part of the actual data). When the weights are randomly initialized, they too have an extra weight included for this bias term (i.e. the dimensions are n+1Xhidden). These two values, bias in the input matrix and bias in the weights matrix, are multiplied during matrix multiplication and their product is added to the total sum for that neuron. Because the value for bias in the input matrix is always 1, the actual value of the bias is thus coded in the weights and can be learned just like a regular weight.
So to sum it all up, for each connection between a node in the input (i.e. a feature, a pixel in the image) and a node in the hidden layer, the input value is multiplied by the weight of each connection and these products for all features are added. To incorporate bias, we include an extra input value of 1 and multiply it by its own weight. The sigmoid function is applied to this sum, and generates the value of the hidden layer for this particular data point.
Continuing on with forward propagation
Now we have the values of the hidden layer, we repeat this process once again with the weights for the connections between the hidden layer and the output layer. Nothing changes here, except for the sizes of the matrices. Recall that we had n input nodes and, say, 20 hidden layer nodes. That means we had n+1 weights (adding 1 for the bias term), so here we will have hidden+1 weights.
At the end of the second forward propagation, we will have a matrix with a row for each example in the data set and a column for each output class in the neural network (i.e. 10). The columns will contain the value the neural network determined for each class. If the network learned how to identify handwritten digits, the highest of these values will correspond with the correct output. At this point, however, our network has done no learning so we wouldn't expect anything better than a random guess (since the weights were randomly initialized!)
The cost function
Next comes the cost function. Here, we implement the cross entropy cost function with weight decay or L2 regularization. I implemented this in two lines for clarity's sake. First, the unregularized cost is determined and subsequently the regularization term is added.
Note: In the SGD version of this notebook, I removed the cost function. The scipy.optimize version required the cost to be calculated during training, however for SGD the cost is incorporated into the weight updates (or rather, its derivative w.r.t the weights/biases is incorporated) and so computing the cost each time is a waste of resources since it won't be used. Instead, I moved the cost calculation into the predict function which is 1) only called if the various monitoring parameters are set to True when training is initiated, and 2) is only calculated once per epoch, instead of on each minibatch (i.e. for 30 epochs and a mini-batch size of 10, it is calculated 30 times, instead of 15 000 times).
And finally, back propagation
First, we find the difference between the output values in the 10 classes and the real value. In this case, the real value for each of the 10 possible digits is a vector or length 10, with a 1 in the position representing the number in question and 0's everywhere else. For example, the number 3 is represented by [0 0 0 1 0 0 0 0 0 0]. Since the outputs are from sigmoid neurons, the values will be between 0 and 1 with the highest value indicating the number the model predicted. Sticking with the above example, we might expect our model to output something like [0.1 0.2 0.1 0.6 0.1 0.2 0.2 0.1 0.2 0.3]. Subtracting these two will give a measure of the error. The larger the value, the more incorrect that class prediction was.
To perform backpropagation, first we find the delta for the final layer (in this case, d3). This is simply the actual value (which is one-hot encoded) subtracted from the neural networks prediction for that value.
Next, we multiply the error from layer 3 by the weights that produced layer three's activation values. This is a matrix multiplication which automatically sums the totals. In this case, the matrices have the dimensions [3750X10] and [10X11], for a dot product of [3750X11]. We can simply perform an elementwise multiplication with the derivative of the sigmoid function with respect to the activations in layer 2 to get the error at layer 2.
Since we only have three layers here, we're done. There is no error on layer 1 since this was the input layer. We can't find the error on the raw values that are input to the network!
Now we would use these two delta values to update the weights and biases and then run the network again. Rinse and repeat until the cost function is appreciably minimized.
Wrapping this function up
As you can see, the nn function takes in a set of weights, performs forward propagation to predict the output, and then performs backpropagation to determine the rate of change of the cost function with respect to the biases and weights, subsequently using the learning rate and the Lambda regularization parameter to update the weights and biases. The updated weights are returned by the nn() function. (In this SGD version the regularized cross-entropy cost itself is no longer computed here -- see the note above -- and the weights stay as a list of matrices rather than being rolled into a single vector.)
End of explanation
"""
def predict(weights, x, y):
    """Forward-propagate x through the network and score the result.

    Returns a tuple ``(labels, cost)``: ``labels`` holds the predicted
    class index for every row of ``x``, and ``cost`` is the
    L2-regularized cross-entropy of the predictions against the one-hot
    targets ``y``. No learning happens here; this is only called by the
    monitoring code.
    """
    n_samples = len(x)
    ones_col = np.ones((n_samples, 1))
    # Forward pass: feed each layer the previous activations plus a
    # leading bias column of ones.
    current = x
    for w in weights[:n_layers - 1]:
        layer_input = np.concatenate((ones_col, current), axis=1)
        current = sigmoid(np.dot(layer_input, w.T))
    output = current
    # Unregularized cross entropy (nan_to_num guards 0 * log(0) terms
    # when the output saturates at 0 or 1).
    cross_entropy = -y * np.log(output) - (1 - y) * (np.log(1 - output))
    cost = np.sum(np.nan_to_num(cross_entropy)) / n_samples
    # Add the L2 penalty over all non-bias weights (column 0 excluded).
    penalty = 0
    for w in weights[:n_layers - 1]:
        penalty += np.sum(w[:, 1:] ** 2)
    cost += (Lambda / (2 * n_samples)) * penalty
    return np.argmax(output, axis=1), cost
"""
Explanation: Next is the predict function. This function takes the learned weights and performs forward propagation through the netwwork using the x values supplied in the arguments. The effect of this is essentially to predict the output class of the given data using the weights that have been learned. We also calculate the cost here, because the actual cost value (and it's calculation) is only necessary if monitoring is set to True. Note: this function is only called by the accuracy tools at the end, and thus doesn't need to perform backpropagation or do any learning.
End of explanation
"""
def weight_init(L_in, L_out):
    """Draw the initial weights for one layer transition.

    Returns an (L_out, L_in + 1) matrix -- the extra column is the bias
    weight -- sampled from a Gaussian with standard deviation
    1/sqrt(L_in), which keeps each unit's pre-activation variance
    roughly independent of its fan-in.
    """
    np.random.seed(13)  # fixed seed keeps runs reproducible for testing
    spread = 1 / np.sqrt(L_in)
    return np.random.normal(scale=spread, size=(L_out, L_in + 1))
"""
Explanation: We initialize theta with a set of random weights with a standard deviation of $ 1/\sqrt{n} $
End of explanation
"""
def SGD(x,y,monitor_cost,monitor_train_acc,monitor_test_acc):
    """Train the network with mini-batch stochastic gradient descent.

    Uses the module-level hyper-parameters ``layers`` (architecture),
    ``epochs`` and ``minibatchsize``, plus the test set ``xtest``/``ytest``
    when test-accuracy monitoring is enabled.

    Parameters
    ----------
    x, y : ndarray
        Training inputs (one example per row) and one-hot targets.
        Both are shuffled in place each epoch.
    monitor_cost, monitor_train_acc, monitor_test_acc : bool
        Flags enabling the (costly) per-epoch evaluation passes.

    Returns
    -------
    tuple
        (weights, costs, train_acc, test_acc); each list stays empty
        unless the matching monitor flag was set.
    """
    # Make list of weights arrays, one per layer transition
    weights = [np.array(0) for i in range(len(layers)-1)]
    for l in range(len(layers)-1):
        weights[l] = weight_init(layers[l],layers[l+1]) #[layers-1,[L_in+1,Lout]]
    def shuffle(x,y):
        # Shuffle x and y in place with the *same* permutation: the RNG
        # state is saved before shuffling x and restored before
        # shuffling y, so example/label pairing is preserved.
        state = np.random.get_state()
        np.random.shuffle(x)
        np.random.set_state(state)
        np.random.shuffle(y)
        return x,y
    costs, test_acc, train_acc = [],[],[]
    for j in range(epochs):
        # Shuffle the data before carving out this epoch's mini-batches
        x,y = shuffle(x,y)
        # Separate x, y mini-batches (the last one may be smaller)
        mini_x = [x[k:k+minibatchsize] for k in range(0,len(x),minibatchsize)]
        mini_y = [y[k:k+minibatchsize] for k in range(0,len(y),minibatchsize)]
        # Iterate through pairs of mini-batches, calling nn() on each pair
        for x_mini,y_mini in zip(mini_x,mini_y):
            weights = nn(weights,x_mini,y_mini)
        # If statements for monitoring. This ensures the predict() function
        # isn't called unnecessarily.
        # NOTE(review): `|` is bitwise OR; it works because the flags are
        # bools, but logical `or` would be the conventional choice.
        if monitor_cost | monitor_train_acc:
            ypred, C = predict(weights,x,y)
            if monitor_cost:
                costs.append(C)
            if monitor_train_acc:
                train_acc.append(accuracy_score(np.argmax(y,axis=1),ypred))
        if monitor_test_acc:
            test_acc.append(accuracy_score(np.argmax(ytest,axis=1),predict(weights,xtest,ytest)[0]))
        # Write a 20-character text progress bar, overwriting the same line
        progress = (j+1)/(epochs)*100.0
        bar = 20
        hashes = '#'*(int(round(progress/100*bar)))
        spaces = ' '*(bar-len(hashes))
        sys.stdout.write('\r[{0}] {1}%'.format(hashes + spaces, round(progress,2)))
    return weights,costs,train_acc,test_acc
"""
Explanation: Stochastic gradient descent
This function handles the SGD part of the learning, and will be called later on in the script when we're ready to learn the model.
First, the function calls weight_init() to initialize the starting weights. Empty lists are created for storing the costs and accuracies over the course of learning. Next, the function loops over the number of epochs. In each loop, the x and y matrices are shuffled and divided into mini-batches. Looping through all of the mini-batches, the nn() function is called to perform forward and backpropagation and update the weights accordingly. If the monitor flags are set when calling SGD(), the predict function will produce the cost and accuracies and store them in the empty lists we created earlier.
End of explanation
"""
# Model parameters
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int is the drop-in replacement.
m = int(xtrain.shape[1]) # Number of features in each example
layers = [m, 100, 10]    # input layer, one hidden layer of 100 units, 10 output classes
n_layers = len(layers)
# Learning parameters
Lambda = 0.01            # L2 regularization strength
epochs = 40
minibatchsize = 50
rate = 0.3               # learning rate used by nn()
# Train the model with all three monitors enabled (cost, train acc, test acc)
weights, costs, train_acc, test_acc = SGD(xtrain,ytrain,True,True,True)
# Plot the results
# Note: don't bother calling unless the monitor parameters are set...
plot()
# Final sanity check: accuracy on the held-out validation set
accuracy_score(np.argmax(yval,axis=1),predict(weights,xval,yval)[0])
"""
Explanation: Finally, we train the model
First, we specify the model parameters. Lambda is the regularization parameter, which protects against overfitting. The variable classes specifies the number of nodes in the output layer, m is the number of features in the data set (this also doubles as the number of input layers, see below), and epochs, minibatchsize and rate parameters are fairly self explanatory.
The layers variable is a list, wherein each element of the list corresponds to a layer in the network (including the input and output layers). For example, the three layer network we've been working with until now is defined by [784, 100, 10], i.e. 784 features in the input layer, 100 neurons in the single hidden layer, and 10 output neurons.
Now that all of the various elements have been coded, and the parameters have been set, we're ready to train the model using the training set, and plot the cost/accuracies.
End of explanation
"""
def plot():
    """Plot training cost (left panel) and train/test accuracy (right panel)
    against epoch number.

    Reads the module-level ``epochs``, ``costs``, ``train_acc`` and
    ``test_acc`` produced by SGD(); only call it when monitoring was on.
    """
    xs = np.arange(epochs)
    fig = plt.figure(figsize=(12, 12))
    cost_ax = fig.add_subplot(221)
    cost_ax.plot(xs, costs, "-")
    cost_ax.set_xlim([0, epochs])
    cost_ax.set_xlabel('Epoch')
    cost_ax.set_ylabel('Cost')
    cost_ax.set_title('Cost over epochs')
    acc_ax = fig.add_subplot(222)
    acc_ax.plot(xs, train_acc, "-", color='blue',
                label="Training data, final acc: "+str(train_acc[-1]))
    acc_ax.plot(xs, test_acc, "-", color='orange',
                label="Testing data, final acc: "+str(test_acc[-1]))
    acc_ax.set_xlim([0, epochs])
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('Accuracy')
    plt.legend(loc='lower right')
    acc_ax.set_title('Accuracy over epochs')
    plt.show()
"""
Explanation: Visualizing cost and accuracy as a function of epochs
This quick code simply plots the cost versus number of epochs and training and testing set accuracies versus number of epochs
End of explanation
"""
# Visualize the data
def drawplot(draw, x, y):
    """Show a 10x10 grid of randomly chosen examples from x.

    Does nothing when draw is False. Assumes the module-level ``m`` is the
    number of pixels per image and that images are square.
    """
    if not draw:
        return
    n_examples = x.shape[0]
    chosen = np.random.randint(0, n_examples, size=100)  # 100 random row indices
    fig, axes = plt.subplots(10, 10)
    side = int(math.sqrt(m))  # images are square: sqrt(n_features) pixels per side
    for row in range(10):
        for col in range(10):
            image = x[chosen[row*10 + col], :].reshape(side, side)
            axes[row, col].set_axis_off()  # hide axes for a cleaner grid
            axes[row, col].imshow(image, aspect='auto', cmap='gray')
    plt.show()
drawplot(True,xtrain,ytrain)
# Interactive printer function
def printer(x, y, weights):
    """Pick one random example, display it, and title the figure with the
    model's predicted class and the true class."""
    idx = np.random.randint(len(x), size=1)
    img_size = int(math.sqrt(m))
    xi = x[idx, :].reshape(img_size, img_size)
    # predict() returns (argmax array, cost); take the single prediction.
    # Convert size-1 arrays to ints explicitly: passing an array to '%i'
    # was deprecated and now raises TypeError on NumPy >= 1.25.
    yi = int(predict(weights, x[idx, :], y[idx, :])[0][0])
    true_label = int(np.argmax(y[idx, :], axis=1)[0])
    plt.title('The predicted value is %i\n The true value is %i' % (yi, true_label))
    plt.imshow(xi, aspect='auto', cmap='gray')
    plt.axis('off')
    plt.show()
# Running this cell will draw a single image
# The predicted and real value for y is printed above
printer(xtest,ytest,weights)
"""
Explanation: Visualizing the handwritten numbers
Here are two quick functions to visualize the actual data. First, we randomly select 100 data points and plot them. The second function grabs a single random data point, plots the image and uses the model above to predict the output.
End of explanation
"""
|
ccasotto/rmtk | rmtk/vulnerability/mdof_to_sdof/first_mode/first_mode.ipynb | agpl-3.0 | %matplotlib inline
from rmtk.vulnerability.common import utils
from rmtk.vulnerability.mdof_to_sdof.first_mode import first_mode
# MDOF pushover curves (base shear vs floor displacements) to convert.
pushover_file = "../../../../../rmtk_data/capacity_curves_Vb-dfloor.csv"
idealised_type = 'quadrilinear'; # 'bilinear', 'quadrilinear' or 'none'
capacity_curves = utils.read_capacity_curves(pushover_file)
# First-mode transformation: MDOF pushover -> equivalent SDOF Sa-Sd curves.
[sdof_capacity_curves, sdof_idealised_capacity] = first_mode.mdof_to_sdof(capacity_curves, idealised_type)
"""
Explanation: MDOF to equivalent SDOF using the first mode only
This IPython notebook converts a pushover curve for an MDOF system into an equivalent SDOF capacity curve, considering the first mode of vibration only. The supplied pushover curve, which can be both in terms of base shear and roof displacement (Vb-droof) and base shear and floor displacements (Vb-dfloor), is transformed into an equivalent SDOF capacity curve, which is in terms of spectral acceleration and spectral displacement.
Note that this method assumes that the first mode shape Φ has been normalised to unit amplitude at the roof, i.e. Φn = 1, where n denotes the roof level.
The user has the option to derive the yielding Sa and Sd, if needed, using an idealisation of the sdof capacity curve, either bilinear or quadrilinear. To do so set the variable idealised_type to 'quadrilinear' or 'bilinear', if the idealisation is not required then set it to 'none'.
End of explanation
"""
# Choose which curves to write out (here: the idealised SDOF curves).
capacity_to_save = sdof_idealised_capacity
utils.save_SdSa_capacity_curves(capacity_to_save,'../../../../../rmtk_data/capacity_curves_sdof_first_mode.csv')
# Use equality, not identity: "is not 'none'" compares object identity with
# a string literal, which only works by interning luck and raises a
# SyntaxWarning on modern Python.
if idealised_type != 'none':
    idealised_capacity = utils.idealisation(idealised_type, sdof_capacity_curves)
    utils.plot_idealised_capacity(idealised_capacity, sdof_capacity_curves, idealised_type)
else:
    utils.plot_capacity_curves(capacity_curves)
    utils.plot_capacity_curves(sdof_capacity_curves)
"""
Explanation: Save capacity curves
Please define what capacity curve should be saved assigning the variable capacity_to_save one of the following:
1. capacity_to_save = sdof_idealised_capacity: idealised capacity curve is saved. If idealised_type was previously set to none, an error will be raised because the variable sdof_idealised_capacity is empty.
2. capacity_to_save = sdof_capacity_curves: full capacity curve is saved.
End of explanation
"""
# Map maximum inter-storey drift (ISD) to spectral displacement (Sd) so that
# ISD-based damage thresholds can be reused for the equivalent SDOF system.
deformed_shape_file = "../../../../../rmtk_data/ISD_Sd.csv"
[ISD_vectors, Sd_vectors] = first_mode.define_deformed_shape(capacity_curves, deformed_shape_file)
"""
Explanation: Defined deformed shape for converting ISD damage model
This function lets you define the relationship between the maximum Inter-Storey Drift (ISD) along the building height and spectral displacement. This relationship serves the purpose of converting inter-storey drift damage thresholds to spectral displacement damage thresholds, when the MDOF system's damage_model['type'] = interstorey drift is to be used for the equivalent SDOF system.
If capacity_curves['type'] = 'Vb-dfloor' the relationship is extracted from the displacement at each storey, otherwise a linear relationship is assumed.
End of explanation
"""
|
william-gray/data-science-python | ML-regression/PredictingHousePrices.ipynb | mit | import os
from urllib import urlretrieve
import graphlab
# Limit number of worker processes. This preserves system memory, which prevents hosted notebooks from crashing.
graphlab.set_runtime_config('GRAPHLAB_DEFAULT_NUM_PYLAMBDA_WORKERS', 4)
# Remote location of the King County house-sales CSV.
URL = 'https://d396qusza40orc.cloudfront.net/phoenixassets/home_data.csv'
def get_data(filename='home_data.csv', url=URL, force_download=False):
    """Download (if needed) and load the King County home-sales data.

    Parameters
    ----------
    filename: string (optional)
        local path to save/load the data
    url: string (optional)
        remote location of the CSV file
    force_download: bool (optional)
        if True, force redownload of data even when a local copy exists
    Returns
    -------
    data: graphlab SFrame. Similar to a pandas DataFrame,
        but with capacity for faster analysis of larger data sets
    """
    if force_download or not os.path.exists(filename):
        urlretrieve(url, filename)
    # Load from the requested path. (The original hard-coded
    # 'home_data.csv' here, which broke any non-default filename.)
    sf = graphlab.SFrame(filename)
    return sf
#sales = get_data()
#sales.head()
"""
Explanation: Predicting house prices using Linear Regression
(See Getting Started with SFrames for setup instructions)
End of explanation
"""
# Load the pre-packaged King County house-sales SFrame.
sales = graphlab.SFrame('home_data.gl')
sales
"""
Explanation: Load house sales data
Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
End of explanation
"""
# Render GraphLab visualizations inline in the notebook.
graphlab.canvas.set_target('ipynb')
sales.show(view='Scatter Plot', x='sqft_living', y='price')
"""
Explanation: Exploring the data
The house price is correlated with the number of square feet of living space.
End of explanation
"""
# 80/20 train/test split; a fixed seed keeps results reproducible.
train_data, test_data = sales.random_split(.8, seed=0)
"""
Explanation: Create a simple regression model of sqft_living to price
Split data into training and testing.
We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
End of explanation
"""
# Simple one-feature linear regression: price ~ sqft_living.
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'])
"""
Explanation: Build the regression model using only sqft_living as a feature
End of explanation
"""
# Mean test-set price vs the simple model's error metrics (Python 2 prints).
print test_data['price'].mean()
print sqft_model.evaluate(test_data)
"""
Explanation: Evaluate the simple model
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
# Scatter the raw test data (dots) against the fitted regression line.
plt.plot(test_data['sqft_living'], test_data['price'], '.',
        test_data['sqft_living'], sqft_model.predict(test_data), '-')
"""
Explanation: RMSE of about \$255,170!
Visualize the prediction
End of explanation
"""
# Learned intercept and slope of the single-feature model.
sqft_model.coefficients
"""
Explanation: Above: blue dots are original data, green line is the prediction from the simple regression.
Below: we can view the learned regression coefficients.
End of explanation
"""
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
sales[my_features].show()
# Box-whisker plot of price per bathroom count.
sales.show(view='BoxWhisker Plot', x='bathrooms', y='price')
"""
Explanation: Explore other features in the data
To build a more elaborate model, we will explore using more features.
End of explanation
"""
# Multi-feature model; validation_set=None disables the automatic holdout.
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features,validation_set=None)
print my_features
"""
Explanation: Pull the bar at the bottom to view more of the data.
98039 is the most expensive zip code.
Build a regression model with more features
End of explanation
"""
# RMSE comparison: one-feature vs multi-feature model (Python 2 prints).
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
"""
Explanation: Comparing the results of the simple model with adding more features
End of explanation
"""
# An "average" Seattle house, selected by parcel id.
house1 = sales[sales['id'] =='5309101200']
house1
"""
Explanation: The RMSE goes down from \$255,170 to \$179,508 with more features.
Apply learned models to predict prices of 3 houses
The first house we will use is considered an "average" house in Seattle.
End of explanation
"""
# Actual price vs both models' predictions for house1 (Python 2 prints).
print house1['price']
print sqft_model.predict(house1)
print my_features_model.predict(house1)
"""
Explanation: <img src="http://info.kingcounty.gov/Assessor/eRealProperty/MediaHandler.aspx?Media=2916871">
End of explanation
"""
# A fancier waterfront house.
house2 = sales[sales['id']=='1925069082']
house2
"""
Explanation: In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
Prediction for a second, fancier house
We will now examine the predictions for a fancier house.
End of explanation
"""
# Predictions for the fancier house from both models.
print sqft_model.predict(house2)
print my_features_model.predict(house2)
"""
Explanation: <img src="https://ssl.cdn-redfin.com/photo/1/bigphoto/302/734302_0.jpg">
End of explanation
"""
# Hand-built single-row feature table describing Bill Gates' house
# (no observed price; used only for prediction below).
bill_gates = {'bedrooms':[8],
'bathrooms':[25],
'sqft_living':[50000],
'sqft_lot':[225000],
'floors':[4],
'zipcode':['98039'],
'condition':[10],
'grade':[10],
'waterfront':[1],
'view':[4],
'sqft_above':[37500],
'sqft_basement':[12500],
'yr_built':[1994],
'yr_renovated':[2010],
'lat':[47.627606],
'long':[-122.242054],
'sqft_living15':[5000],
'sqft_lot15':[40000]}
"""
Explanation: In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
Last house, super fancy
Our last house is a very large one owned by a famous Seattleite.
End of explanation
"""
# Predict Bill Gates' house price with the multi-feature model.
print my_features_model.predict(graphlab.SFrame(bill_gates))
"""
Explanation: <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d9/Bill_gates%27_house.jpg/2560px-Bill_gates%27_house.jpg">
End of explanation
"""
# Mean sale price in the most expensive zip code (98039).
Fancy_zip = sales[sales['zipcode']=='98039']
Fancy_zip['price'].mean()
# Quiz filter: houses with 2000 < sqft_living < 4000.
# NOTE(review): sqftover2000under4000 is computed but never used below.
sqftover2000 = sales[sales['sqft_living'] > 2000]
sqftover2000under4000 = sqftover2000[sqftover2000['sqft_living'] < 4000]
import numpy as np
advanced_features = [
'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode',
'condition', # condition of house
'grade', # measure of quality of construction
'waterfront', # waterfront property
'view', # type of view
'sqft_above', # square feet above ground
'sqft_basement', # square feet in basement
'yr_built', # the year built
'yr_renovated', # the year renovated
'lat', 'long', # the lat-long of the parcel
'sqft_living15', # average sq.ft. of 15 nearest neighbors
'sqft_lot15', # average lot size of 15 nearest neighbors
]
advanced_features_model = graphlab.linear_regression.create(train_data, target='price', features=advanced_features, validation_set=None)
# RMSE improvement of the advanced model over my_features_model (Python 2 prints).
print my_features_model.evaluate(test_data)['rmse'] - advanced_features_model.evaluate(test_data)['rmse']
print advanced_features_model.predict(house1)
"""
Explanation: The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
End of explanation
"""
|
ledeprogram/algorithms | class10/donow/radhikapc_DoNow_10.ipynb | gpl-3.0 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import dateutil.parser
import pg8000
from pandas import DataFrame
from sklearn.externals.six import StringIO
import pydotplus
from sklearn import tree
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# Connect to the shared training Postgres instance (read-only student login).
conn = pg8000.connect(host="training.c1erymiua9dx.us-east-1.rds.amazonaws.com", user='dot_student', password='qgis', database='training')
cursor = conn.cursor()
# Pull the winequality column names from the schema catalog.
cursor.execute("select column_name from information_schema.columns where table_name='winequality'")
column_list = []
for row in cursor.fetchall():
    column_list.append(row[0])
column_list
database=cursor.execute("SELECT * FROM winequality")
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Load the whole table straight into a DataFrame for analysis.
df = pd.read_sql("SELECT * FROM winequality", conn)
df.head()
df.columns
df.info()
"""
Explanation: Create a classifier to predict the wine color from wine quality attributes using this dataset: http://archive.ics.uci.edu/ml/datasets/Wine+Quality
The data is in the database we've been using
host='training.c1erymiua9dx.us-east-1.rds.amazonaws.com'
database='training'
port=5432
user='dot_student'
password='qgis'
table name = 'winequality'
Query for the data and create a numpy array
End of explanation
"""
# Convert the DataFrame to a raw NumPy array.
# (DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# .values is the long-standing equivalent.)
numpyMatrix = df.values
numpyMatrix
# Features: the first 11 columns (the physico-chemical attributes).
x = numpyMatrix[:,:11]
x
# Target: the remaining column(s) — the wine color label.
y = numpyMatrix[:,11:]
y
"""
Explanation: Split the data into features (x) and target (y, the last column in the table)
Remember you can cast the results into an numpy array and then slice out what you want
End of explanation
"""
dt = tree.DecisionTreeClassifier()
# NOTE(review): this full-data fit is immediately overwritten by the
# train-split fit two lines down, so it is redundant.
dt = dt.fit(x,y)
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.25,train_size=0.75)
dt = dt.fit(x_train,y_train)
def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confussion_matrix=True):
    """Print selected evaluation reports for classifier ``clf`` on (X, y).

    Each show_* flag toggles one report; output goes to stdout and nothing
    is returned. (The 'confussion' spelling is kept so existing keyword
    callers still work.)
    """
    predictions = clf.predict(X)
    if show_accuracy:
        accuracy = metrics.accuracy_score(y, predictions)
        print("Accuracy:{0:.3f}".format(accuracy),"\n")
    if show_classification_report:
        print("Classification report")
        print(metrics.classification_report(y, predictions),"\n")
    if show_confussion_matrix:
        print("Confusion matrix")
        print(metrics.confusion_matrix(y, predictions),"\n")
measure_performance(x_test,y_test,dt) #measure on the test data (rather than train)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render the confusion matrix ``cm`` as a colour-mapped image.

    :param cm: square confusion matrix (e.g. from metrics.confusion_matrix)
    :param title: figure title
    :param cmap: matplotlib colormap
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # Label ticks 0..n-1 from the matrix itself. The original referenced
    # ``iris.target_names`` — left over from the iris tutorial and undefined
    # here, so calling the function raised NameError.
    tick_marks = np.arange(cm.shape[0])
    plt.xticks(tick_marks, tick_marks, rotation=45)
    plt.yticks(tick_marks, tick_marks)
    plt.tight_layout()
    plt.ylabel('True color')
    plt.xlabel('Predicted color')
# Fit on the training split and predict the held-out test set in one chain.
y_pred = dt.fit(x_train, y_train).predict(x_test)
"""
Explanation: Create a decision tree with the data
End of explanation
"""
from sklearn.cross_validation import cross_val_score
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports cross_val_score from sklearn.model_selection.
x = numpyMatrix[:,:11]
x
# Re-slice y as a 1-D vector: cross_val_score expects shape (n_samples,).
y = numpyMatrix[:,11]
y
# 10-fold cross-validated accuracy of the decision tree.
scores = cross_val_score(dt,x,y,cv=10)
scores
np.mean(scores)
"""
Explanation: Run 10-fold cross validation on the model
End of explanation
"""
# Re-run the query so cursor.description is populated, then read the
# column names for interpreting the feature-importance plot.
cursor.execute("Select * FROM winequality")
colnames = [desc[0] for desc in cursor.description]
colnames
# Plot each feature's importance from the fitted tree.
plt.plot(dt.feature_importances_,'o')
plt.ylim(-5,10)  # the second, identical ylim call was redundant and removed
"""
Explanation: If you have time, calculate the feature importance and graph based on the code in the slides from last class
Use this tip for getting the column names from your cursor object
End of explanation
"""
|
mrcinv/matpy | 01a_enacbe.ipynb | gpl-2.0 | import sympy as sym
x = sym.symbols("x") # the variable x is treated as a mathematical symbol
"""
Explanation: << nazaj: Uvod
Enačbe in neenačbe
V tem delu si bomo ogledali različne pristope, kako se spopademo z enačbami. Spoznali bomo nekaj dodatnih knjižnic za python: SymPy, matplotlib in SciPy.
Simbolično reševanje s SymPy
Simbolično reševanje je reševanje enačb s simboli. Ločimo ga od numeričnega reševanja enačb, pri katerem računamo z decimalnimi približki števil. Na vajah navadno uporabljamo simbolično reševanje. Enačbo, ki jo rešujemo, mrcvarimo, dokler ni zapisana v obliki, iz katere lahko preprosto razberemo njeno rešitev. V Pythonu lahko nekaj podobnega počnemo s SymPy.
Primer
Poišči vse rešitve enačbe
$$x+\frac{2}{x}=3.$$
Rešitev
Enačbo najprej pomnožimo z $x$ in preoblikujemo v polinomsko enačbo
$$ x^2+2-3x=0,$$
v kateri faktoriziramo levo stran
$$(x-2)(x-1)=0.$$
Sklepamo, da je leva stran enaka $0$, če je en od faktorjev enak $0$.
Tako dobimo dve možnosti
\begin{eqnarray}
x-2=0 & \implies & x=2\
x-1=0 & \implies & x=1.
\end{eqnarray}
Sympy
Poskusimo priti do rešitve še s Pythonom. Najprej naložimo knjižnico za simbolično računanje SymPy, nato pa deklariramo, naj se spremenljivka x obravnava kot matematični simbol.
End of explanation
"""
# Build the symbolic equation x + 2/x = 3 ("enacba" is Slovenian for "equation").
enacba = sym.Eq(x+2/x,3)
enacba
"""
Explanation: Za začetek povsem sledimo korakom, ki smo jih naredili „na roke“. Povzamimo „algoritem“
vse člene damo na levo stran
enačbo pomnožimo z $x$
levo stran faktoriziramo
iz faktorjev preberemo rešitev
End of explanation
"""
sym.init_printing() # nicer (typeset) formula output
enacba
# move every term to the left-hand side and multiply through by x
leva = (enacba.lhs - enacba.rhs)*x
leva
# expand / multiply out the left-hand side
leva = sym.expand(leva)
leva
# factor the left-hand side
leva = sym.factor(leva)
leva
"""
Explanation: Vključimo izpis formul v lepši obliki, ki ga omogoča SymPy.
End of explanation
"""
# the easiest way to get the solutions of the equation is the solve function
resitve = sym.solve(enacba)
resitve
"""
Explanation: Od tu naprej postane precej komplicirano, kako rešitve programsko izluščiti iz zadnjega rezultata. Če nas zanimajo le rešitve, lahko zgornji postopek izpustimo in preprosto uporabimo funkcijo solve.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
t = np.arange(-1,3,0.01) # sequence of x values at which both sides are tabulated
leva_f = sym.lambdify(x,enacba.lhs) # lambdify turns the equation's left side into a Python function, applied to t
desna_f = sym.lambdify(x,enacba.rhs) # likewise for the right side (rhs - right hand side, lhs - left hand side)
plt.plot(t,leva_f(t)) # the left side / leva_f operates elementwise on the array t
plt.plot(t,[desna_f(ti) for ti in t]) # desna_f is a constant (the number 3), so it would not return a list of t's length
plt.ylim(0,5)
plt.plot(resitve,[leva_f(r) for r in resitve],'or')
plt.show()
"""
Explanation: Grafična rešitev
Rešitve enačbe si lahko predstavljamo grafično. Iščemo vrednosti $x$, pri katerih je leva stran enaka desni. Če narišemo graf leve in desne strani na isto sliko, so rešitve enačbe ravno x-koordinate presečišča obeh grafov. Za risanje grafov uporabimo knjižnico matplotlib. Graf funkcije narišemo tako, da funkcijo tabeliramo v veliko točkah. Da lažje računamo s tabelami, uporabimo tudi knjižnico numpy, ki je namenjena delu z vektorji in matrikami.
End of explanation
"""
# Enable the Disqus comment widget for this notebook.
import disqus
%reload_ext disqus
%disqus matpy
"""
Explanation: Naloga
Poišči vse rešitve enačbe
$$x^2-2=1/x.$$
Uporabi sympy.solve in grafično predstavi rešitve.
naprej: neenačbe >>
End of explanation
"""
|
mayankjohri/LetsExplorePython | Section 1 - Core Python/Chapter 12 - Introspection/Chapter14_Introspection.ipynb | gpl-3.0 | trospection or reflection is the ability of software to identify and report their own internal structures, such as types, variabl# Getting some information
# about global objects in the program
from types import ModuleType
def info(n_obj):
    """Print introspection details for the global object named ``n_obj``:
    its id, type and repr, plus the member list when it is a module."""
    # Create a reference to the object
    obj = globals()[n_obj]
    # Show object information
    # NOTE(review): the 'Typo:'/'itens:' output labels look like Portuguese
    # leftovers for 'Type:'/'items:'; left unchanged as they are runtime strings.
    print ('Name of object:', n_obj)
    print ('Identifier:', id(obj))
    print ('Typo:', type(obj))
    print ('Representation:', repr(obj))
    # If it is a module
    if isinstance(obj, ModuleType):
        print( 'itens:')
        for item in dir(obj):
            print (item)
        # NOTE(review): a bare 'print' only emits a blank line on Python 2;
        # on Python 3 it is a no-op expression.
        print
# Showing information
for n_obj in dir()[:10]: # The slice [:10] is used just to limit objects
    info(n_obj)
"""
Explanation: Chapter 14: Introspection
Introspection or reflection is the ability of software to identify and report their own internal structures, such as types, variable scope, methods and attributes.
Native interpreter functions for introspection:
<table>
<tr>
<th>Function</th>
<th>Returns</th>
</tr>
<tr>
<td><code>type(object)</code></td>
<td>The type (class) of the object</td>
</tr>
<tr>
<td><code>id(object)</code></td>
<td>object identifier</td>
</tr>
<tr>
<td><code>locals()</code></td>
<td>local variables dictionary</td>
</tr>
<tr>
<td><code>globals()</code></td>
<td>global variables dictionary</td>
</tr>
<tr>
<td><code>vars(object)</code></td>
<td>object symbols dictionary</td>
</tr>
<tr>
<td><code>len(object)</code></td>
<td>size of an object</td>
</tr>
<tr>
<td><code>dir(object)</code></td>
<td>A list of object structures</td>
</tr>
<tr>
<td><code>help(object)</code></td>
<td>Object doc strings</td>
</tr>
<tr>
<td><code>repr(object)</code></td>
<td>Object representation</td>
</tr>
<tr>
<td><code>isinstance(object, class)</code></td>
<td>True if object is derived from class</td>
</tr>
<tr>
<td><code>issubclass(subclass, class)</code></td>
<td>True if object inherits the class</td>
</tr>
</table>
The object identifier is a unique number that is used by the interpreter for identifying the objects internally.
Example:
End of explanation
"""
# Python 2 example: both types.StringType and the print statement were
# removed in Python 3 (use isinstance(s, str) and print(...) there).
import types
s = ''
if isinstance(s, types.StringType):
    print 's is a string.'
"""
Explanation: Python also has a module called types, which has the definitions of the basic types of the interpreter.
Example:
End of explanation
"""
import os.path
# inspect: "friendly" introspection module
import inspect
# (Python 2 print statements below; the trailing commas suppress the newline.)
print 'Object:', inspect.getmodule(os.path)
print 'Class?', inspect.isclass(str)
# Lists all functions that exist in "os.path"
print 'Member:',
for name, struct in inspect.getmembers(os.path):
    if inspect.isfunction(struct):
        print name,
"""
Explanation: Through introspection, it is possible to determine the fields of a database table, for example.
Inspect
The module inspect provides a set of high-level functions that allow for introspection to investigate types, collection items, classes, functions, source code and the runtime stack of the interpreter.
Example:
End of explanation
"""
import inspect
def myself():
    """Return the name of the function that called myself() (one stack frame up)."""
    return inspect.stack()[1][3]
def parent_function():
    """Return the name of the caller's caller (two stack frames up)."""
    return inspect.stack()[2][3]
def power(expo):
    """Report the current/parent frame names, then return a closure that
    raises its argument to ``expo``."""
    print("I am at {name}, {parent}".format(name=myself(), parent=parent_function()))
    def inner(num):
        # Reports 'inner' as its own name via the stack inspection above.
        print("I am at {name}, {parent}".format(name=myself(), parent=parent_function()))
        return num**expo
    return inner
def test_power(a, b):
    """Call power() from inside a function so parent_function() reports 'test_power'."""
    p = power(a)
    p(b)
d = power(10)
d(10)
test_power(10, 5)
"""
Explanation: The functions that work with the stack of the interpreter should be used with caution because it is possible to create cyclic references (a variable that points to the stack item that has the variable itself). The existence of references to stack items slows the destruction of the items by the garbage collector of the interpreter.
End of explanation
"""
|
postBG/DL_project | intro-to-tensorflow/intro_to_tensorflow.ipynb | mit | import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
## Files were already downloaded locally, so this cell no longer needs to run.
def download(url, file):
    """
    Fetch <url> into the local path <file>, skipping files that already exist.
    :param url: URL to file
    :param file: Local file path
    """
    if os.path.isfile(file):
        return  # already cached locally
    print('Downloading ' + file + '...')
    urlretrieve(url, file)
    print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Read every image inside a zip archive and return (features, labels).

    :param file: The zip file to extract the data from
    :return: tuple of (flattened float32 pixel arrays, one-character labels
        taken from the first letter of each image's filename)
    """
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # tqdm wraps the name list so extraction shows a progress bar
        for filename in tqdm(zipf.namelist(), unit='files'):
            if filename.endswith('/'):
                continue  # skip directory entries
            with zipf.open(filename) as image_file:
                image = Image.open(image_file)
                image.load()
                # Flatten each image to a 1-D float32 vector to save memory
                pixels = np.array(image, dtype=np.float32).flatten()
                features.append(pixels)
                # The first character of the basename encodes the image's letter
                labels.append(os.path.split(filename)[1][0])
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Min-Max scale 8-bit grayscale values (0..255) into the range [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
    low, high = 0.1, 0.9
    lo_px, hi_px = 0, 255  # full 8-bit pixel range
    scaled = (image_data - lo_px) * (high - low) / (hi_px - lo_px)
    return low + scaled
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
# Known grayscale values must map linearly onto [0.1, 0.9]
# (0 -> 0.1 and 255 -> 0.9).
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
    [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
     0.125098039216, 0.128235294118, 0.13137254902, 0.9],
    decimal=3)
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
    [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
     0.896862745098, 0.9])
# The flag guards against accidentally re-scaling already-normalized
# features on a second run of this cell.
if not is_features_normal:
    train_features = normalize_grayscale(train_features)
    test_features = normalize_grayscale(test_features)
    is_features_normal = True
print('Tests Passed!')
# One-hot encode the letter labels ('A'..'J') as 10-dimensional vectors;
# the flag guards against double-encoding on a second run of this cell.
if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation (5% held out for
# validation; fixed random_state keeps the split reproducible)
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features,
    train_labels,
    test_size=0.05,
    random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        # Use the pickle_file variable (the original hard-coded the file name
        # again here) so the cached path is defined in exactly one place.
        with open(pickle_file, 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
print('Data cached in pickle file.')
"""
Explanation: <img src="image/Mean_Variance_Image.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize_grayscale() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
# Restores the checkpoint written earlier so the notebook can be resumed
# here without re-downloading and re-processing the dataset.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    pickle_data = pickle.load(f)
    train_features = pickle_data['train_dataset']
    train_labels = pickle_data['train_labels']
    valid_features = pickle_data['valid_dataset']
    valid_labels = pickle_data['valid_labels']
    test_features = pickle_data['test_dataset']
    test_labels = pickle_data['test_labels']
    del pickle_data  # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
# Shapes are left unspecified so the same placeholders accept any batch size
# (TensorFlow 1.x graph-mode API).
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
    features._shape.dims[0].value is None and\
    features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
    labels._shape.dims[0].value is None and\
    labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
# NOTE(review): (10) is the int 10, not a 1-tuple; this relies on TensorShape
# comparing equal to an int -- confirm before touching this graded cell.
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
# NOTE(review): -sum(labels * log(softmax)) can produce NaN/-inf for saturated
# predictions; tf.nn.softmax_cross_entropy_with_logits is the numerically
# stable form -- left unchanged because this cell is graded as-is.
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases: run the loss once on each feed dict to verify graph wiring,
# and check that biases really initialized to zeros.
with tf.Session() as session:
    session.run(init)
    session.run(loss, feed_dict=train_feed_dict)
    session.run(loss, feed_dict=valid_feed_dict)
    session.run(loss, feed_dict=test_feed_dict)
    biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: Problem 2
Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.
<img src="image/network_diagram.png" style="height: 40%;width: 40%; position: relative; right: 10%">
For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image's letter (notMNIST labels are the letters A-J), so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network.
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 10
learning_rate = 0.08
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            # (slicing past the end just yields a shorter final batch)
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer and get loss
            _, l = session.run(
                [optimizer, loss],
                feed_dict={features: batch_features, labels: batch_labels})
            # Log every 50 batches
            if not batch_i % log_batch_step:
                # Calculate Training and Validation accuracy
                training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
                validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
                # Log batches (x-axis is cumulative batch count across epochs)
                previous_batch = batches[-1] if batches else 0
                batches.append(log_batch_step + previous_batch)
                loss_batch.append(l)
                train_acc_batch.append(training_accuracy)
                valid_acc_batch.append(validation_accuracy)
        # Check accuracy against Validation data
        validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Plot the logged loss (top) and accuracies (bottom) over training
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/Learn_Rate_Tune_Image.png" style="height: 70%;width: 70%">
Problem 3
Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Learning Rate:
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
Configuration 2
* Epochs:
* 1
* 2
* 3
* 4
* 5
* Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
### DON'T MODIFY ANYTHING BELOW ###
# Retrain from scratch with the chosen hyper-parameters, then evaluate once
# against the held-out test set.
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer
            _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
    # Check accuracy against Test data
    test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Best Hyper-parameters
epochs: 1, Learning Rate: 0.1 -> Validation accuracy: 0.734
epochs: 5, Learning Rate: 0.2 -> Validation accuracy: 0.760
epochs: 5, Learning Rate: 0.1 -> Validation accuracy: 0.766
Test
You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
End of explanation
"""
|
hktxt/MachineLearning | ML/Week 3 Assessment Orthogonal Projections.ipynb | gpl-3.0 | # PACKAGE: DO NOT EDIT
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import numpy as np
from sklearn.datasets import fetch_olivetti_faces, fetch_lfw_people
from ipywidgets import interact
%matplotlib inline
image_shape = (64, 64)
# Load faces data
dataset = fetch_olivetti_faces()
faces = dataset.data
"""
Explanation: Week 3 Assessment: Orthogonal Projections
Learning Objectives
In this week, we will write functions which perform orthogonal projections.
By the end of this week, you should be able to
Write code that projects data onto lower-dimensional subspaces.
Understand the real world applications of projections.
We highlight some tips and tricks which would be useful when you implement numerical
algorithms that you have never encountered before.
You are invited to think about these concepts when you
write your program.
The important thing is to learn to map from mathematical equations to code. It is not always
easy to do so, but you will get better at it with more practice.
We will apply this to project high-dimensional face images onto lower dimensional basis which we call "eigenfaces". We will also revisit the problem of linear regression, but from the perspective of solving normal equations,
the concept which you apply to derive the formula for orthogonal projections. We will apply this to predict housing
prices for the Boston housing dataset, which is a classic example in machine learning.
If you are having issues with the grader, be sure to checkout the Q&A.
If you are stuck with the programming assignments, you can visit the discussion forum and discuss with your peers.
End of explanation
"""
import numpy.testing as np_test
def test_property_projection_matrix(P):
"""Test if the projection matrix satisfies certain properties.
In particular, we should have P @ P = P, and P = P^T
"""
np_test.assert_almost_equal(P, P @ P)
np_test.assert_almost_equal(P, P.T)
def test_property_projection(x, p):
"""Test orthogonality of x and its projection p."""
np_test.assert_almost_equal(np.dot(p-x, p), 0)
# GRADED FUNCTION: DO NOT EDIT THIS LINE
# Projection 1d
# ===YOU SHOULD EDIT THIS FUNCTION===
def projection_matrix_1d(b):
    """Compute the projection matrix onto the space spanned by `b`.

    P = (b b^T) / (b^T b), i.e. the outer product of b with itself divided
    by the squared norm of b.

    Args:
        b: ndarray of dimension (D,), the basis for the subspace

    Returns:
        P: ndarray of shape (D, D), the projection matrix
    """
    b = b.reshape(-1, 1)  # column vector of shape (D, 1)
    # The denominator b^T b is a (1, 1) array and broadcasts over the
    # (D, D) outer product b b^T.
    P = (b @ b.T) / (b.T @ b)
    return P
# ===YOU SHOULD EDIT THIS FUNCTION===
def project_1d(x, b):
    """Project `x` onto the 1-D subspace spanned by `b`.

    Args:
        x: ndarray of dimension (D,), the vector to be projected
        b: ndarray of dimension (D,), the basis for the subspace

    Returns:
        p: ndarray of dimension (D,), the projection of x onto span{b}
    """
    # P @ x with a 1-D x already yields a 1-D result, so no reshaping needed.
    p = projection_matrix_1d(b) @ x
    return p
# Projection onto general subspace
# ===YOU SHOULD EDIT THIS FUNCTION===
def projection_matrix_general(B):
    """Compute the projection matrix onto the space spanned by the columns of `B`.

    P = B (B^T B)^{-1} B^T.  The pseudo-inverse is used so the computation is
    well defined even when B^T B is singular (linearly dependent columns).

    Args:
        B: ndarray of dimension (D, M), the basis for the subspace

    Returns:
        P: ndarray of shape (D, D), the projection matrix
    """
    P = B @ np.linalg.pinv(B.T @ B) @ B.T
    return P
# ===YOU SHOULD EDIT THIS FUNCTION===
def project_general(x, B):
    """Project `x` onto the subspace spanned by the columns of `B`.

    (The original docstring wrongly said this returns the projection matrix.)

    Args:
        x: ndarray of dimension (D,), the vector to be projected
        B: ndarray of dimension (D, M), the basis for the subspace

    Returns:
        p: ndarray of dimension (D,), the projection of x onto span(B)
    """
    p = projection_matrix_general(B) @ x
    return p
"""
Explanation: Advice for testing numerical algorithms
Testing machine learning algorithms (or numerical algorithms in general)
is sometimes really hard as it depends on the dataset
to produce an answer, and you will never be able to test your algorithm on all the datasets
we have in the world. Nevertheless, we have some tips for you to help you identify bugs in
your implementations.
1. Test on small dataset
Test your algorithms on small dataset: datasets of size 1 or 2 sometimes will suffice. This
is useful because you can (if necessary) compute the answers by hand and compare them with
the answers produced by the computer program you wrote. In fact, these small datasets can even have special numbers,
which will allow you to compute the answers by hand easily.
2. Find invariants
Invariants refer to properties of your algorithm and functions that are maintained regardless
of the input. We will highlight this point later in this notebook where you will see functions,
which will check invariants for some of the answers you produce.
Invariants you may want to look for:
1. Does your algorithm always produce a positive/negative answer, or a positive definite matrix?
2. If the algorithm is iterative, do the intermediate results increase/decrease monotonically?
3. Does your solution relate with your input in some interesting way, e.g. orthogonality?
When you have a set of invariants, you can generate random inputs and make
assertions about these invariants. This is sometimes known as fuzzing, which has proven to be a very effective technique for identifying bugs in programs.
Finding invariants is hard, and sometimes there simply isn't any invariant. However, DO take advantage of them if you can find them. They are the most powerful checks when you have them.
1. Orthogonal Projections
Recall that for projection of a vector $x$ onto a 1-dimensional subspace $U$ with basis vector $\boldsymbol b$ we have
$${\pi_U}(\boldsymbol x) = \frac{\boldsymbol b\boldsymbol b^T}{{\lVert \boldsymbol b \rVert}^2}\boldsymbol x $$
And for the general projection onto an M-dimensional subspace $U$ with basis vectors $\boldsymbol b_1,\dotsc, \boldsymbol b_M$ we have
$${\pi_U}(\boldsymbol x) = \boldsymbol B(\boldsymbol B^T\boldsymbol B)^{-1}\boldsymbol B^T\boldsymbol x $$
where
$$\boldsymbol B = (\boldsymbol b_1|...|\boldsymbol b_M)$$
Your task is to implement orthogonal projections. We can split this into two steps
1. Find the projection matrix $\boldsymbol P$ that projects any $\boldsymbol x$ onto $U$.
2. The projected vector $\pi_U(\boldsymbol x)$ of $\boldsymbol x$ can then be written as $\pi_U(\boldsymbol x) = \boldsymbol P\boldsymbol x$.
Note that for orthogonal projections, we have the following invariants:
End of explanation
"""
# Orthogonal projection in 2d
# define basis vector for subspace
b = np.array([2,1]).reshape(-1,1)
# point to be projected later
x = np.array([1,2]).reshape(-1, 1)
# NOTE: b and x above are for your own experiments; the unit tests below
# use their own fixed inputs with hand-computed expected values.
# Test 1D
np_test.assert_almost_equal(projection_matrix_1d(np.array([1, 2, 2])),
                            np.array([[1, 2, 2],
                                      [2, 4, 4],
                                      [2, 4, 4]]) / 9)
np_test.assert_almost_equal(project_1d(np.ones(3),
                                       np.array([1, 2, 2])),
                            np.array([5, 10, 10]) / 9)
B = np.array([[1, 0],
              [1, 1],
              [1, 2]])
# Test General
np_test.assert_almost_equal(projection_matrix_general(B),
                            np.array([[5, 2, -1],
                                      [2, 2, 2],
                                      [-1, 2, 5]]) / 6)
np_test.assert_almost_equal(project_general(np.array([6, 0, 0]), B),
                            np.array([5, 2, -1]))
print('correct')
# Write your own test cases here, use random inputs, utilize the invariants we have!
"""
Explanation: We have included some unittest for you to test your implementation.
End of explanation
"""
from sklearn.datasets import fetch_olivetti_faces, fetch_lfw_people
from ipywidgets import interact
%matplotlib inline
image_shape = (64, 64)
# Load faces data
dataset = fetch_olivetti_faces()
faces = dataset.data
# Standardize each pixel across the dataset (zero mean, unit variance per pixel)
mean = faces.mean(axis=0)
std = faces.std(axis=0)
faces_normalized = (faces - mean) / std
"""
Explanation: 2. Eigenfaces (optional)
Next, we will take a look at what happens if we project some dataset consisting of human faces onto some basis we call
the "eigenfaces".
End of explanation
"""
# Load the precomputed "eigenfaces" basis; keep only the first 50 basis
# vectors -- increasing this count improves the reconstruction below.
B = np.load('eigenfaces.npy')[:50] # we use the first 50 dimensions of the basis, you should play around with the dimension here.
print("the eigenfaces have shape {}".format(B.shape))
"""
Explanation: The data for the basis has been saved in a file named eigenfaces.py, first we load it into the variable B.
End of explanation
"""
plt.figure(figsize=(10,10))
plt.imshow(np.hstack(B[:5]), cmap='gray');
"""
Explanation: Along the first dimension of B, each instance is a 64x64 image, an "eigenface". Let's visualize
a few of them.
End of explanation
"""
@interact(i=(0, 10))
def show_eigenface_reconstruction(i):
    """Show face `i` next to its reconstruction from the eigenface basis."""
    original_face = faces_normalized[i].reshape(64, 64)
    # Reshape B into a (64*64, 50) matrix whose columns are the eigenfaces,
    # so it represents a basis for the subspace we project onto.
    B_basis = B.reshape(50, 64 * 64).T
    # Project the *flattened* face (a 4096-vector) onto the eigenface
    # subspace. The original code projected the 64x64 reshaped matrix,
    # which fails: the projection matrix is (4096, 4096) and cannot be
    # multiplied with a (64, 64) array. Reshape back for display.
    face_reconstruction = project_general(faces_normalized[i], B_basis).reshape(64, 64)
    plt.figure()
    plt.imshow(np.hstack([original_face, face_reconstruction]), cmap='gray')
    plt.show()
"""
Explanation: Take a look at what happens if we project our faces onto the basis spanned by these "eigenfaces". This requires
us to reshape B into the same shape as the matrix representing the basis as we have done earlier. Then we can
reuse the functions we implemented earlier to compute the projection matrix and the projection. Complete the code below to visualize the reconstructed faces that lie on the subspace spanned by the "eigenfaces".
End of explanation
"""
# Generate a noisy 1-D linear dataset y = theta * x + noise
x = np.linspace(0, 10, num=50)
random = np.random.RandomState(42) # we use the same random seed so we get deterministic output
theta = random.randn() # we use a random theta, our goal is to perform linear regression which finds theta_hat that minimizes the objective
y = theta * x + random.rand(len(x)) # our theta is corrupted by some noise, so that we do not get (x,y) on a line
plt.scatter(x, y);
plt.xlabel('x');
plt.ylabel('y');
# Solve the normal equation X^T X theta = X^T y for the optimal parameter
X = x.reshape(-1,1)
Y = y.reshape(-1,1)
theta_hat = np.linalg.solve(X.T @ X,
                            X.T @ Y)
"""
Explanation: Question:
What would happen to the reconstruction as we increase the dimension of our basis?
Modify the code above to visualize it.
3. Least square for predicting Boston housing prices (optional)
Consider the case where we have a linear model for predicting housing prices. We are predicting the housing prices based on features in the
housing dataset. If we collect the features in a vector $\boldsymbol{x}$, and the price of the houses as $y$. Assuming that we have
a prediction model in the way such that $\hat{y}_i = f(\boldsymbol {x}_i) = \boldsymbol \theta^T\boldsymbol{x}_i$.
If we collect the dataset of $n$ datapoints $\boldsymbol x_i$ in a data matrix $\boldsymbol X$, we can write down our model like this:
$$
\begin{bmatrix}
\boldsymbol {x}_1^T \
\vdots \
\boldsymbol {x}_n^T
\end{bmatrix} \boldsymbol {\theta} = \begin{bmatrix}
y_1 \
\vdots \
y_n
\end{bmatrix}.
$$
That is,
$$
\boldsymbol X\boldsymbol{\theta} = \boldsymbol {y}.
$$
where $\boldsymbol y$ collects all house prices $y_1,\dotsc, y_n$ of the training set.
Our goal is to find the best $\boldsymbol \theta$ that minimizes the following (least squares) objective:
$$
\begin{eqnarray}
&\sum^n_{i=1}{\lVert \boldsymbol \theta^T\boldsymbol {x}_i - y_i \rVert^2} \
&= (\boldsymbol X\boldsymbol {\theta} - \boldsymbol y)^T(\boldsymbol X\boldsymbol {\theta} - \boldsymbol y).
\end{eqnarray}
$$
Note that we aim to minimize the squared error between the prediction $\boldsymbol \theta^T\boldsymbol {x}_i$ of the model and the observed data point $y_i$ in the training set.
To find the optimal (maximum likelihood) parameters $\boldsymbol \theta^*$, we set the gradient of the least-squares objective to $\boldsymbol 0$:
$$
\begin{eqnarray}
\nabla_{\boldsymbol\theta}(\boldsymbol X{\boldsymbol \theta} - \boldsymbol y)^T(\boldsymbol X{\boldsymbol \theta} - \boldsymbol y) &=& \boldsymbol 0 \
\iff \nabla_{\boldsymbol\theta}(\boldsymbol {\theta}^T\boldsymbol X^T - \boldsymbol y^T)(\boldsymbol X\boldsymbol {\theta} - \boldsymbol y) &=& \boldsymbol 0 \
\iff \nabla_{\boldsymbol\theta}(\boldsymbol {\theta}^T\boldsymbol X^T\boldsymbol X\boldsymbol {\theta} - \boldsymbol y^T\boldsymbol X\boldsymbol \theta - \boldsymbol \theta^T\boldsymbol X^T\boldsymbol y + \boldsymbol y^T\boldsymbol y ) &=& \boldsymbol 0 \
\iff 2\boldsymbol X^T\boldsymbol X\boldsymbol \theta - 2\boldsymbol X^T\boldsymbol y &=& \boldsymbol 0 \
\iff \boldsymbol X^T\boldsymbol X\boldsymbol \theta &=& \boldsymbol X^T\boldsymbol y.
\end{eqnarray}
$$
The solution, which gives zero gradient, solves the normal equation
$$\boldsymbol X^T\boldsymbol X\boldsymbol \theta = \boldsymbol X^T\boldsymbol y.$$
If you recall from the lecture on projection onto n-dimensional subspace, this is exactly the same as the normal equation we have for projection (take a look at the notes here if you don't remember them).
This means our optimal parameter vector, which minimizes our objective, is given by
$$\boldsymbol \theta^* = (\boldsymbol X^T\boldsymbol X)^{-1}\boldsymbol X^T\boldsymbol y.$$
Let's put things into perspective and try to find the best parameter $\theta^*$
of the line $y = \theta x$, where $x,\theta\in\mathbb{R}$ for a given a training set $\boldsymbol X\in\mathbb{R}^n$ and $\boldsymbol y\in\mathbb{R}^n$.
Note that in our example, the features $x_i$ are only scalar, such that the parameter $\theta$ is also only a scalar. The derivation above holds for general parameter vectors (not only for scalars).
Note: This is exactly the same problem as linear regression which was discussed in Mathematics for Machine Learning: Multivariate Calculus. However, rather than finding the optimimal $\theta^*$ with gradient descent, we can solve this using the normal equation.
End of explanation
"""
fig, ax = plt.subplots()
ax.scatter(x, y);
xx = [0, 10]
yy = [0, 10 * theta_hat[0,0]]
ax.plot(xx, yy, 'red', alpha=.5);
ax.set(xlabel='x', ylabel='y');
print("theta = %f" % theta)
print("theta_hat = %f" % theta_hat)
"""
Explanation: We can show how our $\hat{\theta}$ fits the line.
End of explanation
"""
N = np.arange(10, 10000, step=10)
# For each sample size n, draw a fresh dataset of n points from the same
# model y = theta * x + noise, solve the normal equation for theta*, and
# record the error |theta* - theta|.  (The original cell defined N but
# never used it, computing theta* only once from the 50-point dataset.)
# Kept for backward compatibility with the original cell:
thetao = np.dot(np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T), y)
theta_error = []
for n in N:
    x_n = np.linspace(0, 10, num=n)
    y_n = theta * x_n + random.rand(n)
    X_n = x_n.reshape(-1, 1)
    theta_n = np.linalg.pinv(X_n.T @ X_n) @ X_n.T @ y_n
    theta_error.append(np.abs(theta_n[0] - theta))
# The error shrinks roughly like 1/sqrt(n) as the sample size grows
plt.plot(N, theta_error)
plt.xlabel('number of datapoints n')
plt.ylabel(r'$\lVert \theta^* - \theta \rVert$');
"""
Explanation: What would happen to $\lVert {\theta^*} - \theta \rVert$ if we increased the number of datapoints?
Make your hypothesis, and write a small program to confirm it!
End of explanation
"""
from sklearn.datasets import load_boston
boston = load_boston()
boston_X, boston_y = boston.data, boston.target
print("The housing dataset has size {}".format(boston_X.shape))
# Report the size of the *prices* vector (the original printed boston_X.shape twice)
print("The prices has size {}".format(boston_y.shape))
# Solve the normal equation X^T X theta = X^T y for the Boston data.
# The original code mistakenly reused the toy 1-D dataset X, y here.
boston_theta_hat = np.dot(np.dot(np.linalg.pinv(np.dot(boston_X.T, boston_X)), boston_X.T), boston_y)
"""
Explanation: We see how we can find the best $\theta$. In fact, we can extend our methodology to higher dimensional dataset. Let's now try applying the same methodology to the boston housing prices dataset.
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.13.0/examples/notebooks/generated/kernel_density.ipynb | bsd-3-clause | %matplotlib inline
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
"""
Explanation: Kernel Density Estimation
Kernel density estimation is the process of estimating an unknown probability density function using a kernel function $K(u)$. While a histogram counts the number of data points in somewhat arbitrary regions, a kernel density estimate is a function defined as the sum of a kernel function on every data point. The kernel function typically exhibits the following properties:
Symmetry such that $K(u) = K(-u)$.
Normalization such that $\int_{-\infty}^{\infty} K(u) \ du = 1$ .
Monotonically decreasing such that $K'(u) < 0$ when $u > 0$.
Expected value equal to zero such that $\mathrm{E}[K] = 0$.
For more information about kernel density estimation, see for instance Wikipedia - Kernel density estimation.
A univariate kernel density estimator is implemented in sm.nonparametric.KDEUnivariate.
In this example we will show the following:
Basic usage, how to fit the estimator.
The effect of varying the bandwidth of the kernel using the bw argument.
The various kernel functions available using the kernel argument.
End of explanation
"""
np.random.seed(12345) # Seed the random number generator for reproducible results
"""
Explanation: A univariate example
End of explanation
"""
# Location, scale and weight for the two distributions
dist1_loc, dist1_scale, weight1 = -1, 0.5, 0.25
dist2_loc, dist2_scale, weight2 = 1, 0.5, 0.75
# Sample 250 observations from the two-component Gaussian mixture
# (25% from N(-1, 0.5), 75% from N(1, 0.5)) -> a bimodal distribution
obs_dist = mixture_rvs(
    prob=[weight1, weight2],
    size=250,
    dist=[stats.norm, stats.norm],
    kwargs=(
        dict(loc=dist1_loc, scale=dist1_scale),
        dict(loc=dist2_loc, scale=dist2_scale),
    ),
)
"""
Explanation: We create a bimodal distribution: a mixture of two normal distributions with locations at -1 and 1.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Scatter plot of data samples and histogram
# (the |N(0,1)| y-values only jitter the sample markers vertically
# so overlapping points remain visible)
ax.scatter(
    obs_dist,
    np.abs(np.random.randn(obs_dist.size)),
    zorder=15,
    color="red",
    marker="x",
    alpha=0.5,
    label="Samples",
)
lines = ax.hist(obs_dist, bins=20, edgecolor="k", label="Histogram")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: The simplest non-parametric technique for density estimation is the histogram.
End of explanation
"""
# Fit a univariate KDE with default settings (Gaussian kernel,
# normal-reference bandwidth)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit()  # Estimate the densities
"""
Explanation: Fitting with the default arguments
The histogram above is discontinuous. To compute a continuous probability density function,
we can use kernel density estimation.
We initialize a univariate kernel density estimator using KDEUnivariate.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(
    obs_dist,
    bins=20,
    density=True,
    label="Histogram from samples",
    zorder=5,
    edgecolor="k",
    alpha=0.5,
)
# Plot the KDE as fitted using the default arguments
ax.plot(kde.support, kde.density, lw=3, label="KDE from samples", zorder=10)
# Plot the true distribution: the weighted sum of the two component
# normal pdfs that generated the data
true_values = (
    stats.norm.pdf(loc=dist1_loc, scale=dist1_scale, x=kde.support) * weight1
    + stats.norm.pdf(loc=dist2_loc, scale=dist2_scale, x=kde.support) * weight2
)
ax.plot(kde.support, true_values, lw=3, label="True distribution", zorder=15)
# Plot the samples (jittered vertically for visibility)
ax.scatter(
    obs_dist,
    np.abs(np.random.randn(obs_dist.size)) / 40,
    marker="x",
    color="red",
    zorder=20,
    label="Samples",
    alpha=0.5,
)
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: We present a figure of the fit, as well as the true distribution.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(
    obs_dist,
    bins=25,
    label="Histogram from samples",
    zorder=5,
    edgecolor="k",
    density=True,
    alpha=0.5,
)
# Plot the KDE for various bandwidths: small bw undersmooths (wiggly),
# large bw oversmooths (merges the two modes)
for bandwidth in [0.1, 0.2, 0.4]:
    kde.fit(bw=bandwidth)  # Estimate the densities
    ax.plot(
        kde.support,
        kde.density,
        "--",
        lw=2,
        color="k",
        zorder=10,
        label="KDE from samples, bw = {}".format(round(bandwidth, 2)),
    )
# Plot the true distribution
ax.plot(kde.support, true_values, lw=3, label="True distribution", zorder=15)
# Plot the samples (jittered vertically for visibility)
ax.scatter(
    obs_dist,
    np.abs(np.random.randn(obs_dist.size)) / 50,
    marker="x",
    color="red",
    zorder=20,
    label="Data samples",
    alpha=0.5,
)
ax.legend(loc="best")
ax.set_xlim([-3, 3])
ax.grid(True, zorder=-5)
"""
Explanation: In the code above, default arguments were used. We can also vary the bandwidth of the kernel, as we will now see.
Varying the bandwidth using the bw argument
The bandwidth of the kernel can be adjusted using the bw argument.
In the following example, a bandwidth of bw=0.2 seems to fit the data well.
End of explanation
"""
from statsmodels.nonparametric.kde import kernel_switch
list(kernel_switch.keys())
"""
Explanation: Comparing kernel functions
In the example above, a Gaussian kernel was used. Several other kernels are also available.
End of explanation
"""
# Create a figure with one subplot per available kernel function.
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, (ker_name, ker_class) in enumerate(kernel_switch.items()):
    # Initialize the kernel object
    kernel = ker_class()
    # Sample the kernel over its domain; kernel.domain can be falsy
    # (presumably None for kernels with unbounded support), in which
    # case fall back to [-3, 3].
    domain = kernel.domain or [-3, 3]
    x_vals = np.linspace(*domain, num=2 ** 10)
    y_vals = kernel(x_vals)
    # Create a subplot, set the title
    ax = fig.add_subplot(3, 3, i + 1)
    ax.set_title('Kernel function "{}"'.format(ker_name))
    ax.plot(x_vals, y_vals, lw=3, label="{}".format(ker_name))
    # Mark the origin, where the kernel is centered.
    ax.scatter([0], [0], marker="x", color="red")
    plt.grid(True, zorder=-5)
    ax.set_xlim(domain)
plt.tight_layout()
"""
Explanation: The available kernel functions
End of explanation
"""
# Create three equidistant points to fit each kernel on
data = np.linspace(-1, 1, 3)
kde = sm.nonparametric.KDEUnivariate(data)
# Create a figure with one subplot per kernel
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, kernel in enumerate(kernel_switch.keys()):
    # Create a subplot, set the title
    ax = fig.add_subplot(3, 3, i + 1)
    ax.set_title('Kernel function "{}"'.format(kernel))
    # Fit the model (estimate densities). fft=False is needed because
    # statsmodels implements the FFT path only for the Gaussian kernel.
    kde.fit(kernel=kernel, fft=False, gridsize=2 ** 10)
    # Plot the resulting density estimate and the three data points.
    ax.plot(kde.support, kde.density, lw=3, label="KDE from samples", zorder=10)
    ax.scatter(data, np.zeros_like(data), marker="x", color="red")
    plt.grid(True, zorder=-5)
    ax.set_xlim([-3, 3])
plt.tight_layout()
"""
Explanation: The available kernel functions on three data points
We now examine how the kernel density estimate will fit to three equally spaced data points.
End of explanation
"""
obs_dist = mixture_rvs(
[0.25, 0.75],
size=250,
dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1, scale=0.5), dict(loc=1, scale=1, args=(1, 0.5))),
)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit()
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.hist(obs_dist, bins=20, density=True, edgecolor="k", zorder=4, alpha=0.5)
ax.plot(kde.support, kde.density, lw=3, zorder=7)
# Plot the samples
ax.scatter(
obs_dist,
np.abs(np.random.randn(obs_dist.size)) / 50,
marker="x",
color="red",
zorder=20,
label="Data samples",
alpha=0.5,
)
ax.grid(True, zorder=-5)
"""
Explanation: A more difficult case
The fit is not always perfect. See the example below for a harder case.
End of explanation
"""
# Draw 1000 samples from a two-component normal mixture
# (25% centered at -1, 75% centered at +1, both with scale 0.5).
obs_dist = mixture_rvs(
    [0.25, 0.75],
    size=1000,
    dist=[stats.norm, stats.norm],
    kwargs=(dict(loc=-1, scale=0.5), dict(loc=1, scale=0.5)),
)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
# Fit on a fine grid (2**10 points) for smooth downstream evaluation.
kde.fit(gridsize=2 ** 10)
# The fitted KDE behaves like a distribution: differential entropy ...
kde.entropy
# ... and point evaluation of the estimated density.
kde.evaluate(-1)
"""
Explanation: The KDE is a distribution
Since the KDE is a distribution, we can access attributes and methods such as:
entropy
evaluate
cdf
icdf
sf
cumhazard
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cdf, lw=3, label="CDF")
ax.plot(np.linspace(0, 1, num=kde.icdf.size), kde.icdf, lw=3, label="Inverse CDF")
ax.plot(kde.support, kde.sf, lw=3, label="Survival function")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: Cumulative distribution, its inverse, and the survival function
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cumhazard, lw=3, label="Cumulative Hazard Function")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: The Cumulative Hazard Function
End of explanation
"""
|
googledatalab/notebooks | tutorials/BigQuery/BigQuery Magic Commands and DML.ipynb | apache-2.0 | %%bq query --name UniqueNames2013
WITH UniqueNames2013 AS
(SELECT DISTINCT name
FROM `bigquery-public-data.usa_names.usa_1910_2013`
WHERE Year = 2013)
SELECT * FROM UniqueNames2013
"""
Explanation: BigQuery Magic Commands and DML
The examples in this notebook introduce features of BigQuery Standard SQL and BigQuery SQL Data Manipulation Language (beta). BigQuery Standard SQL is compliant with the SQL 2011 standard. You've already seen the use of the magic command %%bq in the Hello BigQuery and BigQuery Commands notebooks. This command and others in the Google Cloud Datalab API support BigQuery Standard SQL.
Using the BigQuery Magic command with Standard SQL
First, we will cover some more uses of the %%bq magic command. Let's define a query to work with:
End of explanation
"""
%%bq -h
"""
Explanation: Now let's list all available commands to work with %%bq
End of explanation
"""
%%bq dryrun -q UniqueNames2013
"""
Explanation: The dryrun argument in %%bq can be helpful to confirm the syntax of the SQL query. Instead of executing the query, it will only return some statistics:
End of explanation
"""
%%bq sample -q UniqueNames2013
"""
Explanation: Now, let's get a small sample of the results using the sample argument in %%bq:
End of explanation
"""
%%bq execute -q UniqueNames2013
"""
Explanation: Finally, we can use the execute command in %%bq to display the results of our query:
End of explanation
"""
import google.datalab.bigquery as bq
# Create a new dataset (this will be deleted later in the notebook)
sample_dataset = bq.Dataset('sampleDML')
if not sample_dataset.exists():
    sample_dataset.create(friendly_name = 'Sample Dataset for testing DML', description = 'Created from Sample Notebook in Google Cloud Datalab')
sample_dataset.exists()
# To create a table, we need to create a schema for it.
# It's easiest to create a schema from some existing data, so this
# example demonstrates using an example object; the schema (column
# names and types) is presumably inferred from its values.
fruit_row = {
    'name': 'string value',
    'count': 0
}
# overwrite=True replaces the table if it already exists.
sample_table1 = bq.Table("sampleDML.fruit_basket").create(schema = bq.Schema.from_data([fruit_row]),
                                                          overwrite = True)
"""
Explanation: Using Google BigQuery SQL Data Manipulation Language
Below, we will demonstrate how to use Google BigQuery SQL Data Manipulation Language (DML) in Datalab.
Preparation
First, let's import the BigQuery module, and create a sample dataset and table to help demonstrate the features of Google BigQuery DML.
End of explanation
"""
%%bq query
INSERT sampleDML.fruit_basket (name, count)
VALUES('banana', 5),
('orange', 10),
('apple', 15),
('mango', 20)
"""
Explanation: Inserting Data
We can add rows to our newly created fruit_basket table by using an INSERT statement in our BigQuery Standard SQL query.
End of explanation
"""
%%bq query
INSERT sampleDML.fruit_basket (name, count)
SELECT *
FROM UNNEST([('peach', 25), ('watermelon', 30)])
"""
Explanation: You may rewrite the previous query as:
End of explanation
"""
%%bq query
INSERT sampleDML.fruit_basket(name, count)
WITH w AS (
SELECT ARRAY<STRUCT<name string, count int64>>
[('cherry', 35),
('cranberry', 40),
('pear', 45)] col
)
SELECT name, count FROM w, UNNEST(w.col)
"""
Explanation: You can also use a WITH clause with INSERT and SELECT.
End of explanation
"""
fruit_row_detailed = {
'name': 'string value',
'count': 0,
'readytoeat': False
}
sample_table2 = bq.Table("sampleDML.fruit_basket_detailed").create(schema = bq.Schema.from_data([fruit_row_detailed]),
overwrite = True)
%%bq query
INSERT sampleDML.fruit_basket_detailed (name, count, readytoeat)
SELECT name, count, false
FROM sampleDML.fruit_basket
"""
Explanation: Here is an example that copies one table's contents into another. First we will create a new table.
End of explanation
"""
%%bq query
UPDATE sampleDML.fruit_basket_detailed
SET readytoeat = True
WHERE name = 'banana'
"""
Explanation: Updating Data
You can update rows in the fruit_basket table by using an UPDATE statement in the BigQuery Standard SQL query. We will try to do this using the Datalab BigQuery API.
End of explanation
"""
%%bq tables view --name sampleDML.fruit_basket_detailed
"""
Explanation: To view the contents of a table in BigQuery, use %%bq tables view command:
End of explanation
"""
%%bq query
DELETE sampleDML.fruit_basket
WHERE name in ('cherry', 'cranberry')
"""
Explanation: Deleting Data
You can delete rows in the fruit_basket table by using a DELETE statement in the BigQuery Standard SQL query.
End of explanation
"""
%%bq query
DELETE sampleDML.fruit_basket_detailed
WHERE NOT EXISTS
(SELECT * FROM sampleDML.fruit_basket
WHERE fruit_basket_detailed.name = fruit_basket.name)
"""
Explanation: Use the following query to delete the corresponding entries in sampleDML.fruit_basket_detailed
End of explanation
"""
# Clear out sample resources
sample_dataset.delete(delete_contents = True)
"""
Explanation: Deleting Resources
End of explanation
"""
|
pastas/pastas | examples/notebooks/09_calibration_options.ipynb | mit | import pandas as pd
import matplotlib.pyplot as plt
import pastas as ps
ps.show_versions()
ps.set_log_level("ERROR")
"""
Explanation: Calibrating Pastas models
R.A. Collenteur, University of Graz
After a model is constructed, the model parameters can be estimated using the ml.solve method. It can (and will) happen that the model fit after solving is not as good as expected. This may be the result of the settings that are used to solve the model or the way the model was constructed. In this notebook common pitfalls and various tips and tricks that may help to improve the calibration of Pastas models are shared.
In general, the following strategy is advised to solve problems with the parameter estimation:
Check the input time series and solve settings
Change the initial parameters,
Change the model structure,
Change the solve method.
End of explanation
"""
# Load the observed heads from 1985 onwards (squeeze=True returns a Series).
head = pd.read_csv("../data/B32C0639001.csv", parse_dates=['date'],
                   index_col='date', squeeze=True).loc["1985":]
# Load KNMI evaporation and precipitation. Note the two flux series
# deliberately end at different dates (2003 vs 2005) — used below to
# discuss calibration-period pitfalls. Units here are as returned by
# read_knmi — presumably m/day; they are rescaled to mm/day (* 1e3)
# in a later cell before fitting the non-linear recharge model.
evap = ps.read_knmi("../data/etmgeg_260.txt", variables="EV24").series.loc["1985":"2003"]
rain = ps.read_knmi("../data/etmgeg_260.txt", variables="RH").series.loc["1985":"2005"]
ps.plots.series(head, [evap, rain]);
"""
Explanation: Loading the data
In the following code-block some example data is loaded. It is good practice to visualize all time series before creating the time series model.
End of explanation
"""
ml = ps.Model(head)
rch = ps.rch.FlexModel()
rm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=ps.Gamma, name="rch")
ml.add_stressmodel(rm)
"""
Explanation: Make a model
Given the data above we create a Pastas model with a non-linear recharge model (ps.FlexModel) and a constant to simulate the groundwater level. We'll use this model to show how we may analyse different types of problems and how to solve them.
End of explanation
"""
#ml.solve? ## Run this to see other solve options
ml.solve()
ml.plots.results(figsize=(10,6));
"""
Explanation: Calibrating a model
In the above code-block a Pastas model was created, but not yet solved. To solve the model we call ml.solve(). This method has quite a few options (see also the docstring of the method) that influence the model calibration, for example:
tmin/tmax: select the time period used for calibration
noise: use a noise model to model the residuals or not
fit_constant: fit the constant as a parameter or not
warmup: length of the warmup period
solver: the solver that is used to estimate parameters
We start without providing any arguments to the solve method.
End of explanation
"""
ml = ps.Model(head)
# Non-linear recharge model; per the accompanying text it expects
# precipitation and evaporation in mm/day, hence the * 1e3 rescaling.
rch = ps.rch.FlexModel()
rm = ps.RechargeModel(rain * 1e3, evap * 1e3, recharge=rch, rfunc=ps.Gamma, name="rch")
ml.add_stressmodel(rm)
# Calibrate on 1986-2003 only, so one year of real flux data falls
# inside the (default 3650-day) warmup period before calibration starts.
ml.solve(tmin="1986", tmax="2003", report=False)
axes = ml.plots.results(tmin="1975", figsize=(10,6)) # Use tmin=1975 to show warmup period
axes[0].axvline("1986", c="k", linestyle="--"); # Start of calibration
"""
Explanation: The fit report and the Figure above show that the model is not that great. The parameters have large standard errors, the goodness-of-fit metrics are not that high, and the simulated time series shows a very different behavior to the observed groundwater level.
Checking the explanatory time series and solve settings
A common pitfall is that there is a problem with the explanatory time series (e.g., precipitation, pumping discharge). This should be the first thing to check when the model fit is not as good as expected.
Length of Time Series: The time series should in principle be available for the entire period of calibration,
Warmup Period: For some models it is necessary that the time series are also available before the calibration period, during the warmup period. This is for example the case with the non-linear recharge models (e.g., FlexModel, Berendrecht).
Units of Time Series (1): While Pastas is in principle unitless, the units of the time series can impact the model calibration. For example, a pumping discharge provided in m$^3$/day may lead to very small parameter values ('Gamma_A') that are harder to estimate. If you end up with very small parameters for the gain parameter, it may help to rescale the input time series.
Units of Time Series (2): The initial parameters and bounds for the non-linear recharge models are set for precipitation and evaporation time series provided in mm/day. Using these models with time series in m/day will give bad results.
Normalization of Time Series: Sometimes it can help to normalize the expanatory time series. For example, when using a river level that is high above a certain datum (e.g. tens of meters), it may help to subtract the mean water level from the time series first.
In the example model, many of these things are happening. First, the precipitation time series are not available for the entire calibration period. Secondly, because a non-linear model is applied, we need to to have precipitation and evaporation data before the calibration period starts (typically about one year is enough). We should therefore shorten the calibration period by using to 1986-2003. Note that we use 3650 days for the warmup period (warmup=3650 is the default), the last 365 days of which now has real precipitation and evaporation data . For the other 9 years the mean flux is used. Finally, the non-linear model requires the evaporation and precipitation in mm/day (unless we want to manually set all parameter bounds).
End of explanation
"""
ml.set_parameter("rch_n", initial=15) # Clearly wrong, just for educational purposes
ml.solve(noise=True, tmin="1986", tmax="2003", report=True)
"""
Explanation: Changing the explanatory time series and using the correct calibration period definitely improve the model fit in this example. Changing the explanatory time series a bit generally helps to resolve many issues with the calibration. If this does not work, we may try to help the solver a bit.
Improving initial parameters
Although Pastas tries to set sensible initial parameters when constructing a model, it occurs that the initial parameters set by Pastas are not a great place to start the search for the optimal parameters. In this case, it may be tried to manually adapt the initial parameters using the ml.set_parameter as follows:
End of explanation
"""
ml.solve(noise=False, report=False, tmin="1986", tmax="2003") # First solve without noise model
ml.solve(noise=True, initial=False, tmin="1986", tmax="2003", report=True) # Then solve with noise model, but do not initialize the parameters
axes = ml.plots.results(figsize=(10,6))
"""
Explanation: Often we do not know what good initial parameters are, but we do get a bad fit, like with this initial value for rch_n above. While solving the model with a noise model is recommended, it does make the parameter estimation more difficult and more sensitive to the initial parameter values. One solution that often helps is to first solve the model without a noise model, and then solve the model with a noise model but without re-initializing the parameters.
By default the parameters are initialized upon each solve, such that each time we call solve we obtain the same result. By setting initial=False we prevent the re-initialisation and use the optimal parameters as initial parameters. This can be done as follows:
End of explanation
"""
## Example to be added
"""
Explanation: After solving the model without a noise model (providing the solver an easier problem), we solve again with the parameter estimated from the solve without a noise model. This generally works well. We may also choose to fix parameters that are hard to estimate, perhaps because they are correlated to other parameters, to certain values.
Changing the model structure
At this point, one might start to think that the bad fit has something to do with the model structure. This could off course be an explanatory time series that is missing, but let's assume that is not the case. One thing that might help is too change the response function. This can either be from a complicated function to a simpler function (e.g., Gamma to Exponential) or the other way around (e.g., Gamma to FourParam). Another option could be to change other parts of the model structure, for example by applying a non-linear recharge model instead of a linear model.
End of explanation
"""
|
iutzeler/Introduction-to-Python-for-Data-Sciences | 4-2_Supervised_Learning.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
%matplotlib inline
# we create 40 separable points in R^2 around 2 centers (random_state=6 is a seed so that the set is separable)
X, y = make_blobs(n_samples=40, n_features=2, centers=2 , random_state=6)
print(X[:5,:],y[:5]) # print the first 5 points and labels
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
"""
Explanation: <table>
<tr>
<td width=15%><img src="./img/UGA.png"></img></td>
<td><center><h1>Introduction to Python for Data Sciences</h1></center></td>
<td width=15%><a href="http://www.iutzeler.org" style="font-size: 16px; font-weight: bold">Franck Iutzeler</a> </td>
</tr>
</table>
<br/><br/>
<center><a style="font-size: 40pt; font-weight: bold">Chap. 4 - Scikit Learn </a></center>
<br/><br/>
2- Supervised Learning
In the session, we will investigate some examples on how to deal with popular learning problems using standard algorithms. Many other problems and algorithms exist so this course is not at all exhaustive.
Classification
End of explanation
"""
from sklearn.svm import SVC # Support vector classifier i.e. Classifier by SVM
modelSVMLinear = SVC(kernel="linear")
modelSVMLinear.fit(X,y)
"""
Explanation: Support Vector Machines (SVM) are based on learning a vector $w$ and an intercept $b$ such that the hyperplane $w^T x - b = 0$ separates the data i.e. $a$ belongs to one class if $w^T a - b > 0$ and the other elsewhere.
They were later extended to Kernel methods that is $\kappa(w, a) - b = 0$ is now the separating curve where $\kappa$ is the kernel, typically:
* linear: $\kappa(x,y)= x^T y$ (original SVM)
* polynomial: $\kappa(x,y)= (x^T y)^d$
* Gaussian radial basis function (rfb): $\kappa(x,y)= \exp( - \gamma \| x - y \|^2 )$
End of explanation
"""
def plot_svc_decision_function(model, ax=None, plot_support=True):
    """Draw the decision boundary and margins of a fitted 2D SVC.

    Contours are drawn at decision-function levels -1, 0 and +1 on the
    given (or current) axes; optionally the support vectors are circled.
    The existing axes limits are preserved.
    """
    if ax is None:
        ax = plt.gca()
    x_range = ax.get_xlim()
    y_range = ax.get_ylim()
    # Build a 30x30 evaluation grid spanning the current axes limits.
    grid_x = np.linspace(x_range[0], x_range[1], 30)
    grid_y = np.linspace(y_range[0], y_range[1], 30)
    mesh_y, mesh_x = np.meshgrid(grid_y, grid_x)
    points = np.vstack([mesh_x.ravel(), mesh_y.ravel()]).T
    decision = model.decision_function(points).reshape(mesh_x.shape)
    # Solid line: boundary (level 0); dashed lines: the +/-1 margins.
    ax.contour(mesh_x, mesh_y, decision, colors='k',
               levels=[-1, 0, 1], alpha=0.5,
               linestyles=['--', '-', '--'])
    if plot_support:
        # Circle the support vectors with large hollow markers.
        sv = model.support_vectors_
        ax.scatter(sv[:, 0], sv[:, 1],
                   s=300, linewidth=1, facecolors='none')
    ax.set_xlim(x_range)
    ax.set_ylim(y_range)
plt.scatter(X[:, 0], X[:, 1], c=y , cmap=plt.cm.Paired)
plot_svc_decision_function(modelSVMLinear)
"""
Explanation: The following illustration can be found in the Python Data Science Handbook by Jake VanderPlas.
End of explanation
"""
# we create points in R^2 around 2 centers (random_state=48443 is a seed so that the set is *not* separable)
X, y = make_blobs(n_samples=100, n_features=2, centers=2 , random_state=48443)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
"""
Explanation: We see clearly that the linear SVM seeks at maximizing the margin between the hyperplane and the two well defined classes from the data.
Non-separable data
In real cases, the data is usually not linearly separable as before.
End of explanation
"""
from sklearn.model_selection import train_test_split # sklearn >= 0.18 (was sklearn.cross_validation before)
# Hold out half of the data for testing.
XTrain, XTest, yTrain, yTest = train_test_split(X,y,test_size = 0.5) # split data in two
# Two linear SVMs differing only in the slack penalty C:
# small C -> wide, permissive margin; large C -> narrow, strict margin.
model1 = SVC(kernel="linear",C=0.01)
model1.fit(XTrain,yTrain)
model2 = SVC(kernel="linear",C=100)
model2.fit(XTrain,yTrain)
# Visualize each model's margin over the training points.
plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired)
plot_svc_decision_function(model1)
plt.title("C = 0.01")
plt.scatter(XTrain[:, 0], XTrain[:, 1], c=yTrain , cmap=plt.cm.Paired)
plot_svc_decision_function(model2)
plt.title("C = 100")
"""
Explanation: Let us use the same linear SVM classifier. Obviously, there are misclassified points, the model is thus learnt not by maximizing the margin (which does not exist anymore) but by minimizing a penalty over misclassified data. This penalty takes the form of an allowance margin controlled by a parameter $C$. The smaller $C$ the more inclusive the margin. Finding a good value for $C$ is up to the data scientist.
End of explanation
"""
from sklearn.metrics import confusion_matrix
yFit1 = model1.predict(XTest)
yFit2 = model2.predict(XTest)
mat1 = confusion_matrix(yTest, yFit1)
mat2 = confusion_matrix(yTest, yFit2)
print('Model with C = 0.01')
print(mat1)
print("Model with C = 100")
print(mat2)
"""
Explanation: To determine which value of $C$ to use, or more generally to assess the performance of the classifier, one can use Scikit Learn's classification metrics, for instance the confusion matrix.
End of explanation
"""
import seaborn as sns
sns.heatmap(mat1, square=True, annot=True ,cbar=False)
plt.ylabel('true label')
plt.xlabel('predicted label')
"""
Explanation: It can also be plotted in a fancier way with seaborn.
End of explanation
"""
from sklearn.datasets import make_moons
X,y = make_moons(noise=0.1)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
modelLinear = SVC(kernel="linear")
modelLinear.fit(X,y)
modelRbf = SVC(kernel="rbf")
modelRbf.fit(X,y)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plot_svc_decision_function(modelLinear)
plot_svc_decision_function(modelRbf)
plt.title("The two models superposed")
"""
Explanation: Kernels
When the separation between classes is not linear, kernels may be used to draw separating curves instead of lines. The most popular is the Gaussian rbf.
End of explanation
"""
from sklearn.metrics import zero_one_loss
yFitLinear = modelLinear.predict(X)
yFitRbf = modelRbf.predict(X)
print("0/1 loss -- Linear: {:.3f} Rbf: {:.3f}".format(zero_one_loss(y, yFitLinear),zero_one_loss(y, yFitRbf)))
"""
Explanation: Let us compare the linear and rbf training error using the zero one loss (the proportion of misclassified examples).
End of explanation
"""
import pandas as pd
import numpy as np
iris = pd.read_csv('data/iris.csv')
classes = pd.DataFrame(iris["species"])
# Keep only the two petal features so the feature space stays 2D and easy to plot.
features = iris.drop(["species","sepal_length","sepal_width"],axis=1)
classes.sample(6)
features.sample(6)
XTrain, XTest, yTrain, yTest = train_test_split(features,classes,test_size = 0.5)
from sklearn.multiclass import OneVsRestClassifier
yPred = OneVsRestClassifier(SVC()).fit(XTrain, yTrain).predict(XTest)
print(yPred) # Note the classes are not numbers but everything went as expected
class_labels= ['virginica' , 'setosa' , 'versicolor']
# Pass labels= explicitly: without it, confusion_matrix orders the classes
# alphabetically (setosa, versicolor, virginica), which would NOT match the
# tick labels below and would silently mislabel the heatmap axes.
sns.heatmap(confusion_matrix(yTest, yPred, labels=class_labels), square=True, annot=True ,cbar=False, xticklabels= class_labels, yticklabels=class_labels)
plt.ylabel('true label')
plt.xlabel('predicted label')
"""
Explanation: Multiple classes
Where there are multiples classes (as in the iris dataset of the Pandas notebook), different strategies can be adopted:
* Transforming the multiclass problem into a binary one by looking at the one-vs-rest problem (for each class construct a binary classifier between it and the rest) or the one-vs-one one (where each couple of classes is considered separately). After this transformation, standard binary classifiers can be used.
* Using dedicated algorithms such as decision trees
The corresponding algorithms can be found in the multiclass module documentation.
We are going to illustrate this by the iris 3-class classification problem using only the 2 petal features (width and length, this is only so that the feature vector is 2D and easy to visualize).
End of explanation
"""
import pandas as pd
import numpy as np
student = pd.read_csv('data/student-mat.csv')
student.head()
target = pd.DataFrame(student["G3"])
features = student.drop(["G3"],axis=1)
"""
Explanation: Other classifiers
The main classifiers from Scikit learn are: Linear SVM, RBF SVM (as already seen), Nearest Neighbors, Gaussian Process, Decision Tree, Random Forest, Neural Net, AdaBoost, Naive Bayes, QDA.
Use is:
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
Regression
Let's consider the problem of predicting real values from a set of features.
We will consider the <a href="http://archive.ics.uci.edu/ml/datasets/Student+Performance">student performance</a> dataset. The goal is to predict the final grade from the other information; from the documentation we get:
End of explanation
"""
from sklearn.preprocessing import LabelEncoder
lenc = LabelEncoder()
num_features = features.apply(lenc.fit_transform)
num_features.head()
"""
Explanation: One immediate problem here is that the features are not numeric (not floats). Thankfully, Scikit Learn provides encoders to convert categorical (aka nominal, discrete) features to numerical ones.
End of explanation
"""
from sklearn.preprocessing import StandardScaler, add_dummy_feature
scaler = StandardScaler()
normFeatures = add_dummy_feature(scaler.fit_transform(num_features))
preproData = pd.DataFrame(normFeatures , columns=[ "intercept" ] + list(num_features.columns) )
preproData.describe().T
"""
Explanation: Note that even the already-numerical columns were re-encoded; since we are going to normalize the features next, this is not really a problem.
The normalization is done by removing the mean and equalizing the variance per feature, in addition, we are going to add an intercept.
End of explanation
"""
from sklearn.model_selection import train_test_split # sklearn > ...
from sklearn.linear_model import Lasso
XTrain, XTest, yTrain, yTest = train_test_split(preproData,target,test_size = 0.25)
model = Lasso(alpha=0.1)
model.fit(XTrain,yTrain)
"""
Explanation: Regression and Feature selection with the Lasso
The lasso problem is finding a regressor $w$ such that minimizes
$$ \frac{1}{2 n_{samples}} \|X w - y ||^2_2 + \alpha \|w\|_1 $$
and is popular for prediction as it simultaneously selects features thanks to the $\ell_1$-term. The greater $\alpha$ the fewer features.
End of explanation
"""
model.coef_
"""
Explanation: We can observe the regressor $w$ provided by the model, notice the sparsity.
End of explanation
"""
print("Value Feature")
for idx,val in enumerate(model.coef_):
print("{:6.3f} {}".format(val,preproData.columns[idx]))
"""
Explanation: We can observe which coefficients are put to $0$ and which ones are positively/negatively correlated.
End of explanation
"""
targetPred = model.predict(XTest)
print("Predicted True")
for idx,val in enumerate(targetPred):
print("{:4.1f} {:.0f}".format(val,float(yTest.iloc[idx])))
"""
Explanation: Let us take a look at our predictions.
End of explanation
"""
# Sweep 15 regularization strengths, log-spaced between 2**-10 and 2**1.
n_test = 15
alpha_tab = np.logspace(-10,1,base=2,num = n_test)
print(alpha_tab)
trainError = np.zeros(n_test)
testError = np.zeros(n_test)
featureNum = np.zeros(n_test)
for idx,alpha in enumerate(alpha_tab):
    model = Lasso(alpha=alpha)
    model.fit(XTrain,yTrain)
    yPredTrain = model.predict(XTrain)
    yPredTest = model.predict(XTest)
    # Error metric: l2 norm of the residuals divided by the sample count
    # (not a true RMSE, but fine for comparing alphas against each other).
    trainError[idx] = np.linalg.norm(yPredTrain-yTrain["G3"].values)/yTrain.count()
    testError[idx] = np.linalg.norm(yPredTest-yTest["G3"].values)/yTest.count()
    # Number of features the lasso kept (non-zero coefficients).
    featureNum[idx] = sum(model.coef_!=0)
# NOTE(review): alpha is selected by minimizing the *test* error, which
# leaks test information into model selection; a separate validation
# split (or cross-validation) would be cleaner.
alpha_opt = alpha_tab[np.argmin(testError)]
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
# Top panel: training error vs alpha (log scale), optimum marked.
plt.subplot(311)
plt.xscale("log")
plt.plot(alpha_tab, trainError,label="train error")
plt.xlim([min(alpha_tab),max(alpha_tab)])
plt.legend()
plt.xticks([])
plt.axvline(x=alpha_opt)
plt.ylabel("error")
# Middle panel: test error vs alpha; its minimum defines alpha_opt.
plt.subplot(312)
plt.xscale("log")
plt.plot(alpha_tab, testError,'r',label="test error")
plt.xlim([min(alpha_tab),max(alpha_tab)])
plt.legend()
plt.axvline(x=alpha_opt)
plt.xticks([])
plt.ylabel("error")
# Bottom panel: sparsity — the number of selected features shrinks as alpha grows.
plt.subplot(313)
plt.xscale("log")
plt.scatter(alpha_tab, featureNum)
plt.xlim([min(alpha_tab),max(alpha_tab)])
plt.ylim([0,28])
plt.axvline(x=alpha_opt)
plt.ylabel("nb. of features")
plt.xlabel("alpha")
"""
Explanation: Regularization path
Selecting a good parameter $\alpha$ is the role of the data scientist. For instance, a easy way to do is the following.
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/launching_into_ml/labs/3_repeatable_splitting.ipynb | apache-2.0 | from google.cloud import bigquery
"""
Explanation: Repeatable splitting
Learning Objectives
* explore the impact of different ways of creating train/valid/test splits
Overview
Repeatability is important in machine learning. If you do the same thing now and 5 minutes from now and get different answers, then it makes experimentation difficult. In other words, you will find it difficult to gauge whether a change you made has resulted in an improvement or not.
End of explanation
"""
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
(
SELECT
RAND() AS splitfield,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
)
WHERE
splitfield < 0.8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results["alpha"][0]
print(alpha)
"""
Explanation: <h3> Create a simple machine learning model </h3>
The dataset that we will use is <a href="https://bigquery.cloud.google.com/table/bigquery-samples:airline_ontime_data.flights">a BigQuery public dataset</a> of airline arrival data. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is 70 million, and then switch to the Preview tab to look at a few rows.
<p>
We want to predict the arrival delay of an airline based on the departure delay. The model that we will use is a zero-bias linear model:
$$ delay_{arrival} = \alpha * delay_{departure} $$
<p>
To train the model is to estimate a good value for $\alpha$.
<p>
One approach to estimate alpha is to use this formula:
$$ \alpha = \frac{\sum delay_{departure} delay_{arrival} }{ \sum delay_{departure}^2 } $$
Because we'd like to capture the idea that this relationship is different for flights from New York to Los Angeles vs. flights from Austin to Indianapolis (shorter flight, less busy airports), we'd compute a different $alpha$ for each airport-pair. For simplicity, we'll do this model only for flights between Denver and Los Angeles.
<h2> Naive random split (not repeatable) </h2>
End of explanation
"""
# Evaluate the previously estimated alpha -- but note the split here is drawn
# *again* with RAND(), so the 'train'/'eval' rows are not the same rows alpha
# was fit on. This cell deliberately demonstrates that flaw.
compute_rmse = """
#standardSQL
SELECT
dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' )
GROUP BY
dataset
"""
# Substitute the numeric coefficient into the SQL placeholder, then run it.
rmse_query = compute_rmse.replace("ALPHA", str(alpha))
bigquery.Client().query(rmse_query).to_dataframe()
"""
Explanation: <h3> What is wrong with calculating RMSE on the training and test data as follows? </h3>
End of explanation
"""
# Correct-but-still-random approach: materialize ONE RAND() split in a WITH
# clause so alpha is fit on the 'train' rows and RMSE is reported for both
# subsets of that same split. Rerunning the cell still redraws the split,
# so the experiment remains non-repeatable.
train_and_eval_rand = """
#standardSQL
WITH
alldata AS (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' ),
training AS (
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
alldata
WHERE
dataset = 'train' )
SELECT
MAX(alpha) AS alpha,
dataset,
SQRT(
AVG(
(arrival_delay - alpha * departure_delay) *
(arrival_delay - alpha * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
alldata,
training
GROUP BY
dataset
"""
# Execute the combined train+eval query; the notebook displays the frame.
split_client = bigquery.Client()
split_client.query(train_and_eval_rand).to_dataframe()
"""
Explanation: Hint:
* Are you really getting the same training data in the compute_rmse query as in the compute_alpha query?
* Do you get the same answers each time you rerun the compute_alpha and compute_rmse blocks?
<h3> How do we correctly train and evaluate? </h3>
<br/>
Here's the right way to compute the RMSE using the actual training and held-out (evaluation) data. Note how much harder this feels.
Although the calculations are now correct, the experiment is still not repeatable.
Try running it several times; do you get the same answer?
End of explanation
"""
# Repeatable split: hash the flight date with FARM_FINGERPRINT and keep the
# 80% of hash buckets (mod 10 < 8) as training data. The same rows fall into
# the training set on every run, so alpha is deterministic.
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
AND ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8
"""
# One-row result: extract the scalar coefficient for the cells below.
hash_client = bigquery.Client()
results = hash_client.query(compute_alpha).to_dataframe()
alpha = results["alpha"].iloc[0]
print(alpha)
"""
Explanation: <h2> Using HASH of date to split the data </h2>
Let's split by date and train.
End of explanation
"""
# RMSE on the hash-based split. Because the IF() here reproduces the exact
# same FARM_FINGERPRINT bucketing used to fit alpha, the 'train' rows match
# the fitting data and the whole experiment is repeatable.
compute_rmse = """
#standardSQL
SELECT
IF(ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8, 'train', 'eval') AS dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
GROUP BY
dataset
"""
# Inject the deterministic alpha, run the query, and show the per-split RMSE.
rmse_sql = compute_rmse.replace("ALPHA", str(alpha))
rmse_df = bigquery.Client().query(rmse_sql).to_dataframe()
print(rmse_df.head())
"""
Explanation: We can now use the alpha to compute RMSE. Because the alpha value is repeatable, we don't need to worry that the alpha in the compute_rmse will be different from the alpha computed in the compute_alpha.
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.