repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
mtchem/ETL-MarchMadness-data | organize-data.ipynb | mit | # imports
import sqlite3 as sql
from sklearn import datasets
from sklearn import metrics
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: This notebook uses the March Madness dataset provided by Kaggle.com. Please use kaggle.com to access that data.
I put the flat data into a SQLite database on my local, for the notebook explaining that process please go to https://github.com/mtchem/ETL-MarchMadness-data/blob/master/data_to_SQLite_DB.ipynb
End of explanation
"""
# creates a connection to my local SQLite NCAA database
## * I pull from the database each time to guarantee that no raw/original data is modified
db = r'<path to database>'
conn = sql.connect(db)
# Creates a dataframe for each season (2009-2016) and stores it in a
# dictionary keyed as 'df<year>'.
d={}
for year in range(2009,2017,1):
    # Rebind the loop variable as a string so it can be spliced into the SQL text.
    year = str(year)
    SQL_str = 'SELECT * FROM RegularSeasonDetailedResults WHERE Season =' + year
    d["df{0}".format(year)]= pd.read_sql_query(SQL_str, conn)
# Shallow copy of the dataframe dictionary.  NOTE(review): the dataframes
# themselves are shared, so in-place modifications still touch the originals.
data_d = d.copy()
"""
Explanation: Create a connection to the database, pull data from the RegularSeasonDetailedResults table, separate each season into a pandas dataframe and put the dataframes into a dictionary with the year of each season as the key.
End of explanation
"""
# takes a dictionary of dataframes and modifies each df. Returns dictionary with modified dfs.
def modify_dictdf(d):
    """Reshape each season dataframe to one row per team per game.

    Takes a dictionary of season dataframes, adds low-level aggregate
    statistics (point spread, shooting percentages, rebound differentials),
    then splits every game into a winner row and a loser row with
    team-neutral column names and a win/loss 'outcome' label.

    Note: the derived columns are written onto the input dataframes in
    place before the winner/loser split is taken.
    """
    new_d = {}
    for year, df in d.items():
        # Point spread from each team's perspective.
        df['Wspread'] = df['Wscore'] - df['Lscore']
        df['Lspread'] = df['Lscore'] - df['Wscore']
        # Shooting percentages = made / attempted.
        # BUG FIX: the original divided made by (made + attempted); the
        # *fta / *fga / *fga3 columns already hold the total attempts.
        # (Zero attempts of a kind yields NaN/inf for that game.)
        df['W%_freethrow'] = df['Wftm'] / df['Wfta']
        df['W%_field_goals'] = df['Wfgm'] / df['Wfga']
        df['W%_3pt'] = df['Wfgm3'] / df['Wfga3']
        df['L%_freethrow'] = df['Lftm'] / df['Lfta']
        df['L%_field_goals'] = df['Lfgm'] / df['Lfga']
        df['L%_3pt'] = df['Lfgm3'] / df['Lfga3']
        # Difference in offensive rebounds.
        df['W_dor'] = df['Wor'] - df['Lor']
        df['L_dor'] = df['Lor'] - df['Wor']
        # Difference in defensive rebounds.
        df['W_ddR'] = df['Wdr'] - df['Ldr']
        df['L_ddR'] = df['Ldr'] - df['Wdr']
        # Split each game's row into a winner view and a loser view so that
        # every game contributes one row of stats per team.
        df_a = df[['Wteam', 'Wscore', 'Wfgm', 'Wfga', 'Wfgm3', 'Wfga3', 'Wftm', 'Wfta', 'Wor',
                   'Wdr', 'Wast', 'Wto', 'Wstl', 'Wblk', 'Wpf', 'W%_freethrow', 'W%_field_goals',
                   'W%_3pt', 'Wspread', 'W_dor', 'W_ddR']]
        df_b = df[['Lteam', 'Lscore', 'Lfgm', 'Lfga', 'Lfgm3', 'Lfga3', 'Lftm', 'Lfta', 'Lor', 'Ldr',
                   'Last', 'Lto', 'Lstl', 'Lblk', 'Lpf', 'L%_freethrow', 'L%_field_goals', 'L%_3pt',
                   'Lspread', 'L_dor', 'L_ddR']]
        # Rename to team-neutral column names so the two views can be stacked.
        df_a = df_a.rename(columns={'Wteam': 'team', 'Wscore': 'score', 'Wfgm': 'fgm', 'Wfga': 'fga',
                                    'Wfgm3': 'fgm3', 'Wfga3': 'fga3', 'Wftm': 'ftm', 'Wfta': 'fta',
                                    'Wor': 'or', 'Wdr': 'dr', 'Wast': 'ast', 'Wto': 'to', 'Wstl': 'stl',
                                    'Wblk': 'blk', 'Wpf': 'pf', 'W%_freethrow': '%_freethrow',
                                    'W%_field_goals': '%field_goal', 'W%_3pt': '%_3pt',
                                    'Wspread': 'spread', 'W_dor': 'dor', 'W_ddR': 'ddR'})
        df_b = df_b.rename(columns={'Lteam': 'team', 'Lscore': 'score', 'Lfgm': 'fgm', 'Lfga': 'fga',
                                    'Lfgm3': 'fgm3', 'Lfga3': 'fga3', 'Lftm': 'ftm', 'Lfta': 'fta',
                                    'Lor': 'or', 'Ldr': 'dr', 'Last': 'ast', 'Lto': 'to', 'Lstl': 'stl',
                                    'Lblk': 'blk', 'Lpf': 'pf', 'L%_freethrow': '%_freethrow',
                                    'L%_field_goals': '%field_goal', 'L%_3pt': '%_3pt',
                                    'Lspread': 'spread', 'L_dor': 'dor', 'L_ddR': 'ddR'})
        # BUG FIX: DataFrame.append was removed in pandas 2.0 -- concatenate
        # the winner and loser views instead.
        df_comp = pd.concat([df_a, df_b])
        # Label every row with win (1) / loss (0) from the sign of its spread.
        new_d[year] = outcome(df_comp)
    return new_d
# takes a dataframe, adds an 'outcome' column where win = 1 and loss = 0, returns dataframe
def outcome(df):
    """Add an 'outcome' column: 1 for a win (positive spread), 0 otherwise.

    Returns the same dataframe with the new column attached (mutated in place).
    """
    # BUG FIX: the original Python loop appended nothing when spread == 0,
    # which would raise a length-mismatch error on column assignment; the
    # vectorized comparison labels every row and is also much faster.
    df['outcome'] = np.where(df['spread'] > 0, 1, 0)
    return df
"""
Explanation: Functions to modify each dataframe in the dictionary. Additions include low-level statistics like % freethrow, and each game is split into two rows so that the winner and loser team stats each have their own row.
End of explanation
"""
def team_data(df):
    """Build a per-season lookup of each team's games.

    Parameters
    ----------
    df : dict of {str: pandas.DataFrame}
        Raw season dataframes, as loaded from the database.

    Returns
    -------
    dict
        Maps each season key to a dictionary of {team id: dataframe of
        that team's games} built from the cleaned (reshaped) data.
    """
    # BUG FIX: the original ignored its `df` argument and always read the
    # global `data_d`; use the parameter so callers control the input.
    # (A dead `name = 'team_dict' + ...` assignment was also removed.)
    clean_data = modify_dictdf(df)
    team_lookup = {}
    for key, season_df in clean_data.items():
        # {team id: rows of that team's games} for this season.
        by_team = {}
        for team in season_df['team'].unique():
            by_team[team] = season_df[season_df['team'] == team]
        team_lookup[key] = by_team
    return team_lookup
"""
Explanation: function to create a dictionary for every season where team is the key, and game data for the team is the value
End of explanation
"""
# addition of low level statistics and separating winner and loser fields so that each game has two rows
modified_data = modify_dictdf(data_d)
# adds low level statistics, splits each game into two rows, and separates
# **depending on your computer this could take a few minutes
team_data = team_data(data_d)
"""
Explanation: Using the previous functions to add features, separate each game into two rows in a pandas dataframe, and separate team data.
End of explanation
"""
# closes the connection to the database
conn.close()
"""
Explanation: Now you are ready to start exploring the team_data!
End of explanation
"""
|
agile-geoscience/welly | docs/_userguide/Projects.ipynb | apache-2.0 | import welly
welly.__version__
"""
Explanation: Projects
Wells are one of the fundamental objects in welly.
Well objects include collections of Curve objects. Multiple Well objects can be stored in a Project.
On this page, we take a closer look at the Project class. It lets us handle groups of wells. It is really just a list of Well objects, with a few extra powers.
First, some preliminaries…
End of explanation
"""
p = welly.read_las("../../tests/assets/example_*.las")
"""
Explanation: Make a project
We have a few LAS files in a folder; we can load them all at once with standard POSIX file globbing syntax:
End of explanation
"""
p
"""
Explanation: Now we have a project, containing two files:
End of explanation
"""
p = welly.read_las(['../../tests/assets/P-129_out.LAS',
'https://geocomp.s3.amazonaws.com/data/P-130.LAS',
'https://geocomp.s3.amazonaws.com/data/R-39.las',
])
"""
Explanation: You can pass in a list of files or URLs:
End of explanation
"""
p
"""
Explanation: This project has three wells:
End of explanation
"""
p[0]
"""
Explanation: Typically, the UWIs are a disaster. Let's ignore this for now.
The Project is really just a list-like thing, so you can index into it to get at a single well. Each well is represented by a welly.Well object.
End of explanation
"""
alias = {'Sonic': ['DT', 'DT4P'],
'Caliper': ['HCAL', 'CALI'],
}
import matplotlib.pyplot as plt
fig, axs = plt.subplots(figsize=(7, 14),
ncols=len(p),
sharey=True,
)
for i, (ax, w) in enumerate(zip(axs, p)):
log = w.get_curve('Sonic', alias=alias)
if log is not None:
ax = log.plot(ax=ax)
ax.set_title("Sonic log for\n{}".format(w.uwi))
min_z, max_z = p.basis_range
plt.ylim(max_z, min_z)
plt.show()
"""
Explanation: Some of the fields of this LAS file are messed up; see the Well notebook for more on how to fix this.
Plot curves from several wells
The DT log is called DT4P in one of the wells. We can deal with this sort of issue with aliases. Let's set up an alias dictionary, then plot the DT log from each well:
End of explanation
"""
p[0].uwi = p[0].name
p[0]
"""
Explanation: Get a pandas.DataFrame
The df() method makes a DataFrame using a dual index of UWI and Depth.
Before we export our wells, let's give Kennetcook #2 a better UWI:
End of explanation
"""
alias
keys = ['Caliper', 'GR', 'Sonic']
df = p.df(keys=keys, alias=alias, rename_aliased=True)
df
"""
Explanation: That's better.
When creating the DataFrame, you can pass a list of the keys (mnemonics) you want, and use aliases as usual.
End of explanation
"""
import welly.quality as q
tests = {
'All': [q.no_similarities],
'Each': [q.no_gaps, q.no_monotonic, q.no_flat],
'GR': [q.all_positive],
'Sonic': [q.all_positive, q.all_between(50, 200)],
}
"""
Explanation: Quality
Welly can run quality tests on the curves in your project. Some of the tests take arguments. You can test for things like this:
all_positive: Passes if all the values are greater than zero.
all_above(50): Passes if all the values are greater than 50.
mean_below(100): Passes if the mean of the log is less than 100.
no_nans: Passes if there are no NaNs in the log.
no_flat: Passes if there are no sections of well log with the same values (e.g. because a gap was interpolated across with a constant value).
no_monotonic: Passes if there are no monotonic ramps in the log (e.g. because a gap was linearly interpolated across).
Insert lists of tests into a dictionary with any of the following key examples:
'GR': The test(s) will run against the GR log.
'Gamma': The test(s) will run against the log matching according to the alias dictionary.
'Each': The test(s) will run against every log in a well.
'All': Some tests take multiple logs as input, for example quality.no_similarities. These test(s) will run against all the logs as a group. Could be quite slow, because there may be a lot of pairwise comparisons to do.
The tests are run against all wells in the project. If you only want to run against a subset of the wells, make a new project for them.
End of explanation
"""
def has_si_units(curve):
    """Custom quality test: pass when the curve's unit string is a known SI unit."""
    si_units = {'mm', 'gapi', 'us/m', 'k/m3'}
    return curve.units.lower() in si_units
tests['Each'].append(has_si_units)
"""
Explanation: Let's add our own test for units:
End of explanation
"""
alias
"""
Explanation: We'll use the same alias dictionary as before:
End of explanation
"""
from IPython.display import HTML
HTML(p.curve_table_html(keys=['Caliper', 'GR', 'Sonic', 'SP', 'RHOB'],
tests=tests, alias=alias)
)
"""
Explanation: Now we can run the tests and look at the results, which are in an HTML table:
End of explanation
"""
|
# fgnt/nara_wpe | examples/WPE_Numpy_offline.ipynb | mit |
def aquire_audio_data(channels=4, samples=10000):
    """Return a (channels, samples) array of standard-normal noise.

    Stands in for a real multi-channel recording; the defaults keep the
    original hard-coded 4 x 10000 shape, but callers can now request any
    channel count / length.
    """
    return np.random.normal(size=(channels, samples))
y = aquire_audio_data()
Y = stft(y, **stft_options)
Y = Y.transpose(2, 0, 1)
Z = wpe(Y)
z_np = istft(Z.transpose(1, 2, 0), size=stft_options['size'], shift=stft_options['shift'])
"""
Explanation: Minimal example with random data
End of explanation
"""
channels = 8
sampling_rate = 16000
delay = 3
iterations = 5
taps = 10
alpha=0.9999
"""
Explanation: Example with real audio recordings
WPE estimates a filter to predict the current reverberation tail frame from K time frames which lie 3 (delay) time frames in the past. This frame (reverberation tail) is then subtracted from the observed signal.
Setup
End of explanation
"""
file_template = 'AMI_WSJ20-Array1-{}_T10c0201.wav'
signal_list = [
sf.read(str(project_root / 'data' / file_template.format(d + 1)))[0]
for d in range(channels)
]
y = np.stack(signal_list, axis=0)
IPython.display.Audio(y[0], rate=sampling_rate)
"""
Explanation: Audio data
Shape: (channels, frames)
End of explanation
"""
Y = stft(y, **stft_options).transpose(2, 0, 1)
"""
Explanation: STFT
A STFT is performed to obtain a Numpy array with shape (frequency bins, channels, frames).
End of explanation
"""
Z = wpe(
Y,
taps=taps,
delay=delay,
iterations=iterations,
statistics_mode='full'
).transpose(1, 2, 0)
z = istft(Z, size=stft_options['size'], shift=stft_options['shift'])
IPython.display.Audio(z[0], rate=sampling_rate)
"""
Explanation: Iterative WPE
The wpe function is fed with Y. Finally, an inverse STFT is performed to obtain a dereverberated result in time domain.
End of explanation
"""
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(20, 10))
im1 = ax1.imshow(20 * np.log10(np.abs(Y[ :, 0, 200:400])), origin='lower')
ax1.set_xlabel('frames')
_ = ax1.set_title('reverberated')
im2 = ax2.imshow(20 * np.log10(np.abs(Z[0, 200:400, :])).T, origin='lower', vmin=-120, vmax=0)
ax2.set_xlabel('frames')
_ = ax2.set_title('dereverberated')
cb = fig.colorbar(im2)
"""
Explanation: Power spectrum
Before and after applying WPE
End of explanation
"""
|
OSGeoLabBp/tutorials | english/data_processing/lessons/ransac_line.ipynb | cc0-1.0 | # Python packages used
import numpy as np # for array operations
from matplotlib import pyplot as plt # for graphic output
from math import sqrt
# parameters
tolerance = 2.5 # max distance from the plane to accept point
rep = 1000 # number of repetition
"""
Explanation: <a href="https://colab.research.google.com/github/OSGeoLabBp/tutorials/blob/master/english/data_processing/lessons/ransac_line.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
RANSAC line in 2D
In this lesson the basics of the RANSAC (RANdom SAmple Consensus) method are explained.
RANSAC is a robust method to find geometric shapes in point clouds. To find a line in two dimensions the following steps are used:
select two points at random from the point cloud
find the equation of the line that runs through the two points
find the number of points which are close to the line
if the number of points is larger than the maximum so far, save it as the best solution so far
if the repetition number has not been reached, go back to 1
This algorithm is not deterministic; if you repeat the steps above you may get a different solution. If the number of repetitions is high, the different solutions will be close to each other.
The algorithm depends on two predefined constants: the maximal distance of a point from the line to be accepted, and the number of repetitions.
End of explanation
"""
n = 100 # number of inliers
k = 200 # number of outliers
range = 100.0 # range of x, y coordinates
l = [0.451, -1.0, 2.0] # line equation ax + by + c = 0
x = np.zeros(n+k)
y = np.zeros(n+k)
# points near to the line
x[:n] = np.random.rand(n) * range
y[:n] = -l[0] / l[1] * x[:n] - l[2] / l[1] + (np.random.rand(n) * 2 * tolerance - tolerance)
# outlier points (noise)
x[n:] = np.random.rand(k) * range
y[n:] = np.random.rand(k) * range
points = np.c_[x, y, np.full(n+k, 1.0)] # put together inliers and outliers
"""
Explanation: The number of repetition is quiet high to get a quasy optimal solution.
We use random numbers to generate 2D points. n points are generated close to the line (0.451 x - y + 2 = 0). Other k points are generated
End of explanation
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot()
ax.scatter(x, y)
ax.plot([0,100], [-l[2] / l[1], -l[0] / l[1] * 100 - l[2] / l[1]], 'r', label='original line')
_ = ax.set_title('Points and line')
best_n = 0 # number of points on the best fit line so far
best_i = 0 # iteration index of best fit line so far
best_inliers = np.array([]) # inlier points of the best fit line so far
for i in np.arange(rep):
    # select two random points (re-draw until the two indices are distinct)
    p = [] # list of random indices for points
    while len(p) != 2:
        p = list(set(np.random.randint(n+k, size=2))) # remove repeated random integers
    p1 = points[p] # randomly selected points
    x1 = p1[:,0] # x coordinates
    y1 = p1[:,1] # y coordinates
    # line equation from the two points using homogeneous coordinates
    l1 = np.array([y1[0] - y1[1], x1[1] - x1[0], x1[0] * y1[1] - x1[1] * y1[0]])
    l1 = l1 / sqrt(l1[0]**2 + l1[1]**2) # normalize so point-line dot products are distances
    # select points whose perpendicular distance to the line is within tolerance
    inliers = points[np.abs(np.dot(points, l1)) < tolerance]
    if inliers.shape[0] > best_n:
        # better solution found: remember its count, iteration, points and line
        best_n = inliers.shape[0]
        best_i = i
        best_inliers = inliers.copy()
        best_line = l1.copy()
print(f'Best solution after {best_i} repetitions, {best_n} points on line: {best_line}')
"""
Explanation: Let's display the points and the original line.
End of explanation
"""
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot()
ax.scatter(x, y)
ax.scatter(best_inliers[:,0], best_inliers[:,1], c='g')
ax.plot([0,100], [-l[2] / l[1], -l[0] / l[1] * 100 - l[2] / l[1]], 'g', label='original line')
ax.plot([0,100], [-best_line[2] / best_line[1], -best_line[0] / best_line[1] * 100 - best_line[2] / best_line[1]], 'r', label='best approximation')
ax.legend()
_ = ax.set_title('RANSAC line')
"""
Explanation: Please run the code block above and notice the small differences in the line parameters.
Finaly let's display the best fitting line.
End of explanation
"""
|
simpleblob/ml_algorithms_stepbystep | algo_example_logistic_regression_and_optimization_methods.ipynb | mit | print type(iris.data)
print iris.data.shape
print iris.target.shape
print iris.data[0:5]
print np.unique(iris.target)
#make it a binary classification problem instead
X = np.copy(iris.data)
X = (X - np.average(X,axis=0)) / np.std(X, axis=0)
Y = np.copy(iris.target)
np.place(Y, Y==2, [0])
print np.unique(Y)
"""
Explanation: Load and return the boston house-prices dataset (regression).
| Description | Value |
|-------------------|----------------|
| Classes | 3 |
| Samples per class | 50 |
| Samples total | 150 |
| Dimensionality | 4 |
| Features | real, positive |
End of explanation
"""
def f_func(beta, X):
    """Logistic (sigmoid) response for the samples in X under weights beta."""
    linear_term = np.dot(beta, np.transpose(X))
    return 1.0 / (1.0 + np.exp(-linear_term))
def d_func(f, cutoff=0.5):
    """Convert predicted probabilities to hard 0/1 class labels."""
    # The comparison yields a boolean mask; view it as integers without
    # modifying the (mutable) input array.
    return np.asarray(f >= cutoff, dtype=int)
def cost_ifunc(f, y):
    """Log-loss for one prediction: -log((1-y) + (2y-1)*f).

    Equals -log(f) when y == 1 and -log(1-f) when y == 0, so the cost is
    always non-negative and grows as the prediction moves away from y.
    """
    # BUG FIX: the original used math.log1p, i.e. log(1 + x), which shifts
    # the argument by one and can even yield *negative* costs; the formula
    # documented above needs a plain log.  Clamp the argument away from
    # zero so a fully saturated misprediction cannot raise a domain error.
    return -math.log(max((1 - y) + (2 * y - 1) * f, 1e-15))
cost_func = np.vectorize(cost_ifunc)
def grad_func(beta, X, Y):
    """Gradient of the logistic log-loss with respect to beta: (f - Y) . X."""
    residuals = f_func(beta, X) - Y
    return residuals.dot(X)
#test
test_x = f_func(np.array([[0.1,0.2,0.2,-0.5]]), np.array([[1,3,1,1],[2,3,4,5]]))
print test_x
print d_func(test_x,0.5)
print cost_func(test_x,[1,0])
print np.average(cost_func(test_x,[1,0]))
t = grad_func(beta,X,Y)
t
"""
Explanation: For Logistic regression, the normal function is this:
$$ f(\beta,X^{(i)}) = \dfrac{1}{1+e^{-(\beta \cdot X^{(i)})}} $$
For prediction, push to either 0 or 1
$$
d =
\begin{cases}
1, & \text{if} \quad f(\beta,X^{(i)}) \ge 0.5 \
0, & \text{if} \quad f(\beta,X^{(i)}) \lt 0.5
\end{cases}
$$
We need a convex cost function.. enter "cost" function computed from log-likelyhood
$$
cost(f,y) =
\begin{cases}
-log(f) & \text{if y = 1} \
-log(1-f) & \text{if y = 0}
\end{cases}
$$
$$ cost(f,y) = -log((1-y)+(2y-1) \cdot f) $$
For the Loss function we can use aggregate errors we need to minimize:
$$ L = \dfrac{1}{m}\sum_{i=1}^m cost $$
End of explanation
"""
beta = np.array([0.5,0.5,0.5,0.5])
y_prob = f_func(beta,X[0])
print y_prob
"""
Explanation: Using gradient descent from this: https://en.wikipedia.org/wiki/Gradient_descent
Stepping (where gamma is learning rate):
$$\beta_{n+1} \leftarrow \beta_n - {\gamma \cdot \nabla L}$$
With a complicated derivation here, we arrive at the final equation:
$$\beta_{n+1} \leftarrow \beta_n - {\gamma \cdot \dfrac{1}{m}\sum_{i=1}^m\left(f(\beta,x^{(i)})-y^{(i)}\right)\cdot x_j^{(i)}}$$
End of explanation
"""
# Gradient Descent method
# init beta as an average of Y/Xi
learn_rate = 0.01
beta = np.array([0.5,0.5,0.5,0.5])
y_prob = f_func(beta,X)
y_pred = d_func(y_prob)
curr_loss = np.average(cost_func(y_prob,Y))
loss = []
loss.append(curr_loss)
print curr_loss
for i in range(0,1000):
beta = beta - learn_rate*grad_func(beta,X,Y)
y_prob = f_func(beta,X)
y_pred = d_func(y_prob)
curr_loss = np.average(cost_func(y_prob,Y))
loss.append(curr_loss)
#print 'curr={},prev={},diff={}'.format(curr_loss,loss[-2],loss[-2]-curr_loss)
if (i>10) and ((loss[-2] - curr_loss) < 10**-5): #stopping criterion
print 'stop at {}'.format(i)
break
unique, counts = np.unique(Y, return_counts=True)
print beta
plt.figure()
plt.xlabel('no. of run')
plt.ylabel('loss function')
sns.tsplot(loss)
from sklearn.metrics import confusion_matrix
cm_mat = confusion_matrix(Y,d_func(f_func(beta,X),0.6))
print cm_mat.T
df_temp = pd.DataFrame(cm_mat.flatten()[np.newaxis].T,columns = ['values'])
df_temp = pd.DataFrame(cm_mat.flatten()[np.newaxis].T,columns = ['values'])
plt.figure(figsize = (6,4),dpi=600)
sns.heatmap(cm_mat.T, cbar=True ,annot=True, fmt=',.0f')
plt.xlabel('Truth')
plt.ylabel('Predicted')
print X[0]
print Y
print y_pred
print y_prob[0]
print X[0:5]
print np.gradient(X[0:5],axis=0)
print beta
"""
Explanation: Run it!
End of explanation
"""
# Computer R Squared
ssreg = np.sum((y_pred-Y.mean())**2)
sstot = np.sum((Y-Y.mean())**2)
R_sq = ssreg/sstot
print 'R Squared = {:.2f}'.format(R_sq)
#standardized residual plot
res = Y-y_pred
res_d = ((1.0/(len(res)-1))*np.sum(res**2))**0.5
res_stu = res/res_d
plt.figure()
grid = sns.JointGrid(x=Y, y=res_stu, space=0, size=6, ratio=50)
grid.plot_joint(plt.scatter, color="g")
grid.plot_marginals(sns.rugplot, height=1, color="g")
grid.set_axis_labels(xlabel="Boston house price",ylabel="studentized residues")
res_d
res_stu[0:10]
"""
Explanation: Using Newton's optimization method from this: http://www.stat.cmu.edu/~cshalizi/402/lectures/14-logistic-regression/lecture-14.pdf
Also this, page 4, cubic iteration: http://www.sztaki.hu/~bozoki/oktatas/nemlinearis/SebahGourdon-Newton.pdf
For implementation: https://ipvs.informatik.uni-stuttgart.de/mlr/marc/teaching/13-Optimization/04-secondOrderOpt.pdf
For finding optima of f(x) in 1D:
$$x_{n+1} \leftarrow x_n - \dfrac{f'(x)}{f''(x)}$$
For matrix version, hessians all the way:
$$x_{n+1} \leftarrow x_n - \dfrac{f'(x)}{f''(x)}$$
End of explanation
"""
|
letsgoexploring/economicData | inflation-forecasts-and-interest-rates/python/real_rate.ipynb | mit | import numpy as np
import matplotlib.dates as dts
import pandas as pd
import fredpy as fp
import runProcs
import requests
import matplotlib.pyplot as plt
plt.style.use('classic')
%matplotlib inline
"""
Explanation: About
This program downloads, manages, and exports to .csv files inflation forecast data from the Federal Reserve Bank of Philadelphia, and actual inflation and interest rate data from FRED. The purpose is to learn about historical ex ante real interest rates in the US.
End of explanation
"""
url = "https://www.philadelphiafed.org/-/media/research-and-data/real-time-center/survey-of-professional-forecasters/historical-data/medianlevel.xls?la=en"
r = requests.get(url,verify=False)
with open("../xls/medianLevel.xls", "wb") as code:
code.write(r.content)
deflator_forecasts = pd.read_excel('../xls/medianLevel.xls',sheet_name = 'PGDP')
deflator_forecasts=deflator_forecasts.interpolate()
deflator_forecasts = deflator_forecasts.iloc[5:]
"""
Explanation: Import forecast data
End of explanation
"""
# Initialize forecast lists
forecast_1q = []
forecast_2q = []
forecast_1y = []
# Associate forecasts with dates. The date should coincide with the start of the period for which the forecast applies.
dates = []
for i,ind in enumerate(deflator_forecasts.index):
year =int(deflator_forecasts.iloc[i]['YEAR'])
quart=int(deflator_forecasts.iloc[i]['QUARTER'])
if quart == 1:
month = '01'
elif quart == 2:
month = '04'
elif quart == 3:
month = '07'
else:
month = '10'
year=year
date = month+'-01-'+str(year)
dates.append(date)
forecast_1q.append(400*(deflator_forecasts.iloc[i]['PGDP3']/deflator_forecasts.iloc[i]['PGDP2']-1))
forecast_2q.append(200*(deflator_forecasts.iloc[i]['PGDP4']/deflator_forecasts.iloc[i]['PGDP2']-1))
forecast_1y.append(100*(deflator_forecasts.iloc[i]['PGDP6']/deflator_forecasts.iloc[i]['PGDP2']-1))
# Update the FRED instances
defl_forecast_1q = fp.to_fred_series(data = forecast_1q,dates = dates,frequency='Quarterly')
defl_forecast_2q = fp.to_fred_series(data = forecast_2q,dates = dates,frequency='Quarterly')
defl_forecast_1y = fp.to_fred_series(data = forecast_1y,dates = dates,frequency='Quarterly')
deflator_frame = pd.DataFrame({'deflator inflation - 3mo forecast':defl_forecast_1q.data,
'deflator inflation - 6mo forecast':defl_forecast_2q.data,
'deflator inflation - 1yr forecast':defl_forecast_1y.data})
"""
Explanation: GDP deflator inflation forecasts
End of explanation
"""
interest3mo = fp.series('TB3MS').as_frequency('Q')
interest6mo = fp.series('TB6MS').as_frequency('Q')
interest1yr = fp.series('GS1').as_frequency('Q')
interest3mo,interest6mo,interest1yr = fp.window_equalize([interest3mo,interest6mo,interest1yr])
interest_frame = pd.DataFrame({'nominal interest - 3mo':interest3mo.data,
'nominal interest - 6mo':interest6mo.data,
'nominal interest - 1yr':interest1yr.data})
defl_3mo = fp.series('GDPDEF')
defl_6mo = fp.series('GDPDEF')
defl_1yr = fp.series('GDPDEF')
defl_3mo = defl_3mo.pc(method='forward',annualized=True)
defl_6mo.data = (defl_6mo.data.shift(-2)/defl_6mo.data-1)*200
defl_6mo = defl_6mo.drop_nan()
defl_1yr.data = (defl_1yr.data.shift(-4)/defl_1yr.data-1)*100
defl_1yr = defl_1yr.drop_nan()
defl_3mo_frame = pd.DataFrame({'deflator inflation - 3mo actual':defl_3mo.data})
defl_6mo_frame = pd.DataFrame({'deflator inflation - 6mo actual':defl_6mo.data})
defl_1yr_frame = pd.DataFrame({'deflator inflation - 1yr actual':defl_1yr.data})
actual_rates_frame = pd.concat([interest_frame,defl_3mo_frame,defl_6mo_frame,defl_1yr_frame],axis = 1)
"""
Explanation: Actual data
End of explanation
"""
full_data_frame = pd.concat([actual_rates_frame,deflator_frame],axis=1)
full_data_frame = full_data_frame.dropna(subset=['deflator inflation - 1yr forecast',
'deflator inflation - 3mo forecast',
'deflator inflation - 6mo forecast'])
full_data_frame.columns
# Export quarterly data
full_data_frame[['deflator inflation - 3mo forecast','deflator inflation - 3mo actual','nominal interest - 3mo'
]].to_csv('../csv/real_rate_data_Q.csv')
fig = plt.figure(figsize = (12,8))
ax = fig.add_subplot(1,1,1)
full_data_frame[['deflator inflation - 3mo forecast','deflator inflation - 3mo actual','nominal interest - 3mo'
]].plot(ax=ax,lw=4,alpha = 0.6,grid=True)
# Construct annual data and export
# Resample to annual freq and count occurences per year
annual_data_frame = full_data_frame[['deflator inflation - 1yr forecast','deflator inflation - 1yr actual','nominal interest - 1yr'
]].resample('AS').mean().dropna()
# Export to csv
annual_data_frame[['deflator inflation - 1yr forecast','deflator inflation - 1yr actual','nominal interest - 1yr'
]].to_csv('../csv/real_rate_data_A.csv')
fig = plt.figure(figsize = (12,8))
ax = fig.add_subplot(1,1,1)
annual_data_frame.plot(ax=ax,lw=4,alpha = 0.6,grid=True)
"""
Explanation: Organize actual and forecasted data and export to csv files
End of explanation
"""
# Formatter for inserting commas in y axis labels with magnitudes in the thousands
# Make all plotted axis lables and tick lables bold 15 pt font
font = {#'weight' : 'bold',
'size' : 15}
axes={'labelweight' : 'bold'}
plt.rc('font', **font)
plt.rc('axes', **axes)
# Add some space around the tick lables for better readability
plt.rcParams['xtick.major.pad']='8'
plt.rcParams['ytick.major.pad']='8'
def func(x, pos): # formatter function takes tick label and tick position
    """Render a tick value as an integer with comma thousands separators."""
    return format(int(x), '0,d')
y_format = plt.FuncFormatter(func) # make formatter
# format the x axis ticksticks
years2,years4,years5,years10,years15= dts.YearLocator(2),dts.YearLocator(4),dts.YearLocator(5),dts.YearLocator(10),dts.YearLocator(15)
# y label locator for vertical axes plotting gdp
majorLocator_y = plt.MultipleLocator(3)
majorLocator_shares = plt.MultipleLocator(0.2)
# Figure
expectedInflation = annual_data_frame['deflator inflation - 1yr forecast']
actualInflation = annual_data_frame['deflator inflation - 1yr actual']
v =fp.to_fred_series(data = annual_data_frame['deflator inflation - 1yr actual'],dates=annual_data_frame.index)
fig=plt.figure(figsize=(10, 6))
ax=fig.add_subplot(1,1,1)
v.recessions()
ax.plot_date(annual_data_frame.index,actualInflation,'b-',lw=3)
ax.plot_date(annual_data_frame.index,expectedInflation,'r--',lw=3)
ax.fill_between(annual_data_frame.index,actualInflation, expectedInflation, where = expectedInflation<actualInflation,alpha=0.25,facecolor='green', interpolate=True)
ax.fill_between(annual_data_frame.index,actualInflation, expectedInflation, where = expectedInflation>actualInflation,alpha=0.25,facecolor='red', interpolate=True)
ax.set_ylabel('%')
ax.xaxis.set_major_locator(years5)
ax.legend(['actual inflation (year ahead)','expected inflation (year ahead)'],bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.,prop={'weight':'normal','size':'15'})
plt.grid()
fig.autofmt_xdate()
plt.savefig('../png/fig_US_Inflation_Forecast_site.png',bbox_inches='tight')
progName = 'realRateData'
runProcs.exportNb(progName)
"""
Explanation: Figure for website
End of explanation
"""
|
whiterd/Tutorial-Notebooks | 2019-03-Presentation-Micropython.ipynb | mit | %serialconnect
"""
Explanation: <img src="images/micropython-logo-new.jpg" width="400">
<!--  -->
<img src="images/micropython-logo-old.png" width="400">
What is it?
“micro-ified”
MicroPython-specific libraries
btree - simple BTree database
framebuf - Frame buffer manipulation
machine - functions related to the hardware
micropython - access and control MicroPython internals
network - network configuration
ucryptolib - cryptographic ciphers
uctypes - access binary data in a structured way
ESP8266-specific library
esp
Optimizations
Limitations
What does it run on?
pyboard
ESP8266
ESP32
WiPy/CC3200
BBC:microbit
Teensy 3.x
Pinout Diagram - Node MCU
<img src="https://i.stack.imgur.com/yT4hb.png" width="400">
https://arduino.stackexchange.com/questions/56093/arduino-sketch-for-nodemcu-v1-0-esp8266-12e-has-no-error-of-compiling-but-does
End of explanation
"""
import network
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True) # activate station interface
#sta_if.connect('<your ESSID>', '<your password>')
if sta_if.isconnected():
ip_address, netmask, gateway, dns = sta_if.ifconfig()
print('IP: {}, netmask: {}, gateway: {}, DNS: {}'.format(*sta_if.ifconfig()))
else:
'Not Connected'
ap_if = network.WLAN(network.AP_IF)
print(ap_if.active()) # Activate access point.
ap_if.active(False) # Disable if not using.
import socket
def http_get(url):
    """Fetch *url* over plain HTTP and print the raw response to stdout.

    Minimal GET for MicroPython: no TLS, no redirects; the response
    (headers included) is streamed in 100-byte chunks.
    """
    _, _, host, path = url.split('/', 3)
    addr = socket.getaddrinfo(host, 80)[0][-1]
    s = socket.socket()
    try:
        s.connect(addr)
        s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
        while True:
            data = s.recv(100)
            if data:
                print(str(data, 'utf8'), end='')
            else:
                break
    finally:
        # BUG FIX: close the socket even when connect/send/recv raises, so
        # repeated calls on a memory-constrained board do not leak sockets.
        s.close()
http_get('http://micropython.org/ks/test.html')
import machine
pins = [machine.Pin(i, machine.Pin.IN) for i in (0, 2, 4, 5, 12, 13, 14, 15)]
html = """<!DOCTYPE html>
<html>
<head> <title>ESP8266 Pins</title> </head>
<body> <h1>ESP8266 Pins</h1>
<table border="1"> <tr><th>Pin</th><th>Value</th></tr> %s </table>
</body>
</html>
"""
import socket
addr = socket.getaddrinfo('0.0.0.0', 8081)[0][-1]
s = socket.socket()
s.bind(addr)
s.listen(1)
print('listening on', addr)
while True:
cl, addr = s.accept()
print('client connected from', addr)
cl_file = cl.makefile('rwb', 0)
while True:
line = cl_file.readline()
if not line or line == b'\r\n':
break
rows = ['<tr><td>%s</td><td>%d</td></tr>' % (str(p), p.value()) for p in pins]
response = html % '\n'.join(rows)
cl.send(response)
cl.close()
"""
Explanation: Getting Connected
Obtain a terminal emulater to access REPL:
Linux: mpfshell or picocom /dev/ttyUSB0 -b115200
Windows: TeraTerm
Mac: the built-in screen program
Optionally, WebREPL:
import webrepl_setup
Find your SSID
$ nmcli -f SSID,BSSID,DEVICE dev wifi
End of explanation
"""
%serialconnect
# May need to allow permission:
# sudo chmod 777 /dev/ttyUSB0
# Could also add your user to the dialout group
# sudo usermod -a -G dialout your_username
%lsmagic
# Common commands:
# %rebootdevice
# %sendtofile
# %disconnect
# %sendtofile yourfilename.py
import network
wlan = network.WLAN(network.STA_IF) # create station interface
wlan.active(True) # activate the interface
wlan.scan() # scan for access points
wlan.isconnected() # check if the station is connected to an AP
wlan.connect('Student', 'Improving') # connect to an AP
wlan.config('mac') # get the interface's MAC adddress
wlan.ifconfig() # get the interface's IP/netmask/gw/DNS addresses
ap = network.WLAN(network.AP_IF) # create access-point interface
ap.active(True) # activate the interface
ap.config(essid='ESP-AP') # set the ESSID of the access point
# Simple demo.
import machine
from machine import Pin
import dht
from time import sleep
motion = Pin(14, Pin.IN, Pin.PULL_UP)
light = Pin(12, Pin.IN, Pin.PULL_UP)
hum = dht.DHT22(Pin(13))
blue = Pin(0, Pin.OUT)
green = Pin(4, Pin.OUT)
red = Pin(5, Pin.OUT)
def blink(led, loops=1, delay=0.25):
    """Turn *led* on then off *loops* times, sleeping *delay* seconds after each toggle."""
    count = 0
    while count < loops:
        for toggle in (led.on, led.off):
            toggle()
            sleep(delay)
        count += 1
def main():
    """Poll the sensors every 2 s and mirror their state onto the three LEDs.

    blue  <- digital light sensor level
    green <- PIR motion sensor level
    red   <- DHT22 reading above 26 C or 50% relative humidity
    Runs forever; interrupt the kernel to stop.
    """
    while True:
        # Light sensor drives the blue LED directly.
        if light.value():
            blue.on()
        else:
            blue.off()
        # Motion sensor drives the green LED.
        if motion.value():
            green.on()
        else:
            green.off()
        # DHT22 requires an explicit measure() before reading temp/humidity.
        hum.measure()
        if hum.temperature() > 26 or hum.humidity() > 50:
            red.on()
        else:
            red.off()
        sleep(2)
main() # To end script, interrupt kernel (or press 'Esc' twice and 'i' twice).
"""
Explanation: Micropython for Jupyter Notebook (ESP8266-specific)
https://github.com/goatchurchprime/jupyter_micropython_kernel/
If you get this error:
ModuleNotFoundError: No module named 'prompt_toolkit.formatted_text'
https://github.com/jupyter/notebook/issues/4050
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
start = time.ticks_us()
for i in range(loops):
blue.on()
blue.off()
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / loops, loops / diff * 1e3))
"""
Explanation: Demo time...
Simple loop with no efficiency in mind.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
def blink_me(num):
    """Toggle the global `blue` pin high/low *num* times (baseline benchmark loop)."""
    for i in range(num):
        blue.on()
        blue.off()
def time_me(func, num):
    """Time a single call of func(num) and print blink-rate statistics."""
    # ticks_us/ticks_diff are MicroPython's wrap-safe microsecond timers.
    t_start = time.ticks_us()
    func(num)
    t_end = time.ticks_us()
    elapsed = time.ticks_diff(t_end, t_start)
    fmt = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
    print(fmt.format(elapsed * 1e-6, elapsed / num, num / elapsed * 1e3))
time_me(blink_me, loops)
"""
Explanation: Simple loop wrapped in a function.
Number of loops no longer looked up in global scope.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
def blink_me(num):
    """Toggle the `blue` pin *num* times with its methods pre-bound to locals.

    Fix: the accompanying notebook text says this cell "preloads methods into
    local scope", but the code was a verbatim copy of the previous cell.
    Binding blue.on/blue.off once avoids an attribute lookup per toggle.
    """
    on = blue.on
    off = blue.off
    for i in range(num):
        on()
        off()
def time_me(func, num):
start = time.ticks_us()
func(num)
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / num, num / diff * 1e3))
time_me(blink_me, loops)
"""
Explanation: Preload methods into local scope of function.
Further reduces lookups.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
def blink_me(num):
    """Blink the `blue` pin *num* times, 8 pulses per loop pass (unrolled).

    Pre-binds the pin methods to locals and unrolls the loop body 8x to
    amortise loop overhead.  num is floor-divided by 8, so any remainder
    blinks are dropped -- assumes num is a multiple of 8.
    """
    num //= 8
    on = blue.on
    off = blue.off
    r = range(num)
    for i in r:
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
# continued from above...
def time_me(func, num):
start = time.ticks_us()
func(num)
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / num, num / diff * 1e3))
time_me(blink_me, loops)
"""
Explanation: Patial loop unrolling.
Reduces the overhead of looping.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
@micropython.native
def blink_me(num):
    """Blink the `blue` pin *num* times; same unrolled body as before,
    but compiled to machine code by the native emitter instead of bytecode.

    num is floor-divided by 8 (remainder blinks dropped).
    """
    num //= 8
    on = blue.on
    off = blue.off
    r = range(num)
    for i in r:
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
        on()
        off()
# continued from above...
def time_me(func, num):
start = time.ticks_us()
func(num)
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / num, num / diff * 1e3))
time_me(blink_me, loops)
"""
Explanation: Machine code instead of bytecode.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
@micropython.viper
def blink_me(num:int):
    """Blink by writing GPIO set/clear registers directly (viper-compiled).

    num is floor-divided by 8; each loop pass emits 8 unrolled high/low
    pulses of bit (1 << 4), i.e. GPIO4.

    NOTE(review): 0x60000328 is presumably the ESP8266 GPIO set/clear
    register pair, with p[0] the "set high" word and p[1] (next 32-bit
    word) the "set low" word -- confirm against the datasheet.
    """
    num //= 8
    p = ptr32(0x60000328)
    for i in range(num):  # fixed: original was missing the ':' (SyntaxError)
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
        p[0] = 1 << 4 # High
        p[1] = 1 << 4 # Low
# continued from above...
def time_me(func, num):
start = time.ticks_us()
func(num)
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / num, num / diff * 1e3))
time_me(blink_me, loops)
"""
Explanation: Viper mode!
Writes directly to GPIO registers.
Allows direct manipulation of registers.
End of explanation
"""
import time, machine
blue = machine.Pin(0, machine.Pin.OUT)
loops = 200_000
@micropython.asm_thumb
def blink_me(r0):
    """Blink by writing the GPIO registers from inline Thumb assembly.

    r0 = total blink count; each loop pass performs 8 unrolled pulses.

    Fixes: 'lable(loop)' was a typo (NameError in the inline assembler --
    the directive is label()), and nothing ever branched back to the label,
    so the body executed exactly once regardless of r0.  Now r0 is
    decremented each pass and we branch back while it is positive.

    NOTE(review): asm_thumb targets ARM Thumb cores (e.g. pyboard); it is
    not available on the Xtensa-based ESP8266 -- confirm the target board.
    NOTE(review): the halfword stores at [r1, 0]/[r1, 2] differ from the
    word offsets 0/4 used by the viper version -- verify register layout.
    """
    lsr(r0, r0, 3)          # r0 //= 8 (8 pulses per loop pass)
    movwt(r1, 0x60000328)   # presumably the GPIO set/clear register pair -- TODO confirm
    mov(r2, 1 << 4)         # bit mask for GPIO4
    label(loop)             # fixed: was misspelled 'lable'
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    strh(r2, [r1, 0]) # High
    strh(r2, [r1, 2]) # Low
    sub(r0, r0, 1)          # fixed: decrement the pass counter
    cmp(r0, 0)
    bgt(loop)               # fixed: loop back while passes remain
# continued from above...
def time_me(func, num):
start = time.ticks_us()
func(num)
end = time.ticks_us()
diff = time.ticks_diff(end, start)
temp = '{:5.3f} sec, {:6.3f} usec/blink, {:8.2f} kblinks/sec'
print(temp.format(diff * 1e-6, diff / num, num / diff * 1e3))
time_me(blink_me, loops)
"""
Explanation: Assembler
Write directly to the GPIO registers.
End of explanation
"""
help("modules")
import machine
help(machine)
def do_connect():
    """Join the configured WiFi access point in station mode and print the IP config.

    Replace '<essid>' / '<password>' with real credentials before use.
    Blocks (busy-waits) until the association completes.
    """
    import network
    sta_if = network.WLAN(network.STA_IF)
    if not sta_if.isconnected():
        print('connecting to network...')
        sta_if.active(True)
        sta_if.connect('<essid>', '<password>')
        # Busy-wait until the interface reports a successful association.
        while not sta_if.isconnected():
            pass
    # (IP, netmask, gateway, DNS)
    print('network config:', sta_if.ifconfig())
help(sta_if)
[print(i) for i in sta_if.scan()]
help(network)
%rebootdevice
"""
Explanation: Fin
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ec-earth-consortium/cmip6/models/ec-earth3-gris/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-gris', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: EC-EARTH-CONSORTIUM
Source ID: EC-EARTH3-GRIS
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:59
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | quests/tpu/tpu_fundamentals.ipynb | apache-2.0 | import numpy as np
import six
import tensorflow as tf
import time
import os
WORKER_NAME = "laktpu" #@param {type:"string"}
TPU_WORKER = tf.contrib.cluster_resolver.TPUClusterResolver(
WORKER_NAME
).get_master()
session = tf.Session(TPU_WORKER)
session.list_devices()
"""
Explanation: TPU Fundamentals
This codelab will walk you through the creation of a simple model using the TF-TPU programming primitives. We also demonstrate how to use the TPUEstimator and Keras APIs to simplify common training tasks.
For most models, we recommend using the high-level APIs, but understanding the underlying TPU programming model is important for debugging or for advanced/custom training scenarios.
End of explanation
"""
def add(x, y):
    """Return the element-wise sum of two tensors (the TPU model function)."""
    total = x + y
    return total
"""
Explanation: What makes TPU modeling special?
Writing TensorFlow models for the TPU differs from the CPU and GPU modeling you may be familiar with in a few important ways:
Model functions are just-in-time compiled to run on the device.
Almost all models are replicated by default, using synchronous in-graph replication (go/tf-strong-sync).
An early goal of the TPU program was the ability to scale up. Individual TPU cores are powerful (~70 TFlops per core in the DragonFish generation), but the specialized network and hardware transfer support truly distinguish TPUs from other devices. The interconnect between TPU chips is an order of magnitude faster and lower-latency than the normal datacenter network fabric. This allows us to build models that scale up transparently using batch parallelism and synchronous replication.
The TF-TPU programming model is designed to help your TF model take advantage of this scaling ability. Some models can scale to the size of an entire TPU pod (2048 cores!). To utilize TPUs effectively we need to make a few changes to how we develop our models, which we'll cover below.
tpu.rewrite and model functions
We refer to the portion of a TF model that runs on the TPU as a "model function". Most programs have a single model function which computes the training step for your model. Explicitly encapsulating the TPU related logic in a model function allows the TPU software stack to compile your model and implicitly replicate it across multiple TPU cores in a cluster. Let's start with a simple model function that adds two tensors.
End of explanation
"""
x = tf.placeholder(name='x', dtype=tf.float32)
y = tf.placeholder(name='y', dtype=tf.float32)
tpu_add = tf.contrib.tpu.rewrite(add, [x, y])
"""
Explanation: This model takes 2 arguments and adds them together. Obviously this isn't a terribly interesting use of a TPU but we can run this model on the TPU nevertheless! We first need to wrap our model with a call to tpu.rewrite:
End of explanation
"""
session.run(tf.contrib.tpu.initialize_system())
z = session.run(tpu_add, {x: np.arange(16), y: np.ones(16)})
print('Result of TPU computation: %s', z)
"""
Explanation: What's happening here? The tpu.rewrite call returns a version of our original function which is ready to be executed on the TPU. Now we can run it on our device!
Note the initialize_system call. These are necessary to reset the TPU hardware. In the future, this initialization should occur automatically, but for now remember that you'll need to call them to initialize your session.
End of explanation
"""
IMAGE_SIZE = 28 * 28
NUM_LABELS = 10
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train.shape, y_train.shape
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
def fit_batch(img, labels):
    """One logistic-regression training step on a batch of 28x28 MNIST images.

    Args:
        img: float tensor of images; flattened to (batch, IMAGE_SIZE).
        labels: int tensor of class ids, shape (batch,).

    Returns:
        (loss, train_op) -- the scalar cross-entropy loss and the update op.

    Fixes: removed a leftover debug print of the graph tensors, and replaced
    the magic numbers 28*28 / 10 with the module constants IMAGE_SIZE /
    NUM_LABELS already defined above.
    """
    # AUTO_REUSE lets fit_batch and predict share the same W/b variables.
    with tf.variable_scope('tpu', reuse=tf.AUTO_REUSE):
        # flatten images
        x = tf.reshape(img, [-1, IMAGE_SIZE])
        W = tf.get_variable('W', [IMAGE_SIZE, NUM_LABELS]) # pylint: disable=invalid-name
        b = tf.get_variable('b', [NUM_LABELS], initializer=tf.zeros_initializer)
        logits = tf.matmul(x, W) + b
        loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        # CrossShardOptimizer averages gradients across TPU cores.
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
        return loss, optimizer.minimize(loss, tf.train.get_or_create_global_step())
"""
Explanation: Running Logistic Regression
Our first example wasn't very exciting. After all, we're interested in using the TPU to train models, not add tensors together. Let's change our model to learn a simple classifier via logistic regression.
We'll use the well known MNIST dataset (easily imported via tf.keras.datasets), and to keep things as simple as possible, we will use a single fully connected layer to make our predictions.
End of explanation
"""
images = tf.placeholder(name='images', dtype=tf.float32, shape=[None, 28, 28])
labels = tf.placeholder(name='labels', dtype=tf.int32, shape=[None,])
fit_on_tpu = tf.contrib.tpu.rewrite(fit_batch, [images, labels])
session.run(tf.global_variables_initializer())
for i in range(50):
loss = session.run(fit_on_tpu, {
images: x_train[:1000], labels: y_train[:1000]
})
if i % 10 == 0:
print('loss = %s' % loss)
"""
Explanation: The only unusual part of our model above the use of CrossShardOptimizer: this wraps a standard TF optimizer and allows us to train on multiple cores simultaneously (we'll demonstrate that below).
And now we can train our model! Note we use tpu.rewrite again to create a TPU version of our computation. We'll only use the first 1000 images from our training data for this example.
End of explanation
"""
def predict(img):
    """Return per-class softmax probabilities for a batch of 28x28 images.

    Fix: replaced the magic numbers 28*28 / 10 with the module constants
    IMAGE_SIZE / NUM_LABELS for consistency with fit_batch.
    """
    # AUTO_REUSE picks up the same W/b variables trained by fit_batch.
    with tf.variable_scope('tpu', reuse=tf.AUTO_REUSE):
        # flatten images
        x = tf.reshape(img, [-1, IMAGE_SIZE])
        W = tf.get_variable('W', [IMAGE_SIZE, NUM_LABELS]) # pylint: disable=invalid-name
        b = tf.get_variable('b', [NUM_LABELS], initializer=tf.zeros_initializer)
        logits = tf.matmul(x, W) + b
        return tf.nn.softmax(logits)
predict_on_tpu = tf.contrib.tpu.rewrite(predict, [images,])
from matplotlib import pyplot
%matplotlib inline
def plot_predictions(images, predictions):
    """Plot each image next to a bar chart of its 10 class probabilities.

    Args:
        images: sequence of 2-D image arrays.
        predictions: matching sequence of length-10 probability vectors.

    Generalized: the row count is now len(images) instead of a hard-coded
    16, so any batch size works (callers passing 16 are unaffected).
    """
    n = len(images)
    f, axes = pyplot.subplots(n, 2)
    for i in range(n):
        axes[i, 0].bar(np.arange(10), predictions[i])
        axes[i, 1].imshow(images[i])
        axes[i, 1].axis('off')
        # Only the last row keeps its x-axis labels (y stays hidden).
        if i != n - 1:
            axes[i, 0].axis('off')
        else:
            axes[i, 0].get_yaxis().set_visible(False)
    pyplot.gcf().set_size_inches(6, 8)
[predictions] = session.run(predict_on_tpu, {
images: x_test[:16],
})
plot_predictions(x_test[:16], predictions)
"""
Explanation: Now we can sample predictions from our test set to see how well we're doing:
End of explanation
"""
fit_multi_on_tpu = tf.contrib.tpu.batch_parallel(fit_batch, [images, labels], num_shards=8)
session.run(tf.global_variables_initializer())
for i in range(50):
loss = session.run(fit_multi_on_tpu, {
images: x_train[:1024], labels: y_train[:1024]
})
if i % 10 == 0:
print('loss = %f %s' % (np.mean(loss), loss))
[predictions] = session.run(predict_on_tpu, {
images: x_test[:16], labels: y_train[:16]
})
plot_predictions(x_test[:16], predictions)
"""
Explanation: We can see our network has quickly converged towards mostly correct observations!
Using Multiple TPU Cores with Batch Parallelism
Up until this point our models have only been running on a single TPU core. This is wasting a lot of the power of TPUs!
Let's change that so we can take advantage of our entire TPU device (8 cores). Scaling up to multiple cores with TPUs uses a different mechanism than the asynchronous out of graph replication used in CPU deployments. With TPUs, the software stack handles the replication for you: this is important to keep in mind, as you must use the builtin TPU replication to take advantage of the specialized TPU network. Fortunately TPU replication is easy to use.
We just replace tpu.rewrite with tpu.batch_parallel: the TPU system handles the rest! We just need to make sure our batches are divisible by the number of cores (8).
End of explanation
"""
BATCH_SIZE = 1024
def fit_batch_with_infeed():
    """Train one batch, reading from infeed and writing to outfeed."""
    # Each of the 8 replicas dequeues 1/8 of the global batch.
    per_core = BATCH_SIZE // 8
    img_batch, label_batch = tf.contrib.tpu.infeed_dequeue_tuple(
        dtypes=[tf.float32, tf.int32],
        shapes=[(per_core, 28, 28,), (per_core,)],
        name='infeed_dequeue')
    loss, train_op = fit_batch(img_batch, label_batch)
    # Return both the outfeed enqueue and the train op so the TPU program runs both.
    return tf.contrib.tpu.outfeed_enqueue_tuple((loss,)), train_op
"""
Explanation: If you're paying close attention to the output on this run, you may notice that our print statement for loss is now reporting 8 values instead of a single scalar! tpu.batch_parallel concatenates the output from each core to return a result. We are thus seeing the loss computed by each individual TPU core. Our CrossShardOptimizer takes care of averaging our gradients, but we must average the loss on the CPU if we want to obtain a single scalar value:
Using Infeed and Outfeed
In our previous examples, our TPU execution looked something like this:
Between each call to fit the TPU device is idle waiting for the CPU. If we could queue inputs and outputs, the TPU could work on new data while the CPU is working. Depending on our model, we may be able to entirely decouple the CPU and TPU training: the CPU feeds data and pulls out loss values, and the TPU runs the training independently as long as data is available:
End of explanation
"""
from tensorflow.contrib.tpu.ops.gen_tpu_ops import infeed_enqueue_tuple, outfeed_dequeue_tuple
def setup_feed(image_batch, label_batch):
    """Generate TF operations for CPU side infeed and outfeed.

    Splits the global batch into 8 shards and builds, per TPU core i,
    one infeed-enqueue op (feeding that core's shard) and one
    outfeed-dequeue op (reading back that core's scalar loss).

    Returns:
        [infeed_ops, outfeed_ops] -- two parallel lists of 8 ops each,
        indexed by device ordinal.
    """
    infeed_ops = []
    outfeed_ops = []
    # Split our input into 8 pieces and infeed each sub-batch
    infeed_batches = list(zip(tf.split(image_batch, 8), tf.split(label_batch, 8)))
    for i in range(8):
        # device_ordinal pins each enqueue/dequeue to a specific TPU core.
        infeed_op = infeed_enqueue_tuple(
            infeed_batches[i],
            [b.shape for b in infeed_batches[i]],
            device_ordinal=i
        )
        infeed_ops.append(infeed_op)
        # Each core outfeeds one scalar (the loss), hence dtype float32, shape [].
        outfeed_op = outfeed_dequeue_tuple(
            dtypes=[tf.float32], shapes=[[]], device_ordinal=i)
        outfeed_ops.append(outfeed_op)
    return [infeed_ops, outfeed_ops]
setup_feed(tf.placeholder(tf.float32, [1024, 28*28]),
tf.placeholder(tf.int32, [1024]))
# Replicate our model function onto 8 cores (with no inputs)
fit_with_infeed = tf.contrib.tpu.batch_parallel(
fit_batch_with_infeed, num_shards=8)
# Infeed requires a static shape for inputs. We create a new set of placeholders
# here with a fixed batch size.
image_batch = tf.placeholder(name='images', dtype=tf.float32, shape=[BATCH_SIZE, 28, 28])
label_batch = tf.placeholder(name='labels', dtype=tf.int32, shape=[BATCH_SIZE,])
session.run(tf.global_variables_initializer())
infeed_ops, outfeed_ops = setup_feed(image_batch, label_batch)
# # Start training. We first push a batch of data on the infeed so the device
# # is working while we're getting the next batch ready.
print('priming infeed')
session.run(tf.group(infeed_ops), {image_batch: x_train[:BATCH_SIZE],
label_batch: y_train[:BATCH_SIZE]})
for i in range(50):
_, _, loss = session.run(
[tf.group(infeed_ops), fit_with_infeed, tf.tuple(outfeed_ops)],
{image_batch: x_train[:BATCH_SIZE],
label_batch: y_train[:BATCH_SIZE]}
)
if i % 10 == 0:
print('loss = %f' % np.mean(loss))
_, final_loss = session.run([fit_with_infeed, tf.tuple(outfeed_ops)])
print('final loss = %f' % np.mean(final_loss))
[predictions] = session.run(predict_on_tpu, {
images: x_test[:16], labels: y_train[:16]
})
plot_predictions(x_test[:16], predictions)
"""
Explanation: Note we can still re-use our original training function! We've just wrapped it in the logic to remove batches from the infeed and push our loss onto the outfeed. We return both the minimize operation and the enqueue operation from our model function to ensure that both are run as part of the TPU program.
Now let's look at the CPU side. Note that we need an enqueue/dequeue operation for each TPU core. We use the standard tf.device scope to assign our operations to a given core:
End of explanation
"""
NUM_STEPS = 100
import threading
def fit_loop():
    """Run NUM_STEPS training steps entirely on-device via tpu.repeat."""
    return tf.contrib.tpu.repeat(NUM_STEPS, fit_batch_with_infeed)
def _run_infeed(session, infeed_ops, x, y, images, labels):
    """Enqueue the same (images, labels) batch NUM_STEPS times from a CPU thread."""
    feed = {x: images, y: labels}
    for step in range(NUM_STEPS):
        if step % 10 == 0:
            print('Infeed %s' % step)
        session.run(infeed_ops, feed)
losses = []
def _run_outfeed(session, outfeed_ops):
    """Drain NUM_STEPS loss values from the outfeed into the global `losses` list."""
    # hack: store output in the module-level `losses` list
    step = 0
    while step < NUM_STEPS:
        value = session.run(outfeed_ops)
        losses.append(value)
        if step % 10 == 0:
            print('Outfeed: %s %s' % (step, value))
        step += 1
infeed_thread = threading.Thread(
target=lambda: _run_infeed(session, infeed_ops,
image_batch, label_batch,
x_train[:BATCH_SIZE], y_train[:BATCH_SIZE]))
outfeed_thread = threading.Thread(
target=lambda: _run_outfeed(session, outfeed_ops))
fit_with_infeed_loop = tf.contrib.tpu.batch_parallel(fit_loop, num_shards=8)
session.run(tf.global_variables_initializer())
infeed_thread.start()
outfeed_thread.start()
session.run(fit_with_infeed_loop)
infeed_thread.join()
outfeed_thread.join()
[predictions] = session.run(predict_on_tpu, {
images: x_test[:16], labels: y_train[:16]
})
plot_predictions(x_test[:16], predictions)
"""
Explanation: Training Loops
In our previous example, we used infeed and outfeed to decouple the CPU and TPU operations, but our TPU is still dependent on the CPU to "pump" the fit operation once per training loop. What if we could put a loop around the entire TPU computation such that it was entirely decoupled from the CPU? The tpu.repeat function helps us do just that. We supply a function to run in the loop and the number of times we want to run:
End of explanation
"""
|
danellecline/stoqs | stoqs/contrib/notebooks/compare_clustering_algorithms.ipynb | gpl-3.0 | cd /vagrant/dev/stoqsgit/stoqs/
from contrib.analysis.cluster import Clusterer
%matplotlib inline
import pylab as plt
import numpy as np
# defining function to create clusters for a specified algorithm
def cluster(algorithm_string, normalize):
    """Run one clustering algorithm through the Clusterer API.

    Simulates cluster.py's command-line arguments for a fixed STOQS
    campaign/platform/time window, then returns (X, y_clusters, colors)
    ready for scatter plotting.
    """
    from argparse import Namespace
    # Build the argument namespace exactly as cluster.py's CLI would.
    args = Namespace(
        database='stoqs_september2013',
        platform='Slocum_260',
        inputs=['optical_backscatter700nm', 'fluorescence'],
        start='20130923T094038',
        end='20130923T130613',
        algorithm=algorithm_string,
        do_not_normalize=not normalize,
        verbose=True,
    )
    clusterer = Clusterer()
    clusterer.args = args

    # Load the data (returned as lists) — required before clustering.
    x, y, x_ids, y_ids = clusterer.loadData()
    # Create the clusters.
    X, y_clusters, X_ids = clusterer.createClusters()

    # A long cycle of matplotlib single-letter color codes, repeated so
    # there is one color available per cluster label.
    colors = np.tile(np.array(list('bgrcmykbgrcmykbgrcmykbgrcmyk')), 20)
    return X, y_clusters, colors
# Define function to plot clusters of 4 different algorithms
def plot_clusters(ax, algorithms, normalize=False):
    """Cluster with up to four algorithms and scatter each result on a 2x2 axes grid."""
    grid_positions = ((0, 0), (0, 1), (1, 0), (1, 1))
    for algorithm, (row, col) in zip(algorithms, grid_positions):
        X1, y_clusters, colors = cluster(algorithm, normalize)
        panel = ax[row, col]
        panel.scatter(X1[:, 0], X1[:, 1], s=2, color=colors[y_clusters])
        panel.set_title(algorithm, fontsize='20')
        panel.set_ylabel('fluorescence (micrograms/l)', fontsize='14')
        panel.set_xlabel('optical_backscatter700nm', fontsize='14')
        panel.tick_params(labelsize=12)
        if not normalize:
            # Fixed axis limits only make sense for the raw (un-normalized) data.
            panel.axis([0.0001, 0.0014, 0, 16])
"""
Explanation: Compare Unsupervised Machine Learning Algorithms
Identify clusters in oceanographic data using several clustering algorithms for unsupervised machine learning
Executing this Notebook requires a personal STOQS database. Follow the steps to build your own development system — this will take about an hour and depends on a good connection to the Internet. Once your server is up log into it (after a cd ~/Vagrants/stoqsvm) and activate your virtual environment with the usual commands:
vagrant ssh -- -X
cd /vagrant/dev/stoqsgit
source venv-stoqs/bin/activate
Connect to your Institution's STOQS database server using read-only credentials. (Note: firewalls typically limit unprivileged access to such resources.)
cd stoqs
ln -s mbari_campaigns.py campaigns.py
export DATABASE_URL=postgis://everyone:guest@kraken.shore.mbari.org:5433/stoqs
This script uses the following clustering algorithms:
- Hierarchical Clustering: identifies clusters based on distance connectivity
- DBSCAN (Density-based spatial clustering of applications with noise): identifies clusters based on data point
density
- Mean Shift: identifies clusters based on centroids (the mean of the points within a region) - finds data "blobs"
- Birch: identifies clusters by using a Clustering Feature Tree to recursively create subclusters by adding each
sample to the subcluster with the closest centroid
To be able to execute the cells and experiment with different algorithms and parameters, launch Jupyter Notebook with:
cd contrib/notebooks
../../manage.py shell_plus --notebook
navigate to this file and open it. You will then be able to execute the cells and experiment with different settings and code.
End of explanation
"""
fig, ax = plt.subplots(2, 2, figsize=(18,10));
fig.subplots_adjust(wspace=.2, hspace=.4)
plot_clusters(ax, ('Hierarchical_Clustering', 'DBSCAN', 'Mean_Shift', 'Birch'), normalize=False)
"""
Explanation: Compare clustering of sample without normalizing the input data
End of explanation
"""
fig, ax = plt.subplots(2, 2, figsize=(18,10));
fig.subplots_adjust(wspace=.2, hspace=.4)
plot_clusters(ax, ('Hierarchical_Clustering', 'DBSCAN', 'Mean_Shift', 'Birch'), normalize=True)
"""
Explanation: Compare clustering of sample with normalized input data. It's odd/interesting that the clustering is different.
End of explanation
"""
|
Taekyoon/Pytorch_Seq2Seq_Tutorial | Pytorch_Seq2Seq_Practice.ipynb | mit | MAX_LENGTH = 10
"""
Explanation: Pytorch Seq2Seq Machine Translator Practice
이번 튜토리얼에서는 Sequence to Sequence 모델의 핵심인 RNN Encoder Decoder과 Attention 모델을 이해하고, 이를 활용하여 Machine Translator를 구현해보겠습니다.
Machine Traslator에 핵심인 Sequence to Sequence 모델은 아래의 그림과 같이 구성되어 있습니다.
모델의 역할은 다음과 같습니다.
번역을 하고자 하는 데이터를 RNN Encoder에 입력하여 encoder context 정보를 얻습니다. Encoder context를 활용하여 RNN Decoder를 통해 보이고자 하는 번역 데이터를 학습하여 모델을 만듭니다. 학습된 모델은 Encoder 데이터만 입력을 하여 Decoder에서 번역된 내용을 보이게 됩니다.
Setting Sequence Length
Encoder와 Decoder에 입력할 최대 Sequence 길이에 대해 설정합니다. 빠른 학습을 위해서 최대길이는 10으로 지정하였습니다.
End of explanation
"""
from data_util import prepare_data
input_lang, output_lang, train_pairs, test_pairs = prepare_data('lang1', 'lang2', MAX_LENGTH, 2, True)
"""
Explanation: Load Europal dataset
Europal 영-불 데이터셋을 불러옵니다.
End of explanation
"""
from models import EncoderRNN, AttnDecoderRNN
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
MAX_LENGTH, dropout_p=0.1)
"""
Explanation: Implement Encoder Decoder Model
Encoder와 Decoder를 구현해봅니다.
구현하고자 하는 Encoder는 다음과 같은 구조로 구성되어 있습니다.
위에 그래프를 보면 input vector에 대해서 embedding을 하고 hidden vector와 GRU function을 통해 한 feed forward step을 하게 됩니다. 마지막으로, GRU를 통해서 output과 hidden vector를 각각 얻게 됩니다.
다음, 구현하고자 하는 Decoder의 구조는 아래와 같습니다.
Decoder구현은 Encoder보다 복잡합니다. Decoder를 보다 잘 이해하기 위해서 그래프 구조 설명 이전에 Decoder에 중요한 부분 중 하나인 Attention에 대한 설명을 하겠습니다.
Attention은 예측을 하고자 할 때 Input data에 대해 어디에 집중을 해야할 지 Encoder context에 가중치를 주는 역할을 합니다. 여기 Translator 모델에서는 매 스텝마다 들어오는 Decoder Input과 Hidden vector를 통해 Encoder context에 대한 가중치를 부여하여 Input에 대한 Output을 예측할 수 있도록 합니다.
모델의 전체과정 중 Attention 부분은 다음과 같습니다.
Input에 들어온 데이터는 embedding layer을 통해 이전 스텝의 hidden_vector와 결합을 합니다. 이후 softmax function을 거쳐 attn linear function을 두어 encoder_outputs와 matrix multiplication을 할 수 있도록 해줍니다.
Attention이 적용된 context vector는 input vector와 결합이 되어 hidden vector와 같이 GRU function에 들어갑니다. GRU에서 나온 output은 softmax를 처리하여 return 처리를 합니다.
이제 위 내용을 바탕으로 model을 구현해 보겠습니다.
models.py에 NotImplementedError라 표시된 영역에 구현해보겠습니다.
각 구현에 대한 순서는 다음과 같습니다.
Encoder 모델 __init__에 embedding과 gru 함수를 구현합니다.
Encoder 모델 forward 부분을 구현합니다. 방법은 아래와 같습니다.
Embedding function을 통해 word embedding layer를 구현합니다.
GRU function을 이용하여 multi layer RNN을 구현합니다.
Decoder 모델 forward 부분을 구현합니다. 방법은 아래와 같습니다.
Embedding functiondㅡㄹ 통해 word embedding layer를 구현합니다.
Attention Module을 구현합니다. (구현에 관한 내용은 위 그래프 Image를 참조하여 구현합니다.)
GRU function을 이용하여 multi layer RNN을 구현합니다.
Fully Connected Layer을 구현하고 Softmax를 통해 output data를 보입니다.
End of explanation
"""
from train import train_iters
plot_losses = train_iters(encoder1, attn_decoder1, input_lang,
output_lang, train_pairs[:70], 1000, MAX_LENGTH)
"""
Explanation: Implement Training Module
Training Module 중 Teacher forcing 부분에 대해서 구현을 하고 criterion과 optimizer에 대해서 설정을 해봅니다.
train.py에 NotImplementedError라 표시된 영역에 구현해보겠습니다.
Teacher forcing 부분을 구현합니다.
decoder로 부터 output vector를 받습니다.
criterion을 활용하여 loss값을 축적합니다.
ground truth 값을 decoder_input에 입력합니다.
Without Teacher forcing 부분을 구현합니다.
decoder로 부터 ouput vector를 받습니다.
output vector로 부터 argmax값을 받습니다.
decoder로 부터 받은 예측값을 decoder_input에 입력합니다.
criterion을 활용하여 loss값을 축적합니다.
EOS_token이 있을 시 break를 하도록 조건문을 둡니다.
End of explanation
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
%matplotlib inline
def showPlot(points):
    """Plot a sequence of training-loss values.

    Args:
        points: sequence of loss values (one per logging interval).
    """
    # FIX: the original called plt.figure() immediately before plt.subplots(),
    # which created an extra empty figure window on every call; subplots()
    # already creates its own figure.
    fig, ax = plt.subplots()
    # Put y-axis ticks at regular 0.2 intervals.
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
showPlot(plot_losses)
from predict import ModelPredictor
predictor = ModelPredictor(encoder1, attn_decoder1, input_lang, output_lang, MAX_LENGTH)
predictor.evaluate_randomly(train_pairs[:10])
predictor.predict_sentence("je comprends il est essentiel .")
"""
Explanation: Evaluate and predict model
구현한 모델의 training loss값들을 그래프로 확인하고, 번역성능을 확인해보도록 합니다.
End of explanation
"""
|
xchaoo/titanic | kaggle-titanic.ipynb | apache-2.0 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
# plot
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Import the linear regression class
from sklearn.linear_model import LinearRegression
# Sklearn also has a helper that makes it easy to do cross validation
from sklearn.cross_validation import KFold
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
trian_name = 'titanic_train.csv'
titanic = pd.read_csv(trian_name,dtype={"Age": np.float64}) #read data
titanic.head(5) #preview the data
titanic.describe() # describe common statics
titanic.info() #notes non-null count in Age,
#fill null 'age' with median
titanic['Age'] = titanic['Age'].fillna(titanic['Age'].median())
"""
Explanation: This is a Kaggle competition named Titanic Machine Learning From Disaster.
kaggle
Dependencies:
NumPy
IPython
Pandas
SciKit-Learn
Matplotlib
This Notebook will show examples of:
Data Handling
Importing Data with Pandas
Cleaning Data
Exploring Data through Visualizations with Matplotlib
Data Analysis
Supervised Machine learning Techniques:
Logic Regression Model
Plotting results
Support Vector Machine
Random Forest
Valuation of the Analysis
K-folds cross validation to valuate results locally
data:
titanic_test.csv
titanic_train.csv
VARIABLE DESCRIPTIONS:
survival Survival
(0 = No; 1 = Yes)
pclass Passenger Class
(1 = 1st; 2 = 2nd; 3 = 3rd)
name Name
sex Sex
age Age
sibsp Number of Siblings/Spouses Aboard
parch Number of Parents/Children Aboard
ticket Ticket Number
fare Passenger Fare
cabin Cabin
embarked Port of Embarkation
(C = Cherbourg; Q = Queenstown; S = Southampton)
SPECIAL NOTES:
Pclass is a proxy for socio-economic status (SES)
1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower
Age is in Years; Fractional if Age less than One (1)
If the Age is Estimated, it is in the form xx.5
With respect to the family relation variables (i.e. sibsp and parch)
some relations were ignored. The following are the definitions used
for sibsp and parch.
Sibling: Brother, Sister, Stepbrother, or Stepsister of Passenger Aboard Titanic
Spouse: Husband or Wife of Passenger Aboard Titanic (Mistresses and Fiances Ignored)
Parent: Mother or Father of Passenger Aboard Titanic
Child: Son, Daughter, Stepson, or Stepdaughter of Passenger Aboard Titanic
Other family relatives excluded from this study include cousins,
nephews/nieces, aunts/uncles, and in-laws. Some children travelled
only with a nanny, therefore parch=0 for them. As well, some
travelled with very close friends or neighbors in a village, however,
the definitions do not support such relations.
End of explanation
"""
plt.figure(figsize=(16,12), dpi=80)
plt.subplot2grid((2,3),(0,0)) # split the mom pic
titanic.Survived.value_counts().plot(kind='bar')# plots a bar graph of those who surived vs those who did not.
plt.title("survived (1 alive)")
plt.ylabel("people num")
plt.subplot2grid((2,3),(0,1))
titanic.Pclass.value_counts().plot(kind="bar")
plt.ylabel("people num")
plt.title(u"Pclass distribute")
plt.subplot2grid((2,3),(0,2))
plt.scatter(titanic.Survived, titanic.Age)
plt.ylabel("age")
plt.grid(b=True, which='major', axis='y')
plt.title("age vs survived (1 alive)")
plt.subplot2grid((2,3),(1,0), colspan=2)
# plots a kernel desnsity estimate of the subset of the 1st class passanges's age
titanic.Age[titanic.Pclass == 1].plot(kind='kde')
titanic.Age[titanic.Pclass == 2].plot(kind='kde')
titanic.Age[titanic.Pclass == 3].plot(kind='kde')
plt.xlabel("age")
plt.ylabel("dens")
plt.title("age distribute in all class")
plt.legend(( '1th Pclass', '2th Pclass','3th Pclass'),loc='best') # sets legend
plt.subplot2grid((2,3),(1,2))
titanic.Embarked.value_counts().plot(kind='bar')
plt.title("Embarked nums")
plt.ylabel("people num")
plt.show()
"""
Explanation: explore data primal
Survived
Pclass
Age vs Survived
Age vs Pclass
Embarked
End of explanation
"""
# Pclass vs Survived
fig = plt.figure(figsize=(10,8), dpi=80)
Survived_0 = titanic.Pclass[titanic.Survived == 0].value_counts()
Survived_1 = titanic.Pclass[titanic.Survived == 1].value_counts()
df=pd.DataFrame({'alive':Survived_1, 'die':Survived_0})
df.plot(kind='bar', stacked=True)
plt.title("Pclass vs Survived")
plt.xlabel("Pclass")
plt.ylabel("people num")
plt.show()
#Embarked vs Survived
fig = plt.figure(figsize=(10,8), dpi=80)
Survived_0 = titanic.Embarked[titanic.Survived == 0].value_counts()
Survived_1 = titanic.Embarked[titanic.Survived == 1].value_counts()
df=pd.DataFrame({'alive':Survived_1, 'die':Survived_0})
df.plot(kind='bar', stacked=True)
plt.title("Embarked vs Survived")
plt.xlabel("Embarked")
plt.ylabel("people num")
plt.show()
#Sex vs Survived
fig = plt.figure(figsize=(10,8), dpi=80)
Survived_m = titanic.Survived[titanic.Sex == 'male'].value_counts()
Survived_f = titanic.Survived[titanic.Sex == 'female'].value_counts()
df=pd.DataFrame({'male':Survived_m, 'female':Survived_f})
df.plot(kind='bar', stacked=True)
plt.title("Sex vs Survived")
plt.xlabel("sex")
plt.ylabel("people num")
plt.show()
# Sex vs Pclass vs Survived
fig=plt.figure(figsize=(14,10), dpi=80)
plt.title("Sex vs Pclass vs Survived ")
ax1=fig.add_subplot(141)
titanic.Survived[titanic.Sex == 'female'][titanic.Pclass != 3].value_counts().plot(kind='bar', label="female highclass", color='#FA2479')
ax1.set_xticklabels(["die" ,"alive"], rotation=0)
ax1.legend(["female/1&2 Pclass"], loc='best')
ax2=fig.add_subplot(142, sharey=ax1)
titanic.Survived[titanic.Sex == 'female'][titanic.Pclass == 3].value_counts().plot(kind='bar', label='female, low class', color='pink')
ax2.set_xticklabels(["die" ,"alive"], rotation=0)
plt.legend(["female/3 Pclass"], loc='best')
ax3=fig.add_subplot(143, sharey=ax1)
titanic.Survived[titanic.Sex == 'male'][titanic.Pclass != 3].value_counts().plot(kind='bar', label='male, high class',color='lightblue')
ax3.set_xticklabels(["die" ,"alive"], rotation=0)
plt.legend(["male/1&2 Pclass"], loc='best')
ax4=fig.add_subplot(144, sharey=ax1)
titanic.Survived[titanic.Sex == 'male'][titanic.Pclass == 3].value_counts().plot(kind='bar', label='male low class', color='steelblue')
ax4.set_xticklabels(["die" ,"alive"], rotation=0)
plt.legend(["male/3 Pclass"], loc='best')
plt.show()
# SibSp vs Survived
g = titanic.groupby(['SibSp','Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
# Parch vs Survived
g = titanic.groupby(['Parch','Survived'])
df = pd.DataFrame(g.count()['PassengerId'])
df
#decribe Cabin,most are single tag
titanic.Cabin.value_counts()
# notnull_Cabin vs null_Cabin vs Survived
fig = plt.figure(figsize=(10,8), dpi=80)
Survived_cabin = titanic.Survived[pd.notnull(titanic.Cabin)].value_counts()
Survived_nocabin = titanic.Survived[pd.isnull(titanic.Cabin)].value_counts()
df=pd.DataFrame({'notnull':Survived_cabin, 'null':Survived_nocabin}).transpose()
df.plot(kind='bar', stacked=True)
plt.title("notnull_Cabin vs null_Cabin")
plt.xlabel("Cabin")
plt.ylabel("people num")
plt.show()
"""
Explanation: explore data upgrade
Pclass vs Survived
Embarked vs Survived
Sex vs Survived
Sex vs Pclass vs Survived
End of explanation
"""
titanic["ParentsAndChildren"] = titanic["Parch"]
titanic["SiblingsAndSpouses"] = titanic["SibSp"]
plt.figure()
sns.pairplot(data=titanic[["Fare","Survived","Age","ParentsAndChildren","SiblingsAndSpouses","Pclass"]],
hue="Survived", dropna=True)
plt.savefig("1_seaborn_pair_plot.png")
"""
Explanation: explore data by sns
End of explanation
"""
# Encode Sex as numeric codes: male -> 0, female -> 1.
print(titanic['Sex'].unique())
titanic.loc[titanic['Sex']=='male','Sex'] = 0
titanic.loc[titanic['Sex']=='female','Sex'] = 1

# Encode Embarked: fill missing values with the most common port ('S'),
# then map S -> 0, C -> 1, Q -> 2.
# FIX: this was a Python-2-only `print expr` statement, inconsistent with the
# `print(...)` call form used above; the parenthesized form works on 2 and 3.
print(titanic['Embarked'].unique())
titanic['Embarked'] = titanic['Embarked'].fillna('S')
titanic.loc[titanic['Embarked']=='S','Embarked'] = 0
titanic.loc[titanic['Embarked']=='C','Embarked'] = 1
titanic.loc[titanic['Embarked']=='Q','Embarked'] = 2
"""
Explanation: feature encoding
End of explanation
"""
#logistic regression
# Baseline learner: 3-fold cross-validated logistic regression on the
# hand-picked numeric features, then a submission file for Kaggle.
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn
# (use sklearn.model_selection); this notebook targets an old release.
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
alg = LogisticRegression(random_state=1)
scores = cross_validation.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
print('logistic regression: '+str(scores.mean()))
# Prepare the Kaggle test set with the same cleaning/encoding as the train set.
test_name = 'titanic_test.csv'
titanic_test = pd.read_csv(test_name)
# Missing test ages are filled with the TRAIN median (presumably deliberate,
# to keep train/test imputation consistent); Fare uses the test median.
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
# Initialize the algorithm class
#alg = LogisticRegression(random_state=1)
# L1-regularized model used for the actual submission (replaces the CV model above).
alg = LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
# Train the algorithm using all the training data
alg.fit(titanic[predictors], titanic["Survived"])
# Make predictions using the test set.
predictions = alg.predict(titanic_test[predictors])
# Create a new dataframe with only the columns Kaggle wants from the dataset.
submission = pd.DataFrame({
        "PassengerId": titanic_test["PassengerId"],
        "Survived": predictions
    })
submission.to_csv("kaggle.csv", index=False)
"""
Explanation: baseline learner---LR
End of explanation
"""
pd.DataFrame({"columns":list(titanic[predictors]), "coef":list(alg.coef_.T)})
"""
Explanation: feature correlation coefficients
End of explanation
"""
def clean_and_munge_data(df):
    """Clean the Titanic dataframe and engineer model features.

    Adds Title, Family_Size, AgeFill, AgeCat, Fare_Per_Person, AgeClass,
    ClassFare and HighLow columns, imputes missing Fare/Age/Embarked values,
    label-encodes the categorical columns and drops unused ones.

    NOTE(review): relies on `substrings_in_string` and the label encoder `le`
    being defined at module level (not visible in this cell) — confirm.

    Parameters
    ----------
    df : pandas.DataFrame
        Raw Titanic data (train or test layout).

    Returns
    -------
    pandas.DataFrame
        The transformed dataframe (also modified in place).
    """
    # Treat a fare of 0 as missing so it gets imputed per class below.
    df.Fare = df.Fare.map(lambda x: np.nan if x==0 else x)

    # Extract the honorific (Mr/Mrs/...) from the passenger name.
    title_list=['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
                'Dr', 'Ms', 'Mlle','Col', 'Capt', 'Mme', 'Countess',
                'Don', 'Jonkheer']
    df['Title']=df['Name'].map(lambda x: substrings_in_string(x, title_list))

    # Collapse all titles into four buckets: Mr, Mrs, Miss, Master.
    def replace_titles(x):
        title=x['Title']
        if title in ['Mr','Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
            return 'Mr'
        elif title in ['Master']:
            return 'Master'
        elif title in ['Countess', 'Mme','Mrs']:
            return 'Mrs'
        elif title in ['Mlle', 'Ms','Miss']:
            return 'Miss'
        elif title =='Dr':
            # BUG FIX: the Sex column holds lowercase 'male'/'female'.
            # The original compared against 'Male', which never matched,
            # so every Dr (including men) was bucketed as 'Mrs'.
            if x['Sex']=='male':
                return 'Mr'
            else:
                return 'Mrs'
        elif title =='':
            # Same lowercase fix for passengers with no recognized title.
            if x['Sex']=='male':
                return 'Master'
            else:
                return 'Miss'
        else:
            return title
    df['Title']=df.apply(replace_titles, axis=1)

    # Family-size features.
    df['Family_Size']=df['SibSp']+df['Parch']
    df['Family']=df['SibSp']*df['Parch']

    # Impute missing fares with the median fare of the passenger's class.
    df.loc[ (df.Fare.isnull())&(df.Pclass==1),'Fare'] =np.median(df[df['Pclass'] == 1]['Fare'].dropna())
    df.loc[ (df.Fare.isnull())&(df.Pclass==2),'Fare'] =np.median( df[df['Pclass'] == 2]['Fare'].dropna())
    df.loc[ (df.Fare.isnull())&(df.Pclass==3),'Fare'] = np.median(df[df['Pclass'] == 3]['Fare'].dropna())

    # Numeric gender code: female -> 0, male -> 1.
    df['Gender'] = df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)

    # Impute missing ages with the mean age of the passenger's title bucket.
    df['AgeFill']=df['Age']
    mean_ages = np.zeros(4)
    mean_ages[0]=np.average(df[df['Title'] == 'Miss']['Age'].dropna())
    mean_ages[1]=np.average(df[df['Title'] == 'Mrs']['Age'].dropna())
    mean_ages[2]=np.average(df[df['Title'] == 'Mr']['Age'].dropna())
    mean_ages[3]=np.average(df[df['Title'] == 'Master']['Age'].dropna())
    df.loc[ (df.Age.isnull()) & (df.Title == 'Miss') ,'AgeFill'] = mean_ages[0]
    df.loc[ (df.Age.isnull()) & (df.Title == 'Mrs') ,'AgeFill'] = mean_ages[1]
    df.loc[ (df.Age.isnull()) & (df.Title == 'Mr') ,'AgeFill'] = mean_ages[2]
    df.loc[ (df.Age.isnull()) & (df.Title == 'Master') ,'AgeFill'] = mean_ages[3]

    # Discretize age into categories.
    df['AgeCat']=df['AgeFill']
    df.loc[ (df.AgeFill<=10) ,'AgeCat'] = 'child'
    df.loc[ (df.AgeFill>60),'AgeCat'] = 'aged'
    df.loc[ (df.AgeFill>10) & (df.AgeFill <=30) ,'AgeCat'] = 'adult'
    df.loc[ (df.AgeFill>30) & (df.AgeFill <=60) ,'AgeCat'] = 'senior'

    df.Embarked = df.Embarked.fillna('S')

    # Cabin-known indicator: missing cabin -> 0.5, known cabin -> 1.5.
    # BUG FIX: the original re-evaluated isnull() AFTER filling the missing
    # cells with 0.5, so the second mask matched every row and the whole
    # column collapsed to 1.5. Compute the mask once, up front.
    cabin_missing = df.Cabin.isnull()
    df.loc[ cabin_missing,'Cabin'] = 0.5
    df.loc[ ~cabin_missing,'Cabin'] = 1.5

    df['Fare_Per_Person']=df['Fare']/(df['Family_Size']+1)

    # Interaction features: age x class, class x per-person fare.
    df['AgeClass']=df['AgeFill']*df['Pclass']
    df['ClassFare']=df['Pclass']*df['Fare_Per_Person']

    # Cheap/expensive ticket flag based on per-person fare.
    df['HighLow']=df['Pclass']
    df.loc[ (df.Fare_Per_Person<8) ,'HighLow'] = 'Low'
    df.loc[ (df.Fare_Per_Person>=8) ,'HighLow'] = 'High'

    # Label-encode the categorical/string columns with the module-level `le`.
    # COMPAT FIX: `np.float` (an alias of the builtin) was removed in
    # NumPy 1.24; the builtin `float` is the exact equivalent (float64).
    le.fit(df['Sex'] )
    x_sex=le.transform(df['Sex'])
    df['Sex']=x_sex.astype(float)

    le.fit( df['Ticket'])
    x_Ticket=le.transform( df['Ticket'])
    df['Ticket']=x_Ticket.astype(float)

    le.fit(df['Title'])
    x_title=le.transform(df['Title'])
    df['Title'] =x_title.astype(float)

    le.fit(df['HighLow'])
    x_hl=le.transform(df['HighLow'])
    df['HighLow']=x_hl.astype(float)

    le.fit(df['AgeCat'])
    x_age=le.transform(df['AgeCat'])
    df['AgeCat'] =x_age.astype(float)

    le.fit(df['Embarked'])
    x_emb=le.transform(df['Embarked'])
    df['Embarked']=x_emb.astype(float)

    # Drop identifiers and superseded raw columns.
    df = df.drop(['PassengerId','Name','Age','Cabin'], axis=1)
    return df
"""
Explanation: feature engineering
End of explanation
"""
traindf=pd.read_csv(train_file)
#clean data
df=clean_and_munge_data(traindf)
########################################formula################################
formula_ml='Survived~Pclass+C(Title)+Sex+C(AgeCat)+Fare_Per_Person+Fare+Family_Size'
y_train, x_train = dmatrices(formula_ml, data=df, return_type='dataframe')
y_train = np.asarray(y_train).ravel()
print y_train.shape,x_train.shape
##train set and test set
X_train, X_test, Y_train, Y_test = train_test_split(x_train, y_train, test_size=0.2,random_state=seed)
#inite data
clf=RandomForestClassifier(n_estimators=500, criterion='entropy', max_depth=5,
max_features='auto', n_jobs=-1, random_state=seed)
###grid search find the best parameter
param_grid = dict( )
##create pipeline
pipeline=Pipeline([ ('clf',clf) ])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=3,scoring='accuracy',\
cv=StratifiedShuffleSplit(Y_train, n_iter=10, test_size=0.2, train_size=None, indices=None, \
random_state=seed, n_iterations=None)).fit(X_train, Y_train)
# show train score
print("Best score: %0.3f" % grid_search.best_score_)
print(grid_search.best_estimator_)
report(grid_search.grid_scores_)
print('-----grid search end------------')
print ('on all train set')
scores = cross_val_score(grid_search.best_estimator_, x_train, y_train,cv=3,scoring='accuracy')
print scores.mean(),scores
print ('on test set')
scores = cross_val_score(grid_search.best_estimator_, X_test, Y_test,cv=3,scoring='accuracy')
print scores.mean(),scores
# show test score
print(classification_report(Y_train, grid_search.best_estimator_.predict(X_train) ))
print('test data')
print(classification_report(Y_test, grid_search.best_estimator_.predict(X_test) ))
model_file=MODEL_PATH+'model-rf.pkl'
joblib.dump(grid_search.best_estimator_, model_file)
"""
Explanation: fit the classification
End of explanation
"""
|
caioau/personal | apresentacao rev4.ipynb | gpl-3.0 | YouTubeVideo('XEVlyP4_11M')
"""
Explanation: Encrypta Tudo Unicamp - 2016
Oficina pratica de privacidade
caioau , fabiom, jv ; contato
Tópicos
Referencias Recomendadas
Como fazer boas senhas
Password Managers
Autenticação em dois passos
dá tempo de falar de PGP?
Referencias recomendadas
algumas referencias legais que recomendo
Government Surveillance: Last Week Tonight with John Oliver (HBO)
End of explanation
"""
YouTubeVideo('VPBH1eW28mo')
"""
Explanation: CGP Grey: Should all locks have keys? Phones, Castles, Encryption, and You.
End of explanation
"""
YouTubeVideo('V9_PjdU3Mpo')
"""
Explanation: Kurzgesagt – In a Nutshell: Safe and Sorry – Terrorism & Mass Surveillance
End of explanation
"""
YouTubeVideo('kWNk9irv1e8')
"""
Explanation: Snowden's Cryptographer on the NSA & Defending the Internet
End of explanation
"""
YouTubeVideo('ucRWyGKBVzo')
"""
Explanation: 'State of Surveillance' with Edward Snowden and Shane Smith (FULL EPISODE)
End of explanation
"""
Image('https://1.bp.blogspot.com/-dvlWtVAz8Ek/V1UjOdBI02I/AAAAAAAAoU4/Ssu0LqLKoxwMhsGJjMycLF7TKgQ9yZ7LwCLcB/s1600/mark-zuck-hacked.png',retina=True)
"""
Explanation: Links
tem boi na linha
O Guia Motherboard para não ser hackeado
EFF Surveillance Self-Defense
privacytools.io
Terms of Service; Didn't Read
/r/chapeubranco
Como fazer boas senhas
Pré requesitos de boas senhas
ter pelo menos 8 caracteres.
facil de lembrar.
ter letras maisculas, minusculas, numeros e caracteres especiais (ou pelo menos 3 desses grupos)
NUNCA reutilize suas senhas.
não deve conter datas, nomes ou outras coisas pessoais.
End of explanation
"""
Image('https://imgs.xkcd.com/comics/password_strength.png')
"""
Explanation: entropia
$ H = \log_2 (\mbox{#senhas}) = \log_2 N^L = L \; log_2 N$
| Grupo | num simbolos (N) | entropia/simbolo (H)|
|:-------------:|:----------------:|:-------------------:|
| numeros | 10 | 3.32 |
| letras | 26 | 4.7 |
| "tudo" | 94 | 6.5 |
| palavra dadoware | 7776 | 12.92 |
| Entropia | "força" |
|:-----------:|:------------------------------------------------------------------------:|
|< 28 bits | Muito fraca; Mantenha longe de membros familiares |
|28 - 35 bits | Fraca; mantenha longe para maioria das pessoas, use para senhas de login |
|36 - 59 bits | Razoavel; senhas relativamente seguras para login de rede |
|60 - 127 bits| Forte; Pode ser usada para guardar informação bancaria |
|128+ bits | Muito Forte; Normalmente é um exagero |
Dadoware
Dadoware é um método para gerar senhas mais seguras usando dados, lápis e papel
descrever o método, dar um exemplo.
livreto
exemplo
Para cada palavra , jogue o dado 5 vezes , obtendo por exemplo:
2-6-5-1-3
vá na pagina 2,6 e procure a palavra 513 = egípcio
repita o processo 6 vezes , por exemplo
xkcd#936
End of explanation
"""
YouTubeVideo('3NjQ9b3pgIg')
"""
Explanation: Rumkin Password Strength Test
teste uma senha aqui aqui
Password Choice - Computerphile
End of explanation
"""
YouTubeVideo('yzGzB-yYKcc')
"""
Explanation: Edward Snowden on Passwords: Last Week Tonight with John Oliver (HBO)
End of explanation
"""
Image('https://www.rarst.net/images/PasswordCardprintablepasswordgenerator_127B/passwordcard.png',retina=True)
"""
Explanation: The science of password selection
Otimo post sobre escolha de senhas
PIN number analysis
Otimo post sobre pins codes
Password managers
É um programa que cria boas senhas aleatorias automaticamente, para cada site individualmente, guardando todas as senhas trancadas com apenas uma senha.
KeePass/ KeepassX
facil e simples
disponivel no repositorio das principais distros GNU/Linux
Autotype :-D
tem para android (KeePassDroid)
Dica: File -> Database settings -> Encryption Rounds -> reloginho
SuperGenPass: A Free Bookmarklet Password Generator
site
Não utiliza um arquivo contendo suas senhas (previnindo a perda ou sequestro das senhas)
tem para android (SuperGenPass)
Roda no navegador (não precisa instalar nada)
Pass: the standard unix password manager
site
usa PGP ;)
GIT :-D
tem para android (PwdStore)
PasswordCard
site
* "Gerenciador" de senhas old School :P
End of explanation
"""
Image('https://s1.mzstatic.com/us/r30/Purple5/v4/66/8b/9c/668b9cf1-3502-3683-7787-ef450ef90019/screen568x568.jpeg',retina=True)
"""
Explanation: Autenticação em dois passos
A ideia consiste em: para logar em sua conta, apenas sua senha não é o suficiente é preciso entrar um codigo temporario gerado pelo celular.
No android use o APP software livre FreeOTP (disponivel no fdroid)
End of explanation
"""
Image('https://comsecllc.com/wp-content/uploads/2016/05/IMSI.png',retina=True)
"""
Explanation: notificações sensiveis
colocar print com o pin code vazando e outro print acertado
como faz no iphone?
IMSI catchers
Defcon 18 - Practical Cellphone Spying
https://antivigilancia.org/pt/2016/03/detectando-antenas-de-celular-espias/
End of explanation
"""
|
qkitgroup/qkit | qkit/doc/notebooks/Quickplot_demonstration.ipynb | gpl-2.0 | %matplotlib qt5
import qkit
qkit.cfg['fid_scan_hdf'] = True
#qkit.cfg['datadir'] = r'D:\data\run_0815' #maybe you want to set a path to your data directory manually?
qkit.start()
import qkit.gui.notebook.quickplot as qp
"""
Explanation: In contrast to the usually taken %matplotlib inline, we want to have a dedicated window here, where we can just exchange the data being shown.
It should work with most matplotlib backends, I just took qt5 here.
End of explanation
"""
q = qp.QuickPlot(maximize=True)
q.show()
"""
Explanation: quickplot will automatically maximize the width of the notebook view. You can suppress this by setting maximize=False
End of explanation
"""
q.remove_offset_x_avg = False
q.remove_offset_y_avg = True
q.unwrap_phase = True
try: #try to replot the current dataset
q.plot_selected_df(None)
except:
pass
"""
Explanation: A number of very basic filtering switches is included.
You can enable them below.
End of explanation
"""
|
yw-fang/readingnotes | machine-learning/McKinney-pythonbook2013/chapter04-note.ipynb | apache-2.0 | import numpy.random as nrandom
data = nrandom.randn(3,2)
data
data*10
data + data
"""
Explanation: 阅读笔记
作者:方跃文
Email: fyuewen@gmail.com
时间:始于2017年9月12日, 结束写作于
第四章笔记始于2017年10月17日,结束于2018年1月6日
第四章 Numpy基础:数组和矢量计算
时间: 2017年10月17日早晨
Numpy,即 numerical python的简称,是高性能科学计算和数据分析的基础包。它是本书所介绍的几乎所有高级工具的构建基础。其部分功能如下:
ndarray,一个具有矢量算数运算和复杂广播能力的快速且节省空间的多维数组
在不需要循环的情况下,用于对数组快速运算的标准数学函数
用于读写磁盘数据的工具以及用于操作内存映射文件的工具
线性代数、随机数生成以及傅里叶变化
用于集成由 C、C++、Fortran 等语言编写的代码的工具
Numpy 本身功能不复杂,但是理解 Numpy 有助于更高效地使用诸如 Pandas 之类的工具。
原书作者主要从事数据分析,所以他关注的功能主要集中于:
用于数据整理和清理、子集构造和过滤、转换等快速的矢量化数组运算
常用的数组算法,如排序、唯一化、集合运算等。
高效地描述统计和数据聚合/摘要运算
用于异构数据集的合并/连接运算的数据和关系型数据运算
将条件逻辑表述为数组表达式(而不是带有if-elif-else分支的循环)
数据的分组运算(聚合、转换、函数应用等)第五章将对此进行详细解释。
注:建议总是使用 import numpy as np; 而不是用 from numpy import *
Numpy 的 ndarray:一种多维数组对象
时间: 2017年10月18日晚
Numpy 一个重要特点就是其 N 维数组对象,即 ndarray,该对象是一个快速而灵活的数据集容器。我们可以利用这种数组对整块数据进行一些运算,它的语法跟标量元素之间的运算相同:
End of explanation
"""
data.shape # 数组的维数,即行数和列数
data.dtype #数组中元素的类型
data.size #数组的大小
dataconversion = data.astype('int8')
print('data is: ', data)
print('\n dataconversion is ', dataconversion)
"""
Explanation: ndarray 是 同构数据多维容器,that is to say, 所有元素必须是同类型的。
每个数组都有一个 shape (一个表示各维度大小的元祖)和一个 dtype (一个用于说明数组数据类型的对象):
End of explanation
"""
import numpy as np
data1 = [2,3,3,5,6,9]
array1 = np.array(data1)
print('data1 is ', type(data1))
print('array1 is ', type(array1))
data1[:]
array1
print(array1)
print(array1.dtype)
print(array1.shape)
print(array1.size)
"""
Explanation: 虽然大多数数据分析工作不需要深入理解Numpy,但是精通面向数组的编程和思维方式是成为 Python 科学计算达人的一大步骤。
注意:第一版翻译版本中有个批注,说“本书中的数组、Numpy数组、ndarray 基本指的都是同一样东西,即 ndarray 对象”
创建 ndarray
创建数组最简单的办法就是使用 array 函数。它接受一切序列行的对象(包括其他数组),然后产生一个新的含有传入数据的 NumPy 数组。以列表转换为数组方式为例:
End of explanation
"""
import numpy as np
data2=[[23,5,5,6], [4,56,2,8],[3,5,6,7],[2,3,4,5]]
arr2=np.array(data2)
arr2
arr2.ndim #Number of array dimensions.
arr2.shape
arr2.size
"""
Explanation: 嵌套序列(比如由一组等长列表组成的列表),将会被转换为一个多维数组:
End of explanation
"""
data.dtype
arr2.dtype
"""
Explanation: 除非显示说明,np.array 会尝试为新建的这个数组推断出一个较为合适的数据类型。数据类型保存在一个特殊的 dtype 对象中。比如说,在上面的两个examples中,我们有
End of explanation
"""
np.zeros(10)
arr4 = np.zeros((3,6,3))
arr4
arr4.ndim
arr3 = np.empty((2,4,2))
arr3
arr3.ndim
arr5 = np.empty((2,3,4,2))
arr5
"""
Explanation: 除 np.array 之外,还有一些函数可以新建数组。比如,zeros 和 ones 分别可创建指定长度或形状的全 0 和 全 1 数组。empty 可创建一个没有任何具体值的数组。要用这些方法创建多维数组,只需要传入一个表示形状的元祖即可:
End of explanation
"""
np.arange(15)
np.arange(2)
"""
Explanation: 警告 认为 np.emptry 会返回全 0 数组的想法是不安全的。很多情况下(如上所示),它返回的都是一些未初始化的垃圾值。
arange 是 Python 内置函数range 的数组版:
End of explanation
"""
data1 = (1,2,3,4)
np.asarray(data1)
np.array(data1)
data2 = ([2,2])
np.asarray(data2)
import numpy as np
np.arange(15)
ones
np.ones(19)
np.zeros(10)
np.empty(4)
np.eye(3)
np.eye(4)
np.identity(2)
np.identity(3)
"""
Explanation: 下表列出了一些数组创建函数。由于Numpy关注的是数值计算,因此,如果没有特别的制定,数据类型一般都是 float64。
|函数 | 说明 |
|-------------|---------------|
| array | 将输入数据(列表、元祖、数字或者其他数据类型)转换为 ndarray。要么推断出 dtype,要么显示地指定dtype。默认直接复制输入数据|
| asarray | 将输入转为 ndarray,如果输入本身就是一个ndarray就不进行复制|
| arange | 类似于python内置的range,但是返回的是一个ndarray,而不是一个列表|
| ones、ones_like | 根据指定的形状和dtype创建一个全1数组。ones_like以另一个数组为参数,并根据其形状和dtype创建一个全1数组|
|zeros、zeros_like | 类似上述命令,只是改为全0数组|
|empty、empty_like|创建新数组,只分配内存空间但不填充任何值|
|eye、identity|创建一个正方的N * N 单位矩阵(对角线为1,其余为0)|
End of explanation
"""
import numpy as np
arr1 = np.array([1,2,3], dtype = np.float64)
arr2 = np.array([1,2,3], dtype = np.int32)
arr1.dtype
arr2.dtype
"""
Explanation: ndarray 的数据类型
Recently I just moved from Shanghai to Kyoto, hence I had stopped taking notes for almost two weeks.
From now on, I will continue writing this notes. Let's note~
YWFANG @Kyoto University November, 2017
dtype()
dtype 是一个特殊的对象,它含有ndarray将一块内存解释为特定数据类型的所需信息:
End of explanation
"""
import numpy as np

# A 16-bit integer array (the dtype code 'i2' is np.int16).
arr = np.arange(1, 6, dtype=np.int16)
print(arr.dtype)
print(arr)

# astype returns a NEW array cast to the requested dtype.
float_arr = arr.astype(np.float64)
float_arr.dtype
"""
Explanation: dtype 是 NumPy 强大的原因之一。在多数情况下,它们直接映射到相应的机器表示,这使得“读写磁盘上的二进制数据流”以及“集成低级语言,如fortran"等工作变得简单。
下表记录了NumPy所支持的全部数据类型:(记不住没有关系,刚开始记不住也很正常)
|类型|类型代码|说明
|-------------|---------------|
|int8、unit8| i1、u1| 有符号和无符号的8位(1个字节)整型|
|int16、unit16| i2、u2| 有符号和无符号的16位(2字节)整型|
|int32、unit32| i4、u4| 。。。32位。。。|
|int64、unit64| i8、u8|。。。64位。。。|
| float16| f2| 半精度浮点数|
| flaot32| f4或者f| 标准单精度浮点数,与C的float兼容|
| float64| f8或d | 标准双精度浮点数,与C的double和Python的float对象兼容|
|float128| f16或者g| 扩展精度浮点数|
|complex64、complex128|c8、c16| 分别用两个32位、64位或128位浮点数表示的复数|
|complex256|c32|复数|
| bool|?|存储True 或Flase 值的布尔类型|
|object | O | Python多象类型|
| string_|S|固定长度的字符串类型(每个字符1个字节)。例如,要创建一个长度位10的字符串,应使用S10|
|unicode|U|固定长度的unicode类型(字节数由平台决定)。跟字符串定义方式一样(如U10)|
我们可以通过 ndarray 的 astype 方法显示地转换其dtype:
End of explanation
"""
import numpy as np
arr = np.array([1.2, 2.3, 4.5, 53.4,3.2,4.2])
print(arr.dtype)
print(arr)
print(id(arr)) #memoery address of arr
print('\n')
#conversion to integer
int_arr = arr.astype(np.int32)
print(int_arr.dtype)
print(int_arr)
"""
Explanation: In the above example, an integer array was converted into a floating array.
In the following example, I will show you how to convert a float array to an int array. You will see that, if I cast some floating point numbers to be of interger type, the decimal part will be truncated.
End of explanation
"""
import numpy as np
num_strings_arr = np.array(['1.25', '-9.6', '42'], dtype = np.string_)
print(num_strings_arr)
print(num_strings_arr.dtype)
float_arr = num_strings_arr.astype(np.float64)
# num_strings_arr.astype(float)
print(float_arr.dtype)
print(float_arr)
# alternatively, we can use a lazy writing
float1_arr = num_strings_arr.astype(float)
print(float_arr.dtype)
print(float_arr)
"""
Explanation: If you have an array of strings representing numbers, you can also use 'astype' to convert them into numberic form:
End of explanation
"""
# in this example, we can see that the int_arry will converted into
# a floating array, in particular, the dtype of calibers was used
# during the conversion using astype(calibers.dtype)
import numpy as np
int_array = np.arange(10)
print(int_array, int_array.dtype)
calibers = np.array([.22, .20, .23,.45, .44], dtype=np.float64)
print(calibers , calibers.dtype)
int_array_new = int_array.astype(calibers.dtype)
print(int_array_new, int_array_new.dtype)
#when stating an array, we can use the short code in the table to assign
# the dtype of the array
# for example
import numpy as np
empty_array = np.empty(8, dtype='u4')
print(empty_array)
print('\n')
zero_array = np.zeros(12, dtype='u4')
print(zero_array, zero_array.dtype)
print('\n')
one_array = np.ones(9, dtype='f8')
print(one_array, one_array.dtype)
print(*one_array)
"""
Explanation: In addition, we can use another array’s dtype attribute:
End of explanation
"""
import numpy as np
arr = np.array([[1., 2., 3.,],[3.,5.,6.]])
print(arr.shape)
print(arr)
arr*arr
arr-arr
arr+arr
"""
Explanation: 点数(比如float64和float32)只能表示近似的分数值。因此复杂计算中,由于可能积累的浮点错误,比较浮点数字大小时,只能在一定的小数位数以内有效。
数组和标量之间的运算
数据的便利之处在于即使我们不用loop,也可以对批量数据进行运算和操作。这种方式通常叫做“矢量化”(vectorization)。大小相等的数组之间的任何算数运算都会将运算应用到元素级:
End of explanation
"""
1/arr
arr*2
"""
Explanation: 同样地,当数组与标量进行算数运算时,也会遍历到各个元素
End of explanation
"""
import numpy as np
arr = np.arange(10, dtype='i1')
print(arr)
print(arr.dtype)
print(arr[0],arr[5])
print(arr[0:2])
arr[5:8]=12
print(arr)
#作为对比,我们回顾下之前列表的一些操作
list1=[0,1,2,3,4,5,6,7,8,9]
print(list1[:])
print(list1[0:2])
list1[5] = 12
print(list1[:])
list1[5:8]=12 #这里是跟数组很不同的地方
#如果不使用一个iterable,这里并无法赋值
print(list1[:])
"""
Explanation: 不同大小的数组之间的运算叫做广播 broadcasting,我们之后还会在第12章进行深度的学习。
基本的索引和切片
NumPy 数组的索引是一个内容丰富的主题,因为选取数据子集或者单个元素的方式非常多。一维数组很简单。从表面看,它们跟python列表的功能差不多。
End of explanation
"""
import numpy as np
arr = np.arange(10)
print(arr)
arr_slice = arr[5:8]
arr_slice[1] = 12345
print(arr)
arr_slice[:]=123
print(arr)
"""
Explanation: 如上面例子中看到的那种,当我们将标量赋值给一个切片时(arr[5:8]=12),该值会自动传播(也就是12章将降到的broadcasting)到整个选区。跟列表最重要的区别在于,数组切片是原始数组的视图。这意味着数据不会被复制,视图上任何的修改都会直接反映到源数组上。
End of explanation
"""
import numpy as np
arr = np.arange(10)
arr_slice = arr[5:8]
arr_slice[1] = 12345
arr1 = arr[5:8]
print(arr1)
arr2 = arr[5:8].copy()
print(arr2)
#in this example,arr1仍然是数组的视图,
#但是arr2已经是通过复制得到的副本了
arr[5:8]=78
print('arr1 = ', arr1)
print('arr2 = ', arr2)
"""
Explanation: 由于python常用来处理大数据,这种通过操作数组视图就可以改变源数组的方式,可以避免对数据的反复复制所带来的性能和内存问题。
如果我们想要得到的是一个数组切片的副本,而不是视图,就需要显式地进行复制操作,例如
End of explanation
"""
import numpy as np
arr2d = np.array([[1,2,3],[4,5,6],[7,8,9]])
arr2d[2]
"""
Explanation: 对于高维数组,能做的事情更多。在一个二维数组中,各个索引位置上的元素不再是标量,而是一维数组:
End of explanation
"""
arr2d[0][2]
arr2d[0,2]
"""
Explanation: 因此可以对各个元素进行递归的访问,不过这样需要做的事情有点多。我们可以传入一个以逗号隔开的索引列表来选区单个元素。也就是说,下面两种方式是等价的:
End of explanation
"""
import numpy as np
arr3d = np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]])
print(arr3d)
arr3d[0] #它是一个 2*3 数组
"""
Explanation: 下图说明了二维数组的索引方式
在多维数组中,如果省略了后面的索引,则返回对象会是一个维度低一点的ndarray(它含有高一级维度上的所有数据)。
这里中文版的作者特别说明了上面这句话。括号外面的“维度”是一维、二维、三维之类的意思,而括号外面的应该理解为“轴”。也就是说,这里指的是“返回的低维度数组含有原始高维度数组某条轴上的所有数据。
下面看个例子来理解:
End of explanation
"""
arr3d[0] = 42
print(arr3d)
print(arr3d[0,1])
print(arr3d[1,0])
"""
Explanation: 标量值和数值都可以赋值给 arr3d[0]:
End of explanation
"""
import numpy as np
arr = np.arange(10)
print(arr)
arr[4]=54
print(arr[1:6])
"""
Explanation: 注意,上面所有选取数组子集的例子中,返回的数组都是视图。
切片索引
ndarray 的切片语法跟python列表这样的一维对象差不多:
End of explanation
"""
import numpy as np
arr2d = np.array([[2,3,4],[3,5,5],[3,5,5]])
print(arr2d)
arr2d[:2]
"""
Explanation: 高维度对象的花样更多,我们可以在一个或者多个轴上进行切片、也可以跟整数索引混合使用。
End of explanation
"""
arr2d[:2, :2]
"""
Explanation: 上述我们可以看出,这里的切片是沿着第0轴(即第一个轴)切片的。换句话说,切片是沿着一个轴向选取元素的。我们可以单次传入多个切片,就像传入多个索引那样:
End of explanation
"""
arr2d[2,:2]
arr2d[:,:1] #这里,我们实现了对高维轴进行了切片
"""
Explanation: 像上述这样的切片方式,只能得到相同维数的数组视图。我们还可以将整数索引与切片混合使用,从而得到低纬度的切片:
End of explanation
"""
arr2d[:,:1] = 0
print(arr2d)
"""
Explanation: 自然地,对切片表达式的赋值操作也会被扩散到整个选区:
End of explanation
"""
%reset
import numpy as np
from numpy.random import randn
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
#please make a comparison, if you use
# names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'], dtype='S4')
print(names, names.dtype)
type(names)
print('\n')
data = randn(7,4)
print(data, data.dtype, data.shape)
type(data)
"""
Explanation: 布尔型索引
来看这样一个例子,假设我们有一个用于存储数据的数组以及一个存储姓名的数组(含有重复项)。在这里,我将使用 numpy.random 中的randn函数生成一些正态分布的随机数据。
End of explanation
"""
names == 'Will'
"""
Explanation: 假设 names 数组中的每个名字都对应 data数组中的一行,而我们想要选出对应于名字‘Bob'的所有行。跟算数运算一样,数组的比较运算(如==)也是矢量化的。因此,对于names和字符串"Bob"的比较运算将会产生一个boolean array
End of explanation
"""
data[names =='Will']
"""
Explanation: 这个Boolean array可以用于数组索引,This boolean array can be passed when indexing the array:
End of explanation
"""
data[names =='Will', 2:]
data[names =='Will', 2]
"""
Explanation: 当利用布尔型数组进行索引时候,必须注意布尔型数组的长度需要与被索引的轴长度一致。此外,还可以将布尔型数组跟切片、整数(或者整数序列,稍后对此进行详细的介绍)混合使用:
End of explanation
"""
names != 'Will'
print(data[names != 'Will'])
# BUG FIX: unary '-' on a boolean array (data[-(names == 'Will')]) raises
# TypeError on modern NumPy; the boolean-mask complement is spelled '~'.
data[~(names == 'Will')]
data[~(names == 'Bob')]
# historical spelling (python2 / old NumPy only):
# data[-(names == 'Bob')]
"""
Explanation: 要选择除了will以外的其他值,既可以使用不等于符号(!=),也可以通过符号(-)对条件进行否定
End of explanation
"""
mask = (names =='Bob') | (names == 'Will')
mask
data[mask]
"""
Explanation: 如果我们要选取这三个名字中的两个进行组合来应用多个布尔条件,需要使用&(和)、|(或)之类的布尔运算符:(注意,python关键字and和or在布尔型数组中是无效的)
End of explanation
"""
data[data<0] = 0
data
"""
Explanation: 值得注意的是,通过布尔索引选取数组中的数据,将总是创建数据的副本,即使返回一模一样的数组也是如此。
通过布尔型数组设置值是一种常用的方法。为了将data中的所有负数变为0,我们只需要
End of explanation
"""
data[names != 'Will'] = 7
data
"""
Explanation: 通过一维布尔数组设置整行或列的值也很简单:
End of explanation
"""
#Suppose we had an 8 × 4 array:
import numpy as np
arr1 = np.zeros((8,4))
print(arr1)
print('\n')
for i in range(8):
arr1[i] = i+1
print(arr1)
"""
Explanation: 花式索引
fancy indexing,即花式索引,是一个NumPy专业术语,代指利用整数数组进行索引。
End of explanation
"""
arr1[[4,3,0,6]]
"""
Explanation: 为了以特定顺序选取行子集,只需传入一个用于指定顺序的整数列表或ndarray即可:
End of explanation
"""
arr1[[-4,-3,-1,-6,-0]]
"""
Explanation: 上面的代码的,我们用一个列表[4,3,0,6]就选出了arra1中的第4,3,0,6的子集。
如果我们使用负数进行索引,则选择的顺序将是从末尾到开头。
注意-0和0是一样的,还是开头的第一行作为0. 这是值得注意的地方。
End of explanation
"""
# 在第12章,我们会展开讲讲reshape,在这个例子中,我们只是使用 reshape
import numpy as np
arr = np.arange(32).reshape((8,4))
print(arr)
print('\n')
arr_select = arr[[1,5,7,2],[0,3,1,2]]
print(arr_select)
"""
Explanation: 一次传入多个索引数组会会比较特别。它返回的是一个一维数组,其中的元素对应各个索引元组:
End of explanation
"""
import numpy as np
arr = np.arange(32).reshape((8,4))
print(arr)
print('\n')
arr_select = arr[[1,5,7,2]][:, [0,3,1,2]]
#1 5 7 2 选取行
#0 3 2 1 选取列
print(arr_select)
"""
Explanation: 从上述代码的结果看不难看出,得出来的结果是[1,0] [5,3] [7,1] 和 [2,2]
那么怎么选取矩阵的行列子集呢?下面,我们只需要稍微改动下代码即可实现:(这部分最好再读几遍原书,字句不好理解)
End of explanation
"""
import numpy as np
arr = np.arange(32).reshape((8,4))
print(arr)
print('\n')
arr_select = arr[np.ix_([1,5,7,2],[0,3,1,2])]
print(arr_select)
"""
Explanation: 此外,还可以使用 np.ix_函数来实现上述的功能,它可以将两个一维整数数组转换为一个用于选取方形区域的索引器:
End of explanation
"""
import numpy as np
arr = np.arange(15).reshape((3,5))
print(arr)
print(arr.T)
print('\n')
print(arr)
"""
Explanation: It should be mentioned that, 花式索引与切片不一样,它总是将数据复制到新数组中。
数组转置和轴对称
转置,即 transpose,是重塑的一种重要特殊形式,它返回的是原数据的视图(不会进行任何复制操作)。数组不禁有transpose方法,还有一个特殊的T属性。
End of explanation
"""
import numpy
from numpy.random import randn
arr = randn(6,3)
print(arr, '\n')
np.dot(arr.T, arr)
"""
Explanation: 当我们进行矩阵预算时候,进行需要用到转置操作。例如,要用 np.dot计算矩阵内积X$^T$X:
End of explanation
"""
# A short example of transposing a 3-D array.
import numpy as np
arr = np.arange(16).reshape((2,2,4))
print(arr)
# BUG FIX: the original passed (1), which is just the int 1 and raises
# ValueError for a 3-D array. transpose needs a permutation of ALL axes;
# (1, 0, 2) swaps the first two axes and leaves the last in place.
arr_transpose = arr.transpose((1, 0, 2))
print(arr_transpose)
"""
Explanation: 对于更高维的数组,transpose 时需要得到一个由轴编号组成的元祖才能对这些轴进行转置(这个可能不好理解,得多阅读几次):
End of explanation
"""
import numpy as np
arr = np.arange(18).reshape(3,3,2)
print(arr, '\n')
arr_axes1 = arr.swapaxes(0,1)
print(arr_axes1)
print('\n')
arr_axes2 = arr.swapaxes(1,2)
print(arr_axes2)
"""
Explanation: 从上面几个例子,我们可以看出,对于简单的低维矩阵,使用.T就可以实现转置,毕竟只是进行轴对换而已;但是对于高维数组,就显得麻烦好多。ndarray还有一个swapaxes方法,它需要接受一对轴编号:(注意swapaxes也是返回源数据的视图,并不会进行任何复制操作。)
End of explanation
"""
import numpy as np
arr = np.arange(10)
print(arr, '\n')
print(np.sqrt(arr))
print(arr,'\n')
np.exp(arr) #the results are e^N (N = 0, 1, 2,...)
"""
Explanation: 通用函数:快速的元素级数组函数。
通用函数(即ufuc)是一种对ndarray中对数据执行元素级运算对函数。我们可以将其看作简单对函数(接受一个或者多个标量值,并产生一个或者多个标量值)的矢量化包装器。
许多 unfunc 都是简单的元素级变体,如sqrt和exp:
End of explanation
"""
import numpy as np
from numpy.random import randn
x = randn(8)
print(x,'\n')
y = randn(8)
print(y,'\n')
max_number = np.maximum(x,y)
print(max_number,'\n')
"""
Explanation: 上述这些都是一元(unary)ufunc。另外一些(如add或maximum)接受2个数组(因此也叫二元binary ufunc),并返回一个结果数组:
End of explanation
"""
import numpy as np
from numpy.random import randn
arr = randn(7)*5
print(arr,'\n')
arr_1 = np.modf(arr)
print(arr_1)
print(type(arr_1))
print(arr_1[1])
"""
Explanation: 此外,有一小部分的ufunc,它们可以返回多个数组。mof就是一个例子,它是Python内置函数
divmod的矢量化版本,用于分离浮点数组的小数和整数部分。通过下面的例子,我们会发现,mof其实得到的是几个数组组成的tuple
End of explanation
"""
import numpy as np
from numpy.random import randn
new = randn(10)
new
np.sign(new)
import numpy as np
new = np.arange(10)
new = new+0.1
print(new,'\n')
np.ceil(new)
import numpy as np
from numpy.random import randn
new = randn(10)
print(new,'\n')
print('rint function:', np.rint(new))
print('isnan function: ', np.isnan(new))
print('isfinite function', np.isfinite(new))
print('isinf function: ', np.isinf(new))
print('logical_not function: ', np.logical_not(new))
#Revieing some knowledge I have learnt
import numpy as np
arr1 = np.arange(16,dtype='i4').reshape(2,2,4)
arr2 = np.arange(10,dtype='float')
print(arr1)
print('\n')
print(arr2)
print('\n')
arr3=arr1.copy()
arr3[1]=23
print(arr3.astype(arr2.dtype))
sum(arr1,arr3)
print('mean value = ', arr1.mean(), '\n' 'max value is ',
arr1.max(), '\n' 'std root = ', arr1.std(), '\n'
'The sum of all the elements = ', arr1.cumsum(),
'\n' 'The multipy of all the elements = ', arr1.cumprod())
"""
Explanation: 下表中列出了一些一元和二元ufunc
一元ufunc
|函数|说明|
|------|-----|
|abs, fabs|计算整数、浮点数和负数的绝对值。对于复数值,可以使用更快的fabs|
|sqrt|计算各元素的平方根。相当于 arr 0.5|
|square|计算各元素的平方。相当于是 arr 2 |
|exp|计算各元素的e指数,即 e$^x$|
|log,log10,log2,log1p|分别对应自然对数(以e为底),底数是10的log,底数是2的log,以及log(1+x)|
|sign|计算各元素的正负号:1代表整数,0代表零,-1代表负数|
|ceil|计算各元素的ceiling值,即大于等于该值的最小整数|
|floor|计算各元素的floor值,即小于等于该值的最大整数|
|rint|将各元素之四舍五入到最接近的整数,保留dtype|
|modf|将数组的小数和整数部分以两个独立的数组形式返回|
|isnan| 返回一个表示“哪些值是NaN(这不是一个数字)”的boolean数组|
|isfinite、isinf|分别返回一个表示“哪些元素是有穷的(非inf,非NaN)” 或者 “哪些元素是无穷的”的布尔型数组|
|cos、cosh、sin、sinh、tan,tanh|普通型和双曲型三角函数|
|arccos、arccosh、arcsin、arcsinh、arctan、arctanh|反三角函数|
|logical_not| 计算各个元素not x的真值。相当于-arr|
二元ufunc
|函数|说明|
|------|-----|
|add|将数组中对应的元素相加|
|substract|从第一个数组中减去第二个数组中的元素|
|multiply|数组元素相乘|
|divide、floor_divide|除法或向下圆整除法(丢弃余数)|
|power|对第一个数组中的元素A,根据第二个数组中的相应好元素B,计算A$^B$|
|maximum, fmax|元素级的最大值计算。fmax将忽略NaN|
|minimum、fmin|元素级的最小值计算。fmin将忽略NaN|
|mod|元素级的求模计算(除法的余数)|
|copysign|将第二个数组中的值的符号复制给第一个数组中的值|
|greater、greater_equal、less、less_equal、equal、not_equal|执行元素级的比较运算,最终产生boolean型数组。相当于中缀运算>, >=, <, <=, ==, !=|
|logical_and、logical_or、logical_xor | 执行元素级的真值逻辑运算。相当于中缀运算符 '&','$|$','^'|
End of explanation
"""
import numpy as np
points = np.arange(-1,1,0.5) # 产生4个间隔同为0.5的点。
print(points[:10],'\n')
xs, ys = np.meshgrid(points, points)
print('xs is \n',xs,'\n')
print('transposed xs is \n', xs.T)
print('ys is \n', ys, '\n')
"""
Explanation: 利用数组进行数据处理
NumPy数组的矢量化在很大程度上简化了数据处理方式。一般而言,矢量化运算要比等价的纯python方式快1-2个数量级,尤其是在数值计算处理过程中这个优势更加的明显。在后面的第12章节中,我们将了解到广播,它是一种针对矢量化计算的强大手段。
假设我们想要在一组值(网格型)上计算sqrt(x^2+y^2)。我们当然可以选择用loop的方式来计算,但是我们在这里使用数组的方法。
np.meshgrid 函数接受两个一维数组,并产生两个二维矩阵(对英语两个数组中所有的(x,y)对):
End of explanation
"""
z = np.sqrt(xs**2 + ys**2)
print('z = \n', z)
"""
Explanation: 现在,我们来计算xs二次方与ys二次方的和:
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
#Here, the matplotlib function 'imshow' was used
# to create an image plot from a 2D array of function values
plt.imshow(z, cmap=plt.cm.gray);
plt.colorbar()
"""
Explanation: 我们试着将上述这个z函数画出来
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
arr1=np.arange(-1,1,0.001)
print(arr1)
xs1,ys1=np.meshgrid(arr1,arr1)
#print(xs)
z1 = np.sqrt(xs1**2+ys1**2)
print(z1)
plt.imshow(z1, cmap=plt.cm.gray)
plt.colorbar()
"""
Explanation: 上面,我们只使用了很“疏”的点,接下来,我们尝试使用很密集的点,这样有利于我们可视化sqrt(x^2+y^2)这个函数。
End of explanation
"""
import numpy as np
xarr = np.array([1.1, 1.2, 1.3, 1.4, 1.5])
yarr = np.array([2.1, 2.2, 2.3, 2.4, 2.5])
cond = np.array([True, False, True, True, False])
"""
Explanation: 将条件逻辑表述为数组运算
Expressing conditional logic as array operations
numpy.where 函数是三元表达式 x if condition else y 的矢量化版本。假设我们有一个boolean 数组和两个值数组。
End of explanation
"""
result = [(x if c else y)
for x, y, c in zip(xarr, yarr, cond)]
print(result)
"""
Explanation: 假设我们想要根据 cond 中的值来决定我们是选取 xarr 还是 yarr 的值。当 cond 中的值为 True 时,我们选取 xarr 中的值,否则选用 yarr 中的数值。
python中列表推导式的写法如下所示:
End of explanation
"""
result_where = np.where(cond, xarr, yarr)
print(result_where)
"""
Explanation: It has multiple problems here. First, it will not be fast for large arrages (because all the work is being done in interpreted python code,即纯python处理);second, it will not work with multidimensional array,即无法处理多维数组。
如果我们使用 np.where,we can wirte this code very concisely:
End of explanation
"""
from numpy.random import randn
import numpy as np
arr_a = randn(10)
print(arr_a)
arr_b = np.where(arr_a <0, -2, 2)
print(arr_a)
print(arr_b)
"""
Explanation: np.where的第二个和第三个参数不必是数组,它们可以是标量。在数据分析工作中,where 通常用于根据另一个数组而产生一个新的数组。假设有一个由随机数据组成的矩阵,我们想将所有正的值替换为2,所有负值改为-2。那么我们可以写为:
End of explanation
"""
arr_c = np.where(arr_a < 0, -3, arr_a)
print(arr_c)
"""
Explanation: 如果我们只需要把负的值改为 -3, 那么我们可以用
End of explanation
"""
# BUG FIX: the original used boolean subtraction (cond1 - cond2) and unary
# '-' for negation -- both raise TypeError on modern NumPy for bool arrays.
# The intended expression combines the masks with '&' and complements with '~'.
result = 1*(cond1 & ~cond2) + 2*(cond2 & ~cond1) + 3*(~(cond1 | cond2))
# NOTE: this arithmetic-on-booleans style is not really recommended; the
# author removed this discussion in the 2017 (2nd) edition of the book.
"""
Explanation: Highlight: 我们可以使用where表现更加复杂的逻辑。想象这样一个例子:有两个boolean array,分别叫做cond1和conda2,希望使用四种不同的布尔值组合实现不同的赋值操作.
如果我们不用where,那么这个pseudo code 的逻辑大概如下
虽然不是那么容易看出来,我们可以使用 where 的嵌套来实现上述的pseudocode逻辑
np.where(conda1 & conda2, 0,
np.where(conda1, 1,
np.where(conda2, 2, 3)))
在这个特殊的例子中,我们还可以利用“布尔值在计算过程中被当作0或者1处理”这个事实,将上述result的结果改写成
End of explanation
"""
import numpy as np
from numpy.random import randn
x1 = randn(10)
y1 = randn(10)
cond1 = np.where(x1<0, True, False)
cond2 = np.where(y1>0, True, False)
result=np.where(cond1 & cond2, 0,
np.where(cond1, 1,
np.where(cond2, 2, 3)))
print(result)
"""
Explanation: 现在我们来应用下上面的嵌套np.where
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
arr = np.random.randn(5,4)
print(arr)
plt.imshow(arr, cmap=plt.cm.gray)
plt.colorbar()
arr[0,2]
arr[0,3]
"""
Explanation: 数学和统计方法 Mathematical and Statical Methods
我们可以使用数组上的一套数学函数对整个数组或者数组的某个轴向上的数据进行统计计算。You can use aggregations (often called reductions) like 'sum', 'mean', and 'std' either by calling the array instance method or using the top-level Numpy function.
End of explanation
"""
arr.mean() #这里的使用方法就是作为 array instance method
np.mean(arr) # 这里的使用方法就是 top-level Numpy function
arr.sum()
"""
Explanation: 上面代码中,我产生了一些 normally distributed random data,并且用imshow function 把这个二维数组给画了出来。我们可以使用 aggregate statistics 做一些计算. (其实我在前面已经用到过了这些 array 实例方法。
End of explanation
"""
arr.mean(axis=1) # compute mean across the columns
arr.mean(axis = 0 ) # compute mean down the rows
#如果对axis = 0 和 axis = 1 两个结果的列数有不解,
# 可以回顾一下前面的二维数组的索引方式
# 特别是那个 NumPy 数组元素索引的图,那个图上描述了 axis1 和 axis0 的相对朝向。
"""
Explanation: mean 和 sum 这类的函数可以接受一个 axis 参数 (用于计算该轴向上的统计值),最终结果是一个相对于原数组少了一维的数组:
End of explanation
"""
arr1 = np.array([0,1,2,3,4,5,6,7])
arr1.cumsum()
"""
Explanation: 其他如 ‘cumsum’, ‘cumprod’ 这类函数方法并不聚合,而是产生一个由中间结果组成的数组:
English: Other methods like cumsum and cumprod donot aggregate, instead producing an array of the intermediate results:
End of explanation
"""
arr2 = np.array([[0,1,2],[3,4,5],[6,7,8]])
arr2.cumsum(0)
"""
Explanation: In multidimensional arrays, accumulation functions like cumsum return an array of the same size, but with the partial aggregates computed along the indicated axis according to each lower dimensional slice:
End of explanation
"""
import numpy as np
from numpy.random import randn
arr = randn(100)
print(arr)
(arr>0).sum() # 正值的个数
"""
Explanation: 用于Boolean数组的方法
在上述方法中,布尔值会被强制转换为 1 (True) 和 0 (False)。因此,sum 经常被用来对Boolean数组中的True值计算:
End of explanation
"""
bools = np.array([False, False, True, False])
bools.any() # any of them if True, then the return result is True
bools.all() # all of them should be True, otherwise the return result is False
"""
Explanation: 另外还有两个方法 any 和 all,它们对 Boolean array 很有用。any用于测试数组中是否存在一个或多个True,而all则检查数组中所有值是否都是True。
End of explanation
"""
import numpy as np
from numpy.random import randn
arr_a = randn(8)
print(arr_a)
arr_a.sort() # 注意,它将直接改变数组本身
print(arr_a)
"""
Explanation: 排序 Sorting
跟Python内置的列表一样,NumPy 数组也可以通过 sort 方法就地排序
End of explanation
"""
import numpy as np
arr_b = randn(4,5)
print(arr_b)
arr_c = arr_b.copy()
print('\n')
print(arr_c)
arr_b.sort(1) # 这里我们沿着 axis = 1 方向进行排序,我们发现每个一位数组中的元素都被排序了
print(arr_b)
arr_c[2].sort() #这里我们只选择了编号为2的那个一维数组进行排序
print(arr_c)
"""
Explanation: 对于多维数组,只要我们给定确定的轴编号,它就会沿着特定的轴进行排序。我们这里拿一个二维数组举例
End of explanation
"""
np.sort(arr_c)
print(arr_c, '\n')
print(np.sort(arr_c))
"""
Explanation: The top-level method 'np.sort' returns a sorted copy of an array instead of modifying the array in-place. 这个需要我们区分 np.sort 和数组实例 sort 的地方。
End of explanation
"""
short_arr = randn(10)
short_arr.sort()
print(short_arr, '\n', len(short_arr))
# BUG FIX: the original indexed with len(large_arr), a name that is not
# defined anywhere at this point (NameError); the intent is clearly the
# length of short_arr itself.
short_arr[int(0.1*len(short_arr))]  # value at the 0.1 quantile position
short_arr[int(0*len(short_arr))]    # value at the 0 quantile (the minimum)
"""
Explanation: 数组 sort 的应用之一,就是确定数组的分位数(quantile)。
A quick-and-dirty way to compute the quantiles of an array is to sort it, and select the value at a particular rank.
End of explanation
"""
large = randn(2000)
large.sort()
large[int(0.5*len(large))]
"""
Explanation: 上面我们只是使用了很小的数组,我们一眼就可以看出各分位数上的数值;当数组变得很大时候,才能凸显出 sort 的便捷。例如:
End of explanation
"""
import numpy as np
names = np.array(['Bob', 'Joe', 'Will', 'Bob', 'Will', 'Joe', 'Joe'])
np.unique(names)
ints = np.array([3,3,41,4424,523,523,22,22,43]
)
np.unique(ints)
"""
Explanation: 关于 NumPy 排序方法以及诸如间接排序之类的高级技术,我们在第12章还会详细的讨论,在 Pandas 中也有一些特别的排数技术。
Unique and Other Set Logic 唯一化以及其他集合逻辑
NumPy 提供了一些针对一维ndarray的基本集合运算。其中可能最常用的是 np.unique,它用于找出数组中的唯一值(也就是说这个值在数组中只有一个)并返回已排序的结果。
End of explanation
"""
sorted(set(names))
sorted(set(ints))
"""
Explanation: 我们可以拿着与 np.unique 等价的纯python代码来比较一下(Contrast no.unique with the pure Python alternative:)
End of explanation
"""
values = np.array([6,623,43,22,3])
np.in1d(values,[6,43,22])
"""
Explanation: Anotehr function, np.in1d, tests membership of the values in one array in another, returning a boolean array.
另一个函数np.in1d用于测试一个数组的值在另一个数组中的成员资格,返回一个Boolean array
End of explanation
"""
np.unique(values)
np.intersect1d([3,6],[3,22,43])
np.union1d([3,6],[3,22,43])
np.in1d([3,6],[3,22,43])
np.in1d([3,6],[3,6,22])
np.setdiff1d([3,22,6],[6])
np.setxor1d([3,22,6],[6])
"""
Explanation: 这里给出一些 NumPy 中的基本集合函数(set function)
Array set operations
|函数|说明|
|------|-----|
|unique(x)|计算x中的唯一元素,并返回有序结果|
|intersect1d(x,y)|计算x和y的公共元素,并且返回有序结果|
|union1d(x,y)|计算x和y的并集,并返回有序结果|
|in1d(x,y)|得到一个表示“x的元素是否包含于y”的布尔型数组|
|setdiff1d(x,y)|集合的差,即元素在x中且不在y中|
|setxor1d(x,y)|集合的对称差,即存在于一个数组中但不同时存在于两个数组中的元素,相当于是异或|
End of explanation
"""
import numpy as np
arr_c = np.arange(10)
np.save('./chapter04/some_array',arr_c)
"""
Explanation: File Input and Output with Arrays 用于数组的文件输入输出
NumPy 可以用来读写磁盘中的文本数据和二进制数据。在这个章节中,我们将只讨论 NumPy 内建的二进制格式,这主要是因为大部分python用户更喜欢用pandas和其他工具来读取文本和表格数据,这在之后的章节中会进行讨论
将数组以二进制格式保存到磁盘
np.save 和 np.load 是读写磁盘数组数据的两个主要函数。默认情况下,数组是以未压缩的原始二进制格式保存在扩展名为 .npy 的文件中的。
End of explanation
"""
np.load('./chapter04/some_array.npy') # 注意,需要指明文件后缀名。
"""
Explanation: 如果文件路径末尾没有扩展名 .npy,那么这个扩展名会被自动补全。然后就可以通过 np.load 读取磁盘上的数组:
End of explanation
"""
np.savez('./chapter04/array_archive.npz', a = arr_c, b = arr_c)
np.savez_compressed('./chapter04/array_compressed.npz', a1 = arr_c,
b1 = arr_c)
"""
Explanation: 通过 np.savez 可以将多个数组保存到一个uncompressed npz文件中(注意原书和中文翻译的第一版都把这个npz说成了是压缩文件,这个是错误的,但是原作者第二版,即利用python 3的版本已经更正了,我也查阅了 NumPy 的文档,np.savez保存的并不是压缩文件,如果要压缩文件,可以使用 np.savez_compressed),将数组以关键字参数的形式传入即可:
End of explanation
"""
arch = np.load('./chapter04/array_archive.npz')
arch['b']
arch = np.load('./chapter04/array_compressed.npz')
arch['b1']
"""
Explanation: When loading an .npz file, we get back a dict-like object (我们得到的是一个类似字典的对象) that loads the individual arrays lazily (该对象会对各个数组进行延迟加载)
End of explanation
"""
!cat ./chapter04/array_ex.txt #for windows system, use !type
"""
Explanation: 存取文本文件
从文件加载文本是个很标准的python任务,不过python的文件读写函数很容易另初学者搞糊涂,因此这里我们主要介绍 pandas 中的 read_csv 和 read_table 函数。有时,我们需要用到 np.loadtxt 或者 更为专门化的 np.genfromtxt 将数据记载到普通的 NumPy 数组中。
这些函数都有许多选项可供使用:指定各种分隔符、针对特定列的转换器函数、需要跳过的行数等。这里,以一个简单的逗号分割文件 (CSV) 作为
example:
End of explanation
"""
arr = np.loadtxt('chapter04/array_ex.txt', delimiter = ',')
arr
print(arr)
"""
Explanation: 该文件可以被加载到一个二维数组中,如下所示:
End of explanation
"""
np.savetxt('./chapter04/array_ex-savetxt.txt', arr)
!cat chapter04/array_ex-savetxt.txt
"""
Explanation: np.savetxt 执行的是相反的操作:将数组写到以某种分隔符分开的文本文件中去。 genfromtxt 跟 loadtxt 差不多,只不过它面向的是结构化数组和缺失数据处理。在12章中,我们还会仔细讨论结构化数组的知识。
End of explanation
"""
x = np.array([[1., 2., 3.],[4., 5., 6.]])
y = np.array([[6.,23.,], [-1, 7], [8, 9]])
x
y
x.dot(y)
"""
Explanation: Linear Algebra
Linear algebra, like matrix multiplication, decompisitions, determinants, and other square matrix math, is an important part of any array library. Unlike MATLAB, multiplying two two-dimensional arrays with * is an element-wise product instead a matrix dot product. Thus, there is a function 'dot', both an array method and a function in the numpy namespace, for matrix multiplication:
End of explanation
"""
import numpy as np
np.dot(x,y)
"""
Explanation: x.dot(y) is equivalent to np.dot(x,y)
End of explanation
"""
np.dot(x, np.ones(3))
import numpy as np
x1 = np.array([[2,2],[3,3]])
y1 = np.array([[1,1],[1,1]])
dotvalue1=x1.dot(y1)
dotvalue2=np.dot(x1,y1)
print('dotvalue1 = \n', dotvalue1, '\n' 'dotvalue2 = \n', dotvalue2)
"""
Explanation: A matrix product between a 2D array and a suitably sized 1D array result in a 1D array:
End of explanation
"""
from numpy.linalg import inv, qr
from numpy.random import randn
X = randn(5,5)
mat = X.T.dot(X)
inv(mat)
mat.dot(inv(mat))
q, r = qr(mat)
# QR decomposition is decomposition of a matrix A into a product
# A = Q R
# where Q is an orthogonal matrix, and R is upper triangular matrix
q
q.dot(q.T) # we can see that the result is an unit matrix
r
"""
Explanation: numpy.linalg 中有一组标准的矩阵分解运算以及诸如求逆行列式之类的东西。它们跟 matlab 和 R 等语言所使用的是相同的行业标准级 Fortran 库,如 BLAS、LAPACK、Intel MKL (可能有,这个取决于所使用的 NumPy 版本)等:
End of explanation
"""
import numpy as np
from numpy.random import randn
arr_d = np.arange(10)
arr_e= randn(16).reshape(4,4)
print(arr_d, '\n', arr_e)
np.diag(arr_d) # 对于1D array,它将转化为一个对角化的矩阵
np.diag(arr_e) #当diag作用在一个2D以上数组时,则返回对角线上的元素。
"""
Explanation: 这里,我对常用 numpy.linalg 函数进行一些案例对说明,原书利用了一个表格,但是我自己为了看这本书也重复写了几个表格了,记忆情况并不佳,可能还是一个函数一个例子对这种方法更加容易让人记忆深刻一些。(2017/12/25)
|函数|说明|
|---|---|
|diag|以一维数组的形式返回方阵的对角线(或非对角线)元素|
End of explanation
"""
np.trace(arr_e)
np.trace(np.diag(arr_d))
arr_f = np.arange(64).reshape(4,4,4)
print(arr_f
)
np.trace(arr_f[1])
#可以看到对于多维数组,我们还可以对其中低维度的求trace
"""
Explanation: |函数|说明|
|---|---|
|dot|matrix multiplication, 矩阵乘法|
这已经在前面举过例子,这里略了。
|函数|说明|
|---|---|
|trace|计算对角线元素的和|
End of explanation
"""
from numpy.linalg import det
det(arr_e)
"""
Explanation: |函数|说明|
|---|---|
|det|计算矩阵的行列式|
End of explanation
"""
import numpy as np
from numpy.linalg import eig
eig(arr_e)
# computes the eigenvalues and eigenvectors of a square matrix
#关于这个函数的数学含义,请参考线性代数相关的书籍
"""
Explanation: |函数|说明|
|---|---|
|eig|计算方阵的本征值和本征向量|
End of explanation
"""
from numpy.linalg import inv
# if 'a' is a matrix object,
# the return value is a matrix as well
a = np.array([[1., 2.], [3., 4.]])
ainv = inv(a)
print(a, '\n', ainv)
# inverses of several matrices can be computed at once:
b = np.array([
[
[1.,2.],[3., 4.]
],
[
[1., 3.],[3., 5.]
]
])
binv = inv(b)
binv
"""
Explanation: |函数|说明|
|---|---|
|inv|计算方阵的逆|
End of explanation
"""
from numpy.linalg import pinv
from numpy.random import randn
c = randn(9,6)
bpinv = pinv(c)
bpinv
"""
Explanation: |函数|说明|
|---|---|
|pinv|计算矩阵的Moore-Penrose伪逆|
Compute the Moore-Penrose pseudo-inverse of a matrix: The pseudo-inverse of a matrix $A^+$ is defined as "the matrix that 'solves' [the least-squares problem] Ax = b," i.e., if $\bar{x}$ is said solution, then $A^+$ is that matrix such that $\bar{x}$ = $A^+$b
For more information, please refere to linear algebra books
End of explanation
"""
import numpy as np
from numpy.random import randn
a = randn(9,6) + 1j*randn(9,6)
a
# Reconstruction based on full SVD
# factors the matrix a as u * np.diag(s) * V,
# where u and v are unitary and s is a 1D array of a's
# singular values
U, s, V = np.linalg.svd(a, full_matrices = True)
U.shape, s.shape, V.shape
s
"""
Explanation: |函数|说明|
|---|---|
|qr|copute the QR decompisition|
上面提过了,此处略
|函数|说明|
|---|---|
|svd|计算奇异值分解, compute the singular value decomposition SVD|
End of explanation
"""
# Solve the systems of equatons 3* x0 + x1 = 9 and x0 + 2*x1 =8
from numpy.linalg import solve
a = np.array([[3,1],[1,2]])
b = np.array([9,8])
x = solve(a,b)
a
b
x
# check that the solution is correct:
np.allclose(np.dot(a, x), b)
"""
Explanation: |函数|说明|
|---|---|
|solve|计算方程组 Ax = b, 其中 A 为一个方阵|
Note:The solutions are computed using LAPACK routine_gesv. 'a' must be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent; if either is not true, use lstsq for the least-squares best 'solutions' of the system/equation.
End of explanation
"""
x = np.array([0,1,2,3])
y = np.array([-1, 0.2, 0.9, 2.1])
"""
Explanation: |函数|说明|
|---|---|
|lstsq|计算方程组 $ax = b$ 的最小二乘解|
numpy.linalg.lstsq(a, b, rcond=-1)
Return the equation $a$ $x$ = $b$ by computing a vector $x$ that minimizes the Eouliden 2-norm $ ||b - ax ||^2$. The equation may be under-, over-, or well-determined (i.e. the numer of linearly independent rows of $a$ can be less less, greater, or less than its number of linearly independent columns). If $a$ is square and of full rank, then $x$ (but for round-off error) is the "exact" solution of the equation. (reivsed on the content from scipy.org)
Fit a line, $y = mx + c$, through some noisy data-points:
End of explanation
"""
A = np.vstack([x, np.ones(len(x))]).T
# np.vstack 可以按顺序把array堆叠在一起
# 此处是把 x 和 np.ones(4) 堆在了一起
A #即 A = [[x, 1]]
from numpy.linalg import lstsq
m, c = lstsq(A, y)[0]
print(m, c)
# 我们把数据和拟合的线可以画出来
####basic settings started
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
mpl.rcParams['figure.facecolor'] = '1'
#if choose the grey backgroud, use 0.75
mpl.rcParams['figure.figsize'] = [6.4,4.8]
mpl.rcParams['lines.linewidth'] = 1.5
mpl.rcParams['legend.fancybox'] = True
#####basic settings finished
%matplotlib inline
# plot inline jupyter
import matplotlib.pyplot as plt
# plot orginal data (i.e. four points)
plt.plot(x, y, 'o', label = 'Original data', ms =14)
# plot the fitted line using red line style and linewidth = 2
plt.plot(x, m*x + c, 'r', lw=2, label = 'Fitted line')
# plot the legend
plt.legend()
# plot grid
plt.grid()
plt.show()
#因为上面用到了 numpy.stack,那么我就顺便再举一个例子来说明 vstack 的用法
# 与 vstack 相反的操作是 vsplit
import numpy as np
a = np.array([1,2,3])
b = np.array([4,5,6])
ab = np.vstack((a, b))
m, n= np.vsplit(ab, 2) # 把ab分成2个,分别存在在m和n中
print(ab)
print(m)
"""
Explanation: 通过查看上面x和y的点,我们可以发现这个曲线的大概斜率在1左右,而在y轴上的cut off在-1左右。
我们可以重新写一下上面这个线性方程:$y$ = A p, 此处 A = [ [x, 1] ] 并且 p = [[m], [c]]。现在我们使用 lstsq 去解 p
End of explanation
"""
import numpy as np
samples = np.random.normal(size=(4,4))
samples
"""
Explanation: 随机数生成 Pseudorandom Number Generation
numpy.random 模块对 Python 内置的 random 进行了补充,增加了一些高效生成随机样本的函数。例如,我们可以用 normal 来得到一个标准正态分布的 4 * 4 样本数组:
End of explanation
"""
from random import normalvariate
N = 1000000
%timeit samples = [normalvariate(0,1) for _ in range(N)]
#中文翻译版本中这行代码是错的,翻译者写成了 xrange
%timeit np.random.normal(size=N)
"""
Explanation: 与此对比地,在python对内置random函数中,一次只能生成一个样本值。下面我们就来对比下这两种方法对区别,我们将会看到 numpy中对模块有更优越对效率:
End of explanation
"""
####basic settings started
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
mpl.rcParams['figure.facecolor'] = '1'
#if choose the grey backgroud, use 0.75
mpl.rcParams['figure.figsize'] = [6.4,4.8]
mpl.rcParams['lines.linewidth'] = 1.5
mpl.rcParams['legend.fancybox'] = True
#####basic settings finished
%matplotlib inline
# plot inline jupyter
import matplotlib.pyplot as plt
import numpy as np
# Simulate a 1000-step random walk: each step is +1 or -1 with equal
# probability (np.random.randint(0, 2) yields 0 or 1).
position = 0  # starting position
walk = []
steps = 1000
for _ in range(steps):
    position += 1 if np.random.randint(0, 2) else -1
    walk.append(position)

# plot the trajectory
plt.plot(walk[:1000])
"""
Explanation: 下表列出了 numpy.random 中的部分函数。在下一节中,我门将给出一些利用这些函数一次性生成大量样本值的案例。
|函数|说明|
|---|---|
|seed|确定随机数生成器的种子|
|permutation|返回一个序列的随机排列或返回一个随机排列的范围|
|shuffle|对一个序列就地随机排列|
|rand|产生均匀分布的样本值|
|randint|从给定的上下限范围内随机选取整数|
|randn|产生正态分布(平均值为0,标准差为1)的样本值,类似于matlab接口|
|binomial|产生二项分布的样本值|
|normal|产生正态(高斯)分布的样本值|
|beta|产生Beta分布的样本值|
|chisquare|产生卡方分布的样本值|
|gamma|产生Gamma分布的样本值|
|uniform|产生在[0,1)中均匀分布的样本值|
范例:随机漫步 random walks
随机漫步是说明数组操作最好的案例之一。现在,我们来考虑一个简单的随机漫步:我们从0开始,并且以1或者-1作为step width,1和-1出现的概率是均等的。然后我们走1000步,我们可以看看我们会走出什么样的轨迹
End of explanation
"""
import random
position = 0  # initial position of the walker
walk = []
steps = 1000
for _ in range(steps):
    # stdlib random.randint(0,1) is inclusive of both ends: draws 0 or 1
    stepwidth = 1 if random.randint(0,1) else -1
    position += stepwidth
    walk.append(position)
#print(walk)
# plot the trajectory
plt.plot(walk[:1000])
"""
Explanation: 注意我上面的代码跟原书上的区别,主要在于我并不是python自身的random standard library。我使用的是numpy.random,这两个是有区别的,这主要在于
numpy.random.randint(a, b),返回的值是a ~ (b-1)之间的整数值(包括a 和 b-1);
而python自带的random.randint(a,b) 返回的值是 a ~ b之间的整数值(包括a和b)
End of explanation
"""
nsteps = 1000
import numpy as np
# Draw all 1000 coin flips at once (0 or 1), map them to -1/+1 steps;
# the walk is then just the cumulative sum of the steps.
draws = np.random.randint(0, 2, size = nsteps)
steps = np.where(draws > 0, 1, -1)
walk = steps.cumsum()
walk.min()
walk.max()  # only this last expression is displayed by the notebook
"""
Explanation: 上面的walk数值,其实就是随机数的累计和。不过上面的方式中,我门都是走一步然后产生一个随机数,其实我们可以用numpy.random.randint一次性地产生N个随机数,这里以N=1000为例
End of explanation
"""
# First crossing time: index of the first step where |position| reaches 10.
(np.abs(walk) >= 10).argmax()
#Note that using argmax here is not always efficient because
#it always makes a full scan of the array. In this special case,
#once a True is observed we know it to be the maximum value.
"""
Explanation: A more complicated statics is the 'first crossing time', the step at which the random walk reaches a particular value. Here we might want to know how long it look the random walk to get at least 10 steps aways from the origin 0 in either direction. np.abs(walk) >= 10 gives us a boolean array indicating where the walk has reached or exceeded 10, but we want the index of the first 10 or -10.
End of explanation
"""
import numpy as np
nwalks = 5000
nsteps = 1000
draws = np.random.randint(0, 2, size=(nwalks, nsteps)) # 0 or 1
steps = np.where(draws > 0, 1, -1)
# cumsum along axis 1: each row becomes one independent 1000-step walk
walks = steps.cumsum(1)
walks
walks.max()
walks.min()  # only the last expression is displayed
"""
Explanation: 一次模拟多个随机漫步 simulating many random walks at once
如果希望模拟多个随机漫步过程,只需要对上面的代码做一点微调。我们只需要给numpy.random 传入一个二元元组即可产生一个二维数组,然后我们就能一次性计算5000个随机漫步过程(一行一个)的累计和了。
End of explanation
"""
# Boolean per row: did this walk ever reach +30 or -30 at any step?
hits30 = (np.abs(walks) >= 30).any(1)
hits30
hits30.sum() # number of walks that reached 30 or -30
"""
Explanation: 得到这些数据后,我们可以来计算出30或者-30的最小穿越时间。这里得要稍微动一下脑子,因为不是5000个过程都到达了30。我们可以用any方法来对此进行检查
End of explanation
"""
# For the walks that hit +/-30, argmax over axis 1 finds the first step
# index where |position| >= 30 (the first True); then average those times.
crossing_times= (np.abs(walks[hits30]) >= 30).argmax(1)
crossing_times.mean()
"""
Explanation: 然后我们再利用这个boolean array选出哪些穿越了30(绝对值)的随机漫步(行),并调用argmax在轴1上获取穿越时间:
End of explanation
"""
# Same simulation, but with normally distributed steps (mean 0, std 0.25)
steps = np.random.normal(loc=0, scale=0.25,
                         size=(nwalks,nsteps))
steps
"""
Explanation: 这里请尝试其他分布方式得到漫步数据。只需要使用不同的随机数生成函数即可。例如,normal 用于生成指定均值和标准差的正态分布数据
End of explanation
"""
#python list
x = [1,2,3,4]
y = [5,6,7,8]
x*2   # list repetition: [1,2,3,4,1,2,3,4]
x+10  # raises TypeError: a scalar cannot be added to a list
x+y   # list concatenation
#numpy arrays
import numpy as np
ax = np.array([1,2,3,4])
ay = np.array([5,6,7,8])
ax*2   # elementwise scaling
ax+10  # broadcasting: 10 added to every element
ax+ay  # elementwise sum
"""
Explanation: Appendix for chapter04-note
date: 2018 Feb.
I add some note for array operations, the reference book is pthon cookbook by David Beazley et al
3.9 处理大型数组的计算 in "python cookbook"
我们需要对大型数据比如数组和网格(grid)进行计算。在进行大型数据计算对时候,一定要善于用numpy,而不是仅仅用python自身的列表计算。numpy有着更高的计算效率。下面我们用例子来说明,列表和NumPy数组的区别:
End of explanation
"""
def f(x):
    """Evaluate the polynomial 3*x**2 + 2*x + 7 (elementwise for arrays)."""
    quad_term = 3 * x**2
    return quad_term + 2*x + 7
f(ax)  # evaluate the polynomial elementwise on the whole array ax
"""
Explanation: 从上面可以看出,numpy数组操作时候是对被作用对所有元素的,这一事实使得数组计算都变得简单和快速。比如我们可以快速地计算多项式:
End of explanation
"""
# NumPy "universal functions" (ufuncs) apply elementwise to the whole array
np.sqrt(ax)
np.cos(ax)
"""
Explanation: NumPy提供了一些通用函数集合,他们也能对数组进行直接对操作。这些通用函数可以作为math模块中所对应函数对替代。示例如下:
End of explanation
"""
# Allocate a 10000 x 10000 array of float64 zeros (1e8 * 8 bytes = ~800 MB)
grid = np.zeros(shape=(10000,10000), dtype=float)
grid
"""
Explanation: 使用NumPy中的通用函数,其效率要比对数组进行迭代然后使用math模块中的函数每次只处理一个元素快上数倍。因此,只要有可能就应该直接使用这些通用函数。
在底层,NumPy 数组的内存分配方式和C和Fortran是一样的。他们在大块的连续内存中存储。正因如此,NumPy才能创建比通常Python列表大许多的数组。例如,如果想创建10000 * 10000的二维浮点数组,这对numpy而言是很轻松的事情:
End of explanation
"""
grid+10           # broadcasting: adds 10 to all 1e8 elements
np.sin(grid+10)   # ufuncs apply elementwise to the large array too
"""
Explanation: 所有的通用操作仍然可以同时施加于所有的元素之上:
End of explanation
"""
import numpy as np
x=list(range(1,5))
y=list(range(5,9))
z=list(range(9,13))
a = np.array(x)
b = np.array(y)
c = np.array(z)
# concatenate joins along an existing axis; stack adds a new axis,
# so array2 is a 3x4 2-D array with a, b, c as its rows
array1 = np.concatenate((a, b, c), axis=0)
array2 = np.stack((a, b, c), axis=0)
array1
array2
#select row 1
array2[1]
#select column 1
array2[:,1]
# 2x2 sub-block: rows 1-2, columns 1-2
array2[1:3,1:3]
# in-place add on the slice mutates array2 (slices are views, not copies)
array2[1:3,1:3] += 10
array2
#broadcast a row vector across an operation on all rows
array2+[100,101,102,103]
array2
#conditional assignment on an array
np.where(a < 10, a, 10) # a<10 is the condition: where true take a, else 10 (np.where was introduced earlier in this chapter)
"""
Explanation: 关于NumPy,一个特别值得提起的方面就是NumPy扩展了python列表的索引功能——尤其是针对多维数组时更是如此。现在我们来构建一个简单的二维数组然后做一些简单的experiment
End of explanation
"""
import numpy as np
m = np.matrix([[1,-2,3],[0,4,5],[7,8,-9]])
m
# Return the transpose
m.T
# Return the inverse
m.I
# create a column vector and matrix-multiply
v = np.matrix([[2],[3],[4]])
v
m*v  # with np.matrix, * is true matrix multiplication, not elementwise
"""
Explanation: 3.10 矩阵和线性代数的计算 in "python cookbook"
3.10.1 Question
如何利用python来进行矩阵乘法,求行列式,解决线性方程等等
3.10.2 解决方案
NumPy 中有个 matrix 对象可以用来处理这种情况。matrix 对象和上述3.9中描述的数组对象有些类似,但是在计算时遵循线性代数规则。下面的例子展示了几个重要的特性:
End of explanation
"""
import numpy as np
import numpy.linalg as nlg
#Determinant
nlg.det(m)
#Eigenvalues
nlg.eigvals(m)
#Solve for x in mx = v
x = nlg.solve(m,v)
x
m*x  # should reproduce v (up to floating-point error)
v
"""
Explanation: 更多的操作可以在numpy.linalg子模块中找到,例如:
End of explanation
"""
|
VenkatRepaka/deep-learning | intro-to-rnns/Anna_KaRNNa_Exercises.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, we'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
# Read the whole book as one string, then build character <-> integer mappings
with open('anna.txt', 'r') as f:
    text=f.read()
vocab = set(text)                                   # unique characters in the text
vocab_to_int = {c: i for i, c in enumerate(vocab)}  # char -> int code
int_to_vocab = dict(enumerate(vocab))               # int code -> char
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
"""
Explanation: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
End of explanation
"""
text[:100]
"""
Explanation: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
End of explanation
"""
encoded[:100]
"""
Explanation: And we can see the characters encoded as integers.
End of explanation
"""
len(vocab)
"""
Explanation: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
End of explanation
"""
def get_batches(arr, n_seqs, n_steps):
    '''Create a generator that returns batches of size
       n_seqs x n_steps from arr.

       Arguments
       ---------
       arr: Array you want to make batches from
       n_seqs: Batch size, the number of sequences per batch
       n_steps: Number of sequence steps per batch

       Yields
       ------
       x, y: int arrays of identical shape (n_seqs, n_steps); y is x
             shifted left by one character, with the first character of
             each window wrapped around as the last target.
    '''
    # Get the number of characters per batch and number of batches we can make
    characters_per_batch = n_steps * n_seqs
    n_batches = len(arr) // characters_per_batch

    # Keep only enough characters to make full batches
    arr = arr[:n_batches * characters_per_batch]

    # Reshape into n_seqs rows
    arr = arr.reshape((n_seqs, -1))

    for n in range(0, arr.shape[1], n_steps):
        # The features
        x = arr[:, n:n+n_steps]
        # The targets, shifted by one. The original slice
        # arr[:, n+1:n+1+n_steps] ran past the end of arr on the final
        # batch, yielding a y one column narrower than x; instead wrap
        # the window's first character around as the last target so x
        # and y always have identical shapes.
        y = np.zeros_like(x)
        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
        yield x, y
"""
Explanation: Making training mini-batches
Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
We have our text encoded as integers as one long array in encoded. Let's create a function that will give us an iterator for our batches. I like using generator functions to do this. Then we can pass encoded into this function and get our batch generator.
The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the number of batches we can make from some array arr, you divide the length of arr by the batch size. Once you know the number of batches and the batch size, you can get the total number of characters to keep.
After that, we need to split arr into $N$ sequences. You can do this using arr.reshape(size) where size is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (n_seqs below), let's make that the size of the first dimension. For the second dimension, you can use -1 as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$ where $K$ is the number of batches.
Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the array. For each subsequent batch, the window moves over by n_steps. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character. You'll usually see the first input character used as the last target character, so something like this:
python
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
where x is the input batch and y is the target batch.
The way I like to do this window is use range to take steps of size n_steps from $0$ to arr.shape[1], the total number of steps in each sequence. That way, the integers you get from range always point to the start of a batch, and each window is n_steps wide.
Exercise: Write the code for creating batches in the function below. The exercises in this notebook will not be easy. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, type out the solution code yourself.
End of explanation
"""
# Smoke-test the generator: batches of 10 sequences, 50 steps each
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
"""
Explanation: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
End of explanation
"""
def build_inputs(batch_size, num_steps):
    ''' Define placeholders for inputs, targets, and dropout

        Arguments
        ---------
        batch_size: Batch size, number of sequences per batch
        num_steps: Number of sequence steps in a batch

        Returns
        -------
        inputs, targets: int32 placeholders of shape (batch_size, num_steps)
        keep_prob: scalar float32 placeholder for the dropout keep probability
    '''
    # Declare placeholders we'll feed into the graph
    inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
    targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')

    # Keep probability placeholder for drop out layers
    # (a 0-D placeholder: no shape argument makes it a scalar)
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    return inputs, targets, keep_prob
"""
Explanation: If you implemented get_batches correctly, the above output should look something like
```
x
[[55 63 69 22 6 76 45 5 16 35]
[ 5 69 1 5 12 52 6 5 56 52]
[48 29 12 61 35 35 8 64 76 78]
[12 5 24 39 45 29 12 56 5 63]
[ 5 29 6 5 29 78 28 5 78 29]
[ 5 13 6 5 36 69 78 35 52 12]
[63 76 12 5 18 52 1 76 5 58]
[34 5 73 39 6 5 12 52 36 5]
[ 6 5 29 78 12 79 6 61 5 59]
[ 5 78 69 29 24 5 6 52 5 63]]
y
[[63 69 22 6 76 45 5 16 35 35]
[69 1 5 12 52 6 5 56 52 29]
[29 12 61 35 35 8 64 76 78 28]
[ 5 24 39 45 29 12 56 5 63 29]
[29 6 5 29 78 28 5 78 29 45]
[13 6 5 36 69 78 35 52 12 43]
[76 12 5 18 52 1 76 5 58 52]
[ 5 73 39 6 5 12 52 36 5 78]
[ 5 29 78 12 79 6 61 5 59 63]
[78 69 29 24 5 6 52 5 63 76]]
```
although the exact numbers will be different. Check to make sure the data is shifted over one step for `y`.
Building the model
Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
<img src="assets/charRNN.png" width=500px>
Inputs
First off we'll create our input placeholders. As usual we need placeholders for the training data and the targets. We'll also create a placeholder for dropout layers called keep_prob. This will be a scalar, that is a 0-D tensor. To make a scalar, you create a placeholder without giving it a size.
Exercise: Create the input placeholders in the function below.
End of explanation
"""
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    ''' Build the stacked LSTM cell and its zero initial state.

        Arguments
        ---------
        keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
        lstm_size: Size of the hidden layers in the LSTM cells
        num_layers: Number of LSTM layers
        batch_size: Batch size

        Returns
        -------
        cell: a MultiRNNCell of num_layers dropout-wrapped LSTM cells
        initial_state: the cell's all-zeros state for batch_size sequences
    '''
    def build_cell():
        # One fresh cell per layer. The original [drop]*num_layers reused a
        # single cell object for every layer, which makes TF >= 1.2 raise an
        # error (it refuses to reuse one cell across layers).
        lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        # Add dropout to the cell outputs
        return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

    # Stack up multiple LSTM layers, for deep learning
    cell = tf.contrib.rnn.MultiRNNCell([build_cell() for _ in range(num_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)

    return cell, initial_state
"""
Explanation: LSTM Cell
Here we will create the LSTM cell we'll use in the hidden layer. We'll use this cell as a building block for the RNN. So we aren't actually defining the RNN here, just the type of cell we'll use in the hidden layer.
We first create a basic LSTM cell with
python
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
where num_units is the number of units in the hidden layers in the cell. Then we can add dropout by wrapping it with
python
tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
You pass in a cell and it will automatically add dropout to the inputs or outputs. Finally, we can stack up the LSTM cells into layers with tf.contrib.rnn.MultiRNNCell. With this, you pass in a list of cells and it will send the output of one cell into the next cell. For example,
python
tf.contrib.rnn.MultiRNNCell([cell]*num_layers)
This might look a little weird if you know Python well because this will create a list of the same cell object. However, TensorFlow will create different weight matrices for all cell objects. Even though this is actually multiple LSTM cells stacked on each other, you can treat the multiple layers as one cell.
We also need to create an initial cell state of all zeros. This can be done like so
python
initial_state = cell.zero_state(batch_size, tf.float32)
Exercise: Below, implement the build_lstm function to create these LSTM cells and the initial state.
End of explanation
"""
def build_output(lstm_output, in_size, out_size):
    ''' Build a softmax layer, return the softmax output and logits.

        Arguments
        ---------
        lstm_output: List of output tensors from the LSTM layer
        in_size: Size of the input tensor, for example, size of the LSTM cells
        out_size: Size of this softmax layer

        Returns
        -------
        out: softmax probabilities, one row per step per sequence
        logits: pre-softmax scores with the same shape as out
    '''
    # Reshape output so it's a bunch of rows, one row for each step for each sequence.
    # Concatenate lstm_output over axis 1 (the columns)
    seq_output = tf.concat(lstm_output, axis=1)
    # Reshape seq_output to a 2D tensor with lstm_size columns
    x = tf.reshape(seq_output, [-1, in_size])

    # Connect the RNN outputs to a softmax layer. The variable scope keeps
    # these weight names distinct from the ones created inside the LSTM cells.
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal([in_size, out_size], stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))

    # Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
    # of rows of logit outputs, one for each step and sequence.
    # BUG FIX: was `tf.matmul(x, siftmax_w)` -- a typo that raised NameError.
    logits = tf.matmul(x, softmax_w) + softmax_b

    # Use softmax to get the probabilities for predicted characters
    out = tf.nn.softmax(logits, name='predictions')

    return out, logits
"""
Explanation: RNN Output
Here we'll create the output layer. We need to connect the output of the RNN cells to a full connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character, so we want this layer to have size $C$, the number of classes/characters we have in our text.
If our input has batch size $N$, number of steps $M$, and the hidden layer has $L$ hidden units, then the output is a 3D tensor with size $N \times M \times L$. The output of each LSTM cell has size $L$, we have $M$ of them, one for each sequence step, and we have $N$ sequences. So the total size is $N \times M \times L$.
We are using the same fully connected layer, the same weights, for each of the outputs. Then, to make things easier, we should reshape the outputs into a 2D tensor with shape $(M * N) \times L$. That is, one row for each sequence and step, where the values of each row are the output from the LSTM cells. We get the LSTM output as a list, lstm_output. First we need to concatenate this whole list into one array with tf.concat. Then, reshape it (with tf.reshape) to size $(M * N) \times L$.
Once we have the outputs reshaped, we can do the matrix multiplication with the weights. We need to wrap the weight and bias variables in a variable scope with tf.variable_scope(scope_name) because there are weights being created in the LSTM cells. TensorFlow will throw an error if the weights created here have the same names as the weights created in the LSTM cells, which they will be default. To avoid this, we wrap the variables in a variable scope so we can give them unique names.
Exercise: Implement the output layer in the function below.
End of explanation
"""
def build_loss(logits, targets, lstm_size, num_classes):
    ''' Calculate the mean softmax cross-entropy loss from the logits and targets.

        Arguments
        ---------
        logits: Logits from final fully connected layer, shape (N*M, num_classes)
        targets: Integer targets for supervised learning, shape (N, M)
        lstm_size: Number of LSTM hidden units (unused; kept for interface
                   compatibility with existing callers)
        num_classes: Number of classes in targets

        Returns
        -------
        loss: scalar mean cross-entropy over all steps and sequences
    '''
    # One-hot encode targets and reshape to match logits, one row per sequence per step
    # (the original left these assignments empty, which was a SyntaxError)
    y_one_hot = tf.one_hot(targets, num_classes)
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())

    # Softmax cross entropy loss, averaged over every step and sequence
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
    loss = tf.reduce_mean(loss)

    return loss
"""
Explanation: Training loss
Next up is the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First we need to one-hot encode the targets, we're getting them as encoded characters. Then, reshape the one-hot targets so it's a 2D tensor with size $(MN) \times C$ where $C$ is the number of classes/characters we have. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $C$ units. So our logits will also have size $(MN) \times C$.
Then we run the logits and targets through tf.nn.softmax_cross_entropy_with_logits and find the mean to get the loss.
Exercise: Implement the loss calculation in the function below.
End of explanation
"""
def build_optimizer(loss, learning_rate, grad_clip):
    ''' Build the training op: Adam with gradients clipped by global norm.

        Arguments:
        loss: Network loss
        learning_rate: Learning rate for the Adam optimizer
        grad_clip: Global-norm threshold used to clip the gradients,
                   which keeps them from growing without bound
    '''
    # Clip the gradients of all trainable variables, then apply them with Adam.
    trainable = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(tf.gradients(loss, trainable), grad_clip)
    adam = tf.train.AdamOptimizer(learning_rate)
    optimizer = adam.apply_gradients(zip(clipped, trainable))

    return optimizer
"""
Explanation: Optimizer
Here we build the optimizer. Normal RNNs have have issues gradients exploding and disappearing. LSTMs fix the disappearance problem, but the gradients can still grow without bound. To fix this, we can clip the gradients above some threshold. That is, if a gradient is larger than that threshold, we set it to the threshold. This will ensure the gradients never grow overly large. Then we use an AdamOptimizer for the learning step.
End of explanation
"""
class CharRNN:
    """Character-level RNN: input placeholders, stacked LSTM, softmax output,
    loss, and an Adam training op with gradient clipping."""

    def __init__(self, num_classes, batch_size=64, num_steps=50,
                       lstm_size=128, num_layers=2, learning_rate=0.001,
                       grad_clip=5, sampling=False):

        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps

        tf.reset_default_graph()

        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)

        # Build the LSTM cell.
        # BUG FIX: previously passed bare `keep_prob`, which is not defined in
        # this scope (it only worked by accidentally resolving to a module-level
        # global); the dropout placeholder created above is `self.keep_prob`.
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)

        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)

        # Run each sequence step through the RNN with tf.nn.dynamic_rnn
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        self.final_state = state

        # Get softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)

        # Loss and optimizer (with gradient clipping). These were left
        # commented out, but the training loop reads model.loss and
        # model.optimizer, so they must be built here.
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
"""
Explanation: Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use tf.nn.dynamic_rnn. This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as final_state so we can pass it to the first LSTM cell in the the next mini-batch run. For tf.nn.dynamic_rnn, we pass in the cell and initial state we get from build_lstm, as well as our input sequences. Also, we need to one-hot encode the inputs before going into the RNN.
Exercise: Use the functions you've implemented previously and tf.nn.dynamic_rnn to build the network.
End of explanation
"""
# Hyperparameters (see the tuning guidance above for how to adjust these)
batch_size = 10         # Sequences per batch
num_steps = 50          # Number of sequence steps per batch
lstm_size = 128         # Size of hidden layers in LSTMs
num_layers = 2          # Number of LSTM layers
learning_rate = 0.01    # Learning rate
keep_prob = 0.5         # Dropout keep probability
"""
Explanation: Hyperparameters
Here are the hyperparameters for the network.
batch_size - Number of sequences running through the network in one pass.
num_steps - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
lstm_size - The number of units in the hidden layers.
num_layers - Number of hidden LSTM layers to use
learning_rate - Learning rate for training
keep_prob - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to where it originally came from.
Tips and Tricks
Monitoring Validation Loss vs. Training Loss
If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
If your training loss is much lower than validation loss then this means the network might be overfitting. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
If your training/validation loss are about equal then your model is underfitting. Increase the size of your model (either number of layers or the raw number of neurons per layer)
Approximate number of parameters
The two most important parameters that control the model are lstm_size and num_layers. I would advise that you always use num_layers of either 2/3. The lstm_size can be adjusted based on how much data you have. The two important quantities to keep track of here are:
The number of parameters in your model. This is printed when you start training.
The size of your dataset. 1MB file is approximately 1 million characters.
These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make lstm_size larger.
I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
Best models strategy
The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
End of explanation
"""
epochs = 20
# Save every N iterations
save_every_n = 200

model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)

saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/______.ckpt')
    counter = 0
    for e in range(epochs):
        # Train network; start each epoch from the zero LSTM state
        new_state = sess.run(model.initial_state)
        loss = 0
        for x, y in get_batches(encoded, batch_size, num_steps):
            counter += 1
            start = time.time()
            # Feed the batch plus the LSTM state carried over from the
            # previous batch, so state is continuous across the epoch
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: keep_prob,
                    model.initial_state: new_state}
            batch_loss, new_state, _ = sess.run([model.loss,
                                                 model.final_state,
                                                 model.optimizer],
                                                 feed_dict=feed)

            end = time.time()
            print('Epoch: {}/{}... '.format(e+1, epochs),
                  'Training Step: {}... '.format(counter),
                  'Training loss: {:.4f}... '.format(batch_loss),
                  '{:.4f} sec/batch'.format((end-start)))

            # Periodic checkpoint, named i{iteration}_l{lstm_size}.ckpt
            if (counter % save_every_n == 0):
                saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))

    # Final checkpoint after all epochs
    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
"""
Explanation: Time for training
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I save a checkpoint.
Here I'm saving checkpoints with the format
i{iteration number}_l{# hidden layer units}.ckpt
Exercise: Set the hyperparameters above to train the network. Watch the training loss, it should be consistently dropping. Also, I highly advise running this on a GPU.
End of explanation
"""
tf.train.get_checkpoint_state('checkpoints')  # list the saved checkpoints
"""
Explanation: Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one character index from the top_n most likely predictions.

    Arguments
    ---------
    preds: probability array of shape (1, vocab_size) (e.g. a softmax output)
    vocab_size: number of classes to choose among
    top_n: keep only this many of the highest-probability entries

    Returns the sampled integer index.
    """
    # BUG FIX: np.squeeze returns a *view* when possible, so zeroing entries
    # below silently mutated the caller's preds array; copy first.
    p = np.squeeze(preds).copy()
    # Zero out everything except the top_n probabilities, then renormalize
    p[np.argsort(p)[:-top_n]] = 0
    p = p / np.sum(p)
    # Draw one index according to the truncated distribution
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate n_samples characters of text from a trained checkpoint.

    The network is rebuilt with sampling=True (batch and step size of 1),
    primed with the characters of `prime`, then repeatedly fed its own
    previous output character.

    Note: relies on the module-level vocab, vocab_to_int, int_to_vocab and
    CharRNN; the vocab_size parameter is unused (len(vocab) is used instead).
    """
    samples = [c for c in prime]
    # Rebuild the graph for one-character-at-a-time sampling
    model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Prime the LSTM state with the seed text, one character at a time
        for c in prime:
            x = np.zeros((1, 1))
            x[0,0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,  # no dropout at sampling time
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                         feed_dict=feed)

        # First generated character, sampled from the top-N predictions
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])

        # Feed each sampled character back in to get the next one
        for i in range(n_samples):
            x[0,0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction, model.final_state],
                                         feed_dict=feed)

            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])

    return ''.join(samples)
"""
Explanation: Sampling
Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
tf.train.latest_checkpoint('checkpoints')
# Sample 2000 characters from the most recent checkpoint
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
# Samples from earlier checkpoints show how the output improves with training
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Here, pass in the path to a checkpoint and sample from the network.
End of explanation
"""
|
AllenDowney/ModSim | python/soln/examples/orbit_soln.ipynb | gpl-2.0 | # install Pint if necessary
# install Pint if necessary (a dependency of modsim)
try:
    import pint
except ImportError:
    !pip install pint

# download modsim.py from the book's repository if it isn't present locally
from os.path import exists

filename = 'modsim.py'
if not exists(filename):
    from urllib.request import urlretrieve
    url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
    local, _ = urlretrieve(url+filename, filename)
    print('Downloaded ' + local)
# import functions from modsim
from modsim import *
"""
Explanation: Orbiting the Sun
Modeling and Simulation in Python
Copyright 2021 Allen Downey
License: Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
End of explanation
"""
r_0 = 147.09e9  # initial distance m (Earth-Sun distance at perihelion)
v_0 = 30.29e3   # initial velocity m/s (Earth's orbital speed at perihelion)
"""
Explanation: In a previous example, we modeled the interaction between the Earth and the Sun, simulating what would happen if the Earth stopped in its orbit and fell straight into the Sun.
Now let's extend the model to two dimensions and simulate one revolution of the Earth around the Sun, that is, one year.
At perihelion, the distance from the Earth to the Sun is 147.09 million km and its velocity is 30,290 m/s.
End of explanation
"""
# Physical constants, each to about 4 significant digits
G = 6.6743e-11   # gravitational constant N / kg**2 * m**2
m1 = 1.989e30    # mass of the Sun kg
m2 = 5.972e24    # mass of the Earth kg
t_end = 3.154e7  # one year in seconds
"""
Explanation: Here are the other constants we'll need, all with about 4 significant digits.
End of explanation
"""
# Solution
# Initial state: Earth at perihelion on the +x axis, moving in the -y direction
init = State(x=r_0, y=0, vx=0, vy=-v_0)

# Solution
# System bundles the initial state with the simulation end time (one year)
system = System(init=init,
                t_end=t_end)
"""
Explanation: Exercise: Put the initial conditions in a State object with variables x, y, vx, and vy.
Create a System object with variables init and t_end.
End of explanation
"""
# Solution
def universal_gravitation(state, system):
    """Gravitational force of the Sun on the Earth.

    state: State object with position (x, y) and velocity (vx, vy)
    system: System object (unused here; G, m1, m2 are module-level constants)

    returns: force Vector pointing from the Earth toward the Sun
    """
    x, y, vx, vy = state
    pos = Vector(x, y)
    magnitude = G * m1 * m2 / vector_mag(pos)**2
    toward_sun = -vector_hat(pos)
    return magnitude * toward_sun
# Solution
universal_gravitation(init, system)  # expect roughly Vector(-3.66e22, 0) N
"""
Explanation: Exercise: Write a function called universal_gravitation that takes a State and a System and returns the gravitational force of the Sun on the Earth as a Vector.
Test your function with the initial conditions; the result should be a Vector with approximate components:
x -3.66e+22
y 0
End of explanation
"""
# Solution
def slope_func(t, state, system):
    """Return the time derivatives (dx/dt, dy/dt, dvx/dt, dvy/dt)."""
    x, y, vx, vy = state
    # Acceleration of the Earth: F = m*a, so a = F / m2.
    acceleration = universal_gravitation(state, system) / m2
    return vx, vy, acceleration.x, acceleration.y
# Solution
slope_func(0, init, system)
"""
Explanation: Exercise: Write a slope function that takes a timestamp, a State, and a System and computes the derivatives of the state variables.
Test your function with the initial conditions. The result should be a sequence of four values, approximately
0.0, -30290.0, -0.006, 0.0
End of explanation
"""
# Solution
# Integrate the ODE over one year; `details` reports solver status.
results, details = run_solve_ivp(system, slope_func)
details.message
"""
Explanation: Exercise: Use run_solve_ivp to run the simulation.
Save the return values in variables called results and details.
End of explanation
"""
from matplotlib.pyplot import plot

def plot_trajectory(results):
    """Plot the orbit in the x-y plane, with the Sun at the origin."""
    # Positions are stored in meters; dividing by 1e9 gives millions of km.
    x = results.x / 1e9
    y = results.y / 1e9
    make_series(x, y).plot(label='orbit')
    plot(0, 0, 'yo')  # the Sun, at the origin
    decorate(xlabel='x distance (million km)',
             ylabel='y distance (million km)')
plot_trajectory(results)
"""
Explanation: You can use the following function to plot the results.
End of explanation
"""
# Difference between the final and initial state after one year.
error = results.iloc[-1] - system.init
error
# Magnitude of the position error, converted from m to millions of km.
offset = Vector(error.x, error.y)
vector_mag(offset) / 1e9
"""
Explanation: You will probably see that the earth does not end up back where it started, as we expect it to after one year.
The following cells compute the error, which is the distance between the initial and final positions.
End of explanation
"""
# Number of times the solver called the slope function (efficiency metric).
details.nfev
"""
Explanation: The problem is that the algorithm used by run_solve_ivp does not work very well with systems like this.
There are two ways we can improve it.
run_solve_ivp takes a keyword argument, rtol, that specifies the "relative tolerance", which determines the size of the time steps in the simulation. Lower values of rtol require smaller steps, which yield more accurate results.
The default value of rtol is 1e-3.
Exercise: Try running the simulation again with smaller values, like 1e-4 or 1e-5, and see what effect it has on the magnitude of offset.
The other way to improve the results is to use a different algorithm. run_solve_ivp takes a keyword argument, method, that specifies which algorithm it should use. The default is RK45, which is a good, general-purpose algorithm, but not particularly good for this system. One of the other options is RK23, which is usually less accurate than RK45 (with the same step size), but for this system it turns out to be unreasonably good, for reasons I don't entirely understand.
Yet another option is 'DOP853', which is particularly good when rtol is small.
Exercise: Run the simulation with one of these methods and see what effect it has on the results. To get a sense of how efficient the methods are, display details.nfev, which is the number of times run_solve_ivp called the slope function.
End of explanation
"""
# Fix the axis limits to the full extent of the trajectory so the
# animation frame does not rescale between steps.
xlim = results.x.min(), results.x.max()
ylim = results.y.min(), results.y.max()
def draw_func(t, state):
    """Draw one animation frame: Earth's position at time t plus the Sun."""
    x, y, vx, vy = state
    plot(x, y, 'b.')
    plot(0, 0, 'yo')
    # NOTE(review): x and y here are still in meters (no /1e9 conversion),
    # but the labels claim million km -- confirm and fix the units.
    decorate(xlabel='x distance (million km)',
             ylabel='y distance (million km)',
             xlim=xlim,
             ylim=ylim)
# animate(results, draw_func)
"""
Explanation: Animation
You can use the following draw function to animate the results, if you want to see what the orbit looks like (not in real time).
End of explanation
"""
|
BrainIntensive/OnlineBrainIntensive | resources/matplotlib/Examples/formatting_4.ipynb | mit | %load_ext watermark
%watermark -u -v -d -p matplotlib,numpy
"""
Explanation: Sebastian Raschka
back to the matplotlib-gallery at https://github.com/rasbt/matplotlib-gallery
End of explanation
"""
%matplotlib inline
"""
Explanation: <font size="1.5em">More info about the %watermark extension</font>
End of explanation
"""
import matplotlib.pyplot as plt
# List the names of all style sheets bundled with this matplotlib install.
print(plt.style.available)
"""
Explanation: <br>
<br>
Matplotlib Formatting IV: Style Sheets
One of the coolest features added to matlotlib 1.5 is the support for "styles"! The "styles" functionality allows us to create beautiful plots rather painlessly -- a great feature for everyone who though that matplotlib's default layout looks a bit dated!
<br>
<br>
Sections
The styles that are currently included can be listed via print(plt.style.available):
End of explanation
"""
import numpy as np

# Apply the ggplot style globally; it stays in effect for later cells too.
plt.style.use('ggplot')

x = np.arange(10)
for group in range(1, 4):
    plt.plot(x, group * x**2, label='Group %d' % group)
plt.legend(loc='best')
plt.show()
"""
Explanation: Now, there are two ways to apply the styling to our plots. First, we can set the style for our coding environment globally via the plt.style.use function:
End of explanation
"""
# The context manager scopes the style to this block only; plots made
# after the `with` exits revert to the previously active style.
with plt.style.context('fivethirtyeight'):
    for i in range(1, 4):
        plt.plot(x, i * x**2, label='Group %d' % i)
    plt.legend(loc='best')
plt.show()
"""
Explanation: Another way to use styles is via the with context manager, which applies the styling to a specific code block only:
End of explanation
"""
import math

# One subplot per available style, laid out four per row.
n = len(plt.style.available)
num_rows = math.ceil(n/4)

fig = plt.figure(figsize=(15, 15))

for i, s in enumerate(plt.style.available):
    with plt.style.context(s):
        ax = fig.add_subplot(num_rows, 4, i+1)
        # Renamed the inner loop variable: the original reused `i`,
        # shadowing the enumerate index inside the subplot loop.
        for group in range(1, 4):
            ax.plot(x, group * x**2, label='Group %d' % group)
        ax.set_xlabel(s, color='black')
        ax.legend(loc='best')

fig.tight_layout()
plt.show()
"""
Explanation: Finally, here's an overview of how the different styles look like:
End of explanation
"""
|
JannesKlaas/MLiFC | Week 4/Ch. 19 - LSTM for Email classification.ipynb | mit | from sklearn.datasets import fetch_20newsgroups
# Download (or load from cache) the 20-newsgroups training split, shuffled.
twenty_train = fetch_20newsgroups(subset='train', shuffle=True)
"""
Explanation: Ch. 19 - LSTM for Email classification
In the last chapter we already learned about basic recurrent neural networks. In theory, simple RNN's should be able to retain even long term memories. However, in practice, this approach often falls short. This is because of the 'vanishing gradients' problem. Over many timesteps, the network has a hard time keeping up meaningful gradients. See e.g. Learning long-term dependencies with gradient descent is difficult (Bengio, Simard and Frasconi, 1994) for details.
In direct response to the vanishing gradients problem of simple RNN's, the Long Short Term Memory layer was invented. Before we dive into details, let's look at a simple RNN 'unrolled' over time:
You can see that this is the same as the RNN we saw in the previous chapter, just unrolled over time.
The Carry
The central addition of an LSTM over an RNN is the carry. The carry is like a conveyor belt which runs along the RNN layer. At each time step, the carry is fed into the RNN layer. The new carry gets computed in a separate operation from the RNN layer itself from the input, RNN output and old carry.
The Compute Carry can be understood as three parts:
Determine what should be added from input and state:
$$i_t = a(s_t \cdot Ui + in_t \cdot Wi + bi)$$
$$k_t = a(s_t \cdot Uk + in_t \cdot Wk + bk)$$
where $s_t$ is the state at time $t$ (output of the simple rnn layer), $in_t$ is the input at time $t$ and $Ui$, $Wi$ $Uk$, $Wk$ are model parameters (matrices) which will be learned. $a()$ is an activation function.
Determine what should be forgotten from state an input:
$$f_t = a(s_t \cdot Uf) + in_t \cdot Wf + bf)$$
The new carry is the computed as
$$c_{t+1} = c_t * f_t + i_t * k_t$$
While the standard theory claims that the LSTM layer learns what to add and what to forget, in practice nobody knows what really happens inside an LSTM. However, they have been shown to be quite effective at learning long term memory.
Note that LSTMlayers do not need an extra activation function as they already come with a tanh activation function out of the box.
The Data
Without much further ado, let's dive into the task of this chapter. The Newsgroup 20 Dataset is a collection of about 20,000 messages from 20 newsgroups. Usenet Newsgroups where a form of discussion group that where quite popular in the early days of the Internet. They are technically distinct but functionally quite similar to web forums. The newsgroups where usually dedicated to a certain topic, such as cars or apple computers. We can download the newsgroup 20 dataset directly through scikit learn.
End of explanation
"""
# Display the raw text of the second post in the training set.
twenty_train.data[1]
"""
Explanation: The posts in the newsgroup are very similar to emails. (The \n in the text means a line break)
End of explanation
"""
# Map the numeric label of that post back to its newsgroup name.
twenty_train.target_names[twenty_train.target[1]]
"""
Explanation: From the text you might be able to judge that this text is about computer hardware. More specifically it is about Apple computers. You are not expected to have expertise in the discussions around Macs in the 90's so we can also just look at a label:
End of explanation
"""
texts = twenty_train.data # Raw post texts (list of str)
target = twenty_train.target # Numeric class labels, aligned with texts
# Load tools we need for preprocessing
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
"""
Explanation: Preprocessing the data
You already learned that we have to tokenize the text before we can feed it into a neural network. This tokenization process will also remove some of the features of the original text, such as all punctuation or words that are less common.
End of explanation
"""
vocab_size = 20000  # keep only the 20,000 most frequent words
tokenizer = Tokenizer(num_words=vocab_size) # Setup tokenizer
tokenizer.fit_on_texts(texts)  # build the word -> token-id vocabulary
sequences = tokenizer.texts_to_sequences(texts) # texts as lists of token ids
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
"""
Explanation: Remember we have to specify the size of our vocabulary. Words that are less frequent will get removed. In this case we want to retain the 20,000 most common words.
End of explanation
"""
# Create inverse index mapping numbers to words (inverse of word_index)
inv_index = {v: k for k, v in tokenizer.word_index.items()}
# Print the tokenized post back as words, to eyeball what tokenization kept
for w in sequences[1]:
    x = inv_index.get(w)
    print(x,end = ' ')
"""
Explanation: Our text is now converted to sequences of numbers. It makes sense to convert some of those sequences back into text to check what the tokenization did to our text. To this end we create an inverse index that maps numbers to words while the tokenizer maps words to numbers.
End of explanation
"""
import numpy as np

# Sequence-length statistics: mean and (population) standard deviation.
lengths = np.array([len(seq) for seq in sequences])
avg = lengths.mean()
std = lengths.std()
avg, std
"""
Explanation: Measuring text length
In previous chapters, we specified a sequence length and made sure all sequences had the same length. For LSTMs this is not strictly necessary as LSTMs can work with different lengths of sequences. However, it can be a pretty good idea to restrict sequence lengths for the sake of restricting the time needed to train the network and process sequences.
End of explanation
"""
max_length = 100  # cap every sequence at 100 tokens to bound training time
# Longer sequences are truncated and shorter ones zero-padded to max_length.
data = pad_sequences(sequences, maxlen=max_length)
"""
Explanation: You can see, the average text is about 300 words long. However, the standard deviation is quite large which indicates that some texts are much much longer. If some user decided to write an epic novel in the newsgroup it would massively slow down training. So for speed purposes we will restrict sequence length to 100 words. You should try out some different sequence lengths and experiment with processing time and accuracy gains.
End of explanation
"""
import numpy as np
from keras.utils import to_categorical
# Convert integer class ids into one-hot vectors (one column per class).
labels = to_categorical(np.asarray(target))
print('Shape of data:', data.shape)
print('Shape of labels:', labels.shape)
"""
Explanation: Turning labels into One-Hot encodings
Labels can quickly be encoded into one-hot vectors with Keras:
End of explanation
"""
import os
glove_dir = './glove.6B' # This is the folder with the dataset
embeddings_index = {} # Dictionary of word -> embedding vector
# Use a context manager (and explicit encoding) so the file is always
# closed, even if a parse error is raised part-way through the loop.
with open(os.path.join(glove_dir, 'glove.6B.100d.txt'), encoding='utf-8') as f:
    # In the dataset, each line represents a new word embedding:
    # the word comes first, followed by its embedding values.
    for line in f:
        values = line.split()
        word = values[0] # First token is the word itself
        embedding = np.asarray(values[1:], dtype='float32') # Remaining tokens are the vector
        embeddings_index[word] = embedding # Add embedding to our dictionary
print('Found %s word vectors.' % len(embeddings_index))

# Stack all embeddings into one matrix to measure their statistics
all_embs = np.stack(list(embeddings_index.values()))
emb_mean = all_embs.mean() # Mean over all embedding values
emb_std = all_embs.std() # Standard deviation over all embedding values
emb_mean,emb_std

embedding_dim = 100 # We use 100 dimensional glove vectors

word_index = tokenizer.word_index
nb_words = min(vocab_size, len(word_index)) # Number of words actually used

# Initialise unknown words with random values that share the mean and
# std of the real embeddings, so they are statistically unobtrusive.
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embedding_dim))

# Each vector must sit in the row matching its token id
# (token 1 -> row index 1, since rows start at zero).
for word, i in word_index.items():
    # Skip words beyond the vocabulary limit
    if i >= vocab_size:
        continue
    # Get the embedding vector for the word
    embedding_vector = embeddings_index.get(word)
    # If there is an embedding vector, put it in the embedding matrix
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
"""
Explanation: Loading GloVe embeddings
We will use GloVe embeddings as in the chapters before. This code has been copied from previous chapters:
End of explanation
"""
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Embedding

model = Sequential()
# Frozen embedding layer initialised with the GloVe matrix.
model.add(Embedding(vocab_size,
                    embedding_dim,
                    input_length=max_length,
                    weights = [embedding_matrix],
                    trainable = False))
model.add(LSTM(128))
model.add(Dense(20))
model.add(Activation('softmax'))
model.summary()
# Bug fix: this is a 20-class softmax problem, so the loss must be
# categorical_crossentropy. binary_crossentropy computes a per-element
# binary loss and makes the reported 'acc' misleadingly high.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])
model.fit(data,labels,validation_split=0.2,epochs=2)
"""
Explanation: Using the LSTM layer
In Keras, the LSTM layer can be used in exactly the same way as the SimpleRNNlayer we used earlier. It only takes the size of the layer as an input, much like a dense layer. An LSTM layer returns only the last output of the sequence by default, just like a SimpleRNN. A simple LSTM network can look like this:
End of explanation
"""
example = data[10] # get the tokens
# Print tokens as text
# (token 0 is padding and has no entry in inv_index, so it prints as
# None -- presumably harmless here; confirm if output looks odd)
for w in example:
    x = inv_index.get(w)
    print(x,end = ' ')
# Get prediction: model expects a batch, so reshape to (1, max_length)
pred = model.predict(example.reshape(1,100))
# Output the name of the highest-probability category
twenty_train.target_names[np.argmax(pred)]
"""
Explanation: Our model achieves more than 95% accuracy on the validation set in only 2 epochs. Systems like these can be used to assign emails in customer support centers, suggest responses, or classify other forms of text like invoices which need to be assigned to an department. Let's take a look at how our model classified one of the texts:
End of explanation
"""
model = Sequential()
model.add(Embedding(vocab_size,
                    embedding_dim,
                    input_length=max_length,
                    weights = [embedding_matrix],
                    trainable = False))
# Now with recurrent dropout with a 10% chance of removing any element
model.add(LSTM(128, recurrent_dropout=0.1))
model.add(Dense(20))
model.add(Activation('softmax'))
model.summary()
# Bug fix: use categorical_crossentropy for this 20-class softmax output
# (binary_crossentropy inflates the reported 'acc' metric).
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])
model.fit(data,labels,validation_split=0.2,epochs=2)
"""
Explanation: Recurrent Dropout
You have already heard of dropout. Dropout removes some elements of one layers input at random. A common and important tool in recurrent neural networks is recurrent dropout. Recurrent dropout does not remove any inputs between layers but inputs between time steps.
Just like regular dropout, recurrent dropout has a regularizing effect and can prevent overfitting. It is used in Keras by simply passing an argument to the LSTM or RNN layer. Recurrent dropout, unlike regular dropout, does not have its own layer.
End of explanation
"""
|
Cyberface/nrutils_dev | review/notebooks/compare_waveforms_from_two_codes.ipynb | mit | # Setup ipython environment
%load_ext autoreload
%autoreload 2
%matplotlib inline
# Setup plotting backend
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 0.8
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 20
from matplotlib.pyplot import *
# Import useful things
from nrutils import scsearch,gwylm
from numpy import array
"""
Explanation: Compare Two NR Simulations from Different Groups: SXS, GaTech, q=2 Nonspinning
End of explanation
"""
# Search the catalog for nonspinning simulations with mass ratio in [1, 4].
# NOTE that this could be done more manually so that we don't "hope" but know.
A = scsearch( q=[1,4], nonspinning=True, verbose=True )
# Select which of the search results we wish to keep
# NOTE(review): indices 77 and 131 are hard-coded and depend on catalog
# ordering -- confirm they still pick simulations from different codes.
U,V = A[77],A[131]
# Load the multipole modes up to l=2 for each simulation
u = gwylm(U,lmax=2,verbose=True)
v = gwylm(V,lmax=2,verbose=True)
# Plot the waveforms
u.plot(); v.plot()
"""
Explanation: <center>Find Simulations and Load data
End of explanation
"""
# Recompose the strain at a fixed viewing direction (face-on: theta=phi=0)
theta,phi = 0,0
a,b = u.recompose(theta,phi,kind='strain'),v.recompose(theta,phi,kind='strain')
"""
Explanation: <center>Recompose the Waveforms
End of explanation
"""
figure( figsize=2*array([5,3]) )
# Align both waveforms on their time of peak amplitude before comparing.
plot( a.t - a.intrp_t_amp_max, a.amp )
plot( b.t - b.intrp_t_amp_max, b.amp )
# NOTE(review): `nonposy` was removed in newer matplotlib in favour of
# `nonpositive` -- confirm against the pinned matplotlib version.
gca().set_yscale("log", nonposy='clip')
ylim([1e-5,1e-1])
xlim([-400,100])
title('the amplitudes should be approx. the same')
a.plot();b.plot();
"""
Explanation: <center>Plot the amplitudes to verify correct scaling between GT and SXS waveforms
End of explanation
"""
|
seanjmcm/TrafficSign | Traffic_Sign_Classifier_sept.ipynb | mit | # Load pickled data
import pickle
import cv2 # for grayscale and normalize
# TODO: Fill this in based on where you saved the training and testing data
training_file ='traffic-signs-data/train.p'
validation_file='traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_trainLd, y_trainLd = train['features'], train['labels']
X_validLd, y_validLd = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
#X_trainLd=X_trainLd.astype(float)
#y_trainLd=y_trainLd.astype(float)
#X_validLd=X_validLd.astype(float)
#y_validLd=y_validLd.astype(float)
print("Xtrain shape : "+str(X_trainLd.shape)+" ytrain shape : "+str(y_trainLd.shape))
print("Xtrain shape : "+str(X_trainLd.shape)+" ytrain shape : "+str(y_trainLd.shape))
print("X_test shape : "+str(X_test.shape)+" y_test shape : "+str(y_test.shape))
from sklearn.model_selection import train_test_split
"""
Explanation: Self-Driving Car Engineer Nanodegree
Deep Learning
Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
Note: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n",
"File -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a write up template that can be used to guide the writing process. Completing the code template and writeup template will cover all of the rubric points for this project.
The rubric contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Step 0: Load The Data
End of explanation
"""
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
import numpy as np
# TODO: Number of training examples
n_train = X_trainLd.shape[0]

# TODO: Number of validation examples
n_validation = X_validLd.shape[0]

# TODO: Number of testing examples.
n_test = X_test.shape[0]

# TODO: What's the shape of an traffic sign image?
image_shape = X_trainLd.shape[1:4]

# Number of unique classes/labels: computed from the labels themselves
# (yields 43 for this dataset) instead of hard-coding, per the TODO above.
n_classes = len(np.unique(y_trainLd))

print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
"""
Explanation: Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
'features' is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
'labels' is a 1D array containing the label/class id of the traffic sign. The file signnames.csv contains id -> name mappings for each id.
'sizes' is a list containing tuples, (width, height) representing the original width and height the image.
'coords' is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the pandas shape method might be useful for calculating some of the summary results.
Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
End of explanation
"""
import random
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
# NOTE(review): `index` is computed but unused (the line below hard-codes
# 100); also randint's upper bound should be len(X_trainLd)-1.
index = random.randint(0, len(X_trainLd))
image = X_trainLd[100] #squeeze : Remove single-dimensional entries from the shape of an array.
image = image.astype(float)
#normalise
def normit(img):
    """Scale pixel values to roughly [-1, 1] via (pixel - 128) / 128.

    img: image array with integer pixel values in [0, 255]
    returns: float array of the same shape
    """
    # The previous cv2.normalize call was dead code: its result was never
    # used and its dst argument (a shape tuple) was not a valid output array.
    image = img.astype(float)
    return (image - 128.0) / 128.0
# Normalize the sample image and display it to sanity-check the scaling.
temp = normit(image)
plt.figure(figsize=(1,1))
plt.imshow(temp.squeeze())
"""
Explanation: Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The Matplotlib examples and gallery pages are a great resource for doing visualizations in Python.
NOTE: It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
End of explanation
"""
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
import cv2
from sklearn.utils import shuffle
print("Test")
## xtrain
# NOTE(review): grey_X_train is allocated but never filled or used in this
# chunk; kept in case a later cell relies on it -- confirm before removing.
grey_X_train = np.zeros(shape=[X_trainLd.shape[0],X_trainLd.shape[1],X_trainLd.shape[2]])
# np.zeros already yields float64, so no separate astype(float) is needed.
norm_X_train = np.zeros(shape=[X_trainLd.shape[0],X_trainLd.shape[1],X_trainLd.shape[2],3], dtype=float)
X_train, y_train = shuffle(X_trainLd, y_trainLd)
shuff_X_train, shuff_y_train = X_train, y_train
X_valid, y_valid = X_validLd, y_validLd
# Normalize every training image; enumerate replaces the manual
# i=0 / i=i+1 counters used previously.
for i, p in enumerate(X_train):
    norm_X_train[i] = normit(p)
print("after normalise")
##validate
norm_X_valid = np.zeros(shape=[X_validLd.shape[0],X_validLd.shape[1],X_validLd.shape[2],3], dtype=float)
for i, v in enumerate(X_valid):
    norm_X_valid[i] = normit(v)
##test
norm_X_test = np.zeros(shape=[X_test.shape[0],X_test.shape[1],X_test.shape[2],3], dtype=float)
for i, testim in enumerate(X_test):
    norm_X_test[i] = normit(testim)
print("fin")
# Pull out a few normalized images to eyeball the preprocessing result.
image22 = norm_X_train[110] ; imageb4 = X_train[110]; imagev=norm_X_valid[100]; imaget=norm_X_test[100]
plt.figure(figsize=(1,1))
plt.imshow(imagev.squeeze())
plt.figure(figsize=(1,1))
plt.imshow(imaget.squeeze()) #squeeze : Remove single-dimensional entries from the shape of an array
"""
Explanation: Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the German Traffic Sign Dataset.
The LeNet-5 implementation shown in the classroom at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
Neural network architecture (is the network over or underfitting?)
Play around preprocessing techniques (normalization, rgb to grayscale, etc)
Number of examples per label (some have more than others).
Generate fake data.
Here is an example of a published baseline model on this problem. It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, (pixel - 128)/ 128 is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
End of explanation
"""
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow as tf
EPOCHS = 30
BATCH_SIZE = 128 # current value; an earlier experiment tried 256
# Use the normalized training images prepared in the preprocessing cell.
#X_train=X_train.astype(float)
X_train=norm_X_train
#print(X_train[20])
#X_train=shuff_X_train
#X_valid=norm_X_valid
from tensorflow.contrib.layers import flatten
def LeNet(x):
    """Build the LeNet-5 graph: 2 conv+pool stages, 3 fully connected layers.

    x: batch of 32x32x3 images (tf.float32 placeholder)
    returns: logits tensor of shape (batch, 43)
    """
    # Hyperparameters for tf.truncated_normal weight initialization.
    mu = 0.0
    sigma = 0.1 # current stddev (an earlier experiment tried 0.2)
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    # Input depth is 3 to match the RGB images (original LeNet used 1).
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5,3, 6), mean = mu, stddev = sigma)) #SMcM depth changed to 3
    conv1_b = tf.Variable(tf.zeros(6))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b # 'SAME' padding could be tried as an alternative
    # Activation.
    conv1 = tf.nn.relu(conv1)
    #conv1 = tf.nn.relu(conv1) #SMcM add an extra relu
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2   = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Flatten. Input = 5x5x16. Output = 400.
    fc0   = flatten(conv2)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1   = tf.matmul(fc0, fc1_W) + fc1_b
    # Activation.
    fc1    = tf.nn.relu(fc1)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W  = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b  = tf.Variable(tf.zeros(84))
    fc2    = tf.matmul(fc1, fc2_W) + fc2_b
    # Activation.
    fc2    = tf.nn.relu(fc2)
    # Layer 5: Fully Connected. Input = 84. Output = 43 (one per sign class).
    fc3_W  = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b  = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
print("model")
# Sanity-check the normalized training data before building the pipeline.
image22 = X_train[110]
print(norm_X_train.shape)
print(X_train.shape)
plt.figure(figsize=(1,1))
plt.imshow(image22.squeeze()) #squeeze : Remove single-dimensional entries from the shape of an array
#print(image22)
"""
Explanation: Model Architecture
Train, Validate and Test the Model
End of explanation
"""
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
#Features and Labels
x = tf.placeholder(tf.float32, (None, 32, 32, 3))  # batch of RGB images
y = tf.placeholder(tf.int32, (None))               # batch of integer labels
one_hot_y = tf.one_hot(y, 43)
print("start")
#Training Pipeline
rate = 0.0025 # learning rate (earlier experiments tried 0.001 and 0.0008)
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
#Model Evaluation
def evaluate(X_data, y_data):
    """Return the mean accuracy of the current model over (X_data, y_data).

    Runs accuracy_operation batch by batch in the default session and
    weights each batch by its actual size (the last batch may be short).
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
#Train the Model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle each epoch so batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
        # Report validation accuracy after each epoch.
        validation_accuracy = evaluate(norm_X_valid, y_valid)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    # Persist the trained weights for the later evaluation cell.
    saver.save(sess, './sign')
    print("Model saved")
"""
Explanation: A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
End of explanation
"""
# Evaluate the model: restore the latest checkpoint and measure accuracy
# on the held-out (pre-normalised) test set.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    print("restored")
    test_accuracy = evaluate(norm_X_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
"""
Explanation: Evaluate the Model
evaluate the performance of the model on the test set.
End of explanation
"""
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
#http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset
#http://benchmark.ini.rub.de/Dataset/GTSRB_Online-Test-Images.zip
"""
Explanation: Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find signnames.csv useful as it contains mappings from the class id (integer) to the actual sign name.
Load and Output the Images
End of explanation
"""
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
"""
Explanation: Predict the Sign Type for Each Image
End of explanation
"""
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
"""
Explanation: Analyze Performance
End of explanation
"""
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
"""
Explanation: Output Top 5 Softmax Probabilities For Each Image Found on the Web
For each of the new images, print out the model's softmax probabilities to show the certainty of the model's predictions (limit the output to the top 5 probabilities for each image). tf.nn.top_k could prove helpful here.
The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
tf.nn.top_k will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. tk.nn.top_k is used to choose the three classes with the highest probability:
```
(5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through sess.run(tf.nn.top_k(tf.constant(a), k=3)) produces:
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
Looking just at the first row we get [ 0.34763842, 0.24879643, 0.12789202], you can confirm these are the 3 largest probabilities in a. You'll also notice [3, 0, 5] are the corresponding indices.
End of explanation
"""
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Plot every feature map of one activation layer for a single stimulus image.

    Args:
        image_input: preprocessed image batch fed to the network; must use
            the same preprocessing pipeline as training.
        tf_activation: the TF tensor of the layer whose activations to plot.
            NOTE(review): relies on the module-level `sess` and placeholder `x`.
        activation_min / activation_max: optional fixed colour-scale bounds;
            -1 (the default) lets matplotlib pick the min/max from the data.
        plt_num: matplotlib figure number, so several calls can coexist.
    """
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15, 15))
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # grid layout of the feature maps
        plt.title('FeatureMap ' + str(featuremap))  # displays the feature map number
        # BUG FIX: the original used the bitwise '&' here; because '&' binds
        # tighter than '!=', the condition parsed as a chained comparison
        # against (-1 & activation_max) and could silently pick the wrong
        # branch. Logical 'and' tests both bounds as intended.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
"""
Explanation: Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this template as a guide. The writeup can be in a markdown or pdf file.
Note: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
File -> Download as -> HTML (.html). Include the finished document along with this notebook as your submission.
Step 4 (Optional): Visualize the Neural Network's State with Test Images
This section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the LeNet lab's feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
For an example of what feature map outputs look like, check out NVIDIA's results in their paper End-to-End Deep Learning for Self-Driving Cars in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
<figure>
<img src="visualize_cnn.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above)</p>
</figcaption>
</figure>
<p></p>
End of explanation
"""
|
NEONScience/NEON-Data-Skills | tutorials-in-development/Python/neon_api/neon_api_02_downloading_observation_py.ipynb | agpl-3.0 | import requests
import json
import pandas as pd

# NEON API base URL plus the site (Lower Teakettle) and data product
# (breeding landbird point counts) used throughout this tutorial.
SERVER = 'http://data.neonscience.org/api/v0/'
SITECODE = 'TEAK'
PRODUCTCODE = 'DP1.10003.001'
"""
Explanation: syncID:
title: "Downloading NEON Observation Data with Python"
description: ""
dateCreated: 2020-04-24
authors: Maxwell J. Burner
contributors: Donal O'Leary
estimatedTime:
packagesLibraries: requests, json, pandas
topics: api, data management
languagesTool: python
dataProduct: DP1.10003.001
code1:
tutorialSeries: python-neon-api-series
urlTitle: python-neon-api-02-downloading-observational
In this tutorial we will learn to download Observational Sampling (OS) data from the NEON API into the Python environment.
<div id="ds-objectives" markdown="1">
### Objectives
After completing this tutorial, you will be able to:
* Navigate a NEON API request from the *data/* endpoint
* Describe the naming conventions of NEON OS data files
* Understand how to download NEON observational data using the Python Pandas library
* Describe the basic components of a Pandas dataframe
### Install Python Packages
* **requests**
* **json**
* **numpy**
* **pandas**
We will not actually use the NumPy package in this tutorial; it is listed here because the Pandas package is built on top of NumPy, and requires that the latter be present.
</div>
In this tutorial we will learn how to download specific NEON data files into Python. We will specifically look at how to use the Pandas package to read in CSV files of observational data.
In the previous tutorial, we saw some of the data files containing information on land bird breeding counts. These are an example of NEON observational data. NEON has three basic types of data: Observational Sampling (OS), Instrumentation Sampling (IS), and Remote Sensing or Airborne Observation Platform data (AOP). The process for requesting data is about the same for all three, but downloading and navigating the data tends to be very different depending on which specific data product we are using.
Here we will discuss downloading observational data, as it tends to be the simplest to handle.
Libraries Downloaded
In addition to used requests and json packages again, we will use the Pandas package to read in the data. Pandas is a library that adds data frame objects to Python, based on the data frames of the R programming language; these offer a great way to store and manipulate tabular data.
End of explanation
"""
# Request the list of data files available for this product/site/month
# from the NEON 'data/' endpoint.
data_request = requests.get(SERVER+'data/'+PRODUCTCODE+'/'+SITECODE+'/'+'2018-06')
data_json = data_request.json()

# View names of the available files
for file in data_json['data']['files']:
    print(file['name'])
"""
Explanation: Look up Data Files
We already know from the last tutorial that landbird breeding counts (DP1.10003.001) are available at the Lower Teakettle site for 2018-06. We can again make a request to see what files in particular are available.
End of explanation
"""
print(data_json['data']['files'][6]['name'])
"""
Explanation: Let's take a closer look at a file name.
End of explanation
"""
# Print names and URLs of files with bird count data
for file in data_json['data']['files']:
    if('countdata' in file['name']): # show all files with 'countdata' in their name
        print(file['name'],file['url'])
        if('basic' in file['name']):
            bird_count_url = file['url'] # save URL of the 'basic' bird count CSV
"""
Explanation: The format for most NEON data product file names is:
NEON.D[two-digit domain number].[site code].[data product ID].[file-specific name].[date of file creation].[file extension]
So the file whose name we singled out is domain 17, Lower Teakettle Site, Breeding Landbird point counts (DP1.10003.001), brd_perpoint.2018-06.basic, created 2019-11-07 at 15:32:35 Universal Time. The file name brd_perpoint.2018-06.basic indicates that this is the 'basic' version of bird counts by point, observed in June 2018.
Bird counts and other observational data are usually kept in CSV files in the NEON database. Often the data for a particular month-site combination will be available in through two different .csv files that represent two different 'download packages'; a 'basic' package storing only the main measurements, and an 'expanded' package that also lists the uncertainties involved in each measurement. Let's save the URL for the basic count data CSV file.
End of explanation
"""
#Read bird count CSV data into a Pandas Dataframe
df_bird = pd.read_csv(bird_count_url)
"""
Explanation: Read file into Pandas Dataframe
There are a couple options for reading CSV files into Python. For files read directly from NEON's data repository, one popular option is the 'read_csv' function from the Pandas package. This function converts the contents of the target file into a pandas dataframe object, and has the added advantage of being able to read data files accessed through the web (Python has its own built-in package for reading CSV files, but this package can only read files present on your machine).
End of explanation
"""
# View the column names
df_bird.columns

# Print out the dimensions of the new dataframe
print('Number of columns: ',df_bird.shape[1])
print('Number of Rows: ',df_bird.shape[0])

# Print out names and data types of the dataframe columns
print(df_bird.dtypes)
"""
Explanation: Pandas is a popular Python package for data analysis and data manipulation. The package implements dataframe objects based on the dataframes used in the R programming language, and uses these objects for storing and manipulating tabular data.
A dataframe is a two-dimensional table of data, a grid built of rows and columns of values. Generally the columns correspond to the different variables being measured, while the rows correspond to each entry or measurement taken (in this case, each bird counted). Dataframes also have a header containing labels for each column, and an index containing labels for each row; both are 'index' objects stored as attributes of the dataframe object.
Python dataframes store their contents, header, and index in different attributes of the dataframe object. Other attributes contain metadata such as the overall shape of the dataframe, and the data type of each column.
You can find more about Pandas at their official site, which includes a tutorials page here.
End of explanation
"""
#View first five rows of dataframe using the 'head' method
df_bird.head(5)
"""
Explanation: Pandas dataframes classify data as integer, floating point (decimal numbers), or object; the last category ususally indicates data stored as strings, such as text labels or date-time data.
End of explanation
"""
# View names of the metadata files: everything that is not a count table.
# NOTE(review): '&' works here only because both operands are parenthesised
# booleans; the logical 'and' would be the idiomatic choice.
for file in data_json['data']['files']:
    if( (not('countdata' in file['name'])) & (not('perpoint' in file['name'])) ):
        print(file['name'])
"""
Explanation: We can now manipulate this dataframe using the various methods and functions of the Pandas library.
Variable Information
Look again at the list of files available, specifically those that are NOT counting data.
End of explanation
"""
# Load the 'variables' CSV (units/definitions for every column) into a
# pandas dataframe.
for file in data_json['data']['files']:
    if('variables' in file['name']):
        df_variables = pd.read_csv(file['url'])

# View metadata and first few rows
print('Number of rows: ', df_variables.shape[0])
print('Number of columns: ',df_variables.shape[1])
print('Data Columns:\n')
print(df_variables.dtypes)
df_variables.head(5)
"""
Explanation: While the .zip files are packages containing multiple bird count data tables, the remaining files mostly serve to provide context to the data. The variables CSV file in particular contains a dataset with information on the variables used in the count data tables. This provides useful information such as units and defintions for each variable.
End of explanation
"""
#Subset to view only variables in the basic countdata table
df_variables[(df_variables['table'] == 'brd_countdata')&(df_variables['downloadPkg'] == 'basic')]
"""
Explanation: The table includes a column called 'table' indicating in which file a variable appears. We want to see information on the variables for the basic bird count table, since that is the table we downloaded. We can do this using comparisons and subsetting.
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/agents/tutorials/3_policies_tutorial.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TF-Agents Authors.
End of explanation
"""
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.networks import network
from tf_agents.policies import py_policy
from tf_agents.policies import random_py_policy
from tf_agents.policies import scripted_py_policy
from tf_agents.policies import tf_policy
from tf_agents.policies import random_tf_policy
from tf_agents.policies import actor_policy
from tf_agents.policies import q_policy
from tf_agents.policies import greedy_policy
from tf_agents.trajectories import time_step as ts
"""
Explanation: 정책
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/agents/tutorials/3_policies_tutorial"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/agents/tutorials/3_policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/agents/tutorials/3_policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/agents/tutorials/3_policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td>
</table>
소개
강화 학습 용어에서 정책은 환경의 관찰 값을 행동 또는 행동에 대한 분포로 매핑합니다. TF-Agents에서 환경의 관찰 값은 명명된 튜플 TimeStep('step_type', 'discount', 'reward', 'observation')에 포함되며, 정책은 타임스텝을 행동 또는 행동 분포에 매핑합니다. 대부분의 정책은 timestep.observation을 사용하고 일부 정책은 timestep.step_type(예: 상태 저장 정책에서 에피소드 시작 시 상태 재설정)을 사용하지만, timestep.discount 및 timestep.reward는 일반적으로 무시됩니다.
정책은 다음과 같은 방식으로 TF-Agents의 다른 구성 요소와 관련됩니다. 대부분의 정책에는 TimeSteps로부터 행동 및/또는 행동에 대한 분포를 계산하는 신경망이 있습니다. 에이전트는 여러 가지 목적으로 하나 이상의 정책을 포함할 수 있습니다(예: 배포를 위해 훈련되는 기본 정책 및 데이터 수집을 위한 노이즈 정책). 정책을 저장/복원할 수 있으며 데이터 수집, 평가 등을 위해 에이전트와 독립적으로 사용할 수 있습니다.
일부 정책은 Tensorflow에서 작성하기가 더 쉽고(예: 신경망이 있는 정책) 다른 정책은 Python에서 작성하기가 더 쉽습니다(예: 작업 스크립트 수행). 따라서 TF 에이전트에서는 Python 및 Tensorflow 정책을 모두 허용합니다. 또한, TensorFlow로 작성된 정책은 Python 환경에서 사용해야 할 수도 있으며 그 반대의 경우도 마찬가지입니다. 예를 들어 TensorFlow 정책은 훈련에 사용되지만, 나중에 프로덕션 Python 환경에 배포됩니다. 이를 쉽게 하기 위해 Python과 TensorFlow 정책 간 변환용 래퍼를 제공합니다.
또 다른 흥미로운 정책 클래스는 특정 유형의 노이즈를 추가하거나 확률적 정책의 최대(greedy) 또는 엡실론 최대(epsilon-greedy) 버전을 만들고 여러 정책을 무작위로 혼합하는 등 특정 방식으로 주어진 정책을 수정하는 정책 래퍼입니다.
설정
tf-agents를 아직 설치하지 않은 경우, 다음을 실행합니다.
End of explanation
"""
class Base(abc.ABC):
    """Abstract interface shared by all (Python) policies.

    BUG FIX: the original declared ``class Base(object)``; without the
    ABCMeta metaclass the ``@abc.abstractmethod`` decorators are inert and
    subclasses were never forced to implement the interface. Deriving from
    ``abc.ABC`` makes the abstract declarations effective.
    """

    @abc.abstractmethod
    def __init__(self, time_step_spec, action_spec, policy_state_spec=()):
        self._time_step_spec = time_step_spec
        self._action_spec = action_spec
        self._policy_state_spec = policy_state_spec

    @abc.abstractmethod
    def reset(self, policy_state=()):
        # Return the initial policy state (e.g. to reset an RNN at episode start).
        pass

    @abc.abstractmethod
    def action(self, time_step, policy_state=()):
        # Return a PolicyStep(action, state, info) named tuple.
        pass

    @abc.abstractmethod
    def distribution(self, time_step, policy_state=()):
        # Not implemented in Python policies, only for TF policies.
        pass

    @abc.abstractmethod
    def update(self, policy):
        # Update self to be similar to the input `policy`.
        pass

    @property
    def time_step_spec(self):
        """Spec of the TimeSteps this policy consumes."""
        return self._time_step_spec

    @property
    def action_spec(self):
        """Spec of the actions this policy emits."""
        return self._action_spec

    @property
    def policy_state_spec(self):
        """Spec of the policy's internal state (empty tuple if stateless)."""
        return self._policy_state_spec
"""
Explanation: Python 정책
Python 정책의 인터페이스는 policies/py_policy.PyPolicy에 정의되어 있습니다. 주요 메서드는 다음과 같습니다.
End of explanation
"""
# A RandomPyPolicy only needs the action spec: it samples uniformly from the
# 2-element integer action space in [-10, 10] and ignores the time step
# (hence time_step_spec=None and time_step=None below).
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
my_random_py_policy = random_py_policy.RandomPyPolicy(time_step_spec=None,
    action_spec=action_spec)
time_step = None
action_step = my_random_py_policy.action(time_step)
print(action_step)
action_step = my_random_py_policy.action(time_step)
print(action_step)
"""
Explanation: 가장 중요한 메서드는 환경의 관찰 값을 포함하는 time_step을 다음 속성이 포함된 PolicyStep 명명된 튜플에 매핑하는 action(time_step)입니다.
action: 환경에 적용할 행동
state: 다음 행동 호출에 제공될 정책의 상태(예: RNN 상태)
info: 행동 로그 확률과 같은 선택적 보조 정보
time_step_spec 및 action_spec은 입력 타임스텝 및 출력 행동의 사양입니다. 정책에는 일반적으로 상태 저장 정책에서 상태를 재설정하는 데 사용되는 reset 함수가 있습니다. update(new_policy) 함수는 new_policy 쪽으로 self를 업데이트합니다.
이제 Python 정책의 몇 가지 예제를 살펴보겠습니다.
예제 1: 임의 Python 정책
PyPolicy의 간단한 예제는 주어진 불연속/연속 action_spec에 대한 무작위 행동을 생성하는 RandomPyPolicy입니다. 입력 time_step은 무시됩니다.
End of explanation
"""
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
# Script of (num_repeats, action) pairs: each action is emitted num_repeats
# times before moving on to the next entry.
action_script = [(1, np.array([5, 2], dtype=np.int32)),
                 (0, np.array([0, 0], dtype=np.int32)), # Setting `num_repeats` to 0 will skip this action.
                 (2, np.array([1, 2], dtype=np.int32)),
                 (1, np.array([3, 4], dtype=np.int32))]

my_scripted_py_policy = scripted_py_policy.ScriptedPyPolicy(
    time_step_spec=None, action_spec=action_spec, action_script=action_script)

policy_state = my_scripted_py_policy.get_initial_state()
time_step = None

print('Executing scripted policy...')
# The script position lives in the policy state, so each call must be fed
# the state returned by the previous one.
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
action_step = my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
action_step = my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)

# A fresh initial state replays the script from the beginning.
print('Resetting my_scripted_py_policy...')
policy_state = my_scripted_py_policy.get_initial_state()
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
"""
Explanation: 예제 2: 스크립팅된 Python 정책
스크립팅된 정책은 (num_repeats, action) 튜플의 목록으로 표시되는 행동의 스크립트를 재생합니다. action 함수가 호출될 때마다 지정된 반복 횟수가 완료될 때까지 목록에서 다음 행동을 반환한 후 목록의 다음 행동으로 이동합니다. reset 메서드를 호출하여 목록의 처음부터 실행을 시작할 수 있습니다.
End of explanation
"""
# RandomTFPolicy samples uniformly from the continuous action spec in
# [-1, 3]; the observation in the time step is ignored.
action_spec = tensor_spec.BoundedTensorSpec(
    (2,), tf.float32, minimum=-1, maximum=3)
input_tensor_spec = tensor_spec.TensorSpec((2,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)

my_random_tf_policy = random_tf_policy.RandomTFPolicy(
    action_spec=action_spec, time_step_spec=time_step_spec)
observation = tf.ones(time_step_spec.observation.shape)
time_step = ts.restart(observation)
action_step = my_random_tf_policy.action(time_step)

print('Action:')
print(action_step.action)
"""
Explanation: TensorFlow 정책
TensorFlow 정책은 Python 정책과 같은 인터페이스를 따릅니다. 몇 가지 예제를 살펴보겠습니다.
예제 1: 임의 TF 정책
RandomTFPolicy는 주어진 불연속/연속 action_spec에 따라 무작위 행동을 생성하는 데 사용될 수 있습니다. 입력time_step은 무시됩니다.
End of explanation
"""
class ActionNet(network.Network):
    """Deterministic actor network: maps observations to tanh-squashed actions.

    BUG FIX: the original sized the Dense layer from the module-level global
    ``action_spec`` instead of the ``output_tensor_spec`` constructor
    argument, silently coupling the class to outer notebook state. The
    constructor argument is used here instead (in the notebook both refer to
    the same spec, so behaviour is unchanged).
    """

    def __init__(self, input_tensor_spec, output_tensor_spec):
        super(ActionNet, self).__init__(
            input_tensor_spec=input_tensor_spec,
            state_spec=(),
            name='ActionNet')
        self._output_tensor_spec = output_tensor_spec
        self._sub_layers = [
            tf.keras.layers.Dense(
                output_tensor_spec.shape.num_elements(), activation=tf.nn.tanh),
        ]

    def call(self, observations, step_type, network_state):
        del step_type  # unused: this network is stateless w.r.t. step type

        output = tf.cast(observations, dtype=tf.float32)
        for layer in self._sub_layers:
            output = layer(output)
        actions = tf.reshape(output, [-1] + self._output_tensor_spec.shape.as_list())

        # Scale and shift actions to the correct range if necessary.
        return actions, network_state
"""
Explanation: 예제 2: Actor 정책
Actor 정책은 time_steps를 행동에 매핑하는 네트워크 또는 time_steps를 행동에 대한 분포에 매핑하는 네트워크를 사용하여 생성됩니다.
행동 네트워크 사용하기
다음과 같이 네트워크를 정의합니다.
End of explanation
"""
# Specs: 4-dim float observations, 3-dim actions bounded to [-1, 1].
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((3,),
                                            tf.float32,
                                            minimum=-1,
                                            maximum=1)

action_net = ActionNet(input_tensor_spec, action_spec)

# Wrap the deterministic action network in an ActorPolicy.
my_actor_policy = actor_policy.ActorPolicy(
    time_step_spec=time_step_spec,
    action_spec=action_spec,
    actor_network=action_net)
"""
Explanation: TensorFlow에서 대부분의 네트워크 레이어는 배치 연산을 위해 설계되었으므로 입력 time_steps가 배치 처리되고 네트워크의 출력도 배치 처리됩니다. 또한, 네트워크는 주어진 action_spec의 올바른 범위에서 행동을 생성할 책임이 있습니다. 일반적으로, 예를 들어 최종 레이어에 대한 tanh 활성화를 사용하여 [-1, 1]에서의 행동을 생성한 다음 이를 입력 action_spec과 같은 올바른 범위로 조정하고 이동하면 됩니다(예: tf_agents/agents/ddpg/networks.actor_network()).
이제 위의 네트워크를 사용하여 actor 정책을 만들 수 있습니다.
End of explanation
"""
# Apply the policy to a batch of two all-ones observations.
batch_size = 2
observations = tf.ones([2] + time_step_spec.observation.shape.as_list())

time_step = ts.restart(observations, batch_size)

action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)

# With a deterministic action network, distribution() is a delta
# distribution centred on the action returned by action().
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
"""
Explanation: time_step_spec 다음에 오는 모든 time_steps 배치에 적용할 수 있습니다.
End of explanation
"""
class ActionDistributionNet(ActionNet):
    """Stochastic actor network: wraps ActionNet's output in a unit-variance normal."""

    def call(self, observations, step_type, network_state):
        # Reuse the deterministic network's output as the distribution mean.
        means, network_state = super(ActionDistributionNet, self).call(
            observations, step_type, network_state)
        stddevs = tf.ones_like(means)

        distribution = tfp.distributions.MultivariateNormalDiag(means, stddevs)
        return distribution, network_state
action_distribution_net = ActionDistributionNet(input_tensor_spec, action_spec)

my_actor_policy = actor_policy.ActorPolicy(
    time_step_spec=time_step_spec,
    action_spec=action_spec,
    actor_network=action_distribution_net)

# Sampled actions are clipped to the action spec's [-1, 1] range because
# ActorPolicy defaults to clip=True.
action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
"""
Explanation: 위의 예제에서는 행동 텐서를 생성하는 행동 네트워크를 사용하여 정책을 만들었습니다. 이 경우, policy.distribution(time_step)은 policy.action(time_step)의 출력 주위에서의 결정적(델타) 분포입니다. 확률적 정책을 생성하는 한 가지 방법은 행동에 노이즈를 추가하는 정책 래퍼로 actor 정책을 래핑하는 것입니다. 또 다른 방법은 아래와 같이 행동 네트워크 대신 행동 분포 네트워크를 사용하여 actor 정책을 만드는 것입니다.
행동 분포 네트워크 사용하기
End of explanation
"""
# Specs for a discrete problem: 4-dim float observations and a single
# integer action in {0, 1, 2}.
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((),
                                            tf.int32,
                                            minimum=0,
                                            maximum=2)
num_actions = action_spec.maximum - action_spec.minimum + 1
class QNetwork(network.Network):
    """Minimal Q-network: one dense layer producing a Q-value per action."""

    def __init__(self, input_tensor_spec, action_spec, num_actions=num_actions, name=None):
        super(QNetwork, self).__init__(
            input_tensor_spec=input_tensor_spec,
            state_spec=(),
            name=name)
        self._sub_layers = [tf.keras.layers.Dense(num_actions)]

    def call(self, inputs, step_type=None, network_state=()):
        del step_type  # unused: the Q-values do not depend on the step type
        output = tf.cast(inputs, tf.float32)
        for layer in self._sub_layers:
            output = layer(output)
        return output, network_state
batch_size = 2
observation = tf.ones([batch_size] + time_step_spec.observation.shape.as_list())
time_steps = ts.restart(observation, batch_size=batch_size)

my_q_network = QNetwork(
    input_tensor_spec=input_tensor_spec,
    action_spec=action_spec)
# QPolicy turns the per-action Q-values into a categorical action
# distribution (Q-values are used as logits).
my_q_policy = q_policy.QPolicy(
    time_step_spec, action_spec, q_network=my_q_network)
action_step = my_q_policy.action(time_steps)
distribution_step = my_q_policy.distribution(time_steps)

print('Action:')
print(action_step.action)

print('Action distribution:')
print(distribution_step.action)
"""
Explanation: 위의 예제에서 행동은 주어진 행동 사양 [-1, 1]의 범위로 잘립니다. ActorPolicy의 생성자 인수는 기본적으로 clip = True이기 때문입니다. 이 값을 false로 설정하면 네트워크에서 생성된 잘리지 않은 행동이 반환됩니다.
예를 들어, stochastic_policy.distribution().mode()를 행동으로 선택하는, 그리고 이 최대 행동 주변의 결정론적/델타 분포를 distribution()으로 선택하는 GreedyPolicy 래퍼를 사용하여 확률적 정책을 결정론적 정책으로 변환할 수 있습니다.
예제 3: Q 정책
Q 정책은 DQN과 같은 에이전트에서 사용되며 각 불연속 행동에 대한 Q 값을 예측하는 Q 네트워크를 기반으로 합니다. 주어진 타임스텝에서 Q 정책의 행동 분포는 q 값을 로짓으로 사용하여 작성된 범주형 분포입니다.
End of explanation
"""
# GreedyPolicy wraps the stochastic Q-policy: action() returns the mode of
# the wrapped policy's distribution, and distribution() becomes a
# deterministic (delta) distribution around that action.
my_greedy_policy = greedy_policy.GreedyPolicy(my_q_policy)

action_step = my_greedy_policy.action(time_steps)
print('Action:')
print(action_step.action)

distribution_step = my_greedy_policy.distribution(time_steps)
print('Action distribution:')
print(distribution_step.action)
"""
Explanation: 정책 래퍼
정책 래퍼를 사용하여 주어진 정책을 래핑하고 수정할 수 있습니다(예: 노이즈 추가). 정책 래퍼는 정책 (Python/TensorFlow)의 서브 클래스이므로 다른 정책과 마찬가지로 사용할 수 있습니다.
예제: 최대 정책(Greedy Policy)
최대 래퍼(greedy wrapper)를 사용하여 distribution()을 구현하는 모든 TensorFlow 정책을 래핑할 수 있습니다. GreedyPolicy.action()은 wrapped_policy.distribution().mode()를 반환하고, GreedyPolicy.distribution()은 GreedyPolicy.action() 주위의 결정적/델타 분포입니다.
End of explanation
"""
|
emjotde/UMZ | Wyklady/08/Konkursy2.v3.ipynb | cc0-1.0 | def runningMeanFast(x, N):
    # Moving average: convolve with a length-N box kernel; 'valid' trims edges.
    return np.convolve(x, np.ones((N,))/N, mode='valid')
def powerme(x1, x2, n):
    """Polynomial feature expansion of two column vectors up to degree n.

    For every total degree m in 0..n, emits the columns x1^i * x2^(m-i)
    for i in 0..m, then stacks them horizontally into one feature matrix.
    """
    columns = [np.multiply(np.power(x1, i), np.power(x2, m - i))
               for m in range(n + 1)
               for i in range(m + 1)]
    return np.hstack(columns)
def safeSigmoid(x, eps=0):
    """Logistic sigmoid, optionally clipped into [eps, 1 - eps].

    Clipping keeps subsequent log() calls finite when the sigmoid
    saturates to exactly 0 or 1 in floating point.
    """
    y = 1.0 / (1.0 + np.exp(-x))
    if eps > 0:
        # clip from below and above
        low, high = eps, 1 - eps
        y[y < low] = low
        y[y > high] = high
    return y
def h(theta, X, eps=0.0):
    """Logistic-regression hypothesis: sigmoid of the linear score X*theta.

    X is expected to be an np.matrix, so `X*theta` is a matrix product.
    `eps` is forwarded to safeSigmoid to clip the output away from 0 and 1.
    """
    return safeSigmoid(X*theta, eps)
def J(h, theta, X, y, lamb=0):
    """Cross-entropy (negative log-likelihood) cost for logistic regression.

    Evaluates the hypothesis with a small eps so log() stays finite at
    saturated predictions. `lamb` is accepted for interface compatibility;
    no regularisation term is applied.
    """
    m = len(y)
    predictions = h(theta, X, eps=10**-7)
    log_likelihood = (np.multiply(y, np.log(predictions))
                      + np.multiply(1 - y, np.log(1 - predictions)))
    return -np.sum(log_likelihood, axis=0) / m
def dJ(h, theta, X, y, lamb=0):
    """Gradient of the cross-entropy cost J with respect to theta.

    `lamb` is accepted for interface compatibility; no regularisation
    gradient is applied.
    """
    residual = h(theta, X) - y
    return 1.0 / y.shape[0] * (X.T * residual)
def SGD(h, fJ, fdJ, theta, X, Y,
        alpha=0.001, maxEpochs=1.0, batchSize=100,
        adaGrad=False, logError=True, validate=0.0, valStep=100, lamb=0):
    """Mini-batch stochastic gradient descent for hypothesis `h`.

    Args:
        h: hypothesis function h(theta, X[, eps]).
        fJ: cost function fJ(h, theta, X, Y).
        fdJ: gradient function fdJ(h, theta, X, Y, lamb=...).
        theta: initial parameter column vector (np.matrix, n x 1).
        X, Y: design matrix and labels. When validate > 0, the FIRST
            `validate` fraction of the rows is held out for validation.
        alpha: learning rate (per-coordinate base rate when adaGrad=True).
        maxEpochs: number of passes over the training data.
        batchSize: rows per gradient step.
        adaGrad: if True, scale each step by AdaGrad's accumulated gradient.
        logError: if True, record training error after every step.
        validate: fraction of X/Y used as a validation set.
        valStep: compute validation error every `valStep` steps.
        lamb: regularisation strength, forwarded to fdJ.

    Returns:
        (theta, (train_x, train_y, val_x, val_y)): final parameters and the
        error curves, indexed by epoch fraction.

    NOTE(review): the data is not shuffled here and the validation split
    simply takes the leading rows -- callers should shuffle beforehand.
    """
    errorsX, errorsY = [], []
    errorsVX, errorsVY = [], []

    # Optionally split off the first `validate` fraction as a validation set.
    XT, YT = X, Y
    if validate > 0:
        mv = int(X.shape[0] * validate)
        XV, YV = X[:mv], Y[:mv]
        XT, YT = X[mv:], Y[mv:]
    m, n = XT.shape

    start, end = 0, batchSize
    maxSteps = (m * float(maxEpochs)) / batchSize

    if adaGrad:
        hgrad = np.matrix(np.zeros(n)).reshape(n,1)

    for i in range(int(maxSteps)):
        XBatch, YBatch = XT[start:end,:], YT[start:end,:]

        grad = fdJ(h, theta, XBatch, YBatch, lamb=lamb)

        if adaGrad:
            # Accumulate squared gradients; each coordinate's step shrinks
            # with the history of its own gradient magnitudes.
            hgrad += np.multiply(grad, grad)
            Gt = 1.0 / (10**-7 + np.sqrt(hgrad))
            theta = theta - np.multiply(alpha * Gt, grad)
        else:
            theta = theta - alpha * grad

        if logError:
            errorsX.append(float(i*batchSize)/m)
            errorsY.append(fJ(h, theta, XBatch, YBatch).item())
            if validate > 0 and i % valStep == 0:
                errorsVX.append(float(i*batchSize)/m)
                errorsVY.append(fJ(h, theta, XV, YV).item())

        # Advance the mini-batch window, wrapping around at the end.
        if start + batchSize < m:
            start += batchSize
        else:
            start = 0
        end = min(start + batchSize, m)
    return theta, (errorsX, errorsY, errorsVX, errorsVY)
def classifyBi(theta, X):
    """Positive-class probability for each row of X under parameters theta."""
    return h(theta, X)
# Train a degree-n polynomial logistic regression on ex2data2 with SGD and
# plot the 0.5 decision boundary plus the learning curves.
n = 6
sgd = True
data = np.matrix(np.loadtxt("ex2data2.txt", delimiter=","))
np.random.shuffle(data)
X = powerme(data[:, 0], data[:, 1], n)
Y = data[:, 2]
pyplot.figure(figsize=(16, 8))
pyplot.subplot(121)
pyplot.scatter(X[:, 2].tolist(),
               X[:, 1].tolist(),
               c=Y.tolist(),
               s=100, cmap=pyplot.cm.get_cmap('prism'));
if sgd:
    theta = np.matrix(np.zeros(X.shape[1])).reshape(X.shape[1], 1)
    thetaBest, err = SGD(h, J, dJ, theta, X, Y, alpha=1, adaGrad=True, maxEpochs=2500, batchSize=100,
                         logError=True, validate=0.25, valStep=1, lamb=0)
xx, yy = np.meshgrid(np.arange(-1.5, 1.5, 0.02),
                     np.arange(-1.5, 1.5, 0.02))
l = len(xx.ravel())
C = powerme(xx.reshape(l, 1), yy.reshape(l, 1), n)
# Fix: np.sqrt(l) is a float and reshape requires integer dimensions
# (modern NumPy raises TypeError); reshape to the meshgrid's own shape.
z = classifyBi(thetaBest, C).reshape(xx.shape)
pyplot.contour(xx, yy, z, levels=[0.5], lw=3);
pyplot.ylim(-1, 1.2);
pyplot.xlim(-1, 1.2);
pyplot.legend();
pyplot.subplot(122)
pyplot.plot(err[0], err[1], lw=3, label="Training error")
pyplot.plot(err[2], err[3], lw=3, label="Validation error");
pyplot.legend()
pyplot.ylim(0.2, 0.8);
"""
Explanation: Jak wygrać konkursy 2
Bagging - Uzupełnienie
Ważenie podczas głosowania/uśredniania
W Bagging, losujemy $m$ przykładów z powtorzeniami.
Prawie 40% danych nie jest wykorzystywanych, ponieważ $\lim_{n \rightarrow \infty}\left(1-\frac{1}{n}\right)^n = e^{-1} \approx 0.368 $.
Możemy te dany wykorzystać jako zestaw walidacyjny i obliczyć na nim błąd $J_w(\theta)$.
Wtedy gdy mamy $N$ klasyfikatorów, dla $i$-tego klasyfikatora obliczamy $w_i$ (dlaczego $-J$?):
$$ w_i = \dfrac{\exp(-J_w(\theta_i))}{
\sum_{j=1}^N \exp(-J_w(\theta_j))} $$
Klasyfikacja przez ważone głosowanie (zbiór klas $C$, $y_i$ to odpowiedź $i$-tego klasyfikatora):
$$y = \mathop{\mathrm{argmax}}_{c \in C} \sum_{i=1}^N w_i I(c = y_i) $$
gdzie
$$I(A) = \left\{\begin{array}{cl}1 & \textrm{gdy zachodzi zdarzenie A}\\ 0 & \textrm{wpp.}\end{array}\right.$$
Klasyfikacja przez obliczenie ważonych średnich prawdopodobieństw (zbiór klas $C$, $y_i$ to odpowiedź $i$-tego klasyfikatora):
$$y = \mathop{\mathrm{argmax}}_{c \in C} \dfrac{w_i p_{c,i}}{\sum_{j=1}^{N} w_j p_{c,j}} $$
gdzie $p_{c,i}$ jest prawdopodobieństwem wyboru klasy $c$ przez $i$-ty klasyfikator.
Ważony bagging na MNIST
Do samodzielnego sprawdzenia w ramach zadań bonusowych na ćwiczeniach (20 pkt.).
Zjawisko nadmiernego dopasowania i regularyzacja
End of explanation
"""
def J(h, theta, X, y, lamb=0):
    """L2-regularised cross-entropy cost; the bias term theta[0] is not
    penalised (regularisation sums over theta[1:])."""
    m = len(y)
    p = h(theta, X, eps=10**-7)
    data_term = -np.sum(np.multiply(y, np.log(p)) +
                        np.multiply(1 - y, np.log(1 - p)), axis=0) / m
    reg_term = lamb / (2 * m) * np.sum(np.power(theta[1:], 2))
    return data_term + reg_term
def dJ(h, theta, X, y, lamb=0):
    """Gradient of the L2-regularised logistic cost.

    The data term is X^T (h - y) / m; the regularisation term lamb/m * theta
    is added to every coordinate except the bias theta[0].
    """
    m = float(y.shape[0])
    grad = (X.T * (h(theta, X) - y)) / m
    grad[1:] += (lamb / m) * theta[1:]
    return grad
# Same decision-boundary experiment as above, now with L2 regularisation
# strength lam passed through to the regularised J/dJ.
n = 6
lam = 0.01
data = np.matrix(np.loadtxt("ex2data2.txt", delimiter=","))
np.random.shuffle(data)
X = powerme(data[:, 0], data[:, 1], n)
Y = data[:, 2]
theta = np.matrix(np.zeros(X.shape[1])).reshape(X.shape[1], 1)
thetaBest, err = SGD(h, J, dJ, theta, X, Y, alpha=1, adaGrad=True, maxEpochs=2500, batchSize=100,
                     logError=True, validate=0.25, valStep=1, lamb=lam)
xx, yy = np.meshgrid(np.arange(-1.5, 1.5, 0.02),
                     np.arange(-1.5, 1.5, 0.02))
l = len(xx.ravel())
C = powerme(xx.reshape(l, 1), yy.reshape(l, 1), n)
# Fix: np.sqrt(l) is a float and reshape requires integer dimensions
# (modern NumPy raises TypeError); reshape to the meshgrid's own shape.
z = classifyBi(thetaBest, C).reshape(xx.shape)
pyplot.figure(figsize=(16, 8))
pyplot.subplot(121)
pyplot.scatter(X[:, 2].tolist(),
               X[:, 1].tolist(),
               c=Y.tolist(),
               s=100, cmap=pyplot.cm.get_cmap('prism'));
pyplot.contour(xx, yy, z, levels=[0.5], lw=3);
pyplot.ylim(-1, 1.2);
pyplot.xlim(-1, 1.2);
pyplot.legend();
pyplot.subplot(122)
pyplot.plot(err[0], err[1], lw=3, label="Training error")
pyplot.plot(err[2], err[3], lw=3, label="Validation error");
pyplot.legend()
pyplot.ylim(0.2, 0.8);
"""
Explanation: Regularyzacja
Metoda zapobiegania zjawiskom nadmiernego dopasowania (overfitting)
Kara za ekstremalne wartości parametrów $\theta$
Najbardziej popularne metody to $L_1$ i $L_2$ ($L_p = ||x||^p_p = \sum_{i=0}^n |x_i|^p$)
$L_1$ nie jest różniczkowalna, w metodach opartach na gradientach stosuje się raczej $L_2$.
Regularyzacja dla regresji logistycznej
Funkcja kosztu
$$\small
\begin{array}{rl}
J(\theta)=&-\dfrac{1}{m} [\sum_{i=1}^{m} y^{(i)} \log h_\theta(x^{(i)})+ (1-y^{(i)}) \log (1-h_\theta(x^{(i)}))]\\ &\color{red}{+ \dfrac{\lambda}{2m}\sum_{j=1}^{n}\theta^2_j}
\end{array}
$$
Gradient
$$\small
\begin{array}{llll}
\dfrac{\partial J(\theta)}{\partial \theta_0} &=& \dfrac{1}{m}\displaystyle\sum_{i=1}^m (h_{\theta}(x^{(i)})-y^{(i)})x^{(i)}_0 & \textrm{gdy $j = 0$ }\\
\dfrac{\partial J(\theta)}{\partial \theta_j} &=& \dfrac{1}{m}\displaystyle\sum_{i=1}^m (h_{\theta}(x^{(i)})-y^{(i)})x^{(i)}_j \color{red}{+ \dfrac{\lambda}{m}\theta_j} & \textrm{gdy $j \ge 1 $} \\
\end{array}
$$
Implementacja
End of explanation
"""
%matplotlib inline
import numpy as np
from matplotlib import pyplot
def h(theta, X):
    """Linear model prediction: the matrix-vector product X·theta."""
    prediction = X.dot(theta)
    return prediction
def norm(X, Y):
    """Least-squares solution via the normal equations: (X^T X)^+ X^T Y
    (pseudo-inverse, so rank-deficient X is tolerated)."""
    gram = X.T.dot(X)
    return np.linalg.pinv(gram).dot(X.T).dot(Y)
def rho(a, b):
    """Absolute (1-D Euclidean) distance between a and b, elementwise."""
    diff = a - b
    return np.sqrt(diff ** 2)
def kneighbors(k, t, x, y):
    """Return the target values of the k training points in x closest to t
    (distance given by rho)."""
    distances = rho(t, x)
    nearest = np.argsort(distances)[:k]
    return y[nearest]
def Gauss(x, t, l=.01):
    """Gaussian kernel weight of training point(s) x for query t, with
    bandwidth l: N(0,1) density evaluated at rho(x, t)/l."""
    u = rho(x, t) / l
    return 1. / np.sqrt(2 * np.pi) * np.exp(-0.5 * u ** 2)
def Kernel(K, t, x, y, l=.01):
    """Nadaraya-Watson kernel regression estimate at each query point in t.

    K is a kernel function K(x, t1, l) giving weights over the training
    inputs x; the prediction at t1 is the weight-normalised average of y.
    NOTE(review): the numerator sums over all axes while the denominator
    sums over axis=1 -- presumably x is a single-row matrix here; confirm
    against the caller before reusing this with other shapes.
    """
    return np.array([np.sum(K(x,t1,l)*y)/np.sum(K(x,t1,l), axis=1) for t1 in t])
def J(Yp, Y):
    """Root of the half-mean-squared error between predictions Yp and
    targets Y: sqrt( (Yp-Y)·(Yp-Y) / (2m) ).

    Fix: the original divided by len(y) -- a module-level global -- instead
    of len(Y), silently using the wrong sample count whenever the caller's
    arrays differed from the global training data.
    """
    m = len(Y)
    residual = Yp - Y
    return np.sqrt(1.0 / (2 * m) * residual.dot(residual))
def true(x):
    """Ground-truth target curve for the synthetic data: 2x * sin(4x)."""
    return 2 * x * np.sin(4 * x)
# Synthetic 1-D regression data: m noisy samples of 2x*sin(4x) on [0, 5]
m = 300
x = np.linspace(0, 5, m)
ytrue = true(x)
y = ytrue + 3*np.random.randn(m)  # additive Gaussian noise, sigma = 3
t = np.linspace(0, 5, m)          # evaluation grid for predictions
ttrue = true(t)
"""
Explanation: Różne twarze (nie)dopasowania
<img style="margin:auto" width="90%" src="fit.png"/>
Bias and Variance
Bias (błąd systematyczny):
Błąd wynikający z błędnych założeń co do algorytmu uczącego się.
Duży błąd systematyczny powoduje brak dopasowania.
Variance (wariancja):
Błąd wynikający z nadwrażliwości na małe fluktuacje w zestawie uczącym.
Wysoka wariancja może spowodować nadmierne dopasowanie (modelująć szum a nie sygnał).
<img style="margin:auto" width="60%" src="bias2.png"/>
<img style="margin:auto" width="60%" src="curves.jpg"/>
Poznaliśmy już inne sposoby (niż regularyzacja) na błędy bias i variance?
Jaki jest efekt zwiększenia liczby cech?
Wczesne przerwanie trenowania na podstawie obserwacji blędu na danych walidujących (early stopping) chroni przed "bias" czy "variance"?
Metody walidacji, szczególnie walidacji krzyżowej chronią przed "bias" czy "variance"?
Metody ensemble chronią przed "bias" czy "variance"?
3. Urozmaicenie metod
W metodach Ensemble stwierdziliśmy, że różnorodność pomaga (bootstrapping)
Warto zatem sprawdzić kombinacje zupełnie różnych metod (mówiliśmy głównie o metodach parametrycznych)
Poznaliśmy już naiwne klasyfikatory Bayesa (inna metoda nieparametryczna)
Jako kolejną metodę nieparametryczną, warto omówić metodę $k$-najbliższych sąsiadów
Metoda $k$-najbliższych sąsiadów
<img style="margin-left: auto; margin-right: auto" width="50%" src="https://upload.wikimedia.org/wikipedia/commons/e/e7/KnnClassification.svg"/>
* ang. $k$-nearest-neighbours ($k$-NN) algorithm/method
Zarys metody
Zestaw uczący przechowujemy w całości.
Dla przykładu testowego liczymy odległośc do wszystkich przykładów w zbiorze uczącym.
Wybieramy $k$ elementów o najmniejszej odległości ($k$ najbliższych sąsiadów!).
Akumulujemy wyniki ze zbioru $k$ sąsiadów.
Uwaga: Metoda kNN jest bardzo wrażliwa na różnice wielkości cech. Trzeba normalizować.
Dyskusja:
* Jest to metoda klasyfikacji czy regresji?
* Jakie są oczywiste wady i zalety (MNIST)?
Dobór $k$
Czynnik wygładzający, im większe, tym więcej sasiadów bierze udział w głosowaniu/średniej.
Dyskusja:
* Jakie wartości może przyjąć $k$?
* Jakie wartości wydają się sensowne?
* W jaki sposób dobieramy $k$?
End of explanation
"""
n = 11  # stopień wielomianu (degree of the fitted polynomial)
show1 = True
# Vandermonde-style design matrices for the training (x) and evaluation (t) grids
xtuple = [x**i for i in range(n+1)]
xreg = np.vstack(xtuple).T
theta = norm(xreg,y)  # closed-form least-squares fit
ttuple = [t**i for i in range(n+1)]
treg = np.vstack(ttuple).T
pyplot.figure(figsize=(16,10))
if show1:
    pyplot.plot(x, ytrue, label="originał", lw=3)
pyplot.scatter(x,y, s=40)
if n > 0:
    predict = h(theta, treg)
    pyplot.plot(t, predict, label="reg. lin. n=" + str(n), lw=3, color="red")
    print("RMSE: ", J(predict,ttrue))
pyplot.xlim(0,5)
pyplot.legend();
"""
Explanation: Regresja liniowa (wielomianowa)
End of explanation
"""
k = 16
show2 = True
pyplot.figure(figsize=(16,10))
if show2:
    pyplot.plot(x, ytrue, label="originał", lw=3)
pyplot.scatter(x,y, s=40)
if k > 0:
    # kNN regression: predict each grid point as the mean of its k neighbours
    predict = np.array([np.mean(kneighbors(k, i, x, y)) for i in t])
    #predict = Kernel(D1,t,x,y,0.1)
    pyplot.plot(t, predict, label="k="+str(k), lw=3, color="red")
    print("RMSE: ", J(predict,ttrue))
pyplot.xlim(0,5)
pyplot.legend();
"""
Explanation: Regresja za pomocą k-NN
End of explanation
"""
# kNN classification demo on ex2data2: degree-1 polynomial features
k = 16
data = np.matrix(np.loadtxt("ex2data2.txt",delimiter=","))
#np.random.shuffle(data)
n = 1
X = powerme(data[:,0], data[:,1],n)
Y = data[:,2]
def rho(a, b):
    """Row-wise Euclidean distance between the rows of a and b (with NumPy
    broadcasting when b is a single row)."""
    squared = np.power(a - b, 2)
    return np.sqrt(np.sum(squared, axis=1))
def kneighbors(k, t, x, y):
    """Class labels (as ints) of the k rows of x closest to the query row t,
    using the row-wise Euclidean distance rho."""
    d = rho(t, x)
    # d is an np.matrix: flatten before argsort, then take the k smallest
    nn = np.argsort(d.ravel()).ravel()[0,:k]
    return np.array(y[nn]).reshape(k,).astype(int)
# Classify every point of a dense grid by kNN majority vote and draw the
# resulting decision boundary over the training scatter.
xx, yy = np.meshgrid(np.arange(-1.5, 1.5, 0.02),
                     np.arange(-1.5, 1.5, 0.02))
l = len(xx.ravel())
C = powerme(xx.reshape(l, 1), yy.reshape(l, 1), n)
Z = [np.argmax(np.bincount(kneighbors(k, c, X, Y))) for c in C]
pyplot.figure(figsize=(10, 10))
pyplot.scatter(X[:, 2].tolist(),
               X[:, 1].tolist(),
               c=Y.tolist(),
               s=100, cmap=pyplot.cm.get_cmap('prism'));
# Fix: np.sqrt(l) is a float and reshape requires integer dimensions
# (modern NumPy raises TypeError); reshape to the meshgrid's own shape.
pyplot.contour(xx, yy, np.array(Z).reshape(xx.shape), levels=[0.5], lw=3);
pyplot.ylim(-1, 1.2);
pyplot.xlim(-1, 1.2);
"""
Explanation: Regresja: Ceny mieszkań w Poznaniu
Do samodzielnego sprawdzenia w ramach zadań bonusowych na ćwiczeniach (20 pkt.)
Klasyfikacja kNN
End of explanation
"""
# Plot the standard normal density used as the kernel shape
xg = np.linspace(-3,3,300)
yg = 1/np.sqrt(2*np.pi)*np.exp(-1./2.*xg**2)
pyplot.figure(figsize=(12,6))
pyplot.plot(xg,yg,lw=3);
# Compare kNN regression (green) with kernel regression (red)
n = 6
k = 16
l = 0.1
show3 = False
pyplot.figure(figsize=(16,10))
if show3:
    pyplot.plot(x, ytrue, label="originał", lw=3)
pyplot.scatter(x,y, s=40)
if k > 0:
    predict1 = np.array([np.mean(kneighbors(k, i, x, y)) for i in t])
    pyplot.plot(t, predict1, label="k="+str(k), lw=3, color="green")
if l > 0:
    # NOTE(review): D1 is not defined anywhere in this notebook chunk --
    # presumably a kernel function (cf. Gauss above) defined in a cell not
    # shown; as written this line raises NameError. Verify before running.
    predict2 = Kernel(D1,t,x,y,l)
    pyplot.plot(t, predict2, label="lambda=" + str(l), lw=3, color="red")
    print("RMSE: ", J(predict2,ttrue))
pyplot.xlim(0,5)
pyplot.legend();
"""
Explanation: Klasyfikacja: MNIST
Do samodzielnego sprawdzenia w ramach zadań bonusowych na ćwiczeniach (20 pkt.)
Miary odległości
Odległość euklidesowa: $$\rho_1(x,x^{\prime}) = \sqrt{\sum_{i=1}^n \left(x_i - x_i^{\prime}\right)^2}$$
Ważona odległość euklidesowa (skąd wziąć wagi?): $$\rho_2(x,x^{\prime}) = \sqrt{\sum_{i=1}^n \dfrac{1}{w_i^2}\left(x_i - x_i^{\prime}\right)^2}$$
Wiele innych ...
Dyskusja
* Co robimy w przypadku cech nienumerycznych?
Uczenie metryk
Wikipedia: Large margin nearest neighbor
Trenujemy macierz $M$ (gdy $M=I$, to $\rho$ odległość euklidesowa):
$$ \rho(x, x^{\prime}) = \left((x - x^{\prime})^{T} M (x - x^{\prime})\right)^{\frac{1}{2}} $$
<img style="margin-left:auto; margin-right:auto" width="70%" src="https://upload.wikimedia.org/wikipedia/commons/thumb/a/aa/Lmnn.png/1920px-Lmnn.png"/>
Ważenie sasiądów według odległości
<img style="margin-left: auto; margin-right: auto" width="50%" src="https://upload.wikimedia.org/wikipedia/commons/e/e7/KnnClassification.svg"/>
Rozpatrzmy większy okrąg (kreskowany). Czy faktycznie powinniśmy wybrać klasę niebieską?
Dla zbioru $K(x) = { (x^{(1)},y^{(1)}), \dots, (x^{(k)},y^{(k)}) }$ zawierającym $k$ najbliższych sąsiadów $x$ obliczamy:
Wagi (widzieliśmy to już gdzieś?):
$$ W(x, x^{(i)}) = \dfrac{\exp(-\rho(x,x^{(i)}))}{
\sum_{j=1}^k \exp(-\rho(x,x^{(j)}))} $$
Ważona regresja:
$$y = \sum_{i=1}^k W(x, x^{(i)}) y^{(i)}$$
Ważona klasyfikacja (zbiór klas $C$):
$$y = \mathop{\mathrm{argmax}}_{c \in C} \sum_{i=1}^k W(x, x^{(i)}) I(c = y^{(i)}) $$
Metody kernelowe
Ważymy wszystkie przykłady
$$ y = \dfrac{\sum_{i=1}^m K(x^{(i)},x)y^{(i)}}{\sum_{i=1}^mK(x^{(i)},x)} $$
$$ K_\lambda(x,y) = D\left(\dfrac{\rho(x,y)}{\lambda}\right)$$
Przykładowy kernel $D$ (jaka to funkcja?):
$$ D(t) = \frac{1}{\sqrt{2\pi}}e^{-\frac{1}{2}t^2} $$
Kernel Gaussowski
End of explanation
"""
|
bjsmith/motivation-simulation | test-jupyter-widgets-clone3.ipynb | gpl-3.0 | from matplotlib.pyplot import figure, plot, xlabel, ylabel, title, show
from IPython.display import display
text = widgets.FloatText()
floatText = widgets.FloatText(description='MyField',min=-5,max=5)
floatSlider = widgets.FloatSlider(description='MyField',min=-5,max=5)
#https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Basics.html
float_link = widgets.jslink((floatText, 'value'), (floatSlider, 'value'))
"""
Explanation: Basic plot example
End of explanation
"""
floatSlider.value=1
txtArea = widgets.Text()
display(txtArea)
myb= widgets.Button(description="234")
def add_text(b):
    # NOTE(review): this doubles the text box's current content rather than
    # appending the button's caption -- looks intentional for the demo, but
    # verify against the original notebook.
    txtArea.value = txtArea.value + txtArea.value
myb.on_click(add_text)
display(myb)
"""
Explanation: Here we will set the fields to one of several values so that we can see pre-configured examples.
End of explanation
"""
|
biof-309-python/BIOF309-2016-Fall | Week_03/Week03 - 02 - Week 2 Homework Review.ipynb | mit | # This sequence is the first 100 nucleotides of the Influenza H1N1 Virus segment 8
flu_ns1_seq = 'GTGACAAAGACATAATGGATCCAAACACTGTGTCAAGCTTTCAGGTAGATTGCTTTCTTTGGCATGTCCGCAAACGAGTTGCAGACCAAGAACTAGGTGA'
"""
Explanation: Week 2 Homework - Review
We have seen this week how to print and manipulate text string in python. Lets use the skills we have learned to write a program to calculate the GC percentage of a DNA sequence. Recall that the GC percentage of a DNA sequence can be a sign that we are looking at a gene.
Pseudocode
Pseudocode is the term used to describe a draft outline of a program written in plain English (or whatever language you write it in :-) ). We use pseudocode to discuss the functionality of the program as well as key elements in the program. Starting a program by using pseudocode can help to get your logic down quickly without having to be concerned with hte exact details or syntax of the programming language.
Write a python program to calculate a GC percentage
End of explanation
"""
# Compute the GC percentage of flu_ns1_seq (defined in the cell above).
from __future__ import division
# Normalise case so .count() matches regardless of input casing
flu_ns1_seq_upper = flu_ns1_seq.upper()
# Count the number of "C"s in the above sequence
c_count = flu_ns1_seq_upper.count('C')
# Count the number of "G"s in the above sequence
g_count = flu_ns1_seq_upper.count('G')
# Add "C" and "G" counts together
g_c_count = c_count + g_count
# Count the total number of nucleotides in the sequence
sequence_length = len(flu_ns1_seq_upper)
# Fix: express the G+C share as a percentage (x100), matching both the
# comment below and the GC_calculator.py version of this cell.
gc_percentage = g_c_count / sequence_length * 100
# Print the percentage
print(gc_percentage)
"""
Explanation: Pseudocode:
- Count the number of "C"s in the above sequence
- Count the number of "G"s in the above sequence
- Add "C" and "G" counts together
- Count the total number of nucleotides in the sequence
- Divide teh total number of "C" and "G" nucleotides by the total number of nucleotides
- Print the percentage
NOTE: Please get into teh good habit of commenting your code and describing what you are going to do or are doing. There must be at least one comment in your code.
End of explanation
"""
%%writefile GC_calculator.py
# Standalone copy of the GC-percentage calculation, written to disk by the
# %%writefile magic and executed below with the ! shell escape.
from __future__ import division
flu_ns1_seq = 'GTGACAAAGACATAATGGATCCAAACACTGTGTCAAGCTTTCAGGTAGATTGCTTTCTTTGGCATGTCCGCAAACGAGTTGCAGACCAAGAACTAGGTGA'
# Normalise case so .count() matches regardless of input casing
flu_ns1_seq_upper = flu_ns1_seq.upper()
# Count the number of "C"s in the above sequence
c_count = flu_ns1_seq_upper.count('C')
# Count the number of "G"s in the above sequence
g_count = flu_ns1_seq_upper.count('G')
# Add "C" and "G" counts together
g_c_count = c_count + g_count
# Count the total number of nucleotides in the sequence
sequence_length = len(flu_ns1_seq_upper)
# Divide the total number of "C" and "G" nucleotides by the total number of nucleotides
gc_percentage = g_c_count / sequence_length * 100
# Print the percentage
print(gc_percentage)
!python GC_calculator.py
"""
Explanation: If you would like to create a file with your source doe paste it in the cell below and run. Please remember to add your name to the file.
End of explanation
"""
|
alephcero/adsProject | 1_Model_by_Individual.ipynb | gpl-3.0 | # helper functions
import getEPH
import categorize
import schoolYears
import make_dummy
import functionsForModels
# libraries
import pandas as pd
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
np.random.seed(1024)
%matplotlib inline
"""
Explanation: New York University
Applied Data Science 2016 Final Project
Measuring household income under Redatam in CensusData
1. Model by Individual
Project Description: Lorem ipsum
Members:
- Felipe Gonzales
- Ilan Reinstein
- Fernando Melchor
- Nicolas Metallo
Sources:
- http://dlab-geo.github.io/geocoding-geopy/slides/index.html#2
- https://gist.github.com/rgdonohue/c4beedd3ca47d29aef01
- http://darribas.org/gds_scipy16/ipynb_md/07_spatial_clustering.html
- https://glenbambrick.com/2016/01/09/csv-to-shapefile-with-pyshp/
- http://statsmodels.sourceforge.net/devel/examples/generated/example_wls.html
LIBRARIES
End of explanation
"""
# get data using 'getEPHdbf' function for wave 't310', then chain the
# project's feature-engineering helpers (see the imported modules above)
getEPH.getEPHdbf('t310')
data1 = pd.read_csv('data/cleanDatat310.csv')
data2 = categorize.categorize(data1)
data3 = schoolYears.schoolYears(data2)
data = make_dummy.make_dummy(data3)
dataModel = functionsForModels.prepareDataForModel(data)
dataModel.head()
"""
Explanation: DATA HANDLING
End of explanation
"""
fig = plt.figure(figsize=(16,12))
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
ax1.plot(dataModel.education,dataModel.P47T,'ro')
ax1.set_ylabel('Ingreso total')
ax1.set_xlabel('Educacion')
ax2.plot(dataModel.age,dataModel.P47T,'ro')
ax2.set_xlabel('Edad')
ax3.plot(dataModel.education,dataModel.P21,'bo')
ax3.set_ylabel('Ingreso Laboral')
ax3.set_xlabel('Educacion')
ax4.plot(dataModel.age,dataModel.P21,'bo')
ax4.set_xlabel('Edad')
"""
Explanation: DATA EXPLORATION
Plot for: Education ~ Age
End of explanation
"""
# Kernel-density panels for the raw incomes (left column) and their logs
# (right column); log incomes look much closer to normal
fig = plt.figure(figsize=(16,12))
ax1 = fig.add_subplot(2,2,1)
ax2 = fig.add_subplot(2,2,2)
ax3 = fig.add_subplot(2,2,3)
ax4 = fig.add_subplot(2,2,4)
sns.kdeplot(dataModel.P47T,ax=ax1,color = 'red')
sns.kdeplot(dataModel.lnIncomeT,ax=ax2,color = 'red')
sns.kdeplot(dataModel.P21,ax=ax3)
sns.kdeplot(dataModel.lnIncome,ax=ax4)
# NOTE: Python 2 print statements -- this notebook targets Python 2
print 'mean:', dataModel.lnIncome.mean(), 'std:', dataModel.lnIncome.std()
print 'mean:', dataModel.P21.mean(), 'std:', dataModel.P21.std()
plt.boxplot(list(dataModel.P21), 0, 'gD')
"""
Explanation: Reference:
P21: Refers to individual income by main activity (occupation)
P47T: Refers to total individual income (includes capital gains)
lnIncomeT: Refers to ln of P21
lnIncome: Refers to ln of P47T
Plot for: LnIncome
End of explanation
"""
# Joint distribution of lnIncome against education and age, with a
# quadratic (order=2) regression fit and marginal densities
g = sns.JointGrid(x="education", y="lnIncome", data=dataModel)
g.plot_joint(sns.regplot, order=2)
g.plot_marginals(sns.distplot)
g2 = sns.JointGrid(x="age", y="lnIncome", data=dataModel)
g2.plot_joint(sns.regplot, order=2)
g2.plot_marginals(sns.distplot)
"""
Explanation: Plot for: LnIncome ~ Educ and Age
End of explanation
"""
# Model 1 (ECLAC spec): labour income P21 on the default regressor set
dataModel1 = functionsForModels.runModel(dataModel, income = 'P21')
"""
Explanation: REGRESSION MODEL (ECLAC)
Background:
The ECLAC (Economic Comission for Latin America and the Caribbean) estimates income by using a regression model based on the following variables (education, gender and age):
- x1: primary
- x2: secondary
- x3: university
- x4: male_14to24
- x5: male_25to34
- x6: female_14to24
- x7: female_25to34
- x8: female_35more
MODEL # 1 - ECLAC
End of explanation
"""
# Model 2 (ECLAC spec): log labour income on education level and
# gender-by-age-bracket dummies
dataModel2 = functionsForModels.runModel(dataModel, income = 'lnIncome', variables= [
                                         'primary','secondary','university',
                                         'male_14to24','male_25to34',
                                         'female_14to24', 'female_25to34', 'female_35more'])
"""
Explanation: MODEL # 2 - ECLAC (Using Log of Individual Income)
End of explanation
"""
# Model 3 (ECLAC spec): total individual income P47T, default regressors
dataModel3 = functionsForModels.runModel(dataModel, income = 'P47T')
"""
Explanation: MODEL # 3 - ECLAC (Using Total Individual Income)
End of explanation
"""
# Model 4 (ECLAC spec): log of total individual income, default regressors
dataModel4 = functionsForModels.runModel(dataModel, income = 'lnIncomeT')
"""
Explanation: MODEL # 4 - ECLAC (Using Log of Total Individual Income)
End of explanation
"""
# Alternative model 1: quadratic terms in education and age capture the
# non-linear income profile; log of total individual income
dataModel5 = functionsForModels.runModel(dataModel, income = 'lnIncomeT', variables=['education','education2',
                                         'age','age2','female'])
"""
Explanation: REGRESSION MODEL (ALTERNATIVE)
Background:
We tested an alternative model similar to the ECLAC Regression Model using a second polynomial to account for the non-linear relationship between age, education and income.
MODEL # 1 - ALTERNATIVE (Using Log of Total Individual Income)
End of explanation
"""
# Alternative model 2: same quadratic specification on log labour income
dataModel6 = functionsForModels.runModel(dataModel, income = 'lnIncome', variables=['education','education2',
                                         'age','age2','female'])
"""
Explanation: MODEL # 1 - ALTERNATIVE (Using Log of Individual Income)
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cmcc/cmip6/models/cmcc-esm2-hr5/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-esm2-hr5', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: CMCC
Source ID: CMCC-ESM2-HR5
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:50
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmopshere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# NOTE(review): this id contains an embedded space ('number_of_soil layers').
# It looks like a generator artifact, but since the marker says DO NOT EDIT it
# presumably must match the upstream CMIP6 controlled vocabulary byte-for-byte —
# confirm against the ES-DOC specialization before "fixing" it.
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Do the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies on snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe, in general terms, how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
soracom/handson | cloud/gcp/src/datalab/sensor_data_analysis.ipynb | apache-2.0 | %%bq query -n requests
SELECT datetime, cpu_temperature, temperature
FROM `soracom_handson.raspi_env`
order by datetime asc
import google.datalab.bigquery as bq
import pandas as pd
df_from_bq = requests.execute(output_options=bq.QueryOutput.dataframe()).result()
# データの確認
df_from_bq
# BigQuery returned every column as strings, so convert to proper dtypes.
df_from_bq['datetime'] = pd.to_datetime(df_from_bq['datetime'])
df_from_bq['cpu_temperature'] = df_from_bq['cpu_temperature'].astype('float')
df_from_bq['temperature'] = df_from_bq['temperature'].astype('float')
# Turn the frame into a time series: keep the two temperature columns and
# index the rows by their measurement timestamp.
df = df_from_bq[['cpu_temperature', 'temperature']]
df.index = df_from_bq['datetime'].values
# Inspect the converted data (last expression is rendered by the notebook).
df
"""
Explanation: 全体の流れ
分析の前準備
BigQueryから収集したデータの抽出
データを扱いやすく整形
統計情報を確認
データの可視化
データの特徴
外気温は比較的一定
CPU温度はよく変化する
分析の実施
CPU温度が外気温を越えて熱くならないようにコントロールしたい。そのためにどうデータを扱うかのサンプルを確認する。ここで作ったモデルを実際にプロダクション環境に組み込めることを想定して作りましょう。
分析の前準備
BigQueryからのデータ抽出とデータ整形
End of explanation
"""
df.describe()
"""
Explanation: 統計情報の確認
End of explanation
"""
df.plot(y=['cpu_temperature', 'temperature'], figsize=(16,4), alpha=0.5)
"""
Explanation: 時系列データとして可視化する
End of explanation
"""
df.plot(kind='scatter', x='cpu_temperature', y='temperature', c='cpu_temperature', cmap='winter')
"""
Explanation: 散布図として可視化する
End of explanation
"""
# 散布図から定性的に確認できた相関をさらに定量的に確認する
import numpy as np
np.corrcoef(df['cpu_temperature'], df['temperature'])
"""
Explanation: 相関係数を確認する
End of explanation
"""
df.tail(1)['cpu_temperature'] > df.tail(1)['temperature']
"""
Explanation: 分析の実施
Step1:最新のCPU温度と外気温を比較する
時間順にソートされた最新のデータを使って、CPU温度と外気温を比較する方法
End of explanation
"""
# Declare the CPU "hot" when, out of the last N samples, more than
# `threashold` show CPU temperature above the ambient temperature.
N = 10
# NOTE(review): variable name is misspelled -- "threshold" intended.
threashold = 8
df_N = df.tail(N)
# Count how many of the last N samples have CPU hotter than ambient,
# then compare that count against the threshold (notebook displays the bool).
sum(df_N['cpu_temperature'] > df_N['temperature']) > threashold
"""
Explanation: この方法の問題点
温度の大小関係が少しでも変わるとすぐに判定が変わってしまう。もう少しなだらかに判定するロジックとしたい
Step2:最新からN個のデータを使ってCPU温度と外気温を比較する
時間順にソートされたN個のデータを使って、CPU温度が外気温より大きくなっている場合の数をカウントする方法
End of explanation
"""
# Moving average over the time series.
# NOTE(review): the original comment said "10-period moving average" but
# window=20 is used here -- confirm which window was intended.
df.rolling(window=20, center=False).mean()
# Use the most recent value of the 10-period moving average as the
# comparison quantity.
moving_average = df.rolling(window=10, center=False).mean().tail(1)
# True when the smoothed CPU temperature exceeds the smoothed ambient one.
moving_average['cpu_temperature'] > moving_average['temperature']
"""
Explanation: この方法の問題点
Step1よりは確実に良いロジックとなっているが、引き続き、閾値の設定がややシビアに思える。もう少しなだからな判定ロジックとしたい。
Step3:移動平均使って比較する
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/568aae18ec92d284aff29cfb5f3c11e7/resolution_metrics.ipynb | bsd-3-clause | # Author: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
#
# License: BSD-3-Clause
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_resolution_matrix
from mne.minimum_norm import resolution_metrics
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects'
meg_path = data_path / 'MEG' / 'sample'
fname_fwd = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = meg_path / 'sample_audvis-cov.fif'
fname_evo = meg_path / 'sample_audvis-ave.fif'
# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution
# free source orientation
inverse_operator = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
"""
Explanation: Compute spatial resolution metrics in source space
Compute peak localisation error and spatial deviation for the point-spread
functions of dSPM and MNE. Plot their distributions and difference of
distributions. This example mimics some results from :footcite:HaukEtAl2019,
namely Figure 3 (peak localisation error for PSFs, L2-MNE vs dSPM) and Figure 4
(spatial deviation for PSFs, L2-MNE vs dSPM).
End of explanation
"""
# Resolution matrix for the MNE estimator at the chosen regularisation.
rm_mne = make_inverse_resolution_matrix(forward, inverse_operator,
                                        method='MNE', lambda2=lambda2)
# Peak localisation error (PLE) of the point-spread functions.
ple_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'],
                                 function='psf', metric='peak_err')
# Spatial deviation (extent) of the point-spread functions.
sd_mne_psf = resolution_metrics(rm_mne, inverse_operator['src'],
                                function='psf', metric='sd_ext')
# The resolution matrix is large; release it once the metrics are computed.
del rm_mne
"""
Explanation: MNE
Compute resolution matrices, peak localisation error (PLE) for point spread
functions (PSFs), spatial deviation (SD) for PSFs:
End of explanation
"""
rm_dspm = make_inverse_resolution_matrix(forward, inverse_operator,
method='dSPM', lambda2=lambda2)
ple_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'],
function='psf', metric='peak_err')
sd_dspm_psf = resolution_metrics(rm_dspm, inverse_operator['src'],
function='psf', metric='sd_ext')
del rm_dspm, forward
"""
Explanation: dSPM
Do the same for dSPM:
End of explanation
"""
brain_ple_mne = ple_mne_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=1,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_mne.add_text(0.1, 0.9, 'PLE MNE', 'title', font_size=16)
"""
Explanation: Visualize results
Visualise peak localisation error (PLE) across the whole cortex for MNE PSF:
End of explanation
"""
brain_ple_dspm = ple_dspm_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=2,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_dspm.add_text(0.1, 0.9, 'PLE dSPM', 'title', font_size=16)
"""
Explanation: And dSPM:
End of explanation
"""
diff_ple = ple_mne_psf - ple_dspm_psf
brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=3,
clim=dict(kind='value', pos_lims=(0., 1., 2.)))
brain_ple_diff.add_text(0.1, 0.9, 'PLE MNE-dSPM', 'title', font_size=16)
"""
Explanation: Subtract the two distributions and plot this difference
End of explanation
"""
brain_sd_mne = sd_mne_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=4,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_mne.add_text(0.1, 0.9, 'SD MNE', 'title', font_size=16)
"""
Explanation: These plots show that dSPM has generally lower peak localization error (red
color) than MNE in deeper brain areas, but higher error (blue color) in more
superficial areas.
Next we'll visualise spatial deviation (SD) across the whole cortex for MNE
PSF:
End of explanation
"""
brain_sd_dspm = sd_dspm_psf.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=5,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_dspm.add_text(0.1, 0.9, 'SD dSPM', 'title', font_size=16)
"""
Explanation: And dSPM:
End of explanation
"""
diff_sd = sd_mne_psf - sd_dspm_psf
brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=6,
clim=dict(kind='value', pos_lims=(0., 1., 2.)))
brain_sd_diff.add_text(0.1, 0.9, 'SD MNE-dSPM', 'title', font_size=16)
"""
Explanation: Subtract the two distributions and plot this difference:
End of explanation
"""
|
samueljrowell/UVM-ME249-CFD | ME249-Lecture-3.ipynb | gpl-2.0 | %matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # not sure what this does, may be default images to svg format
from IPython.display import Image
from IPython.core.display import HTML
def header(text):
    """Return *text* rendered as a raw <h4> HTML heading string."""
    fragments = ('<h4>', str(text), '</h4>')
    return ''.join(fragments)
def box(text):
    """Wrap *text* in a dotted-border <div> and return it as a displayable HTML object."""
    markup = '<div style="border:1px dotted black;padding:2em;">' + str(text) + '</div>'
    return HTML(markup)
def nobox(text):
    """Wrap *text* in a plain <p> paragraph and return it as a displayable HTML object."""
    markup = '<p>' + str(text) + '</p>'
    return HTML(markup)
def addContent(raw_html):
    # Append an HTML fragment to the module-level htmlContent accumulator.
    # NOTE(review): htmlContent must be initialised elsewhere (not visible in
    # this chunk) before the first call, or this raises NameError.
    global htmlContent
    htmlContent += raw_html
class PDF(object):
    """Display a PDF inline: an <iframe> in the notebook, \\includegraphics in LaTeX export.

    Parameters
    ----------
    pdf : str
        Path or URL of the PDF document.
    size : tuple of int, optional
        (width, height) of the notebook iframe in pixels.
    """

    def __init__(self, pdf, size=(200, 200)):
        self.pdf = pdf
        self.size = size

    def _repr_html_(self):
        # Rich-display hook used by the notebook front-end.
        width, height = self.size
        return '<iframe src={0} width={1} height={2}></iframe>'.format(
            self.pdf, width, height)

    def _repr_latex_(self):
        # Rich-display hook used when exporting the notebook to LaTeX.
        return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
class ListTable(list):
    """A list of rows ([[1, 2, 3], [4, 5, 6]]) that renders itself as an
    HTML <table> in the IPython notebook."""

    def _repr_html_(self):
        # Rich-display hook: one <tr> per row, one <td> per cell.
        rendered_rows = []
        for row in self:
            cells = ''.join('<td>{0}</td>'.format(col) for col in row)
            rendered_rows.append('<tr>' + cells + '</tr>')
        return '<table>' + ''.join(rendered_rows) + '</table>'
# Shared matplotlib text properties, passed as fontdict to the axis-label
# calls throughout this notebook.
font = {'family' : 'serif',
        'color' : 'black',
        'weight' : 'normal',
        'size' : 18,
        }
"""
Explanation: Lecture 3: Accuracy in Fourier's Space
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
# Domain of length 2*pi sampled with Nx uniformly spaced points.
Lx = 2.*np.pi
Nx = 256
# u, du, ddu: signal and its first/second derivatives.
# NOTE(review): du and ddu are allocated but never summed in this cell.
u = np.zeros(Nx,dtype='float64')
du = np.zeros(Nx,dtype='float64')
ddu = np.zeros(Nx,dtype='float64')
# Fundamental wavenumber of the periodic domain.
k_0 = 2.*np.pi/Lx
# Grid runs from dx to Lx (periodic point x=0 is represented by x=Lx).
x = np.linspace(Lx/Nx,Lx,Nx)
# Build the signal as a sum of Nwave cosine modes with random
# amplitudes and phases; keep each mode (and its exact first and
# second derivatives) in a separate column for later comparisons.
Nwave = 32
uwave = np.zeros((Nx,Nwave),dtype='float64')
duwave = np.zeros((Nx,Nwave),dtype='float64')
dduwave = np.zeros((Nx,Nwave),dtype='float64')
#ampwave = np.array([0., 1.0, 2.0, 3.0])
ampwave = np.random.random(Nwave)
#print(ampwave)
#phasewave = np.array([0.0, 0.0, np.pi/2, np.pi/2])
phasewave = np.random.random(Nwave)*2*np.pi
#print(phasewave)
for iwave in range(Nwave):
    uwave[:,iwave] = ampwave[iwave]*np.cos(k_0*iwave*x+phasewave[iwave])
    duwave[:,iwave] = -k_0*iwave*ampwave[iwave]*np.sin(k_0*iwave*x+phasewave[iwave])
    dduwave[:,iwave] = -(k_0*iwave)**2*ampwave[iwave]*np.cos(k_0*iwave*x+phasewave[iwave])
# The full signal is the sum of all modes.
u = np.sum(uwave,axis=1)
#print(u)
plt.plot(x,u,lw=2)
plt.xlim(0,Lx)
plt.legend(loc=3, bbox_to_anchor=[0, 1],
           ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$u$', fontdict = font)
# NOTE(review): plt.show() is called twice; the second call is redundant.
plt.show()
plt.show()
#check FT^-1(FT(u))
u_hat = np.fft.fft(u)
v = np.real(np.fft.ifft(u_hat))
plt.plot(x,u,'r-',lw=2,label='u')
plt.plot(x,v,'b--',lw=2,label='after ifft(fft(u))')
plt.xlim(0,Lx)
plt.legend(loc=3, bbox_to_anchor=[0, 1],
ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$u$', fontdict = font)
plt.show()
print('error',np.linalg.norm(u-v,np.inf))
"""
Explanation: <h1>Discrete Fourier Series</h1>
Consider a function $f$ periodic over a domain $0\leq x\leq 2\pi$, discretized by $N_x$ points. The longest wavelength wave that can be contained in the domain is $L_x$. A phyiscal understanding of Fourier series is the representation of a system as the sum of many waves fo wavelengths smaller or equal to $L_x$. In a discrete sense, the series of wave used to decompose the system is defined as:
$$
a_n\exp\left(\hat{\jmath}\frac{2\pi n}{Lx}\right)
$$
such that
<p class='alert alert-danger'>
$$
f(x) = \sum_{n=-\infty}^{\infty}a_n\exp\left(\hat{\jmath}\frac{2\pi nx}{Lx}\right)
$$
</p>
and
<p class='alert alert-danger'>
$$
a_n = \frac{1}{L_x}\int_Lf(x)\exp\left(-\hat{\jmath}\frac{2\pi nx}{Lx}\right)dx
$$
</p>
Often the reduction to wavenumber is used, where
<p class='alert alert-danger'>
$$
k_n = \frac{2\pi n}{L_x}
$$
</p>
Note that if $x$ is time instead of distance, $L_x$ is a time $T$ and the smallest frequency contained in the domain is $f_0=1/T_0$ and the wavenumber $n$ is $k_n=2\pi f_0n=2\pi f_n$ with $f_n$ for $\vert n\vert >1$ are the higher frequencies.
<h1>Discrete Fourier Transform (DFT)</h1>
In scientific computing we are interested in applying Fourier series on vectors or matrices, containing a integer number of samples. The DFT is the fourier series for the number of samples. DFT functions available in python or any other language only care about the number of samples, therefore the wavenumber is
<p class='alert alert-danger'>
$$
k_n=\frac{2\pi n}{N_x}
$$
</p>
Consider a function $f$ periodic over a domain $0\leq x\leq 2\pi$, discretized by $N_x$ points. The nodal value is $f_i$ located at $x_i=(i+1)\Delta x$ with $\Delta x=L_x/Nx$. The DFT is defined as
<p class='alert alert-danger'>
$$
\hat{f}_k=\sum_{i=0}^{N_x-1}f_i\exp\left(-2\pi\hat{\jmath}\frac{ik}{N_x}\right)
$$
</p>
The inverse DFT is defined as
<p class='alert alert-danger'>
$$
f_i=\sum_{k=0}^{N_x-1}\hat{f}_k\exp\left(2\pi\hat{\jmath}\frac{ik}{N_x}\right)
$$
</p>
<h1>Fast Fourier Transform (FFT)</h1>
Using symmetries, the FFT reduces computational costs and stores in the following way:
<p class='alert alert-danger'>
$$
\hat{f}_k=\sum_{i=-Nx/2+1}^{N_x/2}f_i\exp\left(-2\pi\mathbf{j}\frac{ik}{N_x}\right)
$$
</p>
<p class='alert alert-info'>
Compared to the Fourier series, DFT or FFT assumes that the system can be accurately captured by a finite number of waves. It is up to the user to ensure that the number of computational points is sufficient to capture the smallest scale, or smallest wavelength or highest frequence. Remember that the function on which FT is applied must be periodic over the domain and the grid spacing must be uniform.
</p>
There are FT algorithms for unevenly space data, but this is beyond the scope of this notebook.
<h1>Example 1: Filtering</h1>
The following provides examples of low- and high-pass filters based on Fourier transform. A ideal low-(high-) pass filter passes frequencies that are lower than a threshold without attenuation and removes frequencies that are higher than the threshold.
When applied to spatial data (function of $x$ rather than $t$-time), the FT (Fourier Transform) of a variable is function of wavenumbers
$$
k_n=\frac{2\pi n}{L_x}
$$
or wavelengths
$$
\lambda_n=\frac{2\pi}{k_n}
$$
End of explanation
"""
# Power spectrum of u: energy contained in each non-negative wavenumber.
# Integer division (//) keeps slice bounds integral: Nx/2 is a float under
# Python 3, which breaks both np.zeros and the slices below; // is
# identical to / for ints under Python 2, so this stays backward compatible.
F = np.real(u_hat[0:Nx//2+1]*np.conj(u_hat[0:Nx//2+1]))
# Wavenumbers in FFT ordering: 0..Nx/2 followed by the negative modes.
k = np.hstack((np.arange(0, Nx//2+1), np.arange(-Nx//2+1, 0)))
plt.loglog(k[0:Nx//2+1], F, 'r-', lw=2, label='F_u')
plt.legend(loc=3, bbox_to_anchor=[0, 1],
           ncol=3, shadow=True, fancybox=True)
plt.xlabel('$k$', fontdict=font)
plt.ylabel('$F_u(k)$', fontdict=font)
plt.show()
"""
Explanation: <h2>Spectrum</h2>
For now we will define the spectrum f $f$ as
<p class='alert alert-danger'>
$$
F(k_n) = \hat{f}_n.\hat{f}_n^*
$$
</p>
which can be interpreted as the energy contained in the $k_n$ wavenumber. This is helpful when searching for the most energitic scales or waves in our system. Thanks to the symmetries of the FFT, the spectrum is defined over $n=0$ to $N_x/2$
End of explanation
"""
# filtering the smaller waves
def low_pass_filter_fourier(a, k, kcutoff):
    """Zero out all Fourier modes of *a* with |wavenumber| > kcutoff.

    Parameters
    ----------
    a : 1-D real array sampled on a uniform periodic grid.
    k : wavenumber array in FFT ordering (0..N/2, -N/2+1..-1), same length as a.
    kcutoff : largest wavenumber magnitude kept by the filter.

    Returns
    -------
    The filtered signal (real part of the inverse FFT).
    """
    # Bug fix: the original transformed the *global* `u` and compared against
    # the *global* `kcut` instead of using the arguments `a` and `kcutoff`,
    # and computed an unused local `N`.
    a_hat = np.fft.fft(a)
    filter_mask = np.where(np.abs(k) > kcutoff)
    a_hat[filter_mask] = 0.0 + 0.0j
    return np.real(np.fft.ifft(a_hat))
# Keep only the lower half of the wave components.  Integer division is
# required so kcut can also be used as a slice bound below (Nwave/2 is a
# float in Python 3 and slicing with it raises TypeError).
kcut = Nwave//2 + 1
k = np.hstack((np.arange(0,Nx//2+1),np.arange(-Nx//2+1,0)))
v = low_pass_filter_fourier(u,k,kcut)
# Exact reference: sum of the retained wave components only.
u_filter_exact = np.sum(uwave[:,0:kcut+1],axis=1)
# Compare the FFT-filtered signal against the exact truncation and the original.
plt.plot(x,v,'r-',lw=2,label='filtered with fft')
plt.plot(x,u_filter_exact,'b--',lw=2,label='filtered (exact)')
plt.plot(x,u,'g:',lw=2,label='original')
plt.legend(loc=3, bbox_to_anchor=[0, 1],
           ncol=3, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$u$', fontdict = font)
plt.show()
# Max-norm error between the FFT-filtered signal and the exact truncation.
print('error:',np.linalg.norm(v-u_filter_exact,np.inf))
# Spectra of the original (F) and filtered (F_filter) signals.
# Integer division (//) replaces the Python-2-only `Nx/2` slice bounds; the
# redundant np.zeros pre-allocations (immediately overwritten) are dropped.
u_hat = np.fft.fft(u)
F = np.real(u_hat[0:Nx//2+1]*np.conj(u_hat[0:Nx//2+1]))
v_hat = np.fft.fft(v)
F_filter = np.real(v_hat[0:Nx//2+1]*np.conj(v_hat[0:Nx//2+1]))
k = np.hstack((np.arange(0,Nx//2+1),np.arange(-Nx//2+1,0)))
plt.loglog(k[0:Nx//2+1],F,'r-',lw=2,label='F_u')
plt.loglog(k[0:Nx//2+1],F_filter,'b-',lw=2,label='F_v')
plt.legend(loc=3, bbox_to_anchor=[0, 1],
           ncol=3, shadow=True, fancybox=True)
plt.xlabel('$k$', fontdict = font)
plt.ylabel('$F_u(k)$', fontdict = font)
plt.show()
"""
Explanation: <h2>Low-Pass Filter</h2>
The following code filters the original signal by half the wavenumbers using FFT and compares to exact filtered function
End of explanation
"""
# High-pass example: remove the kfilter lowest wavenumbers and keep the rest.
u_hat = np.fft.fft(u)
kfilter = 3
# k here is just the FFT bin index 0..Nx-1 (the negative wavenumbers live in
# the upper half of the array), so the band to zero is [0, kfilter) plus its
# mirror (Nx-kfilter, Nx-1].
k = np.linspace(0,Nx-1,Nx)
filter_mask = np.where((k < kfilter) | (k > Nx-kfilter) )
u_hat[filter_mask] = 0.+0.j
v = np.real(np.fft.ifft(u_hat))
plt.plot(x,v,'r-',lw=2)
# Compare with the single retained wave — assumes uwave's column 3 is the
# only mode >= kfilter present in u; TODO confirm against the signal setup.
plt.plot(x,uwave[:,3],'b--',lw=2)
plt.show()
"""
Explanation: <h2> High-Pass Filter</h2>
From the example below, develop a function for a high-pass filter.
End of explanation
"""
|
frucci/kaggle_quora_competition | Tagger.ipynb | gpl-3.0 | import ourfunctions as f
from time import time
import gc
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
import re
from gensim.models import word2vec
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Load the Quora question-pairs train/test sets.
train_df = pd.read_csv("./train.csv", low_memory=True) # chunksize=)
test_df = pd.read_csv("./test.csv", low_memory=True)#, chunksize=)
print(train_df.shape)
print(test_df.shape)
## adjusting the nan value
# Replace NaN questions with empty strings so later string ops don't fail.
train_df.fillna("", inplace=True)
test_df.fillna("", inplace=True)
train_df.info()
# Column names used throughout the notebook.
question1, question2 = 'question1', 'question2'
"""
Explanation: Table of Contents
<p><div class="lev3 toc-item"><a href="#Clear-train-and-test-first-step-and-save-the-results" data-toc-modified-id="Clear-train-and-test-first-step-and-save-the-results-001"><span class="toc-item-num">0.0.1 </span>Clear train and test first step and save the results</a></div><div class="lev3 toc-item"><a href="#Create-the-tagged-phrases" data-toc-modified-id="Create-the-tagged-phrases-002"><span class="toc-item-num">0.0.2 </span>Create the tagged phrases</a></div><div class="lev4 toc-item"><a href="#add-tagger-to-dataframe" data-toc-modified-id="add-tagger-to-dataframe-0021"><span class="toc-item-num">0.0.2.1 </span>add tagger to dataframe</a></div><div class="lev3 toc-item"><a href="#Create-data-for-cluster" data-toc-modified-id="Create-data-for-cluster-003"><span class="toc-item-num">0.0.3 </span>Create data for cluster</a></div><div class="lev3 toc-item"><a href="#Variables-with-tagger" data-toc-modified-id="Variables-with-tagger-004"><span class="toc-item-num">0.0.4 </span>Variables with tagger</a></div><div class="lev3 toc-item"><a href="#Create-test/train-with-clear-phrase" data-toc-modified-id="Create-test/train-with-clear-phrase-005"><span class="toc-item-num">0.0.5 </span>Create test/train with clear phrase</a></div>
End of explanation
"""
# first clear
# First-pass text cleaning of both question columns in train and test.
# NOTE(review): f.add_clear_first presumably adds a cleaned column in place
# (the '<col>_clear_1' columns used below) — confirm against ourfunctions.
f.add_clear_first(train_df, question1)
f.add_clear_first(train_df, question2)
f.add_clear_first(test_df, question1)
f.add_clear_first(test_df, question2)
from joblib import Parallel, delayed
import multiprocessing
from itertools import chain
def clear_first_many(t):
    """Apply f.clear_first to every element of *t*; return the results as a list."""
    return list(map(f.clear_first, t))
def parallel_clear(df, q):
    """Clean column *q* of *df* in parallel chunks.

    Returns a list of lists — one list of cleaned strings per chunk — to be
    flattened by the caller.
    """
    # Chunk size: ~1% of the frame, capped at 10k rows.
    step = min(len(df) // 100, 10000)
    start = df.index[0]
    stop = df.index[-1]
    num_cores = multiprocessing.cpu_count()
    # NOTE(review): .loc slicing is label-based and end-inclusive (hence the
    # step-1); this assumes a contiguous integer index — confirm before reuse.
    return Parallel(n_jobs=num_cores)(
        delayed(clear_first_many)(df.loc[i:i + step-1][q].values)
        for i in range(start, stop, step))
def flatmap(f, items):
    """Map *f* over *items* and lazily flatten the resulting iterables."""
    return (elem for mapped in map(f, items) for elem in mapped)
def add_clear1(df, q):
    """Add a '<q>_clear1' column holding the parallel-cleaned text of column *q*."""
    # Flatten the per-chunk lists straight into a list; the original built an
    # identity flatmap and then copied it element by element into a second list.
    df[q + '_clear1'] = list(flatmap(lambda x: x, parallel_clear(df, q)))
add_clear1(train_df, question1)
train_df
add_clear1(train_df, question2)
# NOTE(review): test_df is only displayed here — it never goes through
# add_clear1, although its *_clear_1 columns are recomputed later via
# f.add_clear_first; confirm that is intentional.
test_df
"""
Explanation: Clear train and test first step and save the results
End of explanation
"""
# Deduplicated collection of all questions from train + test.
unique_questions = f.create_unique_questions(train_df, test_df)
# NOTE(review): this copies the *raw* 'questions' values verbatim into
# 'questions_clear_1' — presumably the cleaned text was intended; verify.
l = [ x for x in unique_questions['questions'].values ]
unique_questions['questions_clear_1'] = pd.Series(l)
# Write the questions out in 2000-row CSV batches for the external POS tagger.
f.create_tagger_csv(unique_questions, 2000)
# Row ranges whose tagger CSV batches failed on the first pass and must be
# regenerated.
start_stop = [
    [30000, 32001],
    [46000, 48001],
    [54000, 56001],
    [158000, 160001],
    [248000, 250001],
    [258000, 260001],
    [342000, 344001],
    [358000, 360001],
    [390000, 392001],
    [396000, 398001],
    [432000, 434001],
    [440000, 442001],
    [460000, 462001],
    [544000, 546001],
    [558000, 560001],
    [574000, 576001],
    [696000, 698001],
    [756000, 758001],
    [848000, 850001],
    [890000, 892001],
    [958000, 960001],
    [970000, 972001],
    [1010000, 1012001],
]
# Bug fix: the original called this with the undefined name `start_top`,
# which raises NameError — the list above is named `start_stop`.
f.fix_creations_csv_tagger(start_stop, 20)
"""
Explanation: Create the tagged phrases
End of explanation
"""
# Concatenate all per-batch tagger CSVs into one frame, timing the load.
start = time()
import os
mycsv = os.listdir('csv_question_tag/')
df = pd.concat([pd.read_csv('csv_question_tag/'+csv) for csv in mycsv])
stop = time()
print((stop - start)/60, 'minutes')
# Drop the serialized index column and normalise the schema.
df.drop('Unnamed: 0', axis=1,inplace=True)
df.columns = ['tagger', 'questions_clear_1']
df.drop_duplicates(inplace=True)
# cleaned question -> raw tagger output string
list_tagger = df.set_index('questions_clear_1').to_dict()['tagger']
# original question -> cleaned question
temp_dict = unique_questions.set_index('questions').to_dict()['questions_clear_1']
# Free the large intermediates before moving on.
del df
del unique_questions
gc.collect()
from functools import reduce
def recover_tagg_list(tag_string):
    """Parse the string repr of a tagger output back into a list of
    [word, POS, lemma] triples."""
    # Strip the outer "[[ ... ]]" brackets, cut on the "], " triple
    # boundaries, then split each triple on the quote/comma/quote separator.
    triples = tag_string[1:-2].split('], ')
    separator = re.compile(r'[\'\"],\s[\'\"]')
    return [separator.split(triple[2:-1]) for triple in triples]
def get_phrase_from_tagger(tagg_list):
    """Rebuild a phrase from [word, POS, lemma] triples.

    Uses the lemma when the tagger produced one, falling back to the original
    word for the '<unknown>' and '@card@' placeholders.  Entries that are not
    exactly 3 fields long are skipped.
    """
    words = [
        c[2] if c[2] not in ('<unknown>', '@card@') else c[0]
        for c in tagg_list if len(c) == 3
    ]
    # str.join is O(n) (the reduce-based concatenation was O(n^2)) and,
    # unlike reduce without an initializer, does not raise on an empty list.
    return ' '.join(words)
def convert_tag_to_phrase(df, question_after_tag, tagger):
    """Store in column *question_after_tag* the phrase rebuilt from the raw
    tagger output held in column *tagger*."""
    rebuild = lambda row: get_phrase_from_tagger(recover_tagg_list(row[tagger]))
    df[question_after_tag] = df.apply(rebuild, axis=1)
# Load the raw tagger output and rebuild readable phrases from it.
df = pd.read_csv('tagg_list.csv')
df.dropna(inplace=True)
convert_tag_to_phrase(df, 'quest_tag', 'tagger')
def clear_second(text):
    """Second-pass cleaning: manual corrections, second-step normalisation,
    punctuation removal, then stopword removal."""
    for step in (f.clear_correzioni_mano, f.clear_text_second_step, f.remove_punctuations):
        text = step(text)
    return f.remove_stopwors(text, f.my_stopwords)
# Apply the second cleaning pass and persist the fully-cleaned questions.
df['quest_final'] = df.apply(axis=1, func=lambda x:clear_second(x['quest_tag']))
df.to_csv('risultato_tagger.csv')
"""
Explanation: add tagger to dataframe
End of explanation
"""
# Reload the tagger results and build a cleaned-question -> final-text map.
df = pd.read_csv('risultato_tagger.csv', encoding='latin1')
convert = df.set_index('questions_clear_1').to_dict()['quest_final']
question1, question2 = 'question1', 'question2'
# Recompute the first-pass cleaned columns used as lookup keys below.
f.add_clear_first(train_df, question1)
f.add_clear_first(train_df, question2)
f.add_clear_first(test_df, question1)
f.add_clear_first(test_df, question2)
def get_final_quest(x):
    """Look up the fully-cleaned text for *x* in the global `convert` map;
    return the 'ciccia' placeholder for questions missing from it."""
    return convert.get(x, 'ciccia')
# Map every question to its final cleaned form and persist both sets.
train_df['question1_final'] = train_df.apply(axis=1, func=lambda x: get_final_quest(x['question1_clear_1']))
train_df['question2_final'] = train_df.apply(axis=1, func=lambda x: get_final_quest(x['question2_clear_1']))
test_df['question1_final'] = test_df.apply(axis=1, func=lambda x: get_final_quest(x['question1_clear_1']))
test_df['question2_final'] = test_df.apply(axis=1, func=lambda x: get_final_quest(x['question2_clear_1']))
train_df.to_csv('train_clear.csv')
test_df.to_csv('test_clear.csv')
# Penn-Treebank-style POS tag -> description mapping (TreeTagger tag set),
# kept for reference when reading the tagger output above.
tagger_list = {
    'CC': 'Coordinating conjunction',
    'CD': 'Cardinal number',
    'DT': 'Determiner',
    'EX': 'Existential there',
    'FW': 'Foreign word',
    'IN': 'Preposition or subordinating conjunction',
    'JJ': 'Adjective',
    'JJR': 'Adjective, comparative',
    'JJS': 'Adjective, superlative',
    'LS': 'List item marker',
    'MD': 'Modal',
    'NN': 'Noun, singular or mass',
    'NNS': 'Noun, plural',
    'NP': 'Proper noun, singular',
    'NPS': 'Proper noun, plural',
    'PDT': 'Predeterminer',
    'POS': 'Possessive ending',
    'PP': 'Personal pronoun',
    'PP$': 'Possessive pronoun',
    'RB': 'Adverb',
    'RBR': 'Adverb, comparative',
    'RBS': 'Adverb, superlative',
    'RP': 'Particle',
    'SYM': 'Symbol',
    'TO': 'to',
    'UH': 'Interjection',
    'VB': 'Verb, base form',
    'VBD': 'Verb, past tense',
    'VBG': 'Verb, gerund or present participle',
    'VBN': 'Verb, past participle',
    'VBP': 'Verb, non-3rd person singular present',
    'VBZ': 'Verb, 3rd person singular present',
    'WDT': 'Wh-determiner',
    'WP': 'Wh-pronoun',
    'WP$': 'Possessive wh-pronoun',
    'WRB': 'Wh-adverb'
}
"""
Explanation: Create test/train with clear phrase
End of explanation
"""
|
gabrielhpbc/CD | APS5_alunos.ipynb | mit | %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import expon
from numpy import arange
import scipy.stats as stats
#Abrir o arquivo
# Load the USGS earthquake data set.
df = pd.read_csv('earthquake.csv')
#listar colunas
print(list(df))
"""
Explanation: APS 5 - Questões com auxílio do Pandas
Nome: <font color=blue> Gabriel Heusi Pereira Bueno de Camargo </font>
APS INDIVIDUAL
Data de Entrega: 26/Set até às 23h59 via GitHub.
Vamos trabalhar com dados do USGS (United States Geological Survey) para tentar determinar se os abalos detectados no hemisfério Norte têm grande probabilidade de serem testes nucleares.
End of explanation
"""
df.head()
"""
Explanation: Liste as primeiras linhas do DataFrame
End of explanation
"""
# Derive the hemisphere from the sign of the latitude (>= 0 -> Norte).
df.loc[(df.Latitude >=0), "Hemisfério"] = "Norte"
df.loc[(df.Latitude <0), "Hemisfério"] = "Sul"
df.head()
df.Magnitude.describe()
"""
Explanation: Q1 - Manipulando o DataFrame
Crie uma coluna chamada Hemisfério baseada na Latitude
A regra de formação é a seguinte:
Valor | Critério
---|---
Norte | Latitude positiva
Sul | Latitude negativa
End of explanation
"""
# Density histogram of magnitudes between 5 and 9 in 0.65-wide bins.
# NOTE(review): `normed` was removed in matplotlib 3.x — use density=True
# on newer stacks.
f = plt.figure(figsize=(11,5))
faixas = arange(5,9,0.65)
plot = df.Magnitude.plot.hist(bins=faixas , title="Histograma de Magnitude",normed=1,alpha = 0.9,color="g")
plt.xlabel("Magnitude")
plt.ylabel("Densidade")
plt.show()
"""
Explanation: Q2 - Fit e Histograma
Faça o Histograma da Magnitude. Interprete.
End of explanation
"""
# Fit parameters for a normal-pdf overlay on the density histogram.
# NOTE(review): the exercise text asks for an *exponential* fit
# (scipy.stats.expon.fit) — a normal pdf is plotted here; confirm intent.
mu = df.Magnitude.mean()
dp = df.Magnitude.std()
fig = plt.figure(figsize=(11, 5))
plot= df.Magnitude.plot.hist(bins = faixas, title='HISTOGRAMA Magnitude ', normed=1, alpha=0.9,color = 'r')
# Sort so the pdf is drawn as a smooth curve over the data range.
a = sorted(df.Magnitude)
plt.plot(a, stats.norm.pdf(a, loc = mu, scale = dp))
plt.title('Histograma X Pdf')
"""
Explanation: Faça o fit de uma distribuição exponencial sobre os dados da Magnitude, achando os valores de loc e scale. Interprete loc e scale no caso da exponencial.
Documentação: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.expon.html
Refaça o Histograma plotando a fdp (função densidade de probabilidade) da exponencial com os parâmetros achados no fit em cima. Cuidado com o domínio utilizado. Interprete.
End of explanation
"""
# Joint distribution (normalised over the whole table, with marginals in
# the 'All' row/column) of hemisphere vs event type.
ct = pd.crosstab(df.Hemisfério,df.Type,margins=True,normalize = True)
ct
"""
Explanation: Q3 - Tabela cruzada
Faça uma tabela de cruzamento das variáveis Hemisfério e Type
Sua tabela deve ser <font color=red> normalizada</font>
End of explanation
"""
# P(Norte | Earthquake): joint probability over the Earthquake marginal.
probNorte = ct.Earthquake.Norte/ct.Earthquake.All
print(probNorte)
"""
Explanation: Q3.1 - Qual a probabilidade de ocorrer um terremoto no hemisfério norte?
Adicione na célula abaixo o cálculo:
End of explanation
"""
# P(Nuclear Explosion | Norte): joint probability over the Norte marginal.
probNuclear = ct["Nuclear Explosion"]["Norte"]/ct.All.Norte
print(probNuclear)
"""
Explanation: Explique o seu raciocínio
O cálculo da probabilidade nesse caso se baseia na análise dos casos que ocorrem no Norte em comparação com os casos totais de terremoto. Portanto para saber a probabilidade de ocorrer um terremoto no hemisfério Norte basta dividir esse valor, apresentado no crosstab, pela probabilidade total.
Q3.2 - Dado que aconteceu no Norte, qual a probabilidade de ele ter sido Nuclear Explosion?
Calcule a resposta abaixo, ou explique como a encontrou
Se for cálculo preencha a célula a seguir:
End of explanation
"""
# Scatter of magnitude error vs depth for the bivariate analysis.
plt.scatter(x = df['Magnitude Error'],
            y = df['Depth'])
plt.show()
"""
Explanation: Se conseguir obter a resposta sem calcular, insira a resposta abaixo:
A probabilidade de ter sido Nuclear Explosion é ...
Q4 - Análise bivariada
Faça o plot de dispersão (scatter plot) entre as variáveis Magnitude Error e Depth
End of explanation
"""
df["Depth"].corr(df["Magnitude Error"])
"""
Explanation: Calcule a correlação entre as variáveis Magnitude Error e Depth
End of explanation
"""
# Summary statistics and boxplots for the geographic coordinates.
Lat = df["Latitude"].describe()
Long = df["Longitude"].describe()
print(Lat,Long)
df.boxplot(column = ["Latitude","Longitude"])
plt.show()
"""
Explanation: Explique o que significa o valor da correlação calculada acima?
A correlação apresentada acima mostra uma espécie de dependência entre as duas variáveis, no caso Magnitude Error e Depth, observando o gráfico mostrado acima os valores são bem distantes, mas é justamente isso e o valor da correlação mostrado, que é baixo, que mostra uma alta dependência entre as duas variáveis, não há grande discrepância entre os valores. O fato de ser negativo justificaria uma reta descrescente.
Q5 - Describe e boxplot
Faça o describe e o boxplot da Latitude e da Longitude. Explique os valores
End of explanation
"""
# Flag MB-type nuclear explosions per hemisphere, then compare frequencies.
df.loc[(df.Type=="Nuclear Explosion")&(df["Magnitude Type"]=="MB")&(df["Hemisfério"]=="Sul"),"Hemis"]="Sul"
df.loc[(df.Type=="Nuclear Explosion")&(df["Magnitude Type"]=="MB")&(df["Hemisfério"]=="Norte"),"Hemis"]="Norte"
# NOTE(review): value_counts("Sul") just passes a truthy value to the
# `normalize` parameter — value_counts(normalize=True) is what is computed.
sul=df["Hemis"].value_counts("Sul")
sul
"""
Explanation: Q6 - Tirando conclusões com base nos dados
Em um certo lugar já ocorreram abalos com Magnitude Type MB e Type Nuclear Explosion.
Responda:
* É mais provável que tenha sido no norte ou no sul?
Assuma que os Magnitude Type e Type são independentes
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | blogs/form_parser/formparsing.ipynb | apache-2.0 | !sudo chown -R jupyter:jupyter /home/jupyter/imported/formparsing.ipynb
from IPython.display import Markdown as md
### change to reflect your notebook
_nb_repo = 'training-data-analyst'
_nb_loc = "blogs/form_parser/formparsing.ipynb"
_nb_title = "Form Parsing Using Google Cloud Document AI"
### no need to change any of this
_nb_safeloc = _nb_loc.replace('/', '%2F')
_nb_safetitle = _nb_title.replace(' ', '+')
md("""
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2F{3}%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2F{3}%2Fraw%2Fmaster%2F{2}">
<img src="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png"/> Run in AI Platform Notebook</a>
</td>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/{3}/blob/master/{0}">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/GoogleCloudPlatform/{3}/blob/master/{0}">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://raw.githubusercontent.com/GoogleCloudPlatform/{3}/master/{0}">
<img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
""".format(_nb_loc, _nb_safetitle, _nb_safeloc, _nb_repo))
"""
Explanation: Form Parsing using Google Cloud Document AI
This notebook shows how to use Google Cloud Document AI to parse a campaign disclosure form.
It accompanies this Medium article:
https://medium.com/@lakshmanok/how-to-parse-forms-using-google-cloud-document-ai-68ad47e1c0ed
End of explanation
"""
%%bash
if [ ! -f scott_walker.pdf ]; then
curl -O https://storage.googleapis.com/practical-ml-vision-book/images/scott_walker.pdf
fi
!ls *.pdf
from IPython.display import IFrame
IFrame("./scott_walker.pdf", width=600, height=300)
"""
Explanation: Document
As an example, let's take this US election campaign disclosure form.
End of explanation
"""
BUCKET="ai-analytics-solutions-kfpdemo" # CHANGE to a bucket that you own
# Copy the sample PDF to Cloud Storage (Document AI reads from GCS) and
# verify it landed.
!gsutil cp scott_walker.pdf gs://{BUCKET}/formparsing/scott_walker.pdf
!gsutil ls gs://{BUCKET}/formparsing/scott_walker.pdf
"""
Explanation: Note: If the file is not visible, simply open the PDF file by double-clicking on it in the left hand menu.
Upload to Cloud Storage
Document AI works with documents on Cloud Storage, so let's upload the doc.
End of explanation
"""
!gcloud auth list
"""
Explanation: Enable Document AI
First enable Document AI in your project by visiting
https://console.developers.google.com/apis/api/documentai.googleapis.com/overview
Find out who you are running as:
End of explanation
"""
%%bash
PDF="gs://ai-analytics-solutions-kfpdemo/formparsing/scott_walker.pdf" # CHANGE to your PDF file
REGION="us" # change to EU if the bucket is in the EU
cat <<EOM > request.json
{
"inputConfig":{
"gcsSource":{
"uri":"${PDF}"
},
"mimeType":"application/pdf"
},
"documentType":"general",
"formExtractionParams":{
"enabled":true
}
}
EOM
# Send request to Document AI.
PROJECT=$(gcloud config get-value project)
echo "Sending the following request to Document AI in ${PROJECT} ($REGION region), saving to response.json"
cat request.json
curl -X POST \
-H "Authorization: Bearer "$(gcloud auth application-default print-access-token) \
-H "Content-Type: application/json; charset=utf-8" \
-d @request.json \
https://${REGION}-documentai.googleapis.com/v1beta2/projects/${PROJECT}/locations/us/documents:process \
> response.json
!tail response.json
"""
Explanation: Create a service account authorization by visiting
https://console.cloud.google.com/iam-admin/serviceaccounts/create
Give this service account Document AI Core Service Account authorization
Give the above ACTIVE ACCOUNT the ability to use the service account you just created.
Call Document AI
End of explanation
"""
import json
# Load the Document AI response; a context manager closes the file handle
# promptly (the original opened it and never closed it).
with open('response.json') as ifp:
    response = json.load(ifp)
# 'text' holds the full extracted document text.
allText = response['text']
print(allText[:100])
"""
Explanation: Note: If you get a 403 PERMISSION DENIED error, please re-run all the cells from the top.
Parse the response
Let's use Python to parse the response and pull out specific fields.
End of explanation
"""
print(allText.index("CASH ON HAND"))
"""
Explanation: Option 1: Parsing blocks of text
As an example, let's try to get the "Cash on Hand". This is in Page 2 and the answer is $75,931.36
All the data in the document is the allText field. we just need to find the right starting and ending index
for what we want to extract.
End of explanation
"""
response['pages'][1]['blocks'][5]
response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]
startIndex = int(response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]['startIndex'])
endIndex = int(response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]['endIndex'])
allText[startIndex:endIndex]
"""
Explanation: We know that "Cash on Hand" is on Page 2.
End of explanation
"""
def extractText(allText, elem):
    """Return the stripped slice of *allText* referenced by a Document AI
    layout element's first text segment."""
    segment = elem['textAnchor']['textSegments'][0]
    lo, hi = int(segment['startIndex']), int(segment['endIndex'])
    return allText[lo:hi].strip()
amount = float(extractText(allText, response['pages'][1]['blocks'][6]['layout']))
print(amount)
"""
Explanation: Cool, we are at the right part of the document! Let's get the next block, which should be the actual amount.
End of explanation
"""
response['pages'][1].keys()
response['pages'][1]['formFields'][2]
fieldName = extractText(allText, response['pages'][1]['formFields'][2]['fieldName'])
fieldValue = extractText(allText, response['pages'][1]['formFields'][2]['fieldValue'])
print('key={}\nvalue={}'.format(fieldName, fieldValue))
"""
Explanation: Option 2: Parsing form fields
What we did with blocks of text was quite low-level. Document AI understands that forms tend to have key-value pairs, and part of the JSON response includes these extracted key-value pairs as well.
Besides FormField Document AI also supports getting Paragraph and Table from the document.
End of explanation
"""
|
adieuadieu/educathingamajigs | udacity/dlnd/p1-your-first-network/dlnd-your-first-neural-network.ipynb | unlicense | %matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
# Hourly bike-rental data (UCI Bike Sharing dataset).
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
# One-hot encode the categorical fields, then drop the originals along with
# columns not used as model inputs.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
# Standardise the continuous variables to zero mean / unit std.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save the last 21 days
# Time-series split: the final 21 days (hourly rows) become the test set.
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days of the remaining data as a validation set
# (again a chronological split: train on the past, validate on the future).
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
    """Two-layer (one hidden layer) feed-forward network trained with plain
    gradient descent.  The hidden layer uses a sigmoid activation; the single
    output node is linear (identity activation) for regression."""
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Initialise layer sizes, random weights and the learning rate."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights
        # Gaussian init with std = 1/sqrt(fan-in style factor); shapes are
        # (to_layer, from_layer) so forward props are W @ x.
        self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, 
                                       (self.hidden_nodes, self.input_nodes))

        self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, 
                                       (self.output_nodes, self.hidden_nodes))
        self.lr = learning_rate
        
        #### Set this to your implemented sigmoid function ####
        # Activation function is the sigmoid function
        # https://en.wikipedia.org/wiki/Activation_function
        # Alternatives are defined below; only `sigmoid` is used.  If another
        # is chosen, the matching derivative in train() must be swapped too.
        sigmoid = lambda x: 1 / (1 + np.exp(-x))
        tanh = lambda x: (2 / (1 + np.exp(-2 * x))) - 1
        gaussian = lambda x: np.exp(-x**2)
        sinusoid = lambda x: np.sin(x)
        
        self.activation_function = sigmoid
    
    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass for a single example and update the
        weights in place."""
        # Convert inputs list to 2d array
        # Column vectors: inputs (n_in, 1), targets (n_out, 1).
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        """
        NOTES:
        borrows from 
        https://discussions.udacity.com/t/im-completely-stuck-and-confused/215739/5
        and
        https://discussions.udacity.com/t/having-a-hard-time-implementing-the-backprop/215435/11
        
        omission of division by records clarified by:
        https://discussions.udacity.com/t/why-in-project-1-we-dont-divide-the-record-num/216716
        """
        
        #### Implement the forward pass here ####
        ### Forward pass ###
        hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)

        # Output layer is linear: no activation on the way out (regression).
        final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
        final_outputs = final_inputs # signals from final output layer
        
        #### Implement the backward pass here ####
        ### Backward pass ###
        output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
        
        # Backpropagated error
        hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
        # Derivative of the activation; must match self.activation_function.
        hidden_gradient = hidden_outputs * (1 - hidden_outputs) # sigmoid derivative
        # hidden_gradient = 1 - (hidden_outputs ** 2) # tanh derivative
        # hidden_gradient = -2 * hidden_outputs * np.exp(-hidden_outputs ** 2) # gaussian derivative
        # hidden_gradient = np.cos(hidden_outputs) # sinusoid derivative
        
        # Update the weights
        # delta_hidden_output: (1, n_hidden); delta_hidden_input: (n_hidden, 1).
        delta_hidden_output = output_errors * hidden_outputs.T
        delta_hidden_input = hidden_errors * hidden_gradient
        
        self.weights_hidden_to_output += self.lr * delta_hidden_output # update hidden-to-output weights with gradient descent step
        self.weights_input_to_hidden += self.lr * np.dot(delta_hidden_input, inputs.T) # update input-to-hidden weights with gradient descent step
 
    def run(self, inputs_list):
        """Forward-propagate *inputs_list* and return the network output."""
        # Run a forward pass through the network
        inputs = np.array(inputs_list, ndmin=2).T
        
        #### Implement the forward pass here ####
        # Hidden layer
        hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer
        hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
        
        # Output layer
        final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
        final_outputs = final_inputs # signals from final output layer 
        
        return final_outputs
def MSE(y, Y):
    """Mean squared error between predictions *y* and targets *Y*."""
    diff = y - Y
    return np.mean(diff * diff)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import sys

### Set the hyperparameters here ###
"""
activation | epochs | learnRate | hidden | output | Training loss | Validation loss | note
---------------------------------------------------------------------------------------------------------------
sigmoid    |   3000 |     0.005 |     21 |      1 |         0.079 |           0.167
tanh       |   3000 |     0.005 |     21 |      1 |         0.063 |           0.152
sigmoid    |   3000 |     0.005 |     28 |      1 |         0.076 |           0.182
sigmoid    |   3000 |      0.01 |     28 |      1 |         0.063 |           0.236
sigmoid    |   3000 |     0.005 |     42 |      1 |         0.084 |           0.184
sigmoid    |   3000 |     0.005 |     56 |      1 |         0.066 |           0.144
sigmoid    |   3000 |     0.005 |     70 |      1 |         0.074 |           0.143
sigmoid    |   3000 |     0.005 |     56 |     56 |         0.047 |           0.156
tanh       |   3000 |     0.005 |     56 |     56 |         0.044 |           0.144
sigmoid    |   3000 |     0.005 |     56 |    112 |         0.043 |           0.202
tanh       |   3000 |     0.005 |     56 |    112 |         0.063 |           0.152
sigmoid    |   1000 |     0.005 |     59 |    118 |         0.047 |           0.145
sigmoid    |   3000 |     0.005 |     59 |    118 |         0.039 |           0.164
tanh       |   3000 |     0.005 |     59 |    118 |         0.043 |           0.163
sigmoid    |   1500 |      0.01 |     59 |    118 |         0.041 |           0.132
sigmoid    |   2000 |      0.01 |     59 |    118 |         0.039 |           0.134
sigmoid    |   3000 |      0.01 |     59 |    118 |         0.037 |           0.137
sigmoid    |  10000 |      0.01 |     59 |    118 |         0.029 |           0.175   overfit?
sigmoid    |   3000 |      0.05 |     59 |    118 |         0.041 |           0.147
sigmoid    |   3000 |      0.01 |    118 |    118 |         0.036 |           0.132
sigmoid    |   3000 |      0.01 |    177 |    177 |         0.035 |           0.161
sigmoid    |   3000 |       0.1 |    177 |    177 |           nan |             nan   :-(
sigmoid    |   3000 |     0.005 |    177 |    177 |         0.037 |           0.152
sigmoid    |   3000 |       0.1 |     56 |     56 |         0.048 |           0.163
"""
epochs = 3000
learning_rate = 0.01
hidden_nodes = 28
output_nodes = 1

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train':[], 'validation':[]}
for e in range(epochs):
    # https://discussions.udacity.com/t/learning-rate-hyperparameter/216978/8?u=marco-611
    #if e %(epochs / 15) == 0:
    #    learning_rate /= 2
    
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # Fix: the .ix indexer was removed in pandas >= 1.0; .loc is the
    # label-based equivalent for these index labels.
    for record, target in zip(train_features.loc[batch].values, 
                              train_targets.loc[batch]['cnt']):
        network.train(record, target)
    
    # Printing out the training progress
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    
    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
# Fix: ylim's ymax keyword was removed in matplotlib >= 3.3; `top` is the
# current name for the upper bound.
plt.ylim(top=.5)
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
# Compare network predictions against actual ridership on the TEST split.
fig, ax = plt.subplots(figsize=(16,8))

# Un-scale: the network was trained on standardized 'cnt' targets, so
# multiply by the stored std and add back the mean to recover ride counts.
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()

# Label the x-axis with calendar dates (one tick every 24 hours, offset 12).
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)

# Same comparison over the FULL data set (train + validation + test).
fig, ax = plt.subplots(figsize=(16,8))
features, targets = data.drop(target_fields, axis=1), data[target_fields]
mean, std = scaled_features['cnt']
predictions = network.run(features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
import unittest

# Fixed fixture: a tiny input/target pair and hand-set weight matrices so the
# expected post-training weights asserted below can be computed exactly.
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
                       [-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])

class TestMethods(unittest.TestCase):
    """Unit tests for the data loading cells and the NeuralNetwork class."""

    ##########
    # Unit tests for data loading
    ##########
    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))
    ##########
    # Unit tests for network functionality
    ##########
    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
    def test_train(self):
        # Test that weights are updated correctly on training
        # (one gradient step with learning rate 0.5 from the fixture weights).
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328, -0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, 0.39775194, -0.29887597],
                                              [-0.20185996, 0.50074398, 0.19962801]])))
    def test_run(self):
        # Test correctness of run method (forward pass with fixture weights).
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

# Run the whole suite once; results are written to stderr.
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
Your answer below
The model predicts the data fairly well during "normal" times. For example, it does a good job making predictions during regular business hours — "regular" probably being the key word there. It fails with the winter/xmas holiday, and I think this may be because we've sliced that data out of the training set and used it in our test data, so the network never really had a good chance to learn from the holiday data (only once, during winter 2011). The model seems to kind of know something is going on with the holiday, as the predictions do reflect that it's a holiday, but the weight of it being a holiday doesn't seem to be as strong as it should be.
Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
End of explanation
"""
|
rthadani/coursera-ml | notebooks/classification/module-4-linear-classifier-regularization-pandas.ipynb | epl-1.0 | products = pd.read_csv('../../data/amazon_baby_subset.csv')
products['sentiment']
products['sentiment'].size
products.head(10).name
print ('# of positive reviews =', len(products[products['sentiment']==1]))
print ('# of negative reviews =', len(products[products['sentiment']==-1]))
# The same feature processing (same as the previous assignments)
# ---------------------------------------------------------------
import json
with open('../../data/important_words.json', 'r') as f: # Reads the list of most frequent words
    important_words = json.load(f)
# Coerce each entry to a plain str (json.load may yield unicode on Python 2).
important_words = [str(s) for s in important_words]
def remove_punctuation(text):
    """Return *text* as a string with every ASCII punctuation character removed.

    Non-string values (e.g. NaN floats coming from pandas) are coerced with
    str() before the translation is applied.
    """
    import string
    return str(text).translate(str.maketrans('', '', string.punctuation))
# Remove punctuation.
products['review_clean'] = products['review'].apply(remove_punctuation)
# Split out the words into individual columns
# (bag-of-words features: one count column per word in important_words).
for word in important_words:
    products[word] = products['review_clean'].apply(lambda s : s.split().count(word))
"""
Explanation: Load and process review dataset
End of explanation
"""
# Train/validation split using the pre-computed row-index lists shipped with
# the assignment (so everyone gets the same split).
with open('../../data/module-4-assignment-train-idx.json', 'r') as f:
    train_idx = json.load(f)
# .loc replaces the long-deprecated (and now removed) DataFrame.ix; the
# indices are labels on the default RangeIndex, so lookups are identical.
train_data = products.loc[train_idx]

with open('../../data/module-4-assignment-validation-idx.json', 'r') as f:
    v_idx = json.load(f)
validation_data = products.loc[v_idx]
"""
Explanation: Train-Validation split
We split the data into a train-validation split with 80% of the data in the training set and 20% of the data in the validation set. We use seed=2 so that everyone gets the same result.
Note: In previous assignments, we have called this a train-test split. However, the portion of data that we don't train on will be used to help select model parameters. Thus, this portion of data should be called a validation set. Recall that examining performance of various potential models (i.e. models with different parameters) should be on a validation set, while evaluation of selected model should always be on a test set.
End of explanation
"""
import numpy as np
def get_numpy_data(data_frame, features, label):
    """Convert selected DataFrame columns into a NumPy feature matrix and label array.

    A constant 'intercept' column of ones is prepended to the feature matrix
    (and, matching the original behaviour, added to *data_frame* in place).

    Parameters
    ----------
    data_frame : pandas.DataFrame containing the feature and label columns.
    features   : list of feature column names (order preserved; not mutated).
    label      : name of the label column.

    Returns
    -------
    (feature_matrix, label_array)
        feature_matrix : ndarray of shape (n_rows, 1 + len(features)).
        label_array    : Series of labels.
    """
    data_frame['intercept'] = 1
    features = ['intercept'] + features
    features_frame = data_frame[features]
    # .to_numpy() replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
    feature_matrix = features_frame.to_numpy()
    label_array = data_frame[label]
    return(feature_matrix, label_array)
# Materialize the train/validation design matrices (with intercept column).
feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment')
feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment')
"""
Explanation: Convert Frame to NumPy array
Just like in the second assignment of the previous module, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels.
Note: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term.
End of explanation
"""
def prediction(score):
    """Sigmoid link function: map a raw score to a probability in (0, 1)."""
    return 1 / (1 + np.exp(-score))
'''
produces probablistic estimate for P(y_i = +1 | x_i, w).
estimate ranges between 0 and 1.
'''
def predict_probability(feature_matrix, coefficients):
    """Produce the probabilistic estimate P(y_i = +1 | x_i, w) for every row.

    Parameters
    ----------
    feature_matrix : ndarray of shape (n, d) — the features h(x_i).
    coefficients   : ndarray of shape (d,) — the weights w.

    Returns
    -------
    ndarray of shape (n,), each entry strictly between 0 and 1.
    """
    # Take dot product of feature_matrix and coefficients
    scores = np.dot(feature_matrix, coefficients)
    # Compute P(y_i = +1 | x_i, w) by applying the sigmoid link directly:
    # np.exp is already vectorized, so the original np.apply_along_axis
    # indirection added nothing but overhead.
    predictions = 1 / (1 + np.exp(-scores))
    return predictions
"""
Explanation: Building on logistic regression with no L2 penalty assignment
Let us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as:
$$
P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))},
$$
where the feature vector $h(\mathbf{x}_i)$ is given by the word counts of important_words in the review $\mathbf{x}_i$.
We will use the same code as in this past assignment to make probability predictions since this part is not affected by the L2 penalty. (Only the way in which the coefficients are learned is affected by the addition of a regularization term.)
End of explanation
"""
def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant):
    """Derivative of the L2-regularized log likelihood w.r.t. one coefficient.

    The intercept (feature_is_constant=True) is deliberately left
    unregularized; every other coefficient gets the -2*lambda*w_j term.
    """
    # Unregularized part: sum_i h_j(x_i) * (1[y_i = +1] - P(y_i = +1 | x_i, w)).
    gradient = np.dot(feature, errors)
    if feature_is_constant:
        return gradient
    # Subtract the L2 shrinkage term for non-intercept features.
    return gradient - 2 * l2_penalty * coefficient
"""
Explanation: Adding L2 penalty
Let us now work on extending logistic regression with L2 regularization. As discussed in the lectures, the L2 regularization is particularly useful in preventing overfitting. In this assignment, we will explore L2 regularization in detail.
Recall from lecture and the previous assignment that for logistic regression without an L2 penalty, the derivative of the log likelihood function is:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right)
$$
Adding L2 penalty to the derivative
It takes only a small modification to add a L2 penalty. All terms indicated in red refer to terms that were added due to an L2 penalty.
Recall from the lecture that the link function is still the sigmoid:
$$
P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))},
$$
We add the L2 penalty term to the per-coefficient derivative of log likelihood:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j }
$$
The per-coefficient derivative for logistic regression with an L2 penalty is as follows:
$$
\frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j }
$$
and for the intercept term, we have
$$
\frac{\partial\ell}{\partial w_0} = \sum_{i=1}^N h_0(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right)
$$
Note: As we did in the Regression course, we do not apply the L2 penalty on the intercept. A large intercept does not necessarily indicate overfitting because the intercept is not associated with any particular feature.
Write a function that computes the derivative of log likelihood with respect to a single coefficient $w_j$. Unlike its counterpart in the last assignment, the function accepts five arguments:
* errors vector containing $(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w}))$ for all $i$
* feature vector containing $h_j(\mathbf{x}_i)$ for all $i$
* coefficient containing the current value of coefficient $w_j$.
* l2_penalty representing the L2 penalty constant $\lambda$
* feature_is_constant telling whether the $j$-th feature is constant or not.
End of explanation
"""
def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty):
    """L2-regularized log likelihood ll(w) of the observed labels.

    ll(w) = sum_i [ (1[y_i=+1] - 1) * w^T h(x_i) - log(1 + exp(-w^T h(x_i))) ]
            - lambda * ||w[1:]||^2            (the intercept w_0 is not penalized)
    """
    indicator = (sentiment==+1)
    scores = np.dot(feature_matrix, coefficients)
    # np.logaddexp(0, -s) == log(1 + exp(-s)) but cannot overflow to inf when
    # s is a large negative number, unlike np.exp(-s) on its own.
    lp = np.sum((indicator-1)*scores - np.logaddexp(0, -scores)) - l2_penalty*np.sum(coefficients[1:]**2)
    return lp
"""
Explanation: Quiz Question: In the code above, was the intercept term regularized?
To verify the correctness of the gradient ascent algorithm, we provide a function for computing log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability).
$$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) \color{red}{-\lambda\|\mathbf{w}\|_2^2} $$
End of explanation
"""
from math import sqrt
def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter):
    """Fit L2-regularized logistic regression by full-batch gradient ascent.

    Runs max_iter update passes and returns the learned coefficient vector.
    The log likelihood is printed on a logarithmic schedule so progress is
    visible without flooding the output.
    """
    coefficients = np.array(initial_coefficients) # make sure it's a numpy array
    for itr in range(max_iter):
        # Predict P(y_i = +1|x_i,w) using your predict_probability() function
        # YOUR CODE HERE
        predictions = predict_probability(feature_matrix, coefficients)
        # Compute indicator value for (y_i = +1)
        indicator = (sentiment==+1)
        # Compute the errors as indicator - predictions
        errors = indicator - predictions
        for j in range(len(coefficients)): # loop over each coefficient
            # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j].
            # Compute the derivative for coefficients[j]. Save it in a variable called derivative
            # YOUR CODE HERE
            # The intercept (j == 0) is deliberately not regularized.
            derivative = feature_derivative_with_L2(errors, feature_matrix[:, j], coefficients[j], l2_penalty, j == 0)
            # add the step size times the derivative to the current coefficient
            coefficients[j] += (step_size * derivative)
        # Checking whether log likelihood is increasing
        # (log at iterations 0-15, then every 10 / 100 / 1000 / 10000 as itr grows).
        if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \
        or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:
            lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty)
            print ('iteration %*d: log likelihood of observed labels = %.8f' % \
                (int(np.ceil(np.log10(max_iter))), itr, lp))
    return coefficients
"""
Explanation: Quiz Question: Does the term with L2 regularization increase or decrease $\ell\ell(\mathbf{w})$?
The logistic regression function looks almost like the one in the last assignment, with a minor modification to account for the L2 penalty. Fill in the code below to complete this modification.
End of explanation
"""
# Train one model per L2 penalty (0 through 1e5) so the effect of the
# regularization strength on the coefficients and accuracy can be compared.
# The 194 coefficients = 1 intercept + 193 important_words features.
# run with L2 = 0
coefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                     initial_coefficients=np.zeros(194),
                                                     step_size=5e-6, l2_penalty=0, max_iter=501)
# run with L2 = 4
coefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                      initial_coefficients=np.zeros(194),
                                                      step_size=5e-6, l2_penalty=4, max_iter=501)
# run with L2 = 10
coefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                       initial_coefficients=np.zeros(194),
                                                       step_size=5e-6, l2_penalty=10, max_iter=501)
# run with L2 = 1e2
coefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                        initial_coefficients=np.zeros(194),
                                                        step_size=5e-6, l2_penalty=1e2, max_iter=501)
# run with L2 = 1e3
coefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                        initial_coefficients=np.zeros(194),
                                                        step_size=5e-6, l2_penalty=1e3, max_iter=501)
# run with L2 = 1e5
coefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train,
                                                        initial_coefficients=np.zeros(194),
                                                        step_size=5e-6, l2_penalty=1e5, max_iter=501)
"""
Explanation: Explore effects of L2 regularization
Now that we have written up all the pieces needed for regularized logistic regression, let's explore the benefits of using L2 regularization in analyzing sentiment for product reviews. As iterations pass, the log likelihood should increase.
Below, we train models with increasing amounts of regularization, starting with no L2 penalty, which is equivalent to our previous logistic regression implementation.
End of explanation
"""
# Build a words-by-penalty coefficient table: one row per feature
# ('intercept' + the important words), one column per trained model.
important_words.insert(0, 'intercept')
data = np.array(important_words)
table = pd.DataFrame(columns = ['words'], data = data)

def add_coefficients_to_table(coefficients, column_name):
    # Append one model's coefficient vector as a new column of the global table.
    table[column_name] = coefficients
    return table

# Restore important_words to its original contents for later cells.
important_words.remove('intercept')

add_coefficients_to_table(coefficients_0_penalty, 'coefficients [L2=0]')
add_coefficients_to_table(coefficients_4_penalty, 'coefficients [L2=4]')
add_coefficients_to_table(coefficients_10_penalty, 'coefficients [L2=10]')
add_coefficients_to_table(coefficients_1e2_penalty, 'coefficients [L2=1e2]')
add_coefficients_to_table(coefficients_1e3_penalty, 'coefficients [L2=1e3]')
add_coefficients_to_table(coefficients_1e5_penalty, 'coefficients [L2=1e5]')
"""
Explanation: Compare coefficients
We now compare the coefficients for each of the models that were trained above. We will create a table of features and learned coefficients associated with each of the different L2 penalty values.
Below is a simple helper function that will help us create this table.
End of explanation
"""
def make_tuple(column_name):
    # Pair each word with its coefficient from the chosen column of the
    # global `table` built above.
    word_coefficient_tuples = [(word, coefficient) for word, coefficient in zip( table['words'], table[column_name])]
    return word_coefficient_tuples

# Top-5 most positive / most negative words under the unregularized model.
positive_words = list(map(lambda x: x[0], sorted(make_tuple('coefficients [L2=0]'), key=lambda x:x[1], reverse=True)[:5]))
negative_words = list(map(lambda x: x[0], sorted(make_tuple('coefficients [L2=0]'), key=lambda x:x[1], reverse=False)[:5]))
positive_words
negative_words
"""
Explanation: Using the coefficients trained with L2 penalty 0, find the 5 most positive words (with largest positive coefficients). Save them to positive_words. Similarly, find the 5 most negative words (with largest negative coefficients) and save them to negative_words.
Quiz Question. Which of the following is not listed in either positive_words or negative_words?
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
    """Plot the coefficient path (coefficient value vs. L2 penalty) for the
    selected positive (red colormap) and negative (blue colormap) words.

    Parameters
    ----------
    table            : DataFrame with a 'words' column plus one coefficient
                       column per penalty, in the same order as l2_penalty_list.
    positive_words   : words drawn in shades of red.
    negative_words   : words drawn in shades of blue.
    l2_penalty_list  : x-axis values (plotted on a log scale).
    """
    cmap_positive = plt.get_cmap('Reds')
    cmap_negative = plt.get_cmap('Blues')

    xx = l2_penalty_list
    plt.plot(xx, [0.]*len(xx), '--', lw=1, color='k')  # zero reference line

    table_positive_words = table[table['words'].isin(positive_words)]
    table_negative_words = table[table['words'].isin(negative_words)]
    del table_positive_words['words']
    del table_negative_words['words']

    for i in range(len(positive_words)):
        color = cmap_positive(0.8*((i+1)/(len(positive_words)*1.2)+0.15))
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
        plt.plot(xx, table_positive_words[i:i+1].to_numpy().flatten(),
                 '-', label=positive_words[i], linewidth=4.0, color=color)

    for i in range(len(negative_words)):
        color = cmap_negative(0.8*((i+1)/(len(negative_words)*1.2)+0.15))
        plt.plot(xx, table_negative_words[i:i+1].to_numpy().flatten(),
                 '-', label=negative_words[i], linewidth=4.0, color=color)

    plt.legend(loc='best', ncol=3, prop={'size':16}, columnspacing=0.5)
    plt.axis([1, 1e5, -1, 2])
    plt.title('Coefficient path')
    plt.xlabel('L2 penalty ($\lambda$)')
    plt.ylabel('Coefficient value')
    plt.xscale('log')
    plt.rcParams.update({'font.size': 18})
    plt.tight_layout()

make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list=[0, 4, 10, 1e2, 1e3, 1e5])
"""
Explanation: Let us observe the effect of increasing L2 penalty on the 10 words just selected. We provide you with a utility function to plot the coefficient path.
End of explanation
"""
def get_classification_accuracy(feature_matrix, sentiment, coefficients):
    """Fraction of rows whose predicted class matches *sentiment*.

    A row is classified +1 when its score w^T h(x) is strictly positive,
    otherwise -1 (a score of exactly 0 maps to -1, as in the original).
    """
    scores = np.dot(feature_matrix, coefficients)
    # np.where is the vectorized threshold; the original np.vectorize wrapper
    # called a Python lambda once per element.
    predictions = np.where(scores > 0, 1., -1.)

    num_correct = (predictions == sentiment).sum()
    accuracy = num_correct / len(feature_matrix)
    return accuracy
"""
Explanation: Quiz Question: (True/False) All coefficients consistently get smaller in size as the L2 penalty is increased.
Quiz Question: (True/False) The relative order of coefficients is preserved as the L2 penalty is increased. (For example, if the coefficient for 'cat' was more positive than that for 'dog', this remains true as the L2 penalty increases.)
Measuring accuracy
Now, let us compute the accuracy of the classifier model. Recall that the accuracy is given by
$$
\mbox{accuracy} = \frac{\mbox{# correctly classified data points}}{\mbox{# total data points}}
$$
Recall from lecture that the class prediction is calculated using
$$
\hat{y}_i =
\begin{cases}
+1 & h(\mathbf{x}_i)^T\mathbf{w} > 0 \\
-1 & h(\mathbf{x}_i)^T\mathbf{w} \leq 0
\end{cases}
$$
Note: It is important to know that the model prediction code doesn't change even with the addition of an L2 penalty. The only thing that changes is the estimated coefficients used in this prediction.
Based on the above, we will use the same code that was used in Module 3 assignment.
End of explanation
"""
train_accuracy = {}
train_accuracy[0] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_0_penalty)
train_accuracy[4] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_4_penalty)
train_accuracy[10] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_10_penalty)
train_accuracy[1e2] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e2_penalty)
train_accuracy[1e3] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e3_penalty)
train_accuracy[1e5] = get_classification_accuracy(feature_matrix_train, sentiment_train, coefficients_1e5_penalty)
validation_accuracy = {}
validation_accuracy[0] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_0_penalty)
validation_accuracy[4] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_4_penalty)
validation_accuracy[10] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_10_penalty)
validation_accuracy[1e2] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e2_penalty)
validation_accuracy[1e3] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e3_penalty)
validation_accuracy[1e5] = get_classification_accuracy(feature_matrix_valid, sentiment_valid, coefficients_1e5_penalty)
# Build a simple report
for key in sorted(validation_accuracy.keys()):
print("L2 penalty = %g" % key)
print("train accuracy = %s, validation_accuracy = %s" % (train_accuracy[key], validation_accuracy[key]))
print("--------------------------------------------------------------------------------")
# Optional. Plot accuracy on training and validation sets over choice of L2 penalty.
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = 10, 6
sorted_list = sorted(train_accuracy.items(), key=lambda x:x[0])
plt.plot([p[0] for p in sorted_list], [p[1] for p in sorted_list], 'bo-', linewidth=4, label='Training accuracy')
sorted_list = sorted(validation_accuracy.items(), key=lambda x:x[0])
plt.plot([p[0] for p in sorted_list], [p[1] for p in sorted_list], 'ro-', linewidth=4, label='Validation accuracy')
plt.xscale('symlog')
plt.axis([0, 1e3, 0.78, 0.786])
plt.legend(loc='lower left')
plt.rcParams.update({'font.size': 18})
plt.tight_layout
"""
Explanation: Below, we compare the accuracy on the training data and validation data for all the models that were trained in this assignment. We first calculate the accuracy values and then build a simple report summarizing the performance for the various models.
End of explanation
"""
|
kubeflow/pipelines | components/gcp/dataproc/submit_hadoop_job/sample.ipynb | apache-2.0 | %%capture --no-stderr
!pip3 install kfp --upgrade
"""
Explanation: Name
Data preparation using Hadoop MapReduce on YARN with Cloud Dataproc
Label
Cloud Dataproc, GCP, Cloud Storage, Hadoop, YARN, Apache, MapReduce
Summary
A Kubeflow Pipeline component to prepare data by submitting an Apache Hadoop MapReduce job on Apache Hadoop YARN to Cloud Dataproc.
Details
Intended use
Use the component to run an Apache Hadoop MapReduce job as one preprocessing step in a Kubeflow Pipeline.
Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Dataproc region to handle the request. | No | GCPRegion | | |
| cluster_name | The name of the cluster to run the job. | No | String | | |
| main_jar_file_uri | The Hadoop Compatible Filesystem (HCFS) URI of the JAR file containing the main class to execute. | No | List | | |
| main_class | The name of the driver's main class. The JAR file that contains the class must be either in the default CLASSPATH or specified in hadoop_job.jarFileUris. | No | String | | |
| args | The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission. | Yes | List | | None |
| hadoop_job | The payload of a HadoopJob. | Yes | Dict | | None |
| job | The payload of a Dataproc job. | Yes | Dict | | None |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
Note:
main_jar_file_uri: The examples for the files are :
- gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar
- hdfs:/tmp/test-samples/custom-wordcount.jar
- file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar
Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created job. | String
Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this guide.
* Create a new cluster.
* The component can authenticate to GCP. Refer to Authenticating Pipelines to GCP for details.
* Grant the Kubeflow user service account the role roles/dataproc.editor on the project.
Detailed description
This component creates a Hadoop job from Dataproc submit job REST API.
Follow these steps to use the component in a pipeline:
Install the Kubeflow Pipeline SDK:
End of explanation
"""
import kfp.components as comp

# Load the Dataproc submit-Hadoop-job component from a pinned release tag so
# the pipeline is reproducible, then show its generated signature.
dataproc_submit_hadoop_job_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/dataproc/submit_hadoop_job/component.yaml')
help(dataproc_submit_hadoop_job_op)
"""
Explanation: Load the component using KFP SDK
End of explanation
"""
# Fill in the placeholders below before running the sample.
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
OUTPUT_GCS_PATH = '<Please put your output GCS path here>'
REGION = 'us-central1'
MAIN_CLASS = 'org.apache.hadoop.examples.WordCount'
# NOTE(review): 'INTPUT' is a typo for 'INPUT', but the name is used
# consistently throughout this notebook, so renaming needs a coordinated change.
INTPUT_GCS_PATH = 'gs://ml-pipeline-playground/shakespeare1.txt'
EXPERIMENT_NAME = 'Dataproc - Submit Hadoop Job'
"""
Explanation: Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
Setup a Dataproc cluster
Create a new Dataproc cluster (or reuse an existing one) before running the sample code.
Prepare a Hadoop job
Upload your Hadoop JAR file to a Cloud Storage bucket. In the sample, we will use a JAR file that is preinstalled in the main cluster, so there is no need to provide main_jar_file_uri.
Here is the WordCount example source code.
To package a self-contained Hadoop MapReduce application from the source code, follow the MapReduce Tutorial.
Set sample parameters
End of explanation
"""
!gsutil cat $INTPUT_GCS_PATH
"""
Explanation: Inspect Input Data
The input file is a simple text file:
End of explanation
"""
!gsutil rm $OUTPUT_GCS_PATH/**
"""
Explanation: Clean up the existing output files (optional)
This is needed because the sample code requires the output folder to be a clean folder. To continue to run the sample, make sure that the service account of the notebook server has access to the OUTPUT_GCS_PATH.
CAUTION: This will remove all blob files under OUTPUT_GCS_PATH.
End of explanation
"""
import kfp.dsl as dsl
import json

# Minimal one-step pipeline: submit a Hadoop WordCount job to an existing
# Dataproc cluster. main_jar_file_uri defaults to '' because WordCount ships
# preinstalled on the cluster's default CLASSPATH (main_class is enough).
@dsl.pipeline(
    name='Dataproc submit Hadoop job pipeline',
    description='Dataproc submit Hadoop job pipeline'
)
def dataproc_submit_hadoop_job_pipeline(
    project_id = PROJECT_ID,
    region = REGION,
    cluster_name = CLUSTER_NAME,
    main_jar_file_uri = '',
    main_class = MAIN_CLASS,
    # The driver receives [input_path, output_path] as a JSON-encoded list.
    args = json.dumps([
        INTPUT_GCS_PATH,
        OUTPUT_GCS_PATH
    ]),
    hadoop_job='',
    job='{}',
    wait_interval='30'
):
    dataproc_submit_hadoop_job_op(
        project_id=project_id,
        region=region,
        cluster_name=cluster_name,
        main_jar_file_uri=main_jar_file_uri,
        main_class=main_class,
        args=args,
        hadoop_job=hadoop_job,
        job=job,
        wait_interval=wait_interval)
"""
Explanation: Example pipeline that uses the component
End of explanation
"""
pipeline_func = dataproc_submit_hadoop_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
# Compile the pipeline function into a deployable archive next to the notebook.
compiler.Compiler().compile(pipeline_func, pipeline_filename)
"""
Explanation: Compile the pipeline
End of explanation
"""
#Specify pipeline argument values
# (empty: every pipeline parameter falls back to its declared default).
arguments = {}

#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)

#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
"""
Explanation: Submit the pipeline for execution
End of explanation
"""
!gsutil cat $OUTPUT_GCS_PATH/*
"""
Explanation: Inspect the output
The sample in the notebook will count the words in the input text and save them in sharded files. The command to inspect the output is:
End of explanation
"""
|
calee0219/Course | DataMining/hw0/hw0.ipynb | mit | #!/usr/bin/env python3
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import Imputer
from sklearn.metrics import pairwise
from pyproj import Geod
# Load one month of NYC Citi Bike trip records (July 2017).
df = pd.read_csv('201707-citibike-tripdata.csv')
"""
Explanation: 2017 NCTU Data Maning HW0
0416037 李家安
Info
Group 3
Dataset: New York Citi Bike Trip Histories, first data
Task
Preprocess null data and noice
Future used for station_id, time, in_flow_count, out_flow_count
Query
average distance
top 3 frequent stations pairs
top 3 highest average out-flow/in-flow
most popular station
find interesting queries or observations in the dataset
End of explanation
"""
# Report the dataset's dimensions, then display it.
print('column numbers: {}'.format(len(df.columns)))
print('row numbers: {}'.format(len(df.index)))
df
"""
Explanation: Data info
End of explanation
"""
# Count missing values overall and per column; only `gender` has NaNs,
# and since gender cannot be reconstructed from the other fields, drop
# every row containing any NaN and re-index from 0.
print(df.isnull().sum().sum())
print(pd.isnull(df).sum() > 0)
df = df[~df.isin(df[df.isnull().any(axis=1)])].dropna().reset_index(drop=True)
df
"""
Explanation: Data Preprocessing
Remove NAN
觀察發現只有 gender 有出現 NAN,又我覺得 gender 無法以現有資料補全,因此選擇 drop 有 NAN 的 row
End of explanation
"""
# Build (or load cached) table of unique stations: id, name, lat, lon.
try:
    station = pd.read_csv('station.csv')
except:
    # Stack the start-station and end-station columns under common names,
    # then deduplicate to get exactly one row per station.
    station = pd.DataFrame(df[['start station id', 'start station name', 'start station latitude', 'start station longitude']])
    station.columns = ['id', 'name', 'latitude', 'longitude']
    tmp = pd.DataFrame(df[['end station id', 'end station name', 'end station latitude', 'end station longitude']])
    tmp.columns = ['id', 'name', 'latitude', 'longitude']
    station = pd.concat([station, tmp])
    station = station.sort_values('id').drop_duplicates().reset_index(drop=True)
    station.to_csv('station.csv', index=False)
station
"""
Explanation: station table
End of explanation
"""
# Build (or load cached) trip table keyed by station ids and timestamps only.
try:
    # Bug fix: was `read_csv(...)` without the `pd.` prefix, so the cache was
    # never actually read (the NameError was swallowed by the bare except).
    path = pd.read_csv('path.csv')
except FileNotFoundError:
    # Drop the redundant station name/coordinate columns; they live in `station`.
    path = df.drop(['start station name', 'start station latitude', 'start station longitude', 'end station name', 'end station latitude', 'end station longitude'], axis=1)
    path.to_csv('path.csv', index=False)
path
"""
Explanation: path table
End of explanation
"""
import bisect
import datetime  # bug fix: this cell used `datetime` before the module-level import in a later cell

# Build (or load cached) per-station in/out flow counts in half-hour bins.
try:
    out_flow = pd.read_csv('out_flow.csv')
    in_flow = pd.read_csv('in_flow.csv')
except FileNotFoundError:
    # 1488 half-hour bin edges covering July 2017, oldest first.
    begin = datetime.datetime(2017, 7, 1, 0, 0, 0)
    end = datetime.datetime(2017, 7, 31, 23, 30, 0)
    date_list = [end - datetime.timedelta(seconds=x * 60 * 30) for x in range(0, 1488)][::-1]
    # One row per station, one column per time bin, initialised to zero.
    tmp = pd.DataFrame(np.zeros((len(station['id']), len(date_list))), columns=date_list)
    in_flow = pd.DataFrame({'id': list(station['id'])}).join(tmp)
    out_flow = pd.DataFrame({'id': list(station['id'])}).join(tmp)
    # Map station id -> row index so counts can be incremented in place.
    get_idx = {}
    for idx, row in station.iterrows():
        get_idx[row.iloc[0]] = idx
    for idx, row in path.iterrows():
        # Out-flow: binned by trip start time at the start station.
        date = datetime.datetime.strptime(row.iloc[1], "%Y-%m-%d %H:%M:%S")
        out_num = bisect.bisect_right(date_list, date)
        out_flow.iloc[get_idx[row.iloc[3]], out_num] += 1
        # In-flow: binned by trip stop time at the end station.
        date = datetime.datetime.strptime(row.iloc[2], "%Y-%m-%d %H:%M:%S")
        in_num = bisect.bisect_right(date_list, date)
        in_flow.iloc[get_idx[row.iloc[4]], in_num] += 1
    out_flow.to_csv('out_flow.csv', index=False)
    in_flow.to_csv('in_flow.csv', index=False)
print('in-flow')
print(in_flow)
print('out-flow')
print(out_flow)
"""
Explanation: in / out flow table
Get in / out flow count
End of explanation
"""
from pyproj import Geod

wgs84_geod = Geod(ellps='WGS84')


def Distance(lat1, lon1, lat2, lon2):
    """Geodesic distance in meters between two (lat, lon) points on WGS84."""
    az12, az21, dist = wgs84_geod.inv(lon1, lat1, lon2, lat2)
    return dist


# Build (or load cached) the full station-to-station distance table.
try:
    dist = pd.read_csv('dist.csv')
except FileNotFoundError:
    # Collect rows in a list instead of the quadratic DataFrame.append-in-a-loop
    # pattern (append is deprecated and rebuilds the frame every iteration).
    # Note: every pair appears twice, as (a, b) and (b, a), plus self-pairs.
    rows = []
    for idx1, row1 in station.iterrows():
        for idx2, row2 in station.iterrows():
            rows.append({'begin': row1.iloc[0],
                         'end': row2.iloc[0],
                         'dist': Distance(row1.iloc[2], row1.iloc[3], row2.iloc[2], row2.iloc[3])})
    dist = pd.DataFrame(rows, columns=['begin', 'end', 'dist'])
    dist.to_csv('dist.csv', index=False)
dist
sz = station.shape[0]
# Bug fix: the table holds ordered pairs, so there are sz*(sz-1) non-self rows.
# The previous divisor sz*(sz-1)/2 double-counted and reported twice the mean.
n = sz * (sz - 1)
ans = 0
for idx, row in dist.iterrows():
    if row.iloc[0] == row.iloc[1]:
        continue  # skip self-distances (always 0, excluded from the mean)
    ans += row.iloc[2] / n
print('average distance:', ans, 'm')
"""
Explanation: Query
1. average distance
End of explanation
"""
from collections import defaultdict
import datetime

# Count trips per (start station, end station) pair, split into weekday
# vs. weekend by the trip's start timestamp.
weekday = dict()
weekday = defaultdict(lambda: 0, weekday)
weekend = dict()
weekend = defaultdict(lambda: 0, weekend)
for idx, row in path.iterrows():
    # isoweekday(): Monday=1 .. Sunday=7, so >5 means Saturday/Sunday.
    if datetime.datetime.strptime(row.iloc[1], "%Y-%m-%d %H:%M:%S").isoweekday() > 5: # weekend
        weekend[(row.iloc[3], row.iloc[4])] += 1
    else:
        weekday[(row.iloc[3], row.iloc[4])] += 1
# Sort pairs by count descending and keep the top three of each.
top_weekday = sorted(weekday, key=weekday.get)[::-1][:3]
top_weekend = sorted(weekend, key=weekend.get)[::-1][:3]
print(top_weekday)
print(top_weekend)
"""
Explanation: 2. top 3 frequent stations pairs on weekday / weekend
End of explanation
"""
from collections import defaultdict
import datetime

# Per-station trip counts, split by weekday/weekend and by direction.
# (defaultdict(int) replaces the redundant dict() + defaultdict(lambda: 0, ...) pattern.)
weekday_in = defaultdict(int)
weekend_in = defaultdict(int)
weekday_out = defaultdict(int)
weekend_out = defaultdict(int)
# All-days aggregates, used to enrich the station table (computed only once:
# skipped when station.csv already carries a 'freq' column).
allDay_in = defaultdict(int)
allDay_out = defaultdict(int)
allDay = defaultdict(int)
for idx, row in path.iterrows():
    if 'freq' not in station.columns:
        allDay_in[row.iloc[4]] += 1
        allDay_out[row.iloc[3]] += 1
        allDay[row.iloc[4]] += 1
        allDay[row.iloc[3]] += 1
    # isoweekday(): Monday=1 .. Sunday=7, so >5 means Saturday/Sunday.
    if datetime.datetime.strptime(row.iloc[1], "%Y-%m-%d %H:%M:%S").isoweekday() > 5: # weekend
        weekend_out[row.iloc[3]] += 1
        weekend_in[row.iloc[4]] += 1
    else:
        weekday_out[row.iloc[3]] += 1
        weekday_in[row.iloc[4]] += 1
if 'freq' not in station.columns:
    station['in'] = station['id'].map(allDay_in)
    station['out'] = station['id'].map(allDay_out)
    station['freq'] = station['id'].map(allDay)
    station.to_csv('station.csv', index=False)
top_weekday_in = sorted(weekday_in, key=weekday_in.get)[::-1][:3]
top_weekday_out = sorted(weekday_out, key=weekday_out.get)[::-1][:3]
top_weekend_in = sorted(weekend_in, key=weekend_in.get)[::-1][:3]
top_weekend_out = sorted(weekend_out, key=weekend_out.get)[::-1][:3]
# Bug fix: the out-flow and weekend lines previously all reprinted the
# weekday/weekend *in*-flow lists.
print('weekday in_flow:', top_weekday_in)
print('weekday out_flow:', top_weekday_out)
print('weekend in_flow:', top_weekend_in)
print('weekend out_flow:', top_weekend_out)
"""
Explanation: Weekday top 3
432 to 3263
3263 to 432
519 to 498
Weekend top 3
3254 to 3182
3182 to 3182
3182 to 3254
3. top 3 frequent stations with highest average out-flow / in-flow on weekday / weekend
End of explanation
"""
import matplotlib.pyplot as plt

# Plot the half-hourly in-flow and out-flow series for the station with
# the highest total trip count (the 'freq' column added earlier).
most = station.nlargest(1, 'freq').index[0]
in_most = in_flow.iloc[most][1:]   # [1:] drops the leading 'id' column
out_most = out_flow.iloc[most][1:]
plt.figure()
axes = plt.gca()
axes.set_ylim([-10,130])
out_most.plot(figsize=(15, 5))
in_most.plot(figsize=(15, 5))
plt.show()
"""
Explanation: 4. most popular station
a. line-chart for in-flow / out-flow
draw line chart
End of explanation
"""
from sklearn.metrics import pairwise
# Euclidean (Minkowski p=2) distance between the raw in-flow and out-flow
# series, treated as two points in a 1488-dimensional space.
pairwise.pairwise_distances(np.array([in_most.tolist(), out_most.tolist()]), metric='minkowski', n_jobs=4, p=2)[0][1]
"""
Explanation: b. Calculate the distance function
End of explanation
"""
# Same comparison after centring each series on its own mean.
plt.figure()
axes = plt.gca()
axes.set_ylim([-50,120])
in_most.sub(in_most.mean()).plot(figsize=(15, 5))
out_most.sub(out_most.mean()).plot(figsize=(15, 5))
plt.show()
# print(in_most.mean())
# print(out_most.mean())
pairwise.pairwise_distances(np.array([in_most.sub(in_most.mean()).tolist(), out_most.sub(out_most.mean()).tolist()]), metric='minkowski', n_jobs=4, p=2)[0][1]
"""
Explanation: c. in / out flow -mean line chart and distance
End of explanation
"""
# Same comparison after full standardisation: (x - mean) / std.
plt.figure()
axes = plt.gca()
axes.set_ylim([-5,10])
in_most.sub(in_most.mean()).divide(in_most.std()).plot(figsize=(15, 5))
out_most.sub(out_most.mean()).divide(out_most.std()).plot(figsize=(15, 5))
plt.show()
#print(in_most.mean(), in_most.std())
#print(out_most.mean(), out_most.std())
pairwise.pairwise_distances(np.array([in_most.sub(in_most.mean()).divide(in_most.std()).tolist(), out_most.sub(out_most.mean()).divide(out_most.std()).tolist()]), metric='minkowski', n_jobs=4, p=2)[0][1]
"""
Explanation: d. in / out flow -mean/std lin chart and distance
End of explanation
"""
from sklearn import linear_model
import matplotlib.pyplot as plt

# Fit a linear trend to each flow series and compare the fitted lines.
# NOTE(review): the fits use the flow value as X and the time index as y
# (reversed from the usual convention) — confirm this is intended.
reg_in = linear_model.LinearRegression(normalize=True)
reg_out = linear_model.LinearRegression(normalize=True)
y = [i for i in range(len(in_most))]
in_X = [[i] for i in in_most.tolist()]
out_X = [[i] for i in out_most.tolist()]
reg_in.fit(X=in_X, y=y)
reg_out.fit(X=out_X, y=y)
# Re-wrap the predictions as Series indexed by the original time bins.
in_df = pd.DataFrame(reg_in.predict(in_X)).T
in_df.columns = in_most.keys()
out_df = pd.DataFrame(reg_out.predict(out_X)).T
out_df.columns = out_most.keys()
in_s = in_df.iloc[0][:]
out_s = out_df.iloc[0][:]
plt.figure()
in_s.plot(figsize=(15, 5))
out_s.plot(figsize=(15, 5))
plt.show()
pairwise.pairwise_distances(np.array([in_s.tolist(), out_s.tolist()]), metric='minkowski', n_jobs=4, p=2)[0][1]
"""
Explanation: e. in / out flow linear regression line chart and distance
End of explanation
"""
from sklearn.neighbors.kde import KernelDensity

# Kernel-density smoothing of the two flow series, plotted as log-density scores.
# NOTE(review): in_tmp is (index, value) while out_tmp is (value, index), and
# the kernels differ (gaussian vs cosine) — confirm both asymmetries are intended.
in_tmp = [[i, j] for i,j in enumerate(in_most)]
out_tmp = [[j, i] for i,j in enumerate(out_most)]
kde_in = KernelDensity(kernel='gaussian', bandwidth=30).fit(in_tmp)
kde_out = KernelDensity(kernel='cosine', bandwidth=30).fit(out_tmp)
in_arr = kde_in.score_samples(in_tmp)    # log-density at each sample point
out_arr = kde_out.score_samples(out_tmp)
in_ss = pd.Series(in_arr.tolist(), index=in_most.keys())
out_ss = pd.Series(out_arr.tolist(), index=in_most.keys())
#print(in_ss)
plt.figure()
# axes = plt.gca()
# axes.set_ylim([0,1])
in_ss.plot(figsize=(15, 5))
out_ss.plot(figsize=(15, 5))
plt.show()
"""
Explanation: f. smoothing
End of explanation
"""
|
gigjozsa/HI_analysis_course | chapter_00_preface/00_appendix.ipynb | gpl-2.0 | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
"""
Explanation: Content
Glossary
0. Preface
Previous: 1. Preface: References and further reading
0. Preface: Appendix<a id='preface:sec:appendix'></a>
0. Preface: Appendix: Images<a id='preface:sec:appendix_images'></a>
End of explanation
"""
from wand.image import Image as WImage
# Render an EPS figure via ImageMagick (wand); leaving `img` as the last
# expression displays it inline in the notebook.
img = WImage(filename='00_figures/bla.eps')
img
"""
Explanation: Here you find an example to insert an image.
<img src='00_figures/bla.jpg' width=70%>
<div align="center">**Figure 0.A1**: Spherical triangle $STZ$
</div>
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.23/_downloads/b96d98f7c704193a3ede176aaf9433d2/85_brainstorm_phantom_ctf.ipynb | bsd-3-clause | # Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import fit_dipole
from mne.datasets.brainstorm import bst_phantom_ctf
from mne.io import read_raw_ctf
print(__doc__)
"""
Explanation: Brainstorm CTF phantom dataset tutorial
Here we compute the evoked from raw for the Brainstorm CTF phantom
tutorial dataset. For comparison, see :footcite:TadelEtAl2011 and:
https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf
References
.. footbibliography::
End of explanation
"""
# Download (if needed) and locate the Brainstorm CTF phantom dataset.
data_path = bst_phantom_ctf.data_path(verbose=True)
# Switch to these to use the higher-SNR data:
# raw_path = op.join(data_path, 'phantom_200uA_20150709_01.ds')
# dip_freq = 7.
raw_path = op.join(data_path, 'phantom_20uA_20150603_03.ds')
dip_freq = 23.  # dipole oscillation frequency (Hz) for this recording
erm_path = op.join(data_path, 'emptyroom_20150709_01.ds')
raw = read_raw_ctf(raw_path, preload=True)
"""
Explanation: The data were collected with a CTF system at 2400 Hz.
End of explanation
"""
# Extract the reference sinusoid driving the phantom dipole from the
# HDAC006 analog channel, and plot its first second.
sinusoid, times = raw[raw.ch_names.index('HDAC006-4408')]
plt.figure()
plt.plot(times[times < 1.], sinusoid.T[times < 1.])
"""
Explanation: The sinusoidal signal is generated on channel HDAC006, so we can use
that to obtain precise timing.
End of explanation
"""
# Threshold the sinusoid and use transitions of the thresholded signal as
# event samples, shifted into the raw recording's sample numbering; then
# build an MNE-style (sample, 0, event_id=1) event array.
events = np.where(np.diff(sinusoid > 0.5) > 0)[1] + raw.first_samp
events = np.vstack((events, np.zeros_like(events), np.ones_like(events))).T
"""
Explanation: Let's create some events using this signal by thresholding the sinusoid.
End of explanation
"""
# Plot the raw data with the CTF software gradient compensation applied.
raw.plot()
"""
Explanation: The CTF software compensation works reasonably well:
End of explanation
"""
# tSSS requires uncompensated data, so undo the CTF compensation first,
# then apply spatio-temporal Maxwell filtering about the phantom origin.
raw.apply_gradient_compensation(0)  # must un-do software compensation first
mf_kwargs = dict(origin=(0., 0., 0.), st_duration=10.)
raw = mne.preprocessing.maxwell_filter(raw, **mf_kwargs)
raw.plot()
"""
Explanation: But here we can get slightly better noise suppression, lower localization
bias, and a better dipole goodness of fit with spatio-temporal (tSSS)
Maxwell filtering:
End of explanation
"""
# Epoch exactly one oscillation cycle around each event; baselining over
# the whole (one-cycle) epoch removes the DC offset.
tmin = -0.5 / dip_freq
tmax = -tmin
epochs = mne.Epochs(raw, events, event_id=1, tmin=tmin, tmax=tmax,
                    baseline=(None, None))
evoked = epochs.average()
evoked.plot(time_unit='s')
evoked.crop(0., 0.)  # keep only the single peak sample at t=0
"""
Explanation: Our choice of tmin and tmax should capture exactly one cycle, so
we can make the unusual choice of baselining using the entire epoch
when creating our evoked data. We also then crop to a single time point
(@t=0) because this is a peak in our signal.
End of explanation
"""
# Spherical conductor model centred at the phantom origin; visualise the
# sensor/helmet alignment against it.
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.08)
mne.viz.plot_alignment(raw.info, subject='sample',
                       meg='helmet', bem=sphere, dig=True,
                       surfaces=['brain'])
del raw, epochs  # free memory before loading the empty-room recording
"""
Explanation: Let's use a sphere head geometry model <eeg_sphere_model>
and let's see the coordinate alignment and the sphere location.
End of explanation
"""
# Noise covariance from the empty-room recording, processed with the same
# (compensation-undone + tSSS) pipeline as the data, then fit a dipole.
raw_erm = read_raw_ctf(erm_path).apply_gradient_compensation(0)
raw_erm = mne.preprocessing.maxwell_filter(raw_erm, coord_frame='meg',
                                           **mf_kwargs)
cov = mne.compute_raw_covariance(raw_erm)
del raw_erm
dip, residual = fit_dipole(evoked, cov, sphere, verbose=True)
"""
Explanation: To do a dipole fit, let's use the covariance provided by the empty room
recording.
End of explanation
"""
# Compare the fitted dipole against the phantom's known ground-truth
# position (all positions reported in mm).
expected_pos = np.array([18., 0., 49.])
estimated_pos = dip.pos[0] * 1000
diff = np.linalg.norm(estimated_pos - expected_pos)
print('Actual pos: {} mm'.format(np.array_str(expected_pos, precision=1)))
print('Estimated pos: {} mm'.format(np.array_str(estimated_pos, precision=1)))
print('Difference: {:0.1f} mm'.format(diff))
print('Amplitude: {:0.1f} nAm'.format(1e9 * dip.amplitude[0]))
print('GOF: {:0.1f} %'.format(dip.gof[0]))
"""
Explanation: Compare the actual position with the estimated one.
End of explanation
"""
|
kerimlcr/ab2017-dpyo | ornek/osmnx/osmnx-0.3/examples/04-example-simplify-network.ipynb | gpl-3.0 | import osmnx as ox
%matplotlib inline
ox.config(log_file=True, log_console=True, use_cache=True)
"""
Explanation: Use OSMnx to topologically correct and simplify street networks
Overview of OSMnx
GitHub repo
Examples, demos, tutorials
End of explanation
"""
# create an unsimplified network around some (lat, lon) point and plot it
location_point = (33.299896, -111.831638)
G = ox.graph_from_point(location_point, distance=500, simplify=False)
fig, ax = ox.plot_graph(G, node_color='b', node_zorder=3)
# show which nodes we'd remove if we simplify it
# (blue = true endpoints kept, red = interstitial nodes merged into edge geometry)
nc = ['b' if ox.is_endpoint(G, node) else 'r' for node in G.nodes()]
fig, ax = ox.plot_graph(G, node_color=nc, node_zorder=3)
# simplify the network (on a copy, so G keeps the raw topology)
G2 = G.copy()
G2 = ox.simplify_graph(G2)
# plot the simplified network and show any self-loops in magenta
loops = [edge[0] for edge in G2.selfloop_edges()]
nc = ['m' if node in loops else 'b' for node in G2.nodes()]
fig, ax = ox.plot_graph(G2, node_color=nc, node_zorder=3)
# turn off strict mode and see what nodes we'd remove
nc = ['b' if ox.is_endpoint(G, node, strict=False) else 'r' for node in G.nodes()]
fig, ax = ox.plot_graph(G, node_color=nc, node_zorder=3)
# simplify network with strict mode turned off
G3 = G.copy()
G3 = ox.simplify_graph(G3, strict=False)
fig, ax = ox.plot_graph(G3, node_color='b', node_zorder=3)
# show the strictly simplified network with edges colored by network length
ec = ox.get_edge_colors_by_attr(G2, attr='length')
fig, ax = ox.plot_graph(G2, node_color='w', node_edgecolor='k', node_size=30, node_zorder=3, edge_color=ec, edge_linewidth=3)
# plot same network again (topologically isomorphic), this time ignoring the edges' spatial geometry
ec = ox.get_edge_colors_by_attr(G2, attr='length')
fig, ax = ox.plot_graph(G2, node_color='w', node_edgecolor='k', node_size=30, node_zorder=3,
                        edge_color=ec, edge_linewidth=3, edge_alpha=1, use_geom=False)
# highlight all parallel edges (any edge whose multigraph key is nonzero)
ec = ['b' if key == 0 else 'r' for u, v, key in G2.edges(keys=True)]
fig, ax = ox.plot_graph(G2, node_color='w', node_edgecolor='k', node_size=20, node_zorder=3, edge_color=ec, edge_linewidth=2)
"""
Explanation: Create a street network and then simplify it
End of explanation
"""
# get some bbox around a point
bbox = ox.bbox_from_point((45.518698, -122.679964), distance=300)
north, south, east, west = bbox
# clean_periphery=False keeps the stray false edge stubs at the boundary
G = ox.graph_from_bbox(north, south, east, west, network_type='drive', clean_periphery=False)
fig, ax = ox.plot_graph(ox.project_graph(G))
# the node degree distribution for this graph has many false cul-de-sacs
# (dict keys = node degree, dict vals = number of nodes with that degree)
k = dict(G.degree())
{n:list(k.values()).count(n) for n in range(max(k.values()) + 1)}
"""
Explanation: Cleaning up the periphery of the network
This is related to simplification. OSMnx by default (with clean_periphery parameter equal to True) buffers the area you request by 0.5km, and then retrieves the street network within this larger, buffered area. Then it simplifies the topology so that nodes represent intersections of streets (rather than including all the interstitial OSM nodes). Then it calculates the (undirected) degree of each node in this larger network. Next it truncates this network by the actual area you requested (either by bounding box, or by polygon). Finally it saves a dictionary of node degree values as a graph attribute.
This has two primary benefits. First, it cleans up stray false edges around the periphery. If clean_periphery=False, peripheral non-intersection nodes within the requested area appear to be cul-de-sacs, as the rest of the edge leading to an intersection outside the area is ignored. If clean_periphery=True, the larger graph is first created, allowing simplification of such edges to their true intersections, allowing their entirety to be pruned after truncating down to the actual requested area. Second, it gives accurate node degrees by both a) counting node neighbors even if they fall outside the retained network (so you don't claim a degree-4 node is degree-2 because only 2 of its neighbors lie within the area), and b) not counting all those stray false edges' terminus nodes as cul-de-sacs that otherwise grossly inflate the count of nodes with degree=1, even though these nodes are really just interstitial nodes in the middle of a chopped-off street segment between intersections.
See two examples below.
End of explanation
"""
# re-download with the default clean_periphery=True: peripheral false edge
# stubs are pruned and street counts per node are accurate
G = ox.graph_from_bbox(north, south, east, west, network_type='drive')
fig, ax = ox.plot_graph(ox.project_graph(G))
# the streets_per_node distribution for this cleaned up graph is more accurate
# dict keys = count of streets emanating from the node (ie, intersections and dead-ends)
# dict vals = number of nodes with that count
k = G.graph['streets_per_node']
{n:list(k.values()).count(n) for n in range(max(k.values()) + 1)}
"""
Explanation: Above, notice all the peripheral stray edge stubs. Below, notice these are cleaned up and that the node degrees are accurate with regards to the wider street network that may extend beyond the limits of the requested area.
End of explanation
"""
# simplified network with periphery cleaning (the default settings)
location_point = (33.299896, -111.831638)
G = ox.graph_from_point(location_point, distance=500, simplify=True)
fig, ax = ox.plot_graph(G, node_color='b', node_zorder=3)
"""
Explanation: A final example. Compare the network below to the ones in the section above. It has the stray peripheral edges cleaned up. Also notice toward the bottom left, two interstitial nodes remain in that east-west street. Why? These are actually intersections, but their (southbound) edges were removed because these edges' next intersections were south of the requested area's boundaries. However, OSMnx correctly kept these nodes in the graph because they are in fact intersections and should be counted in measures of intersection density, etc.
End of explanation
"""
# downtown San Francisco drivable network, before and after simplification
location_point = (37.791427, -122.410018)
G = ox.graph_from_point(location_point, distance=1500, network_type='drive', simplify=False)
nc = ['b' if ox.is_endpoint(G, node) else 'r' for node in G.nodes()]
fig, ax = ox.plot_graph(G, node_color=nc, node_zorder=3)
G2 = G.copy()
G2 = ox.simplify_graph(G2)
# self-loop nodes shown in magenta
loops = [edge[0] for edge in G2.selfloop_edges()]
nc = ['m' if node in loops else 'b' for node in G2.nodes()]
fig, ax = ox.plot_graph(G2, node_color=nc, node_zorder=3)
"""
Explanation: Create a 'drivable' street network for downtown SF then simplify it
End of explanation
"""
# suburban network near Chandler, AZ: simplify, export, and show one-ways
G = ox.graph_from_address('N. Sicily Pl., Chandler, Arizona',
                          distance=800, network_type='drive', simplify=False)
nc = ['b' if ox.is_endpoint(G, node) else 'r' for node in G.nodes()]
fig, ax = ox.plot_graph(G, node_color=nc, node_zorder=3)
G2 = G.copy()
G2 = ox.simplify_graph(G2)
fig, ax = ox.plot_graph(G2, node_color='b', node_zorder=3)
# save as ESRI shapefile
ox.save_graph_shapefile(G2, filename='arizona')
# highlight one-way streets in red
ec = ['r' if data['oneway'] else 'b' for u, v, key, data in G2.edges(keys=True, data=True)]
fig, ax = ox.plot_graph(G2, node_color='w', node_edgecolor='k', node_size=5, node_zorder=3,
                        edge_color=ec, edge_linewidth=1.5, edge_alpha=0.5)
"""
Explanation: Create another suburban street network, then simplify it
End of explanation
"""
|
Gonzalo933/portfolio | blog/content/K_means_blog.ipynb | mit | %matplotlib inline
#loading the dataset
import numpy as np
import pandas as pd
import seaborn as sns # Nice plots
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
from scipy.spatial.distance import cdist
df = pd.read_csv('old_faithful.csv')
df.round(2) # Round all data to two decimal places
df.drop(df.columns[0], axis=1, inplace=True) # Drop 1st column, we now have: 'eruptions' and 'waiting'
df = (df - df.mean()) / df.std() # Standarize the data (make each variable zero mean and unit standard deviation)
df.plot(x='eruptions', y='waiting', kind='scatter')
plt.show()
"""
Explanation: <a id='k-means'></a>
I want to start this little machine learning blog with a simple but effective algorithm to identify clusters in data. For those that are not familiar with the concept that means finding groups that could be used for finding relations in all the observations that belongs to a cluster or just to find the number of different clusters in some data, for example, in a social network.
As this is the first post I will explain some things about notations, that are also on the introduction:
All vectorial variables are written in bold ex: $ \mathbf{x} , \mathbf{y} $.
All vectors are always column vectors (unless otherwise stated) so a $ x $ vector of size $ N $ would be written as:
$\mathbf{x} = \begin{bmatrix} x_1 \ x_2 \ \vdots\ x_n \end{bmatrix} $ or $ \mathbf{x} = \begin{bmatrix} x_1, \ x_2,\ \dots, \ x_n \end{bmatrix}^T $
Matrices are written both in bold and uppercase, ex: $ \mathbf{X}, \mathbf{Y} $
so if the size of $ \mathbf{X} $ is $N\times D$:
$ \mathbf{X} = \begin{bmatrix} x_{11} & \dots & x_{1d} \ x_{21} & \dots & x_{2d} \ \vdots & \ddots & \vdots \ x_{n1} & \dots & x_{nd}\end{bmatrix} $
K-means is an algorithm to partition $ N $ observations of a random D-dimensional variable $ \mathbf{X}$ (which we will refer as the dataset) into $ \mathit{K} $ clusters, the value of $ \mathit{K} $ is given to us. We can think of a cluster as a group of points (from the dataset) whose distance between each other are small compared to distances to points outside of the cluster.
The Dataset
Lets study first the dataset we are going to work with. I chose one called old faithful, and if you ever read a book about machine learning you will study it deeply.
End of explanation
"""
# `DataFrame.as_matrix()` was deprecated in pandas 0.23 and removed in 1.0;
# `.values` returns the same underlying ndarray.
X = df.values
N = X.shape[0]  # number of observations
K = 2           # number of clusters to fit
D = X.shape[1]  # number of dimensions of the problem
"""
Explanation: I'm using pandas to work with the dataset.
As you can see we are working in two dimensions (waiting time and eruptions), the code above rounds all the data to two decimals places, then "deletes" the first column (we don't need it, is just an index) and last it standardizes the data.
and we can already clearly see two clusters. If you don't see the clusters that I'm talking about I can help you with my ultra MS paint™ skills.
Formalizing the dataset
Lets start by naming some of the variables that we are going to use.
We are working in two dimensions right? and we stated that we are working with a dataset $ \mathbf{X} $ of size $ N \times D $. Here we have our first variables:
$ \mathbf{X} $ is the dataset matrix, one row for each observation, and each observation has two values (waiting and eruptions) so we got that the size of the matrix is:
$ N = 272 $ (trust me on this one) $\times$ $ D = 2 $
Now we need to choose in how many cluster we want to divide the data, we already saw that there are probably two clusters, so lets assume that, set $ K = 2 $.
we can now add that to the code
End of explanation
"""
# Error function
def J(distances, labels):
    """K-means objective: total squared distance from each point to its
    assigned cluster centre (eq. (1), with r_nk encoded as `labels`).

    Parameters
    ----------
    distances : array-like, shape (N, K)
        Squared distance from each of the N points to each of the K centres.
    labels : array-like of int, shape (N,)
        Index of the cluster assigned to each point.

    Returns
    -------
    Total within-cluster squared distance.
    """
    distances = np.asarray(distances)
    labels = np.asarray(labels)
    # Fancy indexing selects distances[n, labels[n]] for every n at once,
    # replacing the Python-level enumerate loop.
    return distances[np.arange(len(labels)), labels].sum()
"""
Explanation: $\newcommand{\bs}[1]{\boldsymbol{#1}}$
$\newcommand{\Epsilon}[0]{\mathcal{E}}$
We are still missing some things before we can start. If we are going to create clusters we need to define the centers of the clusters, for example we can store one of the centers in a variable $ \bs{\mu} $ that would need to be of size D. If we have K clusters then our variable $ \bs{\mu} $ (it's uppercase even if it doesn't look like it) would be of size $ K\times D $.
We also need something to describe to which cluster is each point assigned. We can do that with another matrix $R$ of size $N\times K$, the matrix means something like this: We have a row for each of the points ($N$) and each row has $1$ in the position of the cluster that the point is assigned to and $0$ on the rest of the positions.
Here is an example to understand it better:
$$ \underset{N\times K}{\mathbf{R}} = \begin{bmatrix}
1 & 0\
0 & 1\
1 & 0 \
\vdots & \vdots
\end{bmatrix} $$
It says that the first datapoint is assigned to the first cluster, the second point to the second one and the third one to the first cluster. (note that each point can only be assigned to one and only one of the clusters).
Math time
Now we need and algorithm that give us the best choices for both the assignments and the centers of the clusters but
if we expect to implement an algorithm we are going to need math, don't worry this ones aren't complicated.
We can start with a function $J()$ that tell us how good or how bad our algorithm is doing, we call that the objective funcion or the error function and we can use something as simple as this:
$$ J = \sum_{n=1}^{N}\sum_{k=1}^{K} r_{nk}|| \bs{x_n} - \bs{\mu_k} ||^2 \tag{1}$$
This represents the sum of the squares of the distances of each point to the center of the assigned cluster.
We can break the equation to understand it better (or you can skip the exaplanation if you feel confident).
we are going to loop through all the points and all the clusters that means that if $r_{nk} = 1 $ we are in a point $ x_n $ that belongs to the cluster $ k $ and we calculate the distance from $ \bs{x_n} $ to $ \bs{\mu_k} $ and we add that value to our total error (we are in a summation). If $r_{nk} = 0 $ the point doesn't belong to the cluster and we don't even need to calculate anything, we sum 0 to the error.
Okay so now we have some function that we can call with some parameters and give us a number that tell us if we are in the correct path to solve the problem, we obviously want a small error, so we want to minimize $J$.
take a look at $ (1) $ again, we see that it depends on $\bs{X}, \mu $ and $\bs{R}$, and of that list we can only change $ R $ and $ \mu $ so our goal now is to find the values of those variables that minimize $J$, we can do that in two phases, first minimizing $J$ with respect to $\bs{R}$ fixing the other parameters and then with respect to $\mu $ fixing the other parameters.
Optimize $R$
because $(1)$ is a linear function of $r_{nk}$ we can optimize for each datapoint esparately by doing this:
$$
r_{nk} =
\begin{cases}
1 & \text{if $k$ = arg $min_j || \bs{x_n} - \bs{\mu_j} ||^2 $} \
0 & \text{otherwise}
\end{cases}
$$
intuitively that means that we calculate all distances to all centers of the $ n^{th} $ point and assign the $n^{th}$ data point to the closest cluster centre (the one with less distance $|| \bs{x_n} - \bs{\mu_j} ||^2$.
now to optimize the cluster centres $\bs{\mu_k} $ we can set the derivative of $(1)$ to zero with respect to $\bs{\mu_k} $ and we have
$$
2\sum_{n=1}^{N}r_{nk}(\bs{x_n} - \bs{\mu_k}) = 0
$$
$$
\bs{\mu_k} = \frac{\sum_{n=1}^{N} r_{nk}\bs{x_n}}{\sum_{n=1}^{N}r_{nk}} \tag{2}
$$
eq $(2)$ just says:
$$
\bs{\mu_k} = \frac{\text{sum of the values of points assigned to cluster k}}{\text{number of points assigned to cluster k}}
$$
sounds familiar?, well that is why is called K-means. Now we can start with the fun part and code all of this.
Note: the trick to make an algortihm fast in machine learning is to vectorize all the operations instead of looping through the values. we could code the eq. $(1)$ as two for loops but we will be making our algorithm $O(N^2)$ if we code it the way is done below the time complexity is $O(N)$ but it is equivalent. we also converted the matrix $R$ in a vector called labels of size $N$ which each value can take any number from $ 0 $ to $ K - 1$
End of explanation
"""
plt.rcParams["figure.figsize"] = [12, 6] #Bigger images

# Hand-picked initial cluster centres (K x D).
MU = np.array([[-2.0, 2.0], [1.5, -2.5]])
max_iterations = 7
for _ in range(max_iterations):
    # E-step: squared Euclidean distance from each datapoint to each centre.
    squared_distances = cdist(X, MU, 'sqeuclidean') # NxK matrix
    # Assign each point to its nearest centre.
    labels = np.argmin(squared_distances, axis=1) # Nx1 and each value tell us which index is associated which each datapoint
    # M-step: move each centre to the mean of the datapoints assigned to it (eq. (2)).
    for k in range(K):
        # points_of_cluster k -> X[labels == k]
        # new centers
        MU[k] = np.mean(X[labels == k], axis=0) #out=MU[k]
    # Error should be decreasing
    print("Current error: {0}".format(J(squared_distances, labels)))
# end of iteration, now we have labels of which cluster the datapoint is associated and the new cluster locations (MU)
#plt.figure()
df = df.assign(labels=labels)
df.plot.scatter(x='eruptions', y='waiting', c='labels', colormap=cmx.coolwarm, colorbar=False)
# Overlay the final cluster centres on the scatter plot.
plt.scatter(MU[:,0], MU[:,1], marker='o', linewidths=5, color=['c', '#DA0AF0'])
plt.show()
"""
Explanation: We can start initializing the cluster centers at random (or better, choose some acceptable centers).
We then calculate the distances matrix squared_distances (corresponding to $|| \bs{x_n} - \bs{\mu_k} ||^2 $ and to which cluster are closer, called labels.
I also printed the error (this is optative but helps so we know if we are doing it right, because it should be decreasing in each iteration).
This is the final code then:
End of explanation
"""
|
hannorein/rebound | ipython_examples/Units.ipynb | gpl-3.0 | import rebound
import math
sim = rebound.Simulation()
sim.G = 6.674e-11
"""
Explanation: Unit convenience functions
For convenience, REBOUND offers simple functionality for converting units. One implicitly sets the units for the simulation through the values used for the initial conditions, but one has to set the appropriate value for the gravitational constant G, and sometimes it is convenient to get the output in different units.
The default value for G is 1, so one can:
a) use units for the initial conditions where G=1 (e.g., AU, $M_\odot$, yr/$2\pi$)
b) set G manually to the value appropriate for the adopted initial conditions, e.g., to use SI units,
End of explanation
"""
# Setting sim.units converts G to the value matching (yr, AU, Msun).
sim.units = ('yr', 'AU', 'Msun')
print("G = {0}.".format(sim.G))
"""
Explanation: c) set rebound.units:
End of explanation
"""
# Add Earth (initial conditions fetched in the units set above) and print
# its speed — expected to be approximately 2*pi AU/yr.
sim.add('Earth')
ps = sim.particles
import math
print("v = {0}".format(math.sqrt(ps[0].vx**2 + ps[0].vy**2 + ps[0].vz**2)))
"""
Explanation: When you set the units, REBOUND converts G to the appropriate value for the units passed (must pass exactly 3 units for mass length and time, but they can be in any order). Note that if you are interested in high precision, you have to be quite particular about the exact units.
As an aside, the reason why G differs from $4\pi^2 \approx 39.47841760435743$ is mostly that we follow the convention of defining a "year" as 365.25 days (a Julian year), whereas the Earth's sidereal orbital period is closer to 365.256 days (and at even finer level, Venus and Mercury modify the orbital period). G would only equal $4\pi^2$ in units where a "year" was exactly equal to one orbital period at $1 AU$ around a $1 M_\odot$ star.
Adding particles
If you use sim.units at all, you need to set the units before adding any particles. You can then add particles in any of the ways described in WHFast.ipynb. You can also add particles drawing from the horizons database (see Churyumov-Gerasimenko.ipynb). If you don't set the units ahead of time, HORIZONS will return initial conditions in units of AU, $M_\odot$ and yrs/$2\pi$, such that G=1.
Above we switched to units of AU, $M_\odot$ and yrs, so when we add Earth:
End of explanation
"""
# Enter initial conditions in SI units, then convert both the particle
# states and G to (AU, yr, Msun) afterwards.
sim = rebound.Simulation()
sim.units = ('m', 's', 'kg')
sim.add(m=1.99e30)            # approximate solar mass in kg
sim.add(m=5.97e24,a=1.5e11)   # approximate Earth mass (kg) and 1 AU in meters
sim.convert_particle_units('AU', 'yr', 'Msun')
sim.status()
"""
Explanation: we see that the velocity is correctly set to approximately $2\pi$ AU/yr.
If you'd like to enter the initial conditions in one set of units, and then use a different set for the simulation, you can use the sim.convert_particle_units function, which converts both the initial conditions and G. Since we added Earth above, we restart with a new Simulation instance; otherwise we'll get an error saying that we can't set the units with particles already loaded:
End of explanation
"""
# With no units set, G keeps its default value of 1; orbital elements passed
# to sim.add are interpreted with that G, so these SI masses/distances are
# inconsistent here -- this cell illustrates the pitfall.
sim = rebound.Simulation()
print("G = {0}".format(sim.G))
sim.add(m=1.99e30)
sim.add(m=5.97e24,a=1.5e11)
sim.status()
"""
Explanation: We first set the units to SI, added (approximate values for) the Sun and Earth in these units, and switched to AU, yr, $M_\odot$. You can see that the particle states were converted correctly--the Sun has a mass of about 1, and the Earth has a distance of about 1.
Note that when you pass orbital elements to sim.add, you must make sure G is set correctly ahead of time (through either 3 of the methods above), since it will use the value of sim.G to generate the velocities:
End of explanation
"""
|
statsmodels/statsmodels.github.io | v0.12.2/examples/notebooks/generated/statespace_tvpvar_mcmc_cfa.ipynb | bsd-3-clause | %matplotlib inline
from importlib import reload
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.stats import invwishart, invgamma
# Get the macro dataset
dta = sm.datasets.macrodata.load_pandas().data
dta.index = pd.date_range('1959Q1', '2009Q3', freq='QS')
"""
Explanation: TVP-VAR, MCMC, and sparse simulation smoothing
End of explanation
"""
# Construct a local level model for inflation
mod = sm.tsa.UnobservedComponents(dta.infl, 'llevel')

# Fit the model's parameters (sigma2_varepsilon and sigma2_eta)
# via maximum likelihood
res = mod.fit()
print(res.params)

# Create simulation smoother objects; both draw from the same posterior
# p(mu^t | y^t, params) but use different algorithms internally.
sim_kfs = mod.simulation_smoother()              # default method is KFS
sim_cfa = mod.simulation_smoother(method='cfa')  # can specify CFA method
"""
Explanation: Background
Bayesian analysis of linear Gaussian state space models via Markov chain Monte Carlo (MCMC) methods has become both commonplace and relatively straightforward in recent years, due especially to advances in sampling from the joint posterior of the unobserved state vector conditional on the data and model parameters (see especially Carter and Kohn (1994), de Jong and Shephard (1995), and Durbin and Koopman (2002)). This is particularly useful for Gibbs sampling MCMC approaches.
While these procedures make use of the forward/backward application of the recursive Kalman filter and smoother, another recent line of research takes a different approach and constructs the posterior joint distribution of the entire vector of states at once - see in particular Chan and Jeliazkov (2009) for an econometric time series treatment and McCausland et al. (2011) for a more general survey. In particular, the posterior mean and precision matrix are constructed explicitly, with the latter a sparse band matrix. Advantage is then taken of efficient algorithms for Cholesky factorization of sparse band matrices; this reduces memory costs and can improve performance. Following McCausland et al. (2011), we refer to this method as the "Cholesky Factor Algorithm" (CFA) approach.
The CFA-based simulation smoother has some advantages and some drawbacks compared to that based on the more typical Kalman filter and smoother (KFS).
Advantages of CFA:
Derivation of the joint posterior distribution is relatively straightforward and easy to understand.
In some cases can be both faster and less memory-intensive than the KFS approach
In the Appendix at the end of this notebook, we briefly discuss the performance of the two simulation smoothers for the TVP-VAR model. In summary: simple tests on a single machine suggest that for the TVP-VAR model, the CFA and KFS implementations in Statsmodels have about the same runtimes, while both implementations are about twice as fast as the replication code, written in Matlab, provided by Chan and Jeliazkov (2009).
Drawbacks of CFA:
The main drawback is that this method has not (at least so far) reached the generality of the KFS approach. For example:
It can not be used with models that have reduced-rank error terms in the observation or state equations.
One implication of this is that the typical state space model trick of including identities in the state equation to accommodate, for example, higher-order lags in autoregressive models is not applicable. These models can still be handled by the CFA approach, but at the cost of requiring a slightly different implementation for each lag that is included.
As an example, standard ways of representing ARMA and VARMA processes in state space form do include identities in the observation and/or state equations, and so the basic formulas presented in Chan and Jeliazkov (2009) do not apply immediately to these models.
Less flexibility is available in the state initialization / prior.
Implementation in Statsmodels
A CFA simulation smoother along the lines of the basic formulas presented in Chan and Jeliazkov (2009) has been implemented in Statsmodels.
Notes:
Therefore, the CFA simulation smoother in Statsmodels so-far only supports the case that the state transition is truly a first-order Markov process (i.e. it does not support a p-th order Markov process that has been stacked using identities into a first-order process).
By contrast, the KFS smoother in Statsmodels is fully general and can be used for any state space model, including those with stacked p-th order Markov processes or other identities in the observation and state equations.
Either a KFS or the CFA simulation smoothers can be constructed from a state space model using the simulation_smoother method. To show the basic idea, we first consider a simple example.
Local level model
A local level model decomposes an observed series $y_t$ into a persistent trend $\mu_t$ and a transitory error component
$$
\begin{aligned}
y_t & = \mu_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, \sigma_\text{irregular}^2) \
\mu_t & = \mu_{t-1} + \eta_t, \quad ~ \eta_t \sim N(0, \sigma_\text{level}^2)
\end{aligned}
$$
This model satisfies the requirements of the CFA simulation smoother because both the observation error term $\varepsilon_t$ and the state innovation term $\eta_t$ are non-degenerate - that is, their covariance matrices are full rank.
We apply this model to inflation, and consider simulating draws from the posterior of the joint state vector. That is, we are interested in sampling from
$$p(\mu^t \mid y^t, \sigma_\text{irregular}^2, \sigma_\text{level}^2)$$
where we define $\mu^t \equiv (\mu_1, \dots, \mu_T)'$ and $y^t \equiv (y_1, \dots, y_T)'$.
In Statsmodels, the local level model falls into the more general class of "unobserved components" models, and can be constructed as follows:
End of explanation
"""
# Draw 20 simulated trend paths from the posterior with each smoother;
# every call to simulate() refreshes the simulated_state attribute.
nsimulations = 20
simulated_state_kfs = pd.DataFrame(
    np.zeros((mod.nobs, nsimulations)), index=dta.index)
simulated_state_cfa = pd.DataFrame(
    np.zeros((mod.nobs, nsimulations)), index=dta.index)

for i in range(nsimulations):
    # Apply KFS simulation smoothing
    sim_kfs.simulate()
    # Save the KFS simulated state (row 0 = the level component)
    simulated_state_kfs.iloc[:, i] = sim_kfs.simulated_state[0]

    # Apply CFA simulation smoothing
    sim_cfa.simulate()
    # Save the CFA simulated state
    simulated_state_cfa.iloc[:, i] = sim_cfa.simulated_state[0]
"""
Explanation: The simulation smoother objects sim_kfs and sim_cfa have simulate methods that perform simulation smoothing. Each time that simulate is called, the simulated_state attribute will be re-populated with a new simulated draw from the posterior.
Below, we construct 20 simulated paths for the trend, using the KFS and CFA approaches, where the simulation is at the maximum likelihood parameter estimates.
End of explanation
"""
# Plot the inflation data along with simulated trends
fig, axes = plt.subplots(2, figsize=(15, 6))

# Plot data and KFS simulations
dta.infl.plot(ax=axes[0], color='k')
axes[0].set_title('Simulations based on KFS approach, MLE parameters')
simulated_state_kfs.plot(ax=axes[0], color='C0', alpha=0.25, legend=False)

# Plot data and CFA simulations
dta.infl.plot(ax=axes[1], color='k')
axes[1].set_title('Simulations based on CFA approach, MLE parameters')
simulated_state_cfa.plot(ax=axes[1], color='C0', alpha=0.25, legend=False)

# Add a legend, clean up layout
# (keep only the first two handles: the data line and one simulated path)
handles, labels = axes[0].get_legend_handles_labels()
axes[0].legend(handles[:2], ['Data', 'Simulated state'])
fig.tight_layout();
"""
Explanation: Plotting the observed data and the simulations created using each method below, it is not too hard to see that these two methods are doing the same thing.
End of explanation
"""
fig, ax = plt.subplots(figsize=(15, 3))

# Update the model's parameterization to one that attributes more
# variation in inflation to the observation error and so has less
# variation in the trend component
# (params are [sigma2.irregular, sigma2.level] for the 'llevel' spec --
# assumed from the model definition above; confirm against res.params order)
mod.update([4, 0.05])

# Plot simulations (the smoother picks up the new parameters automatically)
for i in range(nsimulations):
    sim_kfs.simulate()
    ax.plot(dta.index, sim_kfs.simulated_state[0],
            color='C0', alpha=0.25, label='Simulated state')

# Plot data
dta.infl.plot(ax=ax, color='k', label='Data', zorder=-1)

# Add title, legend, clean up layout
ax.set_title('Simulations with alternative parameterization yielding a smoother trend')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-2:], labels[-2:])
fig.tight_layout();
"""
Explanation: Updating the model's parameters
The simulation smoothers are tied to the model instance, here the variable mod. Whenever the model instance is updated with new parameters, the simulation smoothers will take those new parameters into account in future calls to the simulate method.
This is convenient for MCMC algorithms, which repeatedly (a) update the model's parameters, (b) draw a sample of the state vector, and then (c) draw new values for the model's parameters.
Here we will change the model to a different parameterization that yields a smoother trend, and show how the simulated values change (for brevity we only show the simulations from the KFS approach, but simulations from the CFA approach would be the same).
End of explanation
"""
# Subset to the four variables of interest
y = dta[['realgdp', 'cpi', 'unemp', 'tbilrate']].copy()
y.columns = ['gdp', 'inf', 'unemp', 'int']

# Convert to real GDP growth and CPI inflation rates
# (log-difference x 100 -> approximate percent growth rates)
y[['gdp', 'inf']] = np.log(y[['gdp', 'inf']]).diff() * 100
# Drop the first row, which is NaN after differencing
y = y.iloc[1:]

fig, ax = plt.subplots(figsize=(15, 5))
y.plot(ax=ax)
ax.set_title('Evolution of macroeconomic variables included in TVP-VAR exercise');
"""
Explanation: Application: Bayesian analysis of a TVP-VAR model by MCMC
One of the applications that Chan and Jeliazkov (2009) consider is the time-varying parameters vector autoregression (TVP-VAR) model, estimated with Bayesian Gibbs sampling (MCMC) methods. They apply this to model the co-movements in four macroeconomic time series:
Real GDP growth
Inflation
Unemployment rate
Short-term interest rates
We will replicate their example, using a very similar dataset that is included in Statsmodels.
End of explanation
"""
# 1. Create a new TVPVAR class as a subclass of sm.tsa.statespace.MLEModel
class TVPVAR(sm.tsa.statespace.MLEModel):
    """State space form of the TVP-VAR(1) model.

    Observation:  y_t = Z_t alpha_t + eps_t,      eps_t ~ N(0, H)
    State:        alpha_{t+1} = alpha_t + eta_t,  eta_t ~ N(0, diag(sigma_i^2))

    The state vector alpha_t stacks the p intercepts and p^2 lag-1
    coefficients, so k_states = p * (p + 1). H and the sigma_i^2 are not
    estimated here; they are plugged in via `update_variances` (e.g. on
    each MCMC iteration).
    """

    # Steps 2-3 are best done in the class "constructor", i.e. the __init__ method
    def __init__(self, y):
        # Create a matrix with [y_t' : y_{t-1}'] for t = 2, ..., T
        augmented = sm.tsa.lagmat(y, 1, trim='both', original='in', use_pandas=True)
        # Separate into y_t and z_t = [1 : y_{t-1}']
        p = y.shape[1]
        y_t = augmented.iloc[:, :p]
        z_t = sm.add_constant(augmented.iloc[:, p:])

        # Recall that the length of the state vector is p * (p + 1)
        k_states = p * (p + 1)
        super().__init__(y_t, exog=z_t, k_states=k_states)

        # Note that the state space system matrices default to contain zeros,
        # so we don't need to explicitly set c_t = d_t = 0.

        # Construct the design matrix Z_t
        # Notes:
        # -> self.k_endog = p is the dimension of the observed vector
        # -> self.k_states = p * (p + 1) is the dimension of the state vector
        # -> self.nobs = T is the number of observations in y_t
        # Each equation i gets its own block of (1 + p) regressors [1 : y_{t-1}']
        self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
        for i in range(self.k_endog):
            start = i * (self.k_endog + 1)
            end = start + self.k_endog + 1
            self['design', i, start:end, :] = z_t.T

        # Construct the transition matrix T = I (random walk states)
        self['transition'] = np.eye(k_states)

        # Construct the selection matrix R = I
        self['selection'] = np.eye(k_states)

        # Step 3: Initialize the state vector as alpha_1 ~ N(0, 5I)
        self.ssm.initialize('known', stationary_cov=5 * np.eye(self.k_states))

    # Step 4. Create a method that we can call to update H and Q
    def update_variances(self, obs_cov, state_cov_diag):
        """Plug new values of H (obs_cov) and the diagonal of Q
        (state_cov_diag) into the system matrices."""
        self['obs_cov'] = obs_cov
        self['state_cov'] = np.diag(state_cov_diag)

    # Finally, it can be convenient to define human-readable names for
    # each element of the state vector. These will be available in output
    @property
    def state_names(self):
        # One row of names per equation: intercept, then the lag-1
        # coefficient on each endogenous variable.
        state_names = np.empty((self.k_endog, self.k_endog + 1), dtype=object)
        for i in range(self.k_endog):
            endog_name = self.endog_names[i]
            state_names[i] = (
                ['intercept.%s' % endog_name] +
                ['L1.%s->%s' % (other_name, endog_name) for other_name in self.endog_names])
        return state_names.ravel().tolist()
"""
Explanation: TVP-VAR model
Note: this section is based on Chan and Jeliazkov (2009) section 3.1, which can be consulted for additional details.
The usual (time-invariant) VAR(1) model is typically written:
$$
\begin{aligned}
y_t & = \mu + \Phi y_{t-1} + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H)
\end{aligned}
$$
where $y_t$ is a $p \times 1$ vector of variables observed at time $t$ and $H$ is a covariance matrix.
The TVP-VAR(1) model generalizes this to allow the coefficients to vary over time. Stacking all the parameters into a vector according to $\alpha_t = \text{vec}([\mu_t : \Phi_t])$, where $\text{vec}$ denotes the operation that stacks columns of a matrix into a vector, we model their evolution over time according to:
$$\alpha_{i,t+1} = \alpha_{i, t} + \eta_{i,t}, \qquad \eta_{i, t} \sim N(0, \sigma_i^2)$$
In other words, each parameter evolves independently according to a random walk.
Note that there are $p$ coefficients in $\mu_t$ and $p^2$ coefficients in $\Phi_t$, so the full state vector $\alpha$ is shaped $p * (p + 1) \times 1$.
Putting the TVP-VAR(1) model into state-space form is relatively straightforward, and in fact we just have to re-write the observation equation into SUR form:
$$
\begin{aligned}
y_t & = Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H) \
\alpha_{t+1} & = \alpha_t + \eta_t, \qquad \eta_t \sim N(0, \text{diag}({\sigma_i^2}))
\end{aligned}
$$
where
$$
Z_t = \begin{bmatrix}
1 & y_{t-1}' & 0 & \dots & & 0 \
0 & 0 & 1 & y_{t-1}' & & 0 \
\vdots & & & \ddots & \ddots & \vdots \
0 & 0 & 0 & 0 & 1 & y_{t-1}' \
\end{bmatrix}
$$
As long as $H$ is full rank and each of the variances $\sigma_i^2$ is non-zero, the model satisfies the requirements of the CFA simulation smoother.
We also need to specify the initialization / prior for the initial state, $\alpha_1$. Here we will follow Chan and Jeliazkov (2009) in using $\alpha_1 \sim N(0, 5 I)$, although we could also model it as diffuse.
Aside from the time-varying coefficients $\alpha_t$, the other parameters that we will need to estimate are terms in the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
TVP-VAR model in Statsmodels
Constructing this model programatically in Statsmodels is also relatively straightforward, since there are basically four steps:
Create a new TVPVAR class as a subclass of sm.tsa.statespace.MLEModel
Fill in the fixed values of the state space system matrices
Specify the initialization of $\alpha_1$
Create a method for updating the state space system matrices with new values of the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
To do this, first note that the general state space representation used by Statsmodels is:
$$
\begin{aligned}
y_t & = d_t + Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H_t) \
\alpha_{t+1} & = c_t + T_t \alpha_t + R_t \eta_t, \qquad \eta_t \sim N(0, Q_t) \
\end{aligned}
$$
Then the TVP-VAR(1) model implies the following specializations:
The intercept terms are zero, i.e. $c_t = d_t = 0$
The design matrix $Z_t$ is time-varying but its values are fixed as described above (i.e. its values contain ones and lags of $y_t$)
The observation covariance matrix is not time-varying, i.e. $H_t = H_{t+1} = H$
The transition matrix is not time-varying and is equal to the identity matrix, i.e. $T_t = T_{t+1} = I$
The selection matrix $R_t$ is not time-varying and is also equal to the identity matrix, i.e. $R_t = R_{t+1} = I$
The state covariance matrix $Q_t$ is not time-varying and is diagonal, i.e. $Q_t = Q_{t+1} = \text{diag}({\sigma_i^2})$
End of explanation
"""
# Create an instance of our TVPVAR class with our observed dataset y
# (real GDP growth, inflation, unemployment, interest rate)
mod = TVPVAR(y)
"""
Explanation: The above class defined the state space model for any given dataset. Now we need to create a specific instance of it with the dataset that we created earlier containing real GDP growth, inflation, unemployment, and interest rates.
End of explanation
"""
# Initial parameterization: H = sample covariance of the data,
# sigma_i^2 = 0.01 for every state, following Chan and Jeliazkov (2009).
initial_obs_cov = np.cov(y.T)
initial_state_cov_diag = [0.01] * mod.k_states

# Update H and Q
mod.update_variances(initial_obs_cov, initial_state_cov_diag)

# Perform Kalman filtering and smoothing
# (the [] is just an empty list that in some models might contain
# additional parameters. Here, we don't have any additional parameters
# so we just pass an empty list)
initial_res = mod.smooth([])
"""
Explanation: Preliminary investigation with ad-hoc parameters in H, Q
In our analysis below, we will need to begin our MCMC iterations with some initial parameterization. Following Chan and Jeliazkov (2009) we will set $H$ to be the sample covariance matrix of our dataset, and we will set $\sigma_i^2 = 0.01$ for each $i$.
Before discussing the MCMC scheme that will allow us to make inferences about the model, first we can consider the output of the model when simply plugging in these initial parameters. To fill in these parameters, we use the update_variances method that we defined earlier and then perform Kalman filtering and smoothing conditional on those parameters.
Warning: This exercise is just by way of explanation - we must wait for the output of the MCMC exercise to study the actual implications of the model in a meaningful way.
End of explanation
"""
def plot_coefficients_by_equation(states):
    """Plot time-varying coefficient paths, one panel per equation.

    The state vector is laid out in consecutive blocks of five
    coefficients (intercept plus one lag coefficient per variable), one
    block per observed variable, so columns [0:5] belong to the GDP
    growth equation, [5:10] to inflation, and so on.

    Parameters
    ----------
    states : pandas.DataFrame
        Time-indexed coefficient paths (20 columns).

    Returns
    -------
    The matplotlib axes of the last panel drawn (interest rate equation).
    """
    fig, axes = plt.subplots(2, 2, figsize=(15, 8))

    # (panel title, first column of the corresponding 5-column block)
    panels = [
        ('GDP growth', 0),
        ('Inflation rate', 5),
        ('Unemployment equation', 10),
        ('Interest rate equation', 15),
    ]
    for ax, (title, start) in zip(axes.flat, panels):
        states.iloc[:, start:start + 5].plot(ax=ax)
        ax.set_title(title)
        ax.legend()

    return ax
"""
Explanation: The initial_res variable contains the output of Kalman filtering and smoothing, conditional on those initial parameters. In particular, we may be interested in the "smoothed states", which are $E[\alpha_t \mid y^t, H, {\sigma_i^2}]$.
First, lets create a function that graphs the coefficients over time, separated into the equations for equation of the observed variables.
End of explanation
"""
# Here, for illustration purposes only, we plot the time-varying
# coefficients conditional on an ad-hoc parameterization.

# Recall that `initial_res` contains the Kalman filtering and smoothing,
# and the `states.smoothed` attribute contains the smoothed states
# E[alpha_t | y^T, H, {sigma_i^2}].
plot_coefficients_by_equation(initial_res.states.smoothed);
"""
Explanation: Now, we are interested in the smoothed states, which are available in the states.smoothed attribute out our results object initial_res.
As the graph below shows, the initial parameterization implies substantial time-variation in some of the coefficients.
End of explanation
"""
# Prior hyperparameters

# Prior for obs. cov. is inverse-Wishart(v_1^0 = k + 3, S_1^0 = I)
v10 = mod.k_endog + 3
S10 = np.eye(mod.k_endog)

# Prior for state cov. variances is inverse-Gamma(v_{i2}^0 / 2 = 3, S_{i2}^0 / 2 = 0.005)
vi20 = 6
Si20 = 0.01
"""
Explanation: Bayesian estimation via MCMC
We will now implement the Gibbs sampler scheme described in Chan and Jeliazkov (2009), Algorithm 2.
We use the following (conditionally conjugate) priors:
$$
\begin{aligned}
H & \sim \mathcal{IW}(\nu_1^0, S_1^0) \
\sigma_i^2 & \sim \mathcal{IG} \left ( \frac{\nu_{i2}^0}{2}, \frac{S_{i2}^0}{2} \right )
\end{aligned}
$$
where $\mathcal{IW}$ denotes the inverse-Wishart distribution and $\mathcal{IG}$ denotes the inverse-Gamma distribution. We set the prior hyperparameters as:
$$
\begin{aligned}
v_1^0 = p + 3, & \quad S_1^0 = I \
v_{i2}^0 = 6, & \quad S_{i2}^0 = 0.01 \qquad \text{for each} ~ i\
\end{aligned}
$$
End of explanation
"""
# Gibbs sampler setup
niter = 11000   # total draws
nburn = 1000    # initial draws discarded as burn-in

# 1. Create storage arrays (index 0 holds the initial values)
store_states = np.zeros((niter + 1, mod.nobs, mod.k_states))
store_obs_cov = np.zeros((niter + 1, mod.k_endog, mod.k_endog))
store_state_cov = np.zeros((niter + 1, mod.k_states))

# 2. Put in the initial values
store_obs_cov[0] = initial_obs_cov
store_state_cov[0] = initial_state_cov_diag
mod.update_variances(store_obs_cov[0], store_state_cov[0])

# 3. Construct posterior samplers (CFA simulation smoother for the states)
sim = mod.simulation_smoother(method='cfa')
"""
Explanation: Before running the MCMC iterations, there are a couple of practical steps:
Create arrays to store the draws of our state vector, observation covariance matrix, and state error variances.
Put the initial values for H and Q (described above) into the storage vectors
Construct the simulation smoother object associated with our TVPVAR instance to make draws of the state vector
End of explanation
"""
# Gibbs sampling: each iteration (1) draws the state path given the current
# H and Q, (2) draws H from its conditional inverse-Wishart posterior, and
# (3) draws each sigma_i^2 from its conditional inverse-Gamma posterior.
for i in range(niter):
    mod.update_variances(store_obs_cov[i], store_state_cov[i])
    sim.simulate()

    # 1. Sample states (simulated_state is k_states x nobs; store as nobs x k_states)
    store_states[i + 1] = sim.simulated_state.T

    # 2. Simulate obs cov: compute residuals y_t - Z_t alpha_t, then draw
    # H ~ IW(v_1^0 + T, S_1^0 + sum_t e_t e_t')
    fitted = np.matmul(mod['design'].transpose(2, 0, 1), store_states[i + 1][..., None])[..., 0]
    resid = mod.endog - fitted
    store_obs_cov[i + 1] = invwishart.rvs(v10 + mod.nobs, S10 + resid.T @ resid)

    # 3. Simulate state cov variances from the state-innovation sum of squares
    resid = store_states[i + 1, 1:] - store_states[i + 1, :-1]
    sse = np.sum(resid**2, axis=0)

    for j in range(mod.k_states):
        rv = invgamma.rvs((vi20 + mod.nobs - 1) / 2, scale=(Si20 + sse[j]) / 2)
        store_state_cov[i + 1, j] = rv
"""
Explanation: As before, we could have used either the simulation smoother based on the Kalman filter and smoother or that based on the Cholesky Factor Algorithm.
End of explanation
"""
# Collect the posterior means of each time-varying coefficient,
# averaging over draws after the burn-in period
states_posterior_mean = pd.DataFrame(
    np.mean(store_states[nburn + 1:], axis=0),
    index=mod._index, columns=mod.state_names)

# Plot these means over time
plot_coefficients_by_equation(states_posterior_mean);
"""
Explanation: After removing a number of initial draws, the remaining draws from the posterior allow us to conduct inference. Below, we plot the posterior mean of the time-varying regression coefficients.
(Note: these plots are different from those in Figure 1 of the published version of Chan and Jeliazkov (2009), but they are very similar to those produced by the Matlab replication code available at http://joshuachan.org/code/code_TVPVAR.html)
End of explanation
"""
import arviz as az

# Collect the observation error covariance parameters
# (upper triangle of H only, labeled Var[.] on the diagonal, Cov[.,.] off it)
az_obs_cov = az.convert_to_inference_data({
    ('Var[%s]' % mod.endog_names[i] if i == j else
     'Cov[%s, %s]' % (mod.endog_names[i], mod.endog_names[j])):
    store_obs_cov[nburn + 1:, i, j]
    for i in range(mod.k_endog) for j in range(i, mod.k_endog)})

# Plot the credible intervals
az.plot_forest(az_obs_cov, figsize=(8, 7));

# Collect the state innovation variance parameters
az_state_cov = az.convert_to_inference_data({
    r'$\sigma^2$[%s]' % mod.state_names[i]: store_state_cov[nburn + 1:, i]
    for i in range(mod.k_states)})

# Plot the credible intervals
az.plot_forest(az_state_cov, figsize=(8, 7));
"""
Explanation: Python also has a number of libraries to assist with exploring Bayesian models. Here we'll just use the arviz package to explore the credible intervals of each of the covariance and variance parameters, although it makes available a much wider set of tools for analysis.
End of explanation
"""
from statsmodels.tsa.statespace.simulation_smoother import SIMULATION_STATE

# For a fair timing comparison, restrict the KFS smoother to producing
# only state simulations (its default computes additional output).
sim_cfa = mod.simulation_smoother(method='cfa')
sim_kfs = mod.simulation_smoother(simulation_output=SIMULATION_STATE)
"""
Explanation: Appendix: performance
Finally, we run a few simple tests to compare the performance of the KFS and CFA simulation smoothers by using the %timeit Jupyter notebook magic.
One caveat is that the KFS simulation smoother can produce a variety of output beyond just simulations of the posterior state vector, and these additional computations could bias the results. To make the results comparable, we will tell the KFS simulation smoother to only compute simulations of the state by using the simulation_output argument.
End of explanation
"""
|
dcavar/python-tutorial-for-ipython | notebooks/Python Parsing with NLTK.ipynb | apache-2.0 | from nltk import Nonterminal, nonterminals, Production, CFG
nt1 = Nonterminal('NP')
nt2 = Nonterminal('VP')
nt1.symbol()
nt1 == Nonterminal('NP')
nt1 == nt2
S, NP, VP, PP = nonterminals('S, NP, VP, PP')
print(S.symbol())
N, V, P, DT = nonterminals('N, V, P, DT')
prod1 = Production(S, [NP, VP])
prod2 = Production(NP, [DT, NP])
prod1.lhs()
prod1.rhs()
prod1 == Production(S, [NP, VP])
prod1 == prod2
grammar = CFG.fromstring("""
S -> NP VP
PP -> P NP
PP -> P NP
NP -> 'the' N | N PP | 'the' N PP
VP -> V NP | V PP | V NP PP
N -> 'cat'
N -> 'fish'
N -> 'aligator'
N -> 'dog'
N -> 'rug'
N -> 'mouse'
V -> 'chased'
V -> 'sat'
P -> 'in'
P -> 'on'
""")
print(grammar)
"""
Explanation: Python Parsing with NLTK
(C) 2017-2020 by Damir Cavar
Download: This and various other Jupyter notebooks are available from my GitHub repo.
License: Creative Commons Attribution-ShareAlike 4.0 International License (CA BY-SA 4.0)
This is a tutorial related to the discussion of grammar engineering and parsing in the class Alternative Syntactic Theories and Advanced Natural Language Processing taught at Indiana University in Spring 2017 and Fall 2018.
Working with Grammars
The following examples are taken from the NLTK parsing HOWTO page.
End of explanation
"""
import nltk

# A feature structure: a POS feature plus a nested agreement bundle
fstr = nltk.FeatStruct("[POS='N', AGR=[PER=3, NUM='pl', GND='fem']]")
print(fstr)
"""
Explanation: Feature Structures
One can build complex feature structures using the following strategies:
End of explanation
"""
# "(1)" tags the ADDRESS value and "->(1)" re-uses it, so Lee's address
# and Kim's (the spouse's) address are one shared structure.
fstr2 = nltk.FeatStruct("""[NAME='Lee', ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'],
SPOUSE=[NAME='Kim', ADDRESS->(1)]]""")
print(fstr2)
"""
Explanation: Creating shared paths is also possible:
End of explanation
"""
# Unification merges compatible feature structures; the result carries
# the union of the information in fs1 and fs2.
fs1 = nltk.FeatStruct("[AGR=[PER=3, NUM='pl', GND='fem'], POS='N']")
fs2 = nltk.FeatStruct("[POS='N', AGR=[PER=3, GND='fem']]")
print(fs1.unify(fs2))
"""
Explanation: Let us create feature structures and try out unification:
End of explanation
"""
import nltk

# Run the built-in chart parser demo; the first argument selects the
# parsing strategy, trace=1 prints each chart edge as it is added.
nltk.parse.chart.demo(2, print_times=False, trace=1,
                      sent='I saw a dog', numparses=1)
"""
Explanation: Chart Parser
The following examples are taken from the NLTK parsing HOWTO page.
End of explanation
"""
# Parse an ambiguous sentence (PP attachment) and report both parses.
nltk.parse.chart.demo(1, print_times=True, trace=0,
                      sent='she killed the man with the tie', numparses=2)
"""
Explanation: This is an example how to apply top-down parsing:
End of explanation
"""
# Another ambiguous sentence, plus the feature-chart variant of the demo,
# which parses with a feature-based grammar and prints it first.
nltk.parse.chart.demo(2, print_times=False, trace=0,
                      sent='I saw John with a dog', numparses=2)

nltk.parse.featurechart.demo(print_times=False,
                             print_grammar=True,
                             parser=nltk.parse.featurechart.FeatureChartParser,
                             sent='I saw John with a dog')
"""
Explanation: This is how to apply bottom-up parsing:
End of explanation
"""
import nltk
from nltk import CFG
from nltk.grammar import FeatureGrammar as FCFG
"""
Explanation: Loading grammars from files and editing them
We will need the following NLTK modules in this section:
End of explanation
"""
# Load a CFG from a file located next to this notebook
# (nltk.data.load dispatches on the .cfg extension).
cfg = nltk.data.load('spanish1.cfg')
print(cfg)
"""
Explanation: We can load a grammar from a file, that is located in the same folder as the current Jupyter notebook, in the following way:
End of explanation
"""
cp1 = nltk.parse.ChartParser(cfg)
"""
Explanation: We instantiate a ChartParser object with this grammar:
End of explanation
"""
"los mujeres adoran la Lucas".split()
for x in cp1.parse("los mujeres adoran la Lucas".split()):
print(x)
"""
Explanation: The ChartParser object has a parse-function that takes a list of tokens as a parameter. The token list can be generated using a language specific tokenizer. In this case we simply tokenize using the Python-function split on strings. The output of the parse function is a list of trees. We loop through the list of parse trees and print them out:
End of explanation
"""
# A grammar can also be written inline instead of loaded from a file.
cfg2 = CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> 'the' N | N PP | 'the' N PP
VP -> V NP | V PP | V NP PP
N -> 'cat'
N -> 'dog'
N -> 'bird'
N -> 'rug'
N -> 'woman'
N -> 'man'
N -> 'tie'
V -> 'chased'
V -> 'killed'
V -> 'sat'
V -> 'bit'
P -> 'in'
P -> 'on'
P -> 'with'
""")
"""
Explanation: We can also edit a grammar directly:
End of explanation
"""
# Parse with the inline grammar; the PP "with the tie" makes the
# sentence structurally ambiguous, so multiple trees may print.
cp2 = nltk.parse.ChartParser(cfg2)

for x in cp2.parse("the woman killed the man with the tie".split()):
    print(x)
"""
Explanation: We parse our example sentences using the same approach as above:
End of explanation
"""
# Load a feature-based CFG from file and parse with a FeatureChartParser,
# printing all resulting trees.
fcfg = nltk.data.load('spanish1.fcfg')
fcp1 = nltk.parse.FeatureChartParser(fcfg)

for x in fcp1.parse(u"Miguel adoró el gato".split()):
    print(x)
"""
Explanation: The previous example included a Context-free grammar. In the following example we load a Context-free Grammar with Features, instantiate a FeatureChartParser, and loop through the parse trees that are generated by our grammar to print them out:
End of explanation
"""
# An inline feature grammar: an X-bar style fragment of English with
# agreement (num/pers), case, definiteness/count, and verb valency
# features. Lines starting with '#' inside the string are grammar
# comments, interpreted by NLTK's grammar reader.
fcfg2 = FCFG.fromstring("""
% start CP
# ############################
# Grammar Rules
# ############################
CP -> Cbar[stype=decl]
Cbar[stype=decl] -> IP[+TNS]
IP[+TNS] -> DP[num=?n,pers=?p,case=nom] VP[num=?n,pers=?p]
DP[num=?n,pers=?p,case=?k] -> Dbar[num=?n,pers=?p,case=?k]
Dbar[num=?n,pers=?p] -> D[num=?n,DEF=?d,COUNT=?c] NP[num=?n,pers=?p,DEF=?d,COUNT=?c]
Dbar[num=?n,pers=?p] -> NP[num=?n,pers=?p,DEF=?d,COUNT=?c]
Dbar[num=?n,pers=?p,case=?k] -> D[num=?n,pers=?p,+DEF,type=pron,case=?k]
NP[num=?n,pers=?p,COUNT=?c] -> N[num=?n,pers=?p,type=prop,COUNT=?c]
VP[num=?n,pers=?p] -> V[num=?n,pers=?p,val=1]
VP[num=?n,pers=?p] -> V[num=?n,pers=?p,val=2] DP[case=acc]
PP -> P DP[num=?n,pers=?p,case=acc]
#PP -> P DP[num=?n,pers=?p,case=dat]
#
# ############################
# Lexical Rules
# ############################
D[-DEF,+COUNT,num=sg] -> 'a'
D[-DEF,+COUNT,num=sg] -> 'an'
D[+DEF] -> 'the'
D[+DEF,gen=f,num=sg,case=nom,type=pron] -> 'she'
D[+DEF,gen=m,num=sg,case=nom,type=pron] -> 'he'
D[+DEF,gen=n,num=sg,type=pron] -> 'it'
D[+DEF,gen=f,num=sg,case=acc,type=pron] -> 'her'
D[+DEF,gen=m,num=sg,case=acc,type=pron] -> 'him'
N[num=sg,pers=3,type=prop] -> 'John' | 'Sara' | 'Mary'
V[tns=pres,num=sg,pers=3,val=2] -> 'loves' | 'calls' | 'sees' | 'buys'
N[num=sg,pers=3,-COUNT] -> 'furniture' | 'air' | 'justice'
N[num=sg,pers=3] -> 'cat' | 'dog' | 'mouse'
N[num=pl,pers=3] -> 'cats' | 'dogs' | 'mice'
V[tns=pres,num=sg,pers=3,val=1] -> 'sleeps' | 'snores'
V[tns=pres,num=sg,pers=1,val=1] -> 'sleep' | 'snore'
V[tns=pres,num=sg,pers=2,val=1] -> 'sleep' | 'snore'
V[tns=pres,num=pl,val=1] -> 'sleep' | 'snore'
V[tns=past,val=1] -> 'slept' | 'snored'
V[tns=pres,num=sg,pers=3,val=2] -> 'calls' | 'sees' | 'loves'
V[tns=pres,num=sg,pers=1,val=2] -> 'call' | 'see' | 'love'
V[tns=pres,num=sg,pers=2,val=2] -> 'call' | 'see' | 'love'
V[tns=pres,num=pl,val=2] -> 'call' | 'see' | 'love'
V[tns=past,val=2] -> 'called' | 'saw' | 'loved'
""")
"""
Explanation: We can edit a Feature CFG in the same way directly in this notebook and then parse with it:
End of explanation
"""
# Parse an example sentence with the inline feature grammar. Accepted
# sentences have their trees printed; rejected ones are marked with "*".
fcp2 = nltk.parse.FeatureChartParser(fcfg2, trace=1)
sentence = "John buys the furniture"
parses = list(fcp2.parse(sentence.split()))
if not parses:
    print("*", sentence)
else:
    for tree in parses:
        print(tree)
"""
Explanation: We can now create a parser instance and parse with this grammar:
End of explanation
"""
|
iurilarosa/thesis | codici/Archiviati/Plots/.ipynb_checkpoints/Sensibilità-checkpoint.ipynb | gpl-3.0 | theta = 2.5
probs = p0*(1-p0)/math.pow(p1,2)
sogliaCR = 6
confs = sogliaCR - math.sqrt(2)*scsp.erfcinv(2*gamma)
const0min = 4.02*math.pow(N,-1/4)*math.pow(theta,-1/2)*math.pow(probs, 1/4)*math.pow(confs, 1/2)*math.pow(tFft,-1/2)
const0min
lambda0min = 4.02*math.pow(theta,-1/2)*math.pow(probs, 1/4)*math.pow(confs, 1/2)
lambda0min
import numpy
import math
import scipy.special as scsp
#N è numero fft
#WARNING, per numero di fft si intende il totale (con rivelatore acceso),
# il totale vetato (con cui si è fatta la peakmap con una certa soglia)
# il totale di fft che hanno almeno un picco nella peakmap?
tFft = 8192
tObs = 9*30*24*60*60
Ntempi = tObs/(tFft/2)*0.6
print(Ntempi)
#p0 è prob di avere picco in peakmap
theta = 2.5
p0 = math.exp(-theta)-math.exp(-2*theta)+1/3*math.exp(-3*theta)
p1 = math.exp(-theta)-2*math.exp(-2*theta)+math.exp(-3*theta)
print(p0, p1)
#inverfc(2Gamma)
gamma = 0.9545
scsp.erfcinv(2*gamma)
"""
Explanation: Sensib ricerca
$$h_{0,min} \approx \frac{4.02}{N^{1/4}\theta_{thr}^{1/2}} \left(\frac{p_0(1-p_0)}{p_1^2}\right)^{1/4}\sqrt{CR_{thr}-\sqrt{2}erfc^{-1}(2\Gamma)} \cdot \sqrt{\frac{S_n(\nu)}{T_{FFT}}}$$
Poniamo
$\Gamma = 0.9545$
$\theta_{thr} = 2.5$ (TODO CONTROL)
$T_{FFT} = 8192$ (o $4096$)
$N \sim 3400$ (TODO CONTROL)
$p_0 = 0.0755$, $p_1 = 0.0692$
End of explanation
"""
# Sizes of the search parameter space (sky patches, frequency, spin-down).
quadSize = 78
stepFrequenza = 1/tFft
freqMax = 128
freqMin = 10
spindownMin = -1e-9
spindownMax = 1e-10
stepSpindown = stepFrequenza/tObs
Nsd = round((spindownMax-spindownMin)/stepSpindown)
peakSize = (freqMax-freqMin)/stepFrequenza * Ntempi
freqSize = (freqMax-freqMin)/stepFrequenza
print(Nsd, freqSize, quadSize)
# Total number of points in the search parameter space.
Ntot = freqSize*Nsd*quadSize
Ntot
# If we require that only one candidate be due to a noise fluctuation,
# the false-alarm probability Pfa is
Nrum = 1
Pfa = Nrum/Ntot
print(Pfa)
# and the corresponding CR threshold is
CR = math.pow(2,1/2)*scsp.erfcinv(2*Nrum/Ntot)
print(CR)
# If instead we want a 3-sigma confidence that our candidates are NOT
# noise fluctuations, then Pfa is
confSigma = 0.997
Pfa = 1-confSigma
Nrum = Pfa*Ntot
print(Nrum)
# and the CR threshold
CR = math.pow(2,1/2)*scsp.erfcinv(2*Nrum/Ntot)
print(CR)
# Same computation at a much higher confidence level
# (0.999999426697, i.e. roughly 5 sigma — TODO confirm).
confSigma = 0.999999426697
Pfa = 1-confSigma
Nrum = Pfa*Ntot
print(Nrum)
# and the CR threshold
CR = math.pow(2,1/2)*scsp.erfcinv(2*Nrum/Ntot)
print(CR)
"""
Explanation: Scelgo soglia CR
$$P_{fa} = \frac{N_{rumcand}}{N_{tot}}$$
$$P_{fa} = \frac{1}{2}erfc\left(\frac{CR_{thr}}{\sqrt{2}}\right)$$
$$CR_{thr} = \sqrt{2} erfc^{-1} \left(2\frac{N_{cand}}{N_{tot}}\right)$$
$$N_{tot} = N_{freq} \cdot N_{times} \cdot N_{spindowns} \cdot N_{coords}$$
End of explanation
"""
|
pmgbergen/porepy | tutorials/parameter_assignment_assembler_setup.ipynb | gpl-3.0 | import numpy as np
import scipy.sparse as sps
import porepy as pp
"""
Explanation: Assembly of system with multiple domains, variables and numerics
This tutorial has the dual purpose of illustrating parameter assigment in PorePy, and also showing how to set up problems in (mixed-dimensional) geometries. It contains two examples, one covering a simple setup (the pressure equation), and a second illustrating the full generality of the coupling scheme.
End of explanation
"""
def assign_data(gb, keyword_param_storage):
    """Populate every node and edge of the GridBucket with flow parameters.

    Parameters:
        gb: Mixed-dimensional GridBucket to populate.
        keyword_param_storage (str): Keyword under which the node parameters
            are stored in the Parameters object.

    Returns:
        The GridBucket, with pp.PARAMETERS set on all nodes and edges.
    """
    for grid, node_data in gb:
        # 'flow' selects the set of default parameters to start from.
        # Replace with 'transport' or 'mechanics' if needed.
        default_parameter_type = 'flow'
        # Non-default, dimension-dependent permeability (illustrative only):
        # 10 on the 2d matrix, 0.1 on lower-dimensional grids.
        kxx_value = 10 if grid.dim == 2 else 0.1
        permeability = pp.SecondOrderTensor(kxx_value * np.ones(grid.num_cells))
        # Dirichlet conditions on all boundary faces: the default Neumann
        # condition would make the problem singular.
        boundary_condition = pp.BoundaryCondition(grid, grid.get_boundary_faces(), 'dir')
        # Dictionary of values that override the defaults.
        overrides = {'second_order_tensor': permeability, 'bc': boundary_condition}
        pp.initialize_default_data(grid, node_data, default_parameter_type,
                                   overrides, keyword_param_storage)
        # Parameters are stored internally as dictionaries; print the keys
        # for one of the grids to illustrate how they are accessed.
        if grid.dim == 2:
            print('The assigned parameters for the 2d grid are')
            print(node_data[pp.PARAMETERS][keyword_param_storage].keys())
    for _, edge_data in gb.edges():
        # No default-initialization helper exists for edges yet, so the
        # Parameters object is constructed explicitly.
        edge_params = {"normal_diffusivity": 2e1}
        edge_data[pp.PARAMETERS] = pp.Parameters(keywords=['flow_param_edge'],
                                                 dictionaries=[edge_params])
    return gb
"""
Explanation: Data assignment
We will mainly use default values for parameters, while overriding some of the values. Sets of default parameters are available for flow, transport and mechanics (elasticity). For example, initialize_default_data initializes a second order permeability tensor in the flow data, and a fourth order stiffness tensor in the mechanics data. For more details, and definitions of what the defaults are, see the modules pp/params/data.py and pp/params/parameter_dictionaries.py <br>
The parameters are stored in a class pp.Parameters. This class is again stored in the data dictionary on each node and edge in the GridBucket, that is, in the variable d in this loop. The Paramater object can be accessed by d[pp.PARAMETERS]. To allow storage of parameters for several problems simultaneously (say, we want to solve a combined flow and transport problem), the Parameter class uses keywords to identify sets of parameters. This keyword must also be provided to the discretization method.
When the parameter class is initialized with default values, the default behavior is to identify the parameters by the same keyword as is used to choose the type of default parameters (default_parameter_type). While this is usually good practice, we here override this behavior for illustrative purposes, using the keyword_param_storage.
End of explanation
"""
# Build a 2x2 Cartesian grid bucket containing a single horizontal fracture.
gb, _ = pp.grid_buckets_2d.single_horizontal([2, 2], simplex=False)
# Keyword under which the flow parameters are stored (must match the
# keyword handed to the discretization below).
parameter_keyword = 'flow_param'
gb = assign_data(gb, parameter_keyword)
"""
Explanation: Example 1
The practical way of setting up a problem with a single variable is described here. For explanations, and hints on how to consider a more general setting, see the expanded Example 2 below. <br>
As shown in the tutorial on single-phase flow, the equation in the mono-dimensional case is
$$ - \nabla \cdot K \nabla p = f. $$
We expand to the mixed-dimensional version of the single-phase flow problem by solving the problem in each of the subdomains (here: fracture and matrix) and adding the flux between the subdomains
$$ \lambda = - \kappa (p_{fracture} - \texttt{tr }p_{matrix}), $$
with $\kappa$ denoting the normal permeability of the fractures. For details, refer to the tutorial on single-phase flow and published papers, e.g. this one.<br><br>
We start by defining the grid bucket and assigning parameters, tagging them with a keyword. This keyword ensures that the discretizer (here tpfa, defined below) uses the right set of parameters.
End of explanation
"""
# Define the pressure variable with the same keyword on all grids
grid_variable = 'pressure'
# Variable name for the flux between grids, that is, the primary variable
# on the edges in the GridBucket.
mortar_variable = 'mortar_flux'
# Identifier of the discretization operator on each grid
operator_keyword = 'diffusion'
# Identifier of the discretization operator between grids
coupling_operator_keyword = 'coupling_operator'
# Use a two-point flux approximation on all grids.
# Note the keyword here: It must be the same as used when assigning the
# parameters.
tpfa = pp.Tpfa(parameter_keyword)
# Between the grids we use a Robin type coupling (resistance to flow over a fracture).
# Again, the keyword must be the same as used to assign data to the edge.
# The edge discretization also needs access to the corresponding discretizations
# on the neighboring nodes.
edge_discretization = pp.RobinCoupling('flow_param_edge', tpfa, tpfa)
# Loop over the nodes in the GridBucket, define primary variables and discretization schemes
for g, d in gb:
    # Assign primary variables on this grid. It has one degree of freedom per cell.
    d[pp.PRIMARY_VARIABLES] = {grid_variable: {"cells": 1, "faces": 0}}
    # Assign discretization operator for the variable.
    # If the discretization is composed of several terms, they can be assigned
    # by multiple entries in the inner dictionary, e.g.
    # {operator_keyword_1: method_1, operator_keyword_2: method_2, ...}
    d[pp.DISCRETIZATION] = {grid_variable: {operator_keyword: tpfa}}
# Loop over the edges in the GridBucket, define primary variables and discretizations
for e, d in gb.edges():
    g1, g2 = gb.nodes_of_edge(e)
    # The mortar variable has one degree of freedom per cell in the mortar grid
    d[pp.PRIMARY_VARIABLES] = {mortar_variable: {"cells": 1}}
    # The coupling discretization links an edge discretization with variables
    # and discretization operators on each neighboring grid
    d[pp.COUPLING_DISCRETIZATION] = {
        coupling_operator_keyword: {
            g1: (grid_variable, operator_keyword),
            g2: (grid_variable, operator_keyword),
            e: (mortar_variable, edge_discretization),
        }
    }
    # Storage for the matrices produced when the edge terms are discretized,
    # keyed by the same parameter keyword as the edge data.
    d[pp.DISCRETIZATION_MATRICES] = {'flow_param_edge': {}}
"""
Explanation: Now, we define the variables on grids and edges and identify the individual terms of the equation we want to solve. We have an equation for the pressure on each grid (node of the GridBucket), and an equation for the mortar flux between them (edge of the bucket). The terms to be discretized are the diffusion term on the nodes ($- \nabla \cdot K \nabla p$) and the coupling term $- \kappa (p_{fracture} - \texttt{tr }p_{matrix})$ on the edges.
End of explanation
"""
# The DofManager keeps track of the global ordering of the unknowns;
# the Assembler builds the mixed-dimensional linear system from it.
dof_manager = pp.DofManager(gb)
assembler = pp.Assembler(gb, dof_manager)
assembler.discretize()
# Assemble the linear system, using the information stored in the GridBucket
A, b = assembler.assemble_matrix_rhs()
# Solve the global system for the mixed-dimensional pressure.
pressure = sps.linalg.spsolve(A, b)
"""
Explanation: The task of assembling the linear system is left to a dedicated object, called an Assembler, which again relies on a DofManager to keep track of the ordering of unknowns (for more, see below).
Discretization and assembly of the global linear system can in this case be carried out by a single function call. Note that for some problems, notably poro-elasticity, this is not possible, then discretization must be carried out first.
Below, A is the global linear system, and b is the corresponding right hand side, and we obtain the pressure solution by solving the system.
End of explanation
"""
# Getting the grids is easy, there is one in each dimension
g_2d = gb.grids_of_dimension(2)[0]
g_1d = gb.grids_of_dimension(1)[0]
# Formally loop over the edges, there is a single one
# (after the loop, e is bound to that edge)
for e, _ in gb.edges():
    continue
# Get 2d dofs
global_dof_2d = dof_manager.grid_and_variable_to_dofs(g_2d, grid_variable)
# Print the relevant part of the system matrix
print(A.toarray()[global_dof_2d, :][:, global_dof_2d])
"""
Explanation: The parameters assigned above will not yield a well-posed problem, thus the solve will likely produce a warning about the matrix being singular. This can be ignored in this case. <br>
The ordering of the unknowns in the global linear system will vary depending on how the components in the GridBucket and the unknowns are traversed. The DofManager has methods to map from combinations of the relevant component in the GridBucket (either the grid or the edge between grids) with variables to the corresponding degrees of freedom.
End of explanation
"""
def assign_data_2(gb, keyword_param_storage, keyword_param_storage_2=None):
    """Assign flow parameters to all nodes and edges of the GridBucket.

    Parameters:
        gb: Mixed-dimensional GridBucket to populate.
        keyword_param_storage (str): Keyword under which the first node
            parameter set is stored.
        keyword_param_storage_2 (str, optional): If given, a second,
            default-valued parameter set is also stored on each node, and
            the edges receive three parameter sets instead of one (one per
            mortar variable used in the example below).

    Returns:
        The GridBucket, with pp.PARAMETERS populated on every node and edge.
    """
    # FIX: the original mixed truthiness checks (`if keyword_param_storage_2:`)
    # with `is not None` checks, which disagree for falsy non-None values
    # such as ''. All checks now use `is not None` consistently.
    for g, d in gb:
        # This keyword selects which set of default parameters to pick.
        # Replace with 'transport' or 'mechanics' if needed.
        default_parameter_type = 'flow'
        # Assign a non-default permeability, for illustrative purposes:
        # 10 on the 2d matrix, 0.1 on lower-dimensional grids.
        if g.dim == 2:
            kxx = 10 * np.ones(g.num_cells)
        else:
            kxx = 0.1 * np.ones(g.num_cells)
        perm = pp.SecondOrderTensor(kxx)
        # Create a dictionary to override the default parameters.
        specified_parameters = {'second_order_tensor': perm}
        # Fill in the remaining parameters with default values.
        pp.initialize_default_data(g, d, default_parameter_type, specified_parameters,
                                   keyword_param_storage)
        # Internally to the Parameter class, the parameters are stored as
        # dictionaries. To illustrate how to access specific sets of
        # parameters, print the keywords for one of the grids.
        if g.dim == 2 and keyword_param_storage_2 is None:
            print('The assigned parameters for the 2d grid are')
            print(d[pp.PARAMETERS][keyword_param_storage].keys())
        # For one example below, we will need two different parameter sets.
        # Define a second set, with default values only.
        if keyword_param_storage_2 is not None:
            pp.initialize_default_data(g, d, default_parameter_type,
                                       keyword=keyword_param_storage_2)
    for e, d in gb.edges():
        # On edges in the GridBucket, there are currently no methods for
        # default initialization.
        data = {"normal_diffusivity": 2e1}
        # Add parameters: We again use keywords to identify sets of parameters.
        if keyword_param_storage_2 is not None:
            # Three parameter sets are stored here ('two parameter sets'
            # refers to the nodes), since the example below uses in total
            # three mortar variables on each edge.
            d[pp.PARAMETERS] = pp.Parameters(keywords=['flow_param_edge',
                                                       'second_flow_param_edge',
                                                       'third_flow_param_edge'],
                                             dictionaries=[data, data, data])
        else:
            d[pp.PARAMETERS] = pp.Parameters(keywords=['flow_param_edge'],
                                             dictionaries=[data])
    return gb
# Define a grid
gb, _ = pp.grid_buckets_2d.single_horizontal([4, 4], simplex=False)
parameter_keyword = 'flow_param'
parameter_keyword_2 = 'second_flow_param'
gb = assign_data_2(gb, parameter_keyword, parameter_keyword_2)
"""
Explanation: Example 2
The first example showed how to work with the assembler in reletively simple cases. In this second example, we aim to illustrate the full scope of the assembler, including:
* General assignment of variables on different grid components (fracture, matrix, etc.):
* Different number of variables on each grid component
* Different names for variables (a relevant case could be to use 'temperature' on one domain, 'enthalpy' on another, with an appropriate coupling)
* General coupling schemes between different grid components:
* Multiple coupling variables
* Couplings related to different variables and discretization schemes on the neighboring grids.
* Multiple discretization operators applied to the same term / equation on different grid components
The example that incorporates all these features are necessarily quite complex and heavy on notation. As such it should be considered as a reference for how to use the functionality, more than a simulation of any real physical system.
We define two primary variables on the nodes and three coupling variables. The resulting system will be somewhat arbitrary, in that it may not reflect any standard physics, but it should better illustrate what is needed for a multi-physics problem.
First we extend the data assignment method.
End of explanation
"""
# Variable keywords first grid (cell-centered pressure and temperature)
grid_1_pressure_variable = 'pressure'
grid_1_temperature_variable = 'temperature'
# Variable keywords second grid: the mixed method carries flux and
# pressure in a single combined variable.
grid_2_pressure_variable = 'flux_pressure'
grid_2_temperature_variable = 'temperature'
"""
Explanation: Primary variables must be defined on each component of the GridBucket.
On the first grid we use a cell centered method which has one primary variable "pressure".
On the second grid, we use a mixed method with both pressure and fluxes combined into one primary variable.
The temperature is tagged with the same keyword on both grids.
End of explanation
"""
# Coupling variable for pressure
mortar_variable_pressure = 'mortar_flux_pressure'
# Coupling variables for the two diffusive temperature terms
# (the model below uses double diffusion, not advection).
mortar_variable_temperature_1 = 'mortar_flux_diffusion'
mortar_variable_temperature_2 = 'mortar_flux_diffusion_2'
"""
Explanation: Next we assign a keyword to the coupling terms between the grid. We will have three coupling variables;
one for the fluid flux, and one for each of the diffusive terms in the temperature equation.
End of explanation
"""
# Identifier of the discretization operator for the pressure discretization
operator_keyword_pressure = 'pressure_diffusion'
# Identifiers for the temperature discretizations.
# NOTE: The intention is to illustrate the use of two discretization
# operators for a single variable. The natural option in this setting is
# advection-diffusion, but that requires either the existence of a Darcy
# flux, or tighter coupling with the pressure equation. Purely for
# illustrative purposes, we instead use a double diffusion model.
operator_keyword_temperature_1 = 'diffusion'
operator_keyword_temperature_2 = 'diffusion_2'
# Identifier of the discretization operator between grids
coupling_pressure_keyword = 'coupling_operator_pressure'
"""
Explanation: We now give a keyword to the operators.
End of explanation
"""
# Pressure diffusion discretizations: two-point flux (Tpfa) and mixed
# virtual elements (MVEM), both reading parameters under parameter_keyword.
tpfa_flow = pp.Tpfa(parameter_keyword)
vem_flow = pp.MVEM(parameter_keyword)
# Temperature diffusion discretizations: two-point and multi-point flux
# approximations, reading parameters under parameter_keyword_2.
tpfa_temperature = pp.Tpfa(parameter_keyword_2)
mpfa_temperature = pp.Mpfa(parameter_keyword_2)
"""
Explanation: So far we have only defined the keywords needed for the discretizations to obtain the correct parameters
and couplings. Next, we create the discretization objects
End of explanation
"""
# One term couples two pressure / flow variables.
# Each coupling needs its own keyword, matching a parameter set on the edge.
edge_discretization_flow = pp.RobinCoupling('flow_param_edge', tpfa_flow, vem_flow)
# The second coupling is of mpfa on one domain, and tpfa on the other, both for temperature
edge_discretization_temperature_diffusion_1 = pp.RobinCoupling('second_flow_param_edge',
                                                               mpfa_temperature, tpfa_temperature)
# The third coupling is of tpfa for flow with mpfa for temperature
edge_discretization_temperature_diffusion_2 = pp.RobinCoupling('third_flow_param_edge',
                                                               tpfa_flow, mpfa_temperature)
"""
Explanation: Discretization operators on the coupling conditions, chosen to illustrate the framework.
Note that in all cases, the coupling conditions need a separate keyword, which should
correspond to an assigned set of data
End of explanation
"""
for g, d in gb:
    # Assign primary variables on this grid.
    if g.dim == 2:
        # Both pressure and temperature are represented as cell centered variables
        d[pp.PRIMARY_VARIABLES] = {grid_1_pressure_variable: {"cells": 1, "faces": 0},
                                   grid_1_temperature_variable: {"cells": 1}}
        # The structure of the discretization assignment is: For each variable, give a
        # pair of operator identifications (usually a string) and a discretization method.
        # If a variable is identified with several discretizations, say, advection and diffusion,
        # several pairs can be assigned.
        # For pressure, use tpfa.
        # For temperature, use two discretizations, respectively tpfa and mpfa
        d[pp.DISCRETIZATION] = {grid_1_pressure_variable: {operator_keyword_pressure: tpfa_flow},
                                grid_1_temperature_variable: {operator_keyword_temperature_1: tpfa_temperature,
                                                              operator_keyword_temperature_2: mpfa_temperature}}
    else: #g.dim == 1
        # Pressure is discretized with flux-pressure combination (hence the
        # extra face degree of freedom), temperature with cell centered variables
        d[pp.PRIMARY_VARIABLES] = {grid_2_pressure_variable: {"cells": 1, "faces": 1},
                                   grid_2_temperature_variable: {"cells": 1}}
        # For pressure, use vem.
        # For temperature, only discretize once, with tpfa
        d[pp.DISCRETIZATION] = {grid_2_pressure_variable: {operator_keyword_pressure: vem_flow},
                                grid_2_temperature_variable: {operator_keyword_temperature_1: tpfa_temperature}}
"""
Explanation: Loop over the nodes in the GridBucket, define primary variables and discretization schemes
End of explanation
"""
for e, d in gb.edges():
    # Identify the two grids neighboring this edge.
    g1, g2 = gb.nodes_of_edge(e)
    # The syntax used in the problem setup assumes that g1 has dimension 2
    if g1.dim < g2.dim:
        g2, g1 = g1, g2
    # The mortar variable has one degree of freedom per cell in the mortar grid
    d[pp.PRIMARY_VARIABLES] = {mortar_variable_pressure: {"cells": 1},
                               mortar_variable_temperature_1: {"cells": 1},
                               mortar_variable_temperature_2: {"cells": 1},
                               }
    # Coupling discretizations
    d[pp.COUPLING_DISCRETIZATION] = {
        # The flow discretization couples tpfa on one domain with vem on the other
        'edge_discretization_flow': {
            g1: (grid_1_pressure_variable, operator_keyword_pressure),
            g2: (grid_2_pressure_variable, operator_keyword_pressure),
            e: (mortar_variable_pressure, edge_discretization_flow),
        },
        # The first temperature mortar couples one of the temperature discretizations on grid 1
        # with the single temperature discretization on the second grid.
        # As a side remark, the keys in the outer dictionary are never used, except from debugging,
        # but a dictionary seemed a more natural option than a list.
        'the_keywords_in_this_dictionary_can_have_any_value': {
            g1: (grid_1_temperature_variable, operator_keyword_temperature_2),
            g2: (grid_2_temperature_variable, operator_keyword_temperature_1),
            e: (mortar_variable_temperature_1, edge_discretization_temperature_diffusion_1),
        },
        # Finally, the third coupling
        'second_edge_discretization_temperature': {
            # grid_1_variable_1 gives pressure variable, then identify the discretization object
            g1: (grid_1_pressure_variable, operator_keyword_pressure),
            # grid_2_variable_2 gives temperature, then use the keyword that was used to identify mpfa
            # (and not the one for tpfa, would have been operator_keyword_temperature_1)
            g2: (grid_2_temperature_variable, operator_keyword_temperature_2),
            e: (mortar_variable_temperature_2, edge_discretization_temperature_diffusion_2),
        }
    }
    # Storage for the matrices produced when discretizing each of the
    # three edge parameter sets.
    d[pp.DISCRETIZATION_MATRICES] = {'flow_param_edge': {},
                                     'second_flow_param_edge': {},
                                     'third_flow_param_edge': {}
                                     }
"""
Explanation: Loop over the edges in the GridBucket, define primary variables and discretizations.
Notice how coupling discretizations are assigned as a dictionary, one per coupling term on each edge. For each term, the coupling contains an inner dictionary, with the keys being the edge and the two neighboring grids. For the edge, the values are the name of the mortar variable, and the discretization object to be applied. For the grids, the values are the variable name on the grid, and the keyword identifying the discretization operator, as specified in the loop over nodes.
End of explanation
"""
# Bookkeeper for the global dof ordering, and the assembler that builds
# the multi-variable, mixed-dimensional system from it.
dof_manager = pp.DofManager(gb)
assembler = pp.Assembler(gb, dof_manager)
"""
Explanation: We have now assigned all the data. The task of assembling the linear system is left to a dedicated object:
End of explanation
"""
# Discretize, then Assemble the linear system, using the information stored in the GridBucket
assembler.discretize()
A, b = assembler.assemble_matrix_rhs()
# Pick out part of the discretization associated with the third mortar variable
g_2d = gb.grids_of_dimension(2)[0]
# Formally loop over the edges, there is a single one
# (after the loop, e is bound to that edge)
for e, _ in gb.edges():
    continue
# Get 2d dofs
global_dof_2d_pressure = dof_manager.grid_and_variable_to_dofs(g_2d, grid_1_pressure_variable)
global_dof_e_temperature = dof_manager.grid_and_variable_to_dofs(e, mortar_variable_temperature_2)
# Print the relevant part of the system matrix
print(A.toarray()[global_dof_2d_pressure, :][:, global_dof_e_temperature])
"""
Explanation: Discretization and assembly of the global linear system can again be carried out by separate function calls.
End of explanation
"""
|
Kaggle/learntools | notebooks/computer_vision/raw/ex4.ipynb | apache-2.0 | # Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.computer_vision.ex4 import *
import tensorflow as tf
import matplotlib.pyplot as plt
import learntools.computer_vision.visiontools as visiontools
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
"""
Explanation: Introduction
In these exercises, you'll explore the operations a couple of popular convnet architectures use for feature extraction, learn about how convnets can capture large-scale visual features through stacking layers, and finally see how convolution can be used on one-dimensional data, in this case, a time series.
Run the cell below to set everything up.
End of explanation
"""
# Sample images and kernels available for the feature-extraction experiments.
from learntools.computer_vision.visiontools import edge, blur, bottom_sobel, emboss, sharpen, circle

image_dir = '../input/computer-vision-resources/'
circle_64 = tf.expand_dims(circle([64, 64], val=1.0, r_shrink=4), axis=-1)
kaggle_k = visiontools.read_image(image_dir + str('k.jpg'), channels=1)
car = visiontools.read_image(image_dir + str('car_illus.jpg'), channels=1)
car = tf.image.resize(car, size=[200, 200])
images = [(circle_64, "circle_64"), (kaggle_k, "kaggle_k"), (car, "car")]

# Display the candidate images side by side.
plt.figure(figsize=(14, 4))
for i, (img, title) in enumerate(images):
    plt.subplot(1, len(images), i+1)
    plt.imshow(tf.squeeze(img))
    plt.axis('off')
    plt.title(title)
plt.show();

kernels = [(edge, "edge"), (blur, "blur"), (bottom_sobel, "bottom_sobel"),
           (emboss, "emboss"), (sharpen, "sharpen")]

# Display the candidate kernels with their numeric values.
plt.figure(figsize=(14, 4))
for i, (krn, title) in enumerate(kernels):
    plt.subplot(1, len(kernels), i+1)
    visiontools.show_kernel(krn, digits=2, text_size=20)
    plt.title(title)
plt.show()
"""
Explanation: (Optional) Experimenting with Feature Extraction
This exercise is meant to give you an opportunity to explore the sliding window computations and how their parameters affect feature extraction. There aren't any right or wrong answers -- it's just a chance to experiment!
We've provided you with some images and kernels you can use. Run this cell to see them.
End of explanation
"""
# YOUR CODE HERE: choose an image
image = circle_64

# YOUR CODE HERE: choose a kernel
kernel = bottom_sobel

# Run the sliding-window computations on the chosen image/kernel and
# visualize each stage of the extraction.
visiontools.show_extraction(
    image, kernel,

    # YOUR CODE HERE: set parameters
    conv_stride=1,
    conv_padding='valid',
    pool_size=2,
    pool_stride=2,
    pool_padding='same',

    subplot_shape=(1, 4),
    figsize=(14, 6),
)
"""
Explanation: To choose one to experiment with, just enter it's name in the appropriate place below. Then, set the parameters for the window computation. Try out some different combinations and see what they do!
End of explanation
"""
# Feedback-system cell: check the answer, then optionally reveal a hint.
# View the solution (Run this code cell to receive credit!)
q_1.check()
# Lines below will give you a hint
#_COMMENT_IF(PROD)_
q_1.hint()
"""
Explanation: The Receptive Field
Trace back all the connections from some neuron and eventually you reach the input image. All of the input pixels a neuron is connected to is that neuron's receptive field. The receptive field just tells you which parts of the input image a neuron receives information from.
As we've seen, if your first layer is a convolution with $3 \times 3$ kernels, then each neuron in that layer gets input from a $3 \times 3$ patch of pixels (except maybe at the border).
What happens if you add another convolutional layer with $3 \times 3$ kernels? Consider this next illustration:
<figure>
<img src="https://i.imgur.com/HmwQm2S.png" alt="Illustration of the receptive field of two stacked convolutions." width=250>
</figure>
Now trace back the connections from the neuron at top and you can see that it's connected to a $5 \times 5$ patch of pixels in the input (the bottom layer): each neuron in the $3 \times 3$ patch in the middle layer is connected to a $3 \times 3$ input patch, but they overlap in a $5 \times 5$ patch. So that neuron at top has a $5 \times 5$ receptive field.
1) Growing the Receptive Field
Now, if you added a third convolutional layer with a (3, 3) kernel, what receptive field would its neurons have? Run the cell below for an answer. (Or see a hint first!)
End of explanation
"""
import pandas as pd
# Load the time series as a Pandas dataframe
machinelearning = pd.read_csv(
'../input/computer-vision-resources/machinelearning.csv',
parse_dates=['Week'],
index_col='Week',
)
machinelearning.plot();
"""
Explanation: So why stack layers like this? Three (3, 3) kernels have 27 parameters, while one (7, 7) kernel has 49, though they both create the same receptive field. This stacking-layers trick is one of the ways convnets are able to create large receptive fields without increasing the number of parameters too much. You'll see how to do this yourself in the next lesson!
(Optional) One-Dimensional Convolution
Convolutional networks turn out to be useful not only on (two-dimensional) images, but also on things like time-series (one-dimensional) and video (three-dimensional).
We've seen how convolutional networks can learn to extract features from (two-dimensional) images. It turns out that convnets can also learn to extract features from things like time-series (one-dimensional) and video (three-dimensional).
In this (optional) exercise, we'll see what convolution looks like on a time-series.
The time series we'll use is from Google Trends. It measures the popularity of the search term "machine learning" for weeks from January 25, 2015 to January 15, 2020.
End of explanation
"""
detrend = tf.constant([-1, 1], dtype=tf.float32)
average = tf.constant([0.2, 0.2, 0.2, 0.2, 0.2], dtype=tf.float32)
spencer = tf.constant([-3, -6, -5, 3, 21, 46, 67, 74, 67, 46, 32, 3, -5, -6, -3], dtype=tf.float32) / 320
"""
Explanation: What about the kernels? Images are two-dimensional and so our kernels were 2D arrays. A time-series is one-dimensional, so what should the kernel be? A 1D array! Here are some kernels sometimes used on time-series data:
End of explanation
"""
# UNCOMMENT ONE
kernel = detrend
# kernel = average
# kernel = spencer
# Reformat for TensorFlow
ts_data = machinelearning.to_numpy()
ts_data = tf.expand_dims(ts_data, axis=0)
ts_data = tf.cast(ts_data, dtype=tf.float32)
kern = tf.reshape(kernel, shape=(*kernel.shape, 1, 1))
ts_filter = tf.nn.conv1d(
input=ts_data,
filters=kern,
stride=1,
padding='VALID',
)
# Format as Pandas Series
machinelearning_filtered = pd.Series(tf.squeeze(ts_filter).numpy())
machinelearning_filtered.plot();
#%%RM_IF(PROD)%%
# UNCOMMENT ONE
kernel = detrend
# kernel = average
# kernel = spencer
# Reformat for TensorFlow
ts_data = machinelearning.to_numpy()
ts_data = tf.expand_dims(ts_data, axis=0)
ts_data = tf.cast(ts_data, dtype=tf.float32)
kern = tf.reshape(kernel, shape=(*kernel.shape, 1, 1))
ts_filter = tf.nn.conv1d(
input=ts_data,
filters=kern,
stride=1,
padding='VALID',
)
# Format as Pandas Series
machinelearning_filtered = pd.Series(tf.squeeze(ts_filter).numpy())
machinelearning_filtered.plot();
#%%RM_IF(PROD)%%
# UNCOMMENT ONE
# kernel = detrend
kernel = average
# kernel = spencer
# Reformat for TensorFlow
ts_data = machinelearning.to_numpy()
ts_data = tf.expand_dims(ts_data, axis=0)
ts_data = tf.cast(ts_data, dtype=tf.float32)
kern = tf.reshape(kernel, shape=(*kernel.shape, 1, 1))
ts_filter = tf.nn.conv1d(
input=ts_data,
filters=kern,
stride=1,
padding='VALID',
)
# Format as Pandas Series
machinelearning_filtered = pd.Series(tf.squeeze(ts_filter).numpy())
machinelearning_filtered.plot();
#%%RM_IF(PROD)%%
# UNCOMMENT ONE
# kernel = detrend
# kernel = average
kernel = spencer
# Reformat for TensorFlow
ts_data = machinelearning.to_numpy()
ts_data = tf.expand_dims(ts_data, axis=0)
ts_data = tf.cast(ts_data, dtype=tf.float32)
kern = tf.reshape(kernel, shape=(*kernel.shape, 1, 1))
ts_filter = tf.nn.conv1d(
input=ts_data,
filters=kern,
stride=1,
padding='VALID',
)
# Format as Pandas Series
machinelearning_filtered = pd.Series(tf.squeeze(ts_filter).numpy())
machinelearning_filtered.plot();
"""
Explanation: Convolution on a sequence works just like convolution on an image. The difference is just that a sliding window on a sequence only has one direction to travel -- left to right -- instead of the two directions on an image. And just like before, the features picked out depend on the pattern on numbers in the kernel.
Can you guess what kind of features these kernels extract? Uncomment one of the kernels below and run the cell to see!
End of explanation
"""
|
blakeflei/IntroScientificPythonWithJupyter | 08 - Signal Processing - Scipy.ipynb | bsd-3-clause | import numpy as np # Python numpy
from scipy import signal, stats # Python scipy signal package
from matplotlib import pyplot as plt # Python matplotlib library
import matplotlib.gridspec as gridspec # Multiple plots in a single figure
# Display matplotlib in the notebook
%matplotlib inline
%cd datafiles
"""
Explanation: Scipy Signal Processing
One of the greatest strengths of matlab is the included signal processing. The python scipy library has many of these capabilities and some are highlighted below. These have applications in electronics, microscopy, telescopy, radio, and many other fields.
By the end of this file you should have seen simple examples of:
1. Frequency decomposition via Fourier transforms
2. Feature detection via correlations
3. Convolution and deconvolution
4. Digital filtering of signal using infinite impulse response (IIR) and finite impulse response (FIR) filters
Further reading:
https://docs.scipy.org/doc/scipy/reference/tutorial/signal.html
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate.html
Filters:
http://www.dummies.com/education/science/science-engineering/real-world-signals-and-systems-case-analog-filter-design-with-a-twist/
http://radio.feld.cvut.cz/matlab/toolbox/signal/basics27.html
IIR Filters:
https://dspguru.com/dsp/faqs/iir/basics/
https://www.dsprelated.com/showarticle/194.php
FIR Filters:
http://scipy-cookbook.readthedocs.io/items/ApplyFIRFilter.html
End of explanation
"""
# Create signal
frq1 = 50 # Frequency 1(hz)
amp1 = 5 # Amplitude 1
frq2 = 250 # Frequency 2(hz)
amp2 = 3 # Amplitude 2
sr = 2000 # Sample rate
dur = 0.4 # Duration (s) (increasing/decreasing this changes S/N)
# Create signal and timesteps
X = np.linspace(0, dur-1/sr, int(dur*sr)) # Time
Y_s = amp1*np.cos(X*2*np.pi*frq1 - np.pi/4)+amp2*np.cos(X*2*np.pi*frq2 - np.pi/2) # Signal
# Add noise
Y_sn = Y_s + 40*np.random.rand(len(X)) # Signal + noise
plt.plot(X[1:100], Y_sn[1:100])
plt.title('Plot of Signal with Noise')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude of Signal')
plt.show()
# Plot Single Sided FT Spectrum
Y_sn_fft = np.fft.fft(Y_sn)
# Update fft output
FT = np.roll(Y_sn_fft, len(X)//2) # Shift zero freq component to center of spectrum
SSFT_amp = np.abs(FT)[len(X)//2:] # Use the absolute value for amplitude; spectrum is symmetric - start from zero freq component
SSFT_amp = 2/len(X) * SSFT_amp # Normalize
# Determine frequencies
freqs = sr/len(X)*np.arange(0,len(SSFT_amp))
# Plot
plt.plot(freqs[1:], SSFT_amp[1:])
plt.title('Single-Sided Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel('Freq Amplitude')
plt.show()
"""
Explanation: Fourier Transform Examples
Fourier transforms are most often used to decompose a signal as a function of time into the frequency components that comprise it, e.g. transforming between time and frequency domains. It's also possible to post-process a filtered signal using Fourier transforms.
FFTs decompose a single signal into the form of:
$$ Y = \frac{1}{2} a_0 \sum_{n=1}^{\infty} a_n cos (n x + \phi_x) $$
Amplitude Example
Here, we solve for the individual $a_n$, so we know how strong each of the individual signal components are.
End of explanation
"""
# Create signal
sr = 2000 # Sample rate
dur = 10 # Increased duration (s) (increasing/decreasing this changes S/N)
X = np.linspace(0, dur-1/sr, int(dur*sr)) # Time
Y_s = amp1*np.sin(X*2*np.pi*frq1 - np.pi/4) + amp2*np.sin(X*2*np.pi*frq2 + np.pi/2)
Y_sn = Y_s + 40*np.random.rand(len(X))
# Determine Single Sided FT Spectrum
Y_sn_fft = np.fft.fft(Y_sn)
# Update ft output
FT = np.roll(Y_sn_fft, len(X)//2) # Shift zero freq component to center of spectrum
SSFT_amp = np.abs(FT)[len(X)//2:] # Use the absolute value for amplitude; spectrum is symmetric - start from zero freq component
SSFT_amp = 2/len(X) * SSFT_amp # Scale by 2 (using half the spectrum) / number points
# Determine frequencies
freqs = sr/len(X)*np.arange(0,len(SSFT_amp))
# Plot
plt.plot(freqs[1:], SSFT_amp[1:])
plt.title('Single-Sided Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel('Freq Amplitude')
plt.show()
# Create signal
sr = 2000 # Sample rate
dur = 10 # Increased duration (s) (increasing/decreasing this changes S/N)
X = np.linspace(0, dur-1/sr, int(dur*sr)) # Time
Y_s = amp1*np.cos(X*2*np.pi*frq1 - np.pi/4) + amp2*np.cos(X*2*np.pi*frq2 + np.pi/2)
Y_sn = Y_s + 40*np.random.rand(len(X))
# Determine Single Sided FT Spectrum
Y_sn_fft = np.fft.fft(Y_sn)
# Update ft output
FT = np.roll(Y_sn_fft, len(X)//2) # Shift zero freq component to center of spectrum
SSFT_amp = np.abs(FT)[len(X)//2:] # Use the absolute value for amplitude; spectrum is symmetric - start from zero freq component
SSFT_amp = 2/len(X) * SSFT_amp # Scale by 2 (using half the spectrum) / number points
# Determine frequencies
freqs = sr/len(X)*np.arange(0,len(SSFT_amp))
# Plot
plt.plot(freqs[1:], SSFT_amp[1:])
plt.title('Single-Sided Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel('Freq Amplitude')
plt.show()
"""
Explanation: The amplitudes don't seem quite right - longer duration increases the signal to noise and gives a better result:
End of explanation
"""
# We can use the previously computed full spectrum FT to get the phase.
# Copy first: "FT_trun = FT" would only alias FT, so the truncation below
# would silently overwrite the original spectrum in place as well.
FT_trun = FT.copy()

# Phase is very sensitive to floating point round-off: bins that should be
# exactly zero carry random tiny complex values whose angle is pure noise,
# so zero every bin below a small tolerance before taking the angle.
tol = 1*10**-6
FT_trun[np.abs(FT_trun) < tol] = 0

# Use the angle function (arc tangent of imaginary over real)
phase = np.angle(FT_trun[len(X)//2:])
phase_rad = 1/np.pi * phase  # Express phase in multiples of pi

# Plot the phase spectrum (labels fixed: this axis is phase, not amplitude)
plt.plot(freqs[1:], phase_rad[1:])
plt.title('Single-Sided Phase Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel(r'Phase ($\pi$ radians)')
plt.show()
"""
Explanation: Phase Example
Phase is shift of a periodic signal 'left' or 'right'. it is the $\phi_x$ in the following equation:
$$ Y = \frac{1}{2} a_0 \sum_{n=1}^{\infty} a_n cos (n x + \phi_x) $$
End of explanation
"""
nonzero_freqs = freqs[SSFT_amp > 1][1:]
print('Notable frequencies are: {}'.format(nonzero_freqs))
inds = [list(freqs).index(x) for x in nonzero_freqs] # Return index of nonzero frequencies
print('Phase shifts for notable frequencies are: {}'.format(phase_rad[inds]))
"""
Explanation: This shows the phase for every single frequency, but we really only care about the nonzero frequencies with minimum amplitude:
End of explanation
"""
# Recompute the spectrum of the noise-free signal and extract its phase
Y_s_fft = np.fft.fft(Y_s)
FT = np.roll(Y_s_fft, len(X)//2)  # Shift zero-freq component to the center

# Copy before truncating so we do not clobber FT in place
# (a bare assignment would alias the same array).
FT_trun = FT.copy()
tol = 1*10**-6                      # Zero bins below tolerance -- their phase
FT_trun[np.abs(FT_trun) < tol] = 0  # is dominated by floating point noise

# Use the angle function (arc tangent of imaginary over real)
phase = np.angle(FT_trun[len(X)//2:])
phase_rad = 1/np.pi * phase  # Express phase in multiples of pi

# Plot the phase spectrum (labels fixed: this axis is phase, not amplitude)
plt.plot(freqs[1:], phase_rad[1:])
plt.title('Single-Sided Phase Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel(r'Phase ($\pi$ radians)')
plt.show()
# To streamline, we can create a function in Pandas (see Pandas Crash Course for more info):
import pandas as pd
def fft_norm(signal, sr=1):
    '''Return the normalized single-sided FFT spectrum of a real signal.

    Parameters
    ----------
    signal : 1-D array-like
        Evenly sampled time series.
    sr : float, optional
        Sample rate in Hz (default 1); only scales the frequency axis.

    Returns
    -------
    pandas.DataFrame
        Indexed by frequency (Hz), with columns 'Amplitude' (normalized so a
        pure cosine of amplitude A shows A at its frequency),
        'Phase (Radians)' and 'Phase (Degrees)'.
    '''
    signal = signal[: signal.shape[0]//2*2]  # Even length so the spectrum splits cleanly in half
    N = signal.shape[0]
    freqs = sr*np.arange(0, N//2)/N          # Positive-frequency bin centers (Hz)
    # FFT, with the zero-frequency component shifted to the center
    fft = np.fft.fft(signal)
    fft = np.roll(fft, N//2)
    # Normalized amplitude (factor of 2 because energy is split between +/- freqs)
    amp_norm = 2/N*np.abs(fft[N//2:])
    # Zero near-empty bins first: the phase of round-off-sized bins is noise
    tol = 1*10**-6
    fft[np.abs(fft) < tol] = 0
    # np.angle already returns radians -- the previous version divided by pi,
    # so the 'Phase (Radians)' column actually held multiples of pi.
    phase_rad = np.angle(fft[N//2:])
    return pd.DataFrame({'Frequency': freqs,
                         'Amplitude': amp_norm,
                         'Phase (Radians)': phase_rad,
                         'Phase (Degrees)': np.degrees(phase_rad)}).set_index('Frequency')
Y_ms = Y_s-Y_s.mean() # Mean subtract to remove the offset (0 freq component)
fft_norm(Y_ms, sr=2000).plot(subplots=True, layout=(3,1)) # mean subtract to
plt.show()
"""
Explanation: This is better visualized without noise:
End of explanation
"""
# Notch out the 250 Hz component directly in the frequency domain.
Yfft = np.fft.fft(Y_s)
freqs = sr*np.arange(0, len(Yfft)/2)/len(Y_sn)  # Frequencies of the FT bins (Hz)
ind250Hz = np.where(freqs == 250)[0][0]         # Bin index of the 250 Hz component

# Work on a true copy: a bare slice (Yfft[:]) of a numpy array is a *view*,
# so zeroing it would silently clobber the original spectrum as well.
Y_filt = Yfft.copy()
full_w = 200  # Total width (in bins) of the notch to zero out

# Zero the notch on both the positive- and negative-frequency sides so the
# spectrum stays conjugate-symmetric (and the inverse FFT stays real).
Y_filt[ind250Hz-int(full_w/2):ind250Hz+int(full_w/2)] = 0   # Lower (positive-freq) side
Y_filt[-ind250Hz-int(full_w/2):-ind250Hz+int(full_w/2)] = 0 # Upper (negative-freq) side

# Determine single sided Fourier transform of the filtered spectrum
SSFT_filt = Y_filt[:int(len(Y_filt)/2)]  # Keep the positive-frequency half
SSFT_filt = np.abs(SSFT_filt)            # Use the absolute value
SSFT_filt = SSFT_filt/len(X) * 2         # Normalize (factor 2: +/- freqs share the energy)

# Plot
plt.plot(freqs[1:], SSFT_filt[1:])
plt.title('Single-Sided Spectrum of Signal')
plt.xlabel('freq (Hz)')
plt.ylabel('Amplitude of X')
plt.show()
"""
Explanation: Notch Filter
Plot our original, non-noisy two-component signal:
Perform the Fourier transform, and set the 200 Hz signal to zero:
End of explanation
"""
# Invert the filtered (full, complex) spectrum back to the time domain
Y2 = np.fft.ifft(Y_filt)
Y2 = np.real(Y2)  # Imaginary part is numerical round-off only; keep the real signal

# Plot the first 100 samples of the original vs. filtered signal
plt.plot(X[:100], Y_s[:100], label='Original')
plt.plot(X[:100], Y2[:100], label='Filtered')
plt.title('Two Signals')
plt.xlabel('Time (s)')
plt.ylabel('Signal Amplitude')
plt.legend(loc='best')
plt.show()
"""
Explanation: Inverse Fourier transform back, and plot the original filtered signal:
End of explanation
"""
# Determine approx power spectral density
f, Pxx_den = signal.periodogram(Y_s, sr)
# Plot
plt.plot(f, Pxx_den)
plt.xlabel('frequency [Hz]')
plt.ylabel('PSD')
plt.show()
"""
Explanation: While the Fourier amplitudes properly represent the amplitude of frequency components, the power spectral density (square of the discrete fourier transform) can be estimated using a periodogram:
End of explanation
"""
# Create a signal
npts = 200
heartbeat = np.array([0,1,0,0,4,8,2,-4,0,4,0,1,2,1,0,0,0,0])/8
xvals = np.linspace(0,len(heartbeat),npts)
heartbeat = np.interp(xvals,np.arange(0,len(heartbeat)),heartbeat) # Use interpolation to spread the signal out
# Repeat the signal ten times, add some noise:
hrtbt = np.tile(heartbeat,10)
hrtbt_noise = hrtbt + np.random.rand(len(hrtbt))
# Plot
G = gridspec.GridSpec(2, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(heartbeat)
axis1.set_title('Single Heartbeat Electrocardiogram')
axis2 = plt.subplot(G[1, 0])
axis2.plot(hrtbt_noise)
axis2.set_title('Noisy Electrocardiogram for repeating Heartbeat')
plt.tight_layout()
plt.show()
"""
Explanation: Correlation
Correlations are a measure of the product of two signals as a function of the x-axis shift between them. They are often used to determine similarity between the two signals, e.g. is there some structure or repeating feature that is present in both signals?
End of explanation
"""
# Find center of each repeating signal
cent_x = np.arange(1,11)*200 - 100
cent_y = np.ones(10)*max(hrtbt)
# Plot
plt.plot(hrtbt[:], label='heartbeat')
plt.plot(cent_x[:],cent_y[:],'r^', label='Centroid')
plt.title('Heartbeat Electrocardiogram')
plt.xlabel('Time')
plt.ylabel('Volts')
plt.legend(loc='best')
plt.show()
"""
Explanation: The center of the repeating (heartbeat) signal is marked as a centroid:
End of explanation
"""
# Cross-correlate the noisy repeating trace with the single-beat template
corr = signal.correlate(hrtbt_noise, heartbeat, mode='same')

# Plot, normalized so the peak correlation is 1
plt.plot(corr/max(corr), label='Correlogram')
plt.plot(cent_x, cent_y, 'r^', label='Centroid')
plt.title('Correlogram')
plt.xlabel('Delay')
plt.ylabel('Normalized Volts $^2$')
plt.legend(loc='best')
plt.show()
"""
Explanation: Correlate the single signal with the repeating, noisy one:
End of explanation
"""
# Signal and PSF
orig_sig = signal.sawtooth(2*np.pi*np.linspace(0,3,300))/2+0.5
psf = signal.gaussian(101, std=15)
# Convolve
convolved = signal.convolve(orig_sig, psf)
# Plot
G = gridspec.GridSpec(3, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(orig_sig)
axis1.set_xlim(0, len(convolved))
axis1.set_title('Original Pulse')
axis2 = plt.subplot(G[1, 0])
axis2.plot(psf)
axis2.set_xlim(0, len(convolved))
axis2.set_title('Point Spread Function')
axis3 = plt.subplot(G[2, 0])
axis3.plot(convolved)
axis3.set_xlim(0, len(convolved))
axis3.set_title('Convolved Signal')
plt.tight_layout()
plt.show()
"""
Explanation: The correlogram recovered the repeating signal central points. This is because at these points, the signal has the greatest similarity with the rectangular pulse. In other words, we're recovering the areas that share the greatest amount of similarity with our rectangular pulse.
Convolution
Convolution is a process in which the shape of one function is expressed in another. They're useful for adjusting features, or representing real-world measurements if the response of the filter or instrument is known.
As an example, consider a 1 dimensional image taken by an optical microscope (here, a sawtooth wave). The microscope itself imposes empirical limitations in the optics it uses, approximated by a Gaussian point spread function (PSF). The final image is the convolution of the original image and the PSF.
End of explanation
"""
# Deconvolve
recovered, remainder = signal.deconvolve(convolved, psf)
# Plot
G = gridspec.GridSpec(3, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(convolved)
axis1.set_xlim(0, len(convolved))
axis1.set_title('Convolved Signal')
axis2 = plt.subplot(G[1, 0])
axis2.plot(psf)
axis2.set_xlim(0, len(convolved))
axis2.set_title('Known Impulse Response')
axis3 = plt.subplot(G[2, 0])
axis3.plot(recovered)
axis3.set_xlim(0, len(convolved))
axis3.set_title('Recovered Pulse')
plt.tight_layout()
plt.show()
"""
Explanation: Deconvolution
Deconvolution can be thought of as removing the filter or instrument response. This is pretty common when reconstructing real signals if the response is known.
In the microscope example, this would be deconvolving image with a known response of the instrument to a point source. If it is known how much the entire image is spread out, the original image can be recovered.
End of explanation
"""
frq1 = 250 # Frequency 1(hz)
amp1 = 3 # Amplitude 1
sr = 2000 # Sample rate
dur = 1 # Duration (s) (increasing/decreasing this changes S/N)
# Create timesteps, signal and noise
X = np.linspace(0, dur-1/sr, int(dur*sr)) # Time
Y = amp1*np.sin(X*2*np.pi*frq1) # Signal
Y_noise = Y + 40*np.random.rand(len(X)) # Add noise
# Approx PSD
f, Pxx_den = signal.periodogram(Y_noise, sr)
# Plot
G = gridspec.GridSpec(2, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(X, Y_noise)
axis1.set_title('Plot of Signal with Noise')
axis2 = plt.subplot(G[1, 0])
axis2.plot(f, Pxx_den)
axis2.set_title('Approx PSD')
plt.tight_layout()
plt.show()
"""
Explanation: Filtering
Filters receive a signal input and selectively reduce the amplitude of certain frequencies. Working with digital signals, they can broadly be divided into infinite impulse response (IIR) and finite impulse response (FIR).
IIR filters that receive an impulse response (signal of value 1 followed by many zeros) yield a (theoretically) infinite number of non-zero values.
This is in contrast with the finite impulse response (FIR) filter that receives an impulse response and does become exactly zero beyond the duration of the impulse.
IIR and FIR filters are also different in that they have different filter coefficients (b, a) that represent the feed forward coefficients and feedback coefficients, respectively. Feed forward coefficients (b) are applied to input (x) values, and feedback coefficients (a) are applied to output (y) values - i.e:
$y(n) = b_1x(n) + b_2x(n) - a_1y(n) - a_2y(n) $
where:
$b_jx(n)$ are the feed forward coefficients, using $x$ values
$a_jy(n)$ are the feedback coefficients (notice the $y(n)$!)
Generate Signal
First, we generate a signal and approximate the power spectral density (PSD):
End of explanation
"""
f_order = 10              # Filter order (scipy.signal.iirfilter requires an integer)
f_pass = 'low'            # Filter is low pass
f_freq = 210.0            # Cutoff frequency (Hz)
f_cutoff = f_freq/(sr/2)  # Express the cutoff as a fraction of the Nyquist frequency

# Create the filter (Butterworth low-pass; b = feedforward, a = feedback coefficients)
b, a = signal.iirfilter(f_order, f_cutoff, btype=f_pass, ftype='butter')

# Test the filter
w, h = signal.freqz(b, a, 1000)  # Response of the digital filter across
                                 # frequencies (use 'freqz' for digital)
freqz_hz = w * sr / (2 * np.pi)  # Convert rad/sample to Hz
resp_db = 20 * np.log10(abs(h))  # Convert response to decibels

# Plot filter response (title fixed: this is a low-pass, not a bandpass)
plt.semilogx(freqz_hz, resp_db)
plt.title('Butterworth Lowpass Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (dB)')
plt.axis((100, 500, -200, 10))
plt.grid(which='both', axis='both')
plt.show()
"""
Explanation: Infinite Impulse Response (IIR) filters
Digital filters inherently account for digital signal limitations, i.e. the sampling frequency. The Nyquist theorem asserts that we can't measure frequencies that are higher than 1/2 the sampling frequency, and the digital filter operates on this principle.
Next, we create the digital filter and plot the response, using both feedforward (b) and feedback (a) coefficeints.:
End of explanation
"""
# Apply filter to signal
sig_filtered = signal.filtfilt(b, a, Y_noise)
# Determine approx PSD
f, Pxx_den_f = signal.periodogram(sig_filtered, sr)
# Plot
G = gridspec.GridSpec(2, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(f, Pxx_den)
axis1.set_title('Approx PSD of Original Signal')
axis2 = plt.subplot(G[1, 0])
axis2.plot(f, Pxx_den_f)
axis2.set_title('Approx PSD of Filtered Signal')
plt.tight_layout()
plt.show()
"""
Explanation: Applying the filter to our signal filters all higher frequencies:
End of explanation
"""
# Create FIR filter
taps = 150 # Analogous to IIR order - indication
# of memory, calculation, and
# 'filtering'
freqs = [0, 150, 300, 500, sr/2.] # FIR frequencies
ny_fract = np.array(freqs)/(sr/2) # Convert frequency to fractions of
# the Nyquist freq
gains = [10.0, 1.0, 10.0, 0.0, 0.0] # Gains at each frequency
b = signal.firwin2(taps, ny_fract, gains) # Make the filter (there are no
# 'a' coefficients)
w, h = signal.freqz(b) # Check filter response
# Test FIR filter
freqz_hz = w * sr / (2 * np.pi) # Convert frequency to Hz
resp_db = 20 * np.log10(abs(h)) # Convert response to decibels
# Plot filter response
plt.title('Digital filter frequency response')
plt.plot(freqz_hz, np.abs(h))
plt.title('Digital filter frequency response')
plt.ylabel('Amplitude Response')
plt.xlabel('Frequency (Hz)')
plt.grid()
plt.show()
"""
Explanation: Finite Impulse Response Filters
A finite impulse response (FIR) filter can be designed where a linear phase response is specified within specified regions (up to the Nyquist or 1/2 of the sampling frequency). Only feedforward coefficients (b) are used.
End of explanation
"""
# Apply FIR filter
sig_filtered = signal.filtfilt(b, 1, Y_noise)
# Determine approx PSD
f, Pxx_den_f = signal.periodogram(sig_filtered, sr)
# Plot
G = gridspec.GridSpec(2, 1)
axis1 = plt.subplot(G[0, 0])
axis1.plot(f, Pxx_den)
axis1.set_title('Approx PSD of Original Signal')
axis2 = plt.subplot(G[1, 0])
axis2.plot(f, Pxx_den_f)
axis2.set_title('Approx PSD of Filtered Signal')
plt.tight_layout()
plt.show()
"""
Explanation: And the effect of the FIR digital filter:
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions | Sessions/Session14/Day3/AutoencodersBlank.ipynb | mit | !pip install astronn
import torch
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from astroNN.datasets import load_galaxy10
from astroNN.datasets.galaxy10 import galaxy10cls_lookup
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
"""
Explanation: Creating a Simple Autoencoder
By: V. Ashley Villar (PSU)
In this problem set, we will use Pytorch to learn a latent space for the same galaxy image dataset we have previously played with.
End of explanation
"""
# Read in the data
images, labels = load_galaxy10()
labels = labels.astype(np.float32)
images = images.astype(np.float32)
images = torch.tensor(images)
labels = torch.tensor(labels)
# Cut down the resolution of the images!!! What is this line doing in words?
images = images[:,::6,::6,1]
#Plot an example image here
#Flatten images here
#Normalize the flux of the images here
"""
Explanation: Problem 1a: Understanding our dataset...again
Our data is a little too big for us to train an autoencoder in ~1 minute. Let's lower the resolution of our images and only keep one filter. Plot an example of the lower resolution galaxies.
Next, flatten each image into a 1D array. Then rescale the flux of the images such that the mean is 0 and the standard deviation is 1.
End of explanation
"""
# NOTE(exercise): this class is intentionally incomplete. The student must
# (a) add two Linear layers (self.hiddenlayer2 and self.hiddenlayer4, which
# forward() already references) and (b) replace every "ACTIVATION?"
# placeholder with one of the activations constructed in __init__.
class Autoencoder(torch.nn.Module):
    # this defines the model
    def __init__(self, input_size, hidden_size, hidden_inner, encoded_size):
        # Layer widths shrink input_size -> hidden_size -> hidden_inner ->
        # encoded_size on the encoder side, then mirror back out on the decoder.
        super(Autoencoder, self).__init__()
        print(input_size,hidden_size,encoded_size)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.encoded_size = encoded_size
        self.hidden_inner = hidden_inner
        self.hiddenlayer1 = torch.nn.Linear(self.input_size, self.hidden_size)
        # ADD A LAYER HERE
        self.encodedlayer = torch.nn.Linear(self.hidden_inner, self.encoded_size)
        self.hiddenlayer3 = torch.nn.Linear(self.encoded_size, self.hidden_inner)
        # ADD A LAYER HERE
        self.outputlayer = torch.nn.Linear(self.hidden_size, self.input_size)
        # some nonlinear options
        self.sigmoid = torch.nn.Sigmoid()
        self.softmax = torch.nn.Softmax()
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        # Encoder half: input -> hidden -> inner -> encoded (latent) layer
        layer1 = self.hiddenlayer1(x)
        activation1 = self.ACTIVATION?(layer1)
        layer2 = self.hiddenlayer2(activation1)
        activation2 = self.ACTIVATION?(layer2)
        layer3 = self.encodedlayer(activation2)
        activation3 = self.ACTIVATION?(layer3)
        # Decoder half: mirror the encoder back up to the input size
        layer4 = self.hiddenlayer3(activation3)
        activation4 = self.ACTIVATION?(layer4)
        layer5 = self.hiddenlayer4(activation4)
        activation5 = self.ACTIVATION?(layer5)
        layer6 = self.outputlayer(activation5)
        output = self.ACTIVATION?(layer6)
        # Why do I have two outputs?
        # (The reconstruction is used for the loss; layer3 exposes the latent
        # encoding so later cells can plot and classify the latent space.)
        return output, layer3
"""
Explanation: Problem 1b.
Split the training and test set with a 66/33 split.
Problem 2: Understanding the Autoencoder
Below is sample of an autoencoder, built in Pytorch. Describe the code line-by-line with a partner. Add another hidden layer before and after the encoded (latent) layer (this will be a total of 2 new layers). Choose the appropriate activation function for this regression problem. Make all of the activation functions the same.
End of explanation
"""
# train the model
def train_model(training_data,test_data, model):
    """Train the autoencoder for 500 epochs on `training_data`.

    NOTE(exercise): one of the two optimizer lines below must be
    uncommented, otherwise `optimizer` is undefined and the loop raises
    a NameError. `test_data` is unused until the student adds the
    requested loss-vs-epoch plot for both sets.
    """
    # define the optimization: MSE reconstruction loss (target is the input itself)
    criterion = torch.nn.MSELoss()
    # Choose between these two optimizers
    #optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    #optimizer = torch.optim.Adam(model.parameters(), lr=0.1,weight_decay=1e-6)
    for epoch in range(500):
        # clear the gradient
        optimizer.zero_grad()
        # compute the model output (reconstruction + latent encodings)
        myoutput, encodings_train = model(training_data)
        # calculate loss: reconstruction vs. the original input
        loss = criterion(myoutput, training_data)
        # credit assignment
        loss.backward()
        # update model weights
        optimizer.step()
    # Add a plot of the loss vs epoch for the test and training sets here
#Do your training here!!
hidden_size_1 = 100
hidden_size_2 = 50
encoded_size = 10
model = Autoencoder(np.shape(images_train[0])[0],hidden_size_1,hidden_size_2,encoded_size)
train_model(images_train, images_test, model)
"""
Explanation: Problem 3. Training
This is going to be a lot of guess-and-check. You've been warned. In this block, we will train the autoencoder. Add a plotting function into the training.
Note that instead of cross-entropy, we use the "mean-square-error" loss. Switch between SGD and Adam optimized. Which seems to work better? Optimize the learning-rate parameter and do not change other parameters, like momentum.
Write a piece of code to run train_model for 10 epochs. Play with the size of each hidden layer and encoded layer. When you feel you've found a reasonable learning rate, up this to 100 (or even 500 if you're patient) epochs. Hint: You want to find MSE~0.25.
End of explanation
"""
#Make an image of the original image
#Make an image of its reconstruction
#Make an image of (original - reconstruction)
"""
Explanation: Problem 4a. Understand our Results
Plot an image (remember you will need to reshape it to a 14x14 grid) with imshow, and plot the autoencoder output for the same galaxy. Try plotting the difference between the two. What does your algorithm do well reconstructing? Are there certain features which it fails to reproduce?
End of explanation
"""
#Scatter plot between two dimensions of the latent space
#Try coloring the points
"""
Explanation: Problem 4b.
Make a scatter plot of two of the 10 latent space dimensions. Do you notice any interesting correlations between different subsets of the latent space? Any interesting clustering?
Try color coding each point by the galaxy label using plt.scatter
End of explanation
"""
clf = RandomForestClassifier(...)
clf.fit(...)
new_labels = clf.predict(...)
cm = confusion_matrix(labels_test,new_labels,normalize='true')
disp = ConfusionMatrixDisplay(confusion_matrix=cm)
disp.plot()
plt.show()
"""
Explanation: Bonus Problem 5a Playing with the Latent Space
Create a random forest classifier to classify each galaxy using only your latent space.
End of explanation
"""
clf = IsolationForest(...).fit(encodings)
scores = -clf.score_samples(encodings) #I am taking the negative because the lowest score is actually the weirdest, which I don't like...
#Plot an image of the weirdest galaxy!
#This plots the cumulative distribution
def cdf(x, label='', plot=True, *args, **kwargs):
    """Empirical cumulative distribution of the samples in x.

    The data are sorted and each value is paired with its cumulative
    fraction. When `plot` is True the curve is drawn with plt.plot
    (extra positional/keyword arguments are forwarded) and the list of
    Line2D artists is returned; otherwise the (sorted values,
    fractions) pair is returned.
    """
    ordered = sorted(x)
    n = len(ordered)
    fractions = np.arange(n) / n
    if plot:
        return plt.plot(ordered, fractions, *args, **kwargs, label=label)
    return (ordered, fractions)
ulabels = np.unique(labels)
for ulabel in ulabels:
gind = np.where(labels==ulabel)
cdf(...)
"""
Explanation: Bonus Problem 5b Playing with the Latent Space
Create an isolation forest to find the most anomalous galaxies. Made a cumulative distribution plot showing the anomaly scores of each class of galaxies. Which ones are the most anomalous? Why do you think that is?
End of explanation
"""
|
broundy/udacity | nanodegrees/deep_learning_foundations/unit_3/lesson_34_sentiment-rnn/Sentiment RNN.ipynb | unlicense | import numpy as np
import tensorflow as tf
with open('reviews.txt', 'r') as f:
reviews = f.read()
with open('labels.txt', 'r') as f:
labels = f.read()
reviews[:1000]
"""
Explanation: Sentiment Analysis with an RNN
In this notebook, you'll implement a recurrent neural network that performs sentiment analysis. Using an RNN rather than a feedforward network is more accurate since we can include information about the sequence of words. Here we'll use a dataset of movie reviews, accompanied by labels.
The architecture for this network is shown below.
<img src="assets/network_diagram.png" width=400px>
Here, we'll pass in words to an embedding layer. We need an embedding layer because we have tens of thousands of words, so we'll need a more efficient representation for our input data than one-hot encoded vectors. You should have seen this before from the word2vec lesson. You can actually train up an embedding with word2vec and use it here. But it's good enough to just have an embedding layer and let the network learn the embedding table on its own.
From the embedding layer, the new representations will be passed to LSTM cells. These will add recurrent connections to the network so we can include information about the sequence of words in the data. Finally, the LSTM cells will go to a sigmoid output layer here. We're using the sigmoid because we're trying to predict if this text has positive or negative sentiment. The output layer will just be a single unit then, with a sigmoid activation function.
We don't care about the sigmoid outputs except for the very last one, we can ignore the rest. We'll calculate the cost from the output of the last step and the training label.
End of explanation
"""
from string import punctuation
all_text = ''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')
all_text = ' '.join(reviews)
words = all_text.split()
all_text[:2000]
words[:100]
"""
Explanation: Data preprocessing
The first step when building a neural network model is getting your data into the proper form to feed into the network. Since we're using embedding layers, we'll need to encode each word with an integer. We'll also want to clean it up a bit.
You can see an example of the reviews data above. We'll want to get rid of those periods. Also, you might notice that the reviews are delimited with newlines \n. To deal with those, I'm going to split the text into each review using \n as the delimiter. Then I can combine all the reviews back together into one big string.
First, let's remove all punctuation. Then get all the text without the newlines and split it into individual words.
End of explanation
"""
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
reviews_ints = []
for each in reviews:
reviews_ints.append([vocab_to_int[word] for word in each.split()])
"""
Explanation: Encoding the words
The embedding lookup requires that we pass in integers to our network. The easiest way to do this is to create dictionaries that map the words in the vocabulary to integers. Then we can convert each of our reviews into integers so they can be passed into the network.
Exercise: Now you're going to encode the words with integers. Build a dictionary that maps words to integers. Later we're going to pad our input vectors with zeros, so make sure the integers start at 1, not 0.
Also, convert the reviews to integers and store the reviews in a new list called reviews_ints.
End of explanation
"""
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])
"""
Explanation: Encoding the labels
Our labels are "positive" or "negative". To use these labels in our network, we need to convert them to 0 and 1.
Exercise: Convert labels from positive and negative to 1 and 0, respectively.
End of explanation
"""
from collections import Counter
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))
"""
Explanation: If you built labels correctly, you should see the next output.
End of explanation
"""
# Filter out that review with 0 length
reviews_ints =
"""
Explanation: Okay, a couple issues here. We seem to have one review with zero length. And, the maximum review length is way too many steps for our RNN. Let's truncate to 200 steps. For reviews shorter than 200, we'll pad with 0s. For reviews longer than 200, we can truncate them to the first 200 characters.
Exercise: First, remove the review with zero length from the reviews_ints list.
End of explanation
"""
seq_len = 200
features =
"""
Explanation: Exercise: Now, create an array features that contains the data we'll pass to the network. The data should come from review_ints, since we want to feed integers to the network. Each row should be 200 elements long. For reviews shorter than 200 words, left pad with 0s. That is, if the review is ['best', 'movie', 'ever'], [117, 18, 128] as integers, the row will look like [0, 0, 0, ..., 0, 117, 18, 128]. For reviews longer than 200, use on the first 200 words as the feature vector.
This isn't trivial and there are a bunch of ways to do this. But, if you're going to be building your own deep learning networks, you're going to have to get used to preparing your data.
End of explanation
"""
features[:10,:100]
"""
Explanation: If you build features correctly, it should look like that cell output below.
End of explanation
"""
split_frac = 0.8
train_x, val_x =
train_y, val_y =
val_x, test_x =
val_y, test_y =
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nValidation set: \t{}".format(val_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
"""
Explanation: Training, Validation, Test
With our data in nice shape, we'll split it into training, validation, and test sets.
Exercise: Create the training, validation, and test sets here. You'll need to create sets for the features and the labels, train_x and train_y for example. Define a split fraction, split_frac as the fraction of data to keep in the training set. Usually this is set to 0.8 or 0.9. The rest of the data will be split in half to create the validation and testing data.
End of explanation
"""
lstm_size = 256
lstm_layers = 1
batch_size = 500
learning_rate = 0.001
"""
Explanation: With train, validation, and text fractions of 0.8, 0.1, 0.1, the final shapes should look like:
Feature Shapes:
Train set: (20000, 200)
Validation set: (2500, 200)
Test set: (2501, 200)
Build the graph
Here, we'll build the graph. First up, defining the hyperparameters.
lstm_size: Number of units in the hidden layers in the LSTM cells. Usually larger is better performance wise. Common values are 128, 256, 512, etc.
lstm_layers: Number of LSTM layers in the network. I'd start with 1, then add more if I'm underfitting.
batch_size: The number of reviews to feed the network in one training pass. Typically this should be set as high as you can go without running out of memory.
learning_rate: Learning rate
End of explanation
"""
n_words = len(vocab)
# Create the graph object
graph = tf.Graph()
# Add nodes to the graph
with graph.as_default():
inputs_ =
labels_ =
keep_prob =
"""
Explanation: For the network itself, we'll be passing in our 200 element long review vectors. Each batch will be batch_size vectors. We'll also be using dropout on the LSTM layer, so we'll make a placeholder for the keep probability.
Exercise: Create the inputs_, labels_, and drop out keep_prob placeholders using tf.placeholder. labels_ needs to be two-dimensional to work with some functions later. Since keep_prob is a scalar (a 0-dimensional tensor), you shouldn't provide a size to tf.placeholder.
End of explanation
"""
# Size of the embedding vectors (number of units in the embedding layer)
embed_size = 300
with graph.as_default():
embedding =
embed =
"""
Explanation: Embedding
Now we'll add an embedding layer. We need to do this because there are 74000 words in our vocabulary. It is massively inefficient to one-hot encode our classes here. You should remember dealing with this problem from the word2vec lesson. Instead of one-hot encoding, we can have an embedding layer and use that layer as a lookup table. You could train an embedding layer using word2vec, then load it here. But, it's fine to just make a new layer and let the network learn the weights.
Exercise: Create the embedding lookup matrix as a tf.Variable. Use that embedding matrix to get the embedded vectors to pass to the LSTM cell with tf.nn.embedding_lookup. This function takes the embedding matrix and an input tensor, such as the review vectors. Then, it'll return another tensor with the embedded vectors. So, if the embedding layer has 200 units, the function will return a tensor with size [batch_size, 200].
End of explanation
"""
with graph.as_default():
# Your basic LSTM cell
lstm =
# Add dropout to the cell
drop =
# Stack up multiple LSTM layers, for deep learning
cell =
# Getting an initial state of all zeros
initial_state = cell.zero_state(batch_size, tf.float32)
"""
Explanation: LSTM cell
<img src="assets/network_diagram.png" width=400px>
Next, we'll create our LSTM cells to use in the recurrent network (TensorFlow documentation). Here we are just defining what the cells look like. This isn't actually building the graph, just defining the type of cells we want in our graph.
To create a basic LSTM cell for the graph, you'll want to use tf.contrib.rnn.BasicLSTMCell. Looking at the function documentation:
tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=<function tanh at 0x109f1ef28>)
you can see it takes a parameter called num_units, the number of units in the cell, called lstm_size in this code. So then, you can write something like
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
to create an LSTM cell with num_units. Next, you can add dropout to the cell with tf.contrib.rnn.DropoutWrapper. This just wraps the cell in another cell, but with dropout added to the inputs and/or outputs. It's a really convenient way to make your network better with almost no effort! So you'd do something like
drop = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
Most of the time, your network will have better performance with more layers. That's sort of the magic of deep learning, adding more layers allows the network to learn really complex relationships. Again, there is a simple way to create multiple layers of LSTM cells with tf.contrib.rnn.MultiRNNCell:
cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers)
Here, [drop] * lstm_layers creates a list of cells (drop) that is lstm_layers long. The MultiRNNCell wrapper builds this into multiple layers of RNN cells, one for each cell in the list.
So the final cell you're using in the network is actually multiple (or just one) LSTM cells with dropout. But it all works the same from an architectural viewpoint, just a more complicated graph in the cell.
Exercise: Below, use tf.contrib.rnn.BasicLSTMCell to create an LSTM cell. Then, add drop out to it with tf.contrib.rnn.DropoutWrapper. Finally, create multiple LSTM layers with tf.contrib.rnn.MultiRNNCell.
Here is a tutorial on building RNNs that will help you out.
End of explanation
"""
with graph.as_default():
outputs, final_state =
"""
Explanation: RNN forward pass
<img src="assets/network_diagram.png" width=400px>
Now we need to actually run the data through the RNN nodes. You can use tf.nn.dynamic_rnn to do this. You'd pass in the RNN cell you created (our multiple layered LSTM cell for instance), and the inputs to the network.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)
Above I created an initial state, initial_state, to pass to the RNN. This is the cell state that is passed between the hidden layers in successive time steps. tf.nn.dynamic_rnn takes care of most of the work for us. We pass in our cell and the input to the cell, then it does the unrolling and everything else for us. It returns outputs for each time step and the final_state of the hidden layer.
Exercise: Use tf.nn.dynamic_rnn to add the forward pass through the RNN. Remember that we're actually passing in vectors from the embedding layer, embed.
End of explanation
"""
with graph.as_default():
predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
cost = tf.losses.mean_squared_error(labels_, predictions)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
"""
Explanation: Output
We only care about the final output, we'll be using that as our sentiment prediction. So we need to grab the last output with outputs[:, -1], then calculate the cost from that and labels_.
End of explanation
"""
with graph.as_default():
correct_pred = tf.equal(tf.cast(tf.round(predictions), tf.int32), labels_)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: Validation accuracy
Here we can add a few nodes to calculate the accuracy which we'll use in the validation pass.
End of explanation
"""
def get_batches(x, y, batch_size=100):
    """Yield successive (x, y) slices of length `batch_size`.

    Trailing elements that do not fill a complete batch are discarded,
    so every yielded pair has exactly `batch_size` items.
    """
    keep = (len(x) // batch_size) * batch_size
    x, y = x[:keep], y[:keep]
    for start in range(0, keep, batch_size):
        end = start + batch_size
        yield x[start:end], y[start:end]
"""
Explanation: Batching
This is a simple function for returning batches from our data. First it removes data such that we only have full batches. Then it iterates through the x and y arrays and returns slices out of those arrays with size [batch_size].
End of explanation
"""
epochs = 10
with graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
iteration = 1
for e in range(epochs):
state = sess.run(initial_state)
for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 0.5,
initial_state: state}
loss, state, _ = sess.run([cost, final_state, optimizer], feed_dict=feed)
if iteration%5==0:
print("Epoch: {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Train loss: {:.3f}".format(loss))
if iteration%25==0:
val_acc = []
val_state = sess.run(cell.zero_state(batch_size, tf.float32))
for x, y in get_batches(val_x, val_y, batch_size):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: val_state}
batch_acc, val_state = sess.run([accuracy, final_state], feed_dict=feed)
val_acc.append(batch_acc)
print("Val acc: {:.3f}".format(np.mean(val_acc)))
iteration +=1
saver.save(sess, "checkpoints/sentiment.ckpt")
"""
Explanation: Training
Below is the typical training code. If you want to do this yourself, feel free to delete all this code and implement it yourself. Before you run this, make sure the checkpoints directory exists.
End of explanation
"""
test_acc = []
with tf.Session(graph=graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('/output/checkpoints'))
test_state = sess.run(cell.zero_state(batch_size, tf.float32))
for ii, (x, y) in enumerate(get_batches(test_x, test_y, batch_size), 1):
feed = {inputs_: x,
labels_: y[:, None],
keep_prob: 1,
initial_state: test_state}
batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
test_acc.append(batch_acc)
print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
"""
Explanation: Testing
End of explanation
"""
|
minireference/noBSLAnotebooks | cut_material/Cut material.ipynb | mit | # Recall the linear transformation P we constructed above
M_P = Matrix([[1,1],
[1,1]])/2
def P(vec):
    """Project `vec` onto the line y=x using the projection matrix M_P."""
    projected = M_P * vec
    return projected
# null space of M_P == kernel of P
M_P.nullspace()
# any vector from the null space gets mapped to the zero vector
n = M_P.nullspace()[0]
P(n)
# column space of M_P == image of P
M_P.columnspace()
# all outputs of P lie on the line y=x so are multiples of [1,1]
v = Vector([4,5])
P(v)
"""
Explanation: Linear transformations
Fundamental spaces
Vector spaces
Matrix representations
Eigenvectors and eigenvalues
Invertible matrix theorem
Further topics
Analytical geometry
Points, lines, and planes
Projections
Distances
Abstract vector spaces
Vector space of polynomials, e.g. $p(x)=a_0 + a_1x + a_2x^2$
Special types of matrices
Matrix decompositions
Linear algebra over other fields
Fundamental spaces
End of explanation
"""
# Recall the P, the projection onto the y=x plane and its matrix M_P
M_P
M_P.eigenvals()
M_P.eigenvects()
evec0 = M_P.eigenvects()[0][2][0]
evec1 = M_P.eigenvects()[1][2][0]
plot_line([1,1],[0,0])
plot_vecs(evec0, evec1)
M_P*evec0 # == 0*evec0
M_P*evec1 # == 1*evec1
"""
Explanation: Vector spaces
The above examples consist of a single vector, but in general a vector space can consist of linear combination of multiple vectors:
$$
V
= \textrm{span}(\vec{v}_1, \vec{v}_2 )
= { \alpha\vec{v}_1 + \beta\vec{v}_2, \forall \alpha, \beta \in \mathbb{R} }.
$$
Eigenvectors and eigenvalues
When a matrix is multiplied by one of its eigenvectors the output is the same eigenvector multiplied by a constant
$$
A\vec{e}\lambda =\lambda\vec{e}\lambda.
$$
The constant $\lambda$ is called an eigenvalue of $A$.
End of explanation
"""
# diagonal
D = Matrix([[1,0],
[0,4]])
D
# upper triangular
U = Matrix([[1,2],
[0,3]])
U
# symmetric
S = Matrix([[1,2],
[2,3]])
S == S.T
"""
Explanation: Many other topics...
Analytical geometry
Points, lines, planes, and distances
Useful geomtetric calculations and intuition.
Projections
Projections onto lines (as above), planes, hyperplanes, coordinate projections, etc.
Invertible matrix theorem
Summarizes and connects computational, geometric, and theoretical aspects of linear algebra.
Abstract vector spaces
Vector space of matrices e.g. $A = \begin{bmatrix}a_1 & a_2 \ a_3 & a_4 \end{bmatrix}$
Vector space of polynomials, e.g. $p(x)=a_0 + a_1x + a_2x^2$
Special types of matrices
End of explanation
"""
A = Matrix([ [4,2],
[1,3] ])
A
# decompose matrix into eigenvectors and eigenvalues
Q, Lambda = A.diagonalize()
Q, Lambda, simplify( Q*Lambda*Q.inv() )
# decompoe matrix A into a lower-triangular and upper triangular
L, U, _ = A.LUdecomposition()
L, U, L*U
Q, R = A.QRdecomposition()
Q, R, Q*R
Q*Q.T
"""
Explanation: Matrix decompositions
End of explanation
"""
# Finite field of two elements F_2 = {0,1} (Binary numbers)
(1+6) % 2
# applications to cryptography, error correcting codes, etc.
# Complex field z = a+bi, where i = sqrt(-1)
v = Vector([1, I])
v
v.H
# used in communication theory and quantum mechanics
"""
Explanation: Linear algebra over other fields
End of explanation
"""
|
bassio/omicexperiment | doc/01_experiment_basics.ipynb | bsd-3-clause | %load_ext autoreload
%autoreload 2
from omicexperiment.experiment.microbiome import MicrobiomeExperiment
mapping = "example_map.tsv"
biom = "example_fungal.biom"
tax = "blast_tax_assignments.txt"
#the MicrobiomeExperiment constructor currently needs three parameters
exp = MicrobiomeExperiment(biom, mapping,tax)
#the first parameter is the _data_ DataFrame
#the second parameter is the mapping dataframe
#the third paramenter is the taxonomy dataframe
"""
Explanation: The Experiment object
The OmicExperiment object is the heart of the omicexperiment package.
It has the ultimate goal of providing a pleasant API for rapid analysis of 'omic experiments' in an interactive environment.
The R bioinformatics community has already provided similar implementations for similar functionality.
Examples include DESeqDataSet (from the package DeSeq2), MRExperiment (from the package metagenomeSeq), phyloseq-class (from the package phyloseq).
To my knowledge, there exists no similar powerful functionality available to users of python.
Powerful manipulation of large datasets in comparative omic experiment (and these are numerous, and include amplicon microbiome studies, microbial metagenomics, transcriptomics/RNA-Seq, metabolomics, and proteomics, and microarray). According to the Biological Observation Matrix (BIOM) format: "they all share an underlying, core data type: the “sample by observation contingency table”.
The OmicExperiment object (perhaps intended to be subclassed for each specific use-case of the above) is the centrepiece of this functionality.
The philosophy of this package is to build upon solid foundations of the python scientific stack and try not to re-invent the wheel. Packages such as numpy and pandas are powerful optimized libraries in dealing with matrix and tabular data, respectively. This package's main dependency is pandas objects (as well as the rest of the scientific python stack).
As of this date, I have started mainly with implementing functionality in the MicrbiomeExperiment subclass of the parent OmicExperiment class, as this is what I have been recently working on in my research.
Clever scientific interactive computing environments are an excellent way to apply and demonstrate the analysis of scientific datasets. The Jupyter notebook is perhaps the prime example of such an environment, perfectly suited for rapid iteration and exploratory analysis, as well as documentation and literate programming.
Instantiating a MicrobiomeExperiment object
Let us start by showing how we start an experiment; this time, a microbiome experiment.
End of explanation
"""
exp.data_df
"""
Explanation: The parameters passed to MicrobiomeExperiment constructor could be:
* pandas dataframes
* a filepath (str) to a csv or tsv file
* a filepath (str) to a biom file (in case of the data DataFrame)
* a Table object from the biom python package (in case of the data DataFrame)
* a filepath (str) to a Qiime taxonomy assignment txt file - other implementations will be provided in the future, with possible subclassing of MicrobiomeDataFrame into a QiimeExperiment to provide optimal and specific Qiime functionality, while allowing integration with other pipeline software conventions
The data DataFrame
The data DataFrame (attribute data_df) is the central dataframe of the Experiment object. It is what we refer to as the frequency table (OTU table in microbiome studies, etc.).
data_df always has the 'observations' (or OTUs in microbiome OTU tables) as the rows, so that the observatin names/ids constitute the index of the dataframe.
On the other hand, the samples always constitute the sample names.
End of explanation
"""
print(exp.data_df.columns)
#
print("#OR")
#
print(exp.samples)
"""
Explanation: To list the samples:
The sample names are easily obtained through looking at the columns of your data_df.
Alternatively, the Experiment object exposes a shorthand 'samples' attribute, which is essentially as
list(exp.data_df.columns)
End of explanation
"""
print(exp.data_df.index)
#
print("#OR")
#
print(exp.observations)
"""
Explanation: To list the observations:
The observations are obtained through the index of the data DataFrame.
Alternatively, the Experiment object exposes a shorthand 'observations' attribute.
In the example below, the otu ids are actually hashes, which allows easy deduplication (i.e. clustering at 100% level) whilst allowing for pooling of experiments. Following the excellent gist by Greg Caporaso https://gist.github.com/gregcaporaso/f3c042e5eb806349fa18 .
End of explanation
"""
exp.mapping_df
#An example of filtering the mapping dataframe,
#as per the excellent pandas indexing filtration feature
exp.mapping_df[exp.mapping_df.group == 'CRSwNP']
#Note:
#similar operations are available on the counts and taxonomy dataframes, since
#all of these are pandas DataFrame objects
"""
Explanation: The Mapping DataFrame
The mapping dataframe (attribute mapping_df) is the dataframe which holds the sample metadata or "phenotypic" data.
mapping_df has the sample ids as the rows, as to constitute the index of that dataframe.
The columns contain the various variables of interest to our experiment.
The mapping dataframe below is an example of a tsv file, formatted according to QIIME conventions. The first column also has the sample ids.
You can see various variables in the columns. The group column, for example, contains the disease state of the samples in our example table. CRSwNP (i.e. Chronic Rhinosinusitis with Nasal Polyposis), CRSsNP (i.e. Chronic Rhinosinusitis sans Nasal Polyposis) and "healthy" controls.
End of explanation
"""
exp.taxonomy_df
"""
Explanation: The Taxonomy DataFrame
The taxonomy dataframe (attribute taxonomy_df) is the dataframe which holds the taxonomy assignment information for our OTUs.
This dataframe is only available on MicrobiomeExperiment results, and is not available on the base class Experiment.
As of the time of writing this notebook, only the taxonomy assignment txt files according to the QIIME convention has been implemented. Otherwise, a manually constructed taxonomy pandas DataFrame object (e.g., manually imported from a different pipeline or program using your own in-house glue code) can be passed to the constructor of the Microbiome Experiment object.
The taxonomy dataframe's index contains the "OTU" ids. The index object thus has the name 'otu'.
The taxonomy dataframe below is such an example.
In the module omicexperiment.taxonomy, the function tax_as_dataframe is responsible for opening up a taxonomy assignment txt file, formatted according to QIIME's conventions.
End of explanation
"""
#The taxonomic levels are as follows:
from omicexperiment.taxonomy import TAX_RANKS
print(TAX_RANKS)
#The taxonomy dataframe also has extra columns
list(exp.taxonomy_df.columns)
"""
Explanation: Taxonomic levels in taxonomy_df
What you can notice in the taxonomy dataframe above, is that it automatically runs code to separate the taxonomic assignment to various taxonomic levels.
End of explanation
"""
#Rank Resolutions example
for row in exp.taxonomy_df[['tax', 'rank_resolution']].iterrows():
print(str(row[1][0]))
print("RESOLUTION: " + str(row[1][1]))
print("\n")
#Note that the rank_resolution column (a pandas Series) is actually of an ordered Category type.
#It thus support the filtration methods in the following cells
exp.taxonomy_df.rank_resolution
#only two OTUs are assigned at the species level ( > genus )
exp.taxonomy_df.rank_resolution > 'genus'
"""
Explanation: The extra columns in the taxonomy dataframe include:
* otu: the otu id (also in the index)
* tax: the raw taxonomy assignment (includes all levels, conventionally separated by a semi-colon ';')
* rank_resolution: the rank at highest resolution that the taxonomy assignment method was able to provide. For example, if assignment could not identify a 'species' level, this means that the highest resolution rank was 'genus'. Unidentified levels occurs when the taxon at that level is equal to 'unidentified' or '' (empty string) or 'Unassigned' or 'No blast hit'. These levels then are given the 'unidentified' label. Note that when the assignment was 'Unassigned' or 'No blast hit', or a kingdom level assignment was not available, the rank_resolution will be 'nan'.
End of explanation
"""
new_exp = exp.to_relative_abundance()
new_exp.data_df
#OR, similarly:
#to demonstrate method chaining
exp.to_relative_abundance().data_df
#a 'rarefy' (i.e. subsampling) method is also available
#for the MicrobiomeExperiment subclass
print(exp.data_df.sum())
rarefied_df = exp.rarefy(90000).data_df #this will discard sample0
rarefied_df
#This method will discard samples with counts lower than a cutoff value,
#then subsample the counts of each sample to that cutoff
#this method was shown to be of benefit for various downstream diversity analyses
#the functionality is built into qiime and various other pipelines
#you can also specify a num_reps argument
#i.e. (how many randomizations are done before returning the ultimate (rarefied) data
rarefied_df = exp.rarefy(90000, num_reps=50).data_df #this will discard sample0
rarefied_df
#the with_data_df method replaces the data_df in the experiment object
#with the one passed as the parameter to the method
#and!! constructs a new Experiment (this paradigm will keep repeating!)
exp.with_data_df(rarefied_df).data_df
#Note:
#similarly, there is a with_mapping_df method
#and perhaps in the future a with_taxonomy_df method
"""
Explanation: Other OmicExperiment objects functionality
It is good practice to start the experiment with a 'raw counts' matrix.
Since it is very easy to convert that to a relative abundance (by sample) type matrix.
This is afforded through the to_relative_abundance method. This transforms the data such that each sample counts add up to a 100.
Note that the to_relative_abundance method actually instantiates a new experiment object with the newly transformed data DataFrame.
This API is provided to mimic various pandas DataFrame methods, which usually provides a new DataFrame object, and thus allows retaining older frames, as well as allow "chaining of methods", which is very important in interactive environments where economical typing is desirable. This pattern is also called "Fluent interface" (https://en.wikipedia.org/wiki/Fluent_interface).
End of explanation
"""
|
vikashvverma/machine-learning | mlfoundation/istat/project/investigate-a-dataset-template.ipynb | mit | # import necessary libraries
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
"""
Explanation: Project: Investigate TMDb Movie Data
Table of Contents
<ul>
<li><a href="#intro">Introduction</a></li>
<li><a href="#wrangling">Data Wrangling</a></li>
<li><a href="#eda">Exploratory Data Analysis</a></li>
<li><a href="#conclusions">Conclusions</a></li>
</ul>
<a id='intro'></a>
Introduction
The TMDb data provides details of 10,000 movies. The data include details like casts, revenue, budget, popularity etc. This data can be analysed to find interesting pattern between movies.
We can use this data to try to answer following questions:
What is the yearly revenue change?
Which genres are most popular from year to year?
What kinds of properties are associated with movies that have high revenues?
Who are top 15 highest grossing directors?
Who are top 15 highest grossing actors?
End of explanation
"""
# Load TMDb data and print out a few lines. Perform operations to inspect data
# types and look for instances of missing or possibly errant data.
tmdb_movies = pd.read_csv('tmdb-movies.csv')
tmdb_movies.head()
tmdb_movies.describe()
"""
Explanation: <a id='wrangling'></a>
Data Wrangling
General Properties
End of explanation
"""
# Pandas read empty string value as nan, make it empty string
tmdb_movies.cast.fillna('', inplace=True)
tmdb_movies.genres.fillna('', inplace=True)
tmdb_movies.director.fillna('', inplace=True)
tmdb_movies.production_companies.fillna('', inplace=True)
def string_to_array(data):
    """Split `data` on the '|' separator and return the parts as a list.

    The TMDb CSV stores multi-valued fields (cast, genres, director,
    production_companies) as a single '|'-delimited string; this converts
    such a string into a list of the individual values.
    """
    return data.split('|')
"""
Explanation: Data Cleaning
As evident from the data, it seems we have cast of the movie as string separated by | symbol. This needs to be converted into a suitable type in order to consume it properly later.
End of explanation
"""
tmdb_movies.cast = tmdb_movies.cast.apply(string_to_array)
tmdb_movies.genres = tmdb_movies.genres.apply(string_to_array)
tmdb_movies.director = tmdb_movies.director.apply(string_to_array)
tmdb_movies.production_companies = tmdb_movies.production_companies.apply(string_to_array)
"""
Explanation: Convert cast, genres, director and production_companies columns to array
End of explanation
"""
def yearly_growth(mean_revenue):
    """Return the year-over-year change of a Series.

    The first entry has no predecessor, so it is compared against 0
    (i.e. the first value is returned unchanged).
    """
    previous_year = mean_revenue.shift(1).fillna(0)
    return mean_revenue - previous_year
# Show change in mean revenue over years, considering only movies for which we have revenue data
movies_with_budget = tmdb_movies[tmdb_movies.budget_adj > 0]
movies_with_revenue = movies_with_budget[movies_with_budget.revenue_adj > 0]
revenues_over_years = movies_with_revenue.groupby('release_year').sum()
revenues_over_years.apply(yearly_growth)['revenue'].plot()
revenues_over_years[['budget_adj', 'revenue_adj']].plot()
def log(data):
    """Element-wise natural logarithm (thin wrapper around np.log)."""
    return np.log(data)
movies_with_revenue[['budget_adj', 'revenue_adj']].apply(log) \
.sort_values(by='budget_adj').set_index('budget_adj')['revenue_adj'].plot(figsize=(20,6))
"""
Explanation: <a id='eda'></a>
Exploratory Data Analysis
Research Question 1: What is the yearly revenue change?
It's evident from observations below that there is no clear trend in change in mean revenue over years.
Mean revenue from year to year is quite unstable. This can be attributed to number of movies and number of movies having high or low revenue
The gap between budget and revenue have widened after 2000. This can be attributed to circulation of movies worldwide compared to earlier days.
There seems to be a correlation between gross budget and gross revenue over years.
When log of revenue_adj is plotted against log of budget_adj, we can see a clear correlation between revenue of a movie against the budget
End of explanation
"""
def popular_movies(movies):
    """Return only the rows whose `vote_average` is at least 7.

    A movie is considered popular when its average vote is >= 7.
    """
    is_popular = movies['vote_average'] >= 7
    return movies[is_popular]
def group_by_genre(data):
    """Count genre occurrences per release year.

    `data` maps (release_year, position) keys to lists of genre strings
    (e.g. a Series produced by a grouped apply). Returns a dictionary of
    the form {release_year: {genre: frequency}}.
    """
    counts_by_year = {}
    for (year, _position), genre_list in data.items():
        for genre in genre_list:
            # Lazily create the per-year dict so years whose genre
            # lists are all empty never appear in the result.
            year_counts = counts_by_year.setdefault(year, {})
            year_counts[genre] = year_counts.get(genre, 0) + 1
    return counts_by_year
def plot(genres_by_year):
    """
    Plot a genre-frequency bar chart for every fifth release year.

    Only years divisible by 5 are plotted to avoid producing too many graphs.
    """
    for year, genres in genres_by_year.items():
        if year % 5 == 0:
            # Bug fix: use the function argument (`genres`), not the global
            # `grouped_genres`, so the function works with any data passed in.
            pd.DataFrame(genres, index=[year]).plot(kind='bar', figsize=(20, 6))
# Group movies by genre for each year and try to find the correlations
# of genres over years.
# Only movies with vote_average >= 7 survive the popular_movies filter
# before the genres are counted per year.
grouped_genres = group_by_genre(tmdb_movies.groupby('release_year').apply(popular_movies).genres)
plot(grouped_genres)
"""
Explanation: Research Question 2: Which genres are most popular from year to year?
Since the popularity column indicates the all-time popularity of a movie, it might not be the right metric to measure popularity over the years. We can instead measure the popularity of a movie based on its average vote; here a movie is considered popular if vote_average >= 7.
On analyzing the popular movies since 1960 (check illustrations below), the following observations can be made:
Almost all popular movies have the Drama genre
Over the years Comedy, Action and Adventure became popular.
In recent years, Documentary, Action and Animation movies gained more popularity.
End of explanation
"""
# Movies grossing at least one billion (inflation-adjusted), highest revenue first.
highest_grossing_movies = tmdb_movies[tmdb_movies['revenue_adj'] >= 1000000000]\
    .sort_values(by='revenue_adj', ascending=False)
highest_grossing_movies.head()
"""
Explanation: Research Question 3: What kinds of properties are associated with movies that have high revenues?
We can consider those movies with at least 1 billion revenue and see what are common properties among them.
Considering this criteria and based on illustrations below, we can make following observations about highest grossing movies:
Adventure and Action are most common genres among these movies followed by Science Fiction, Fantasy and Family.
Most of the movies have more than 7 average vote, some movies have less than 7 but that is because of less number of total votes. This means highest grossing movies are popular as well.
Steven Spielberg and Peter Jackson are the directors with the highest number of movies having at least 1 billion in revenue.
Most of the directors have only one movie with at least a billion in revenue, hence there seems to be no correlation between highest grossing movies and directors.
Most of the cast have one movie having at least a billion revenue.
Warner Bros., Walt Disney, Fox Film and Universal picture seems to have figured out the secret of highest grossing movies. They have highest number of at least a billion revenue movies. This does not mean all their movies have pretty high revenue.
End of explanation
"""
def count_frequency(data):
    """
    Count how often each item occurs across an iterable of item lists.

    Parameters
    ----------
    data : iterable of iterables (e.g. a Series of genre/cast/company lists)

    Returns
    -------
    dict
        Each item mapped to its total number of occurrences.
    """
    from collections import Counter
    from itertools import chain
    # Counter over the flattened input replaces the hand-rolled nested
    # if/else counting; converted back to dict to keep the return type.
    return dict(Counter(chain.from_iterable(data)))
# Genre frequencies among the billion-dollar movies, shown as a bar chart.
highest_grossing_genres = count_frequency(highest_grossing_movies.genres)
print(highest_grossing_genres)
pd.DataFrame(highest_grossing_genres, index=['Genres']).plot(kind='bar', figsize=(20, 8))
"""
Explanation: Find common genres in highest grossing movies
End of explanation
"""
highest_grossing_movies.vote_average.hist()
"""
Explanation: Popularity of highest grossing movies
End of explanation
"""
def list_to_dict(data, label):
    """
    Split (name, value) pairs into plotting inputs for a DataFrame.

    Returns a tuple of (column data keyed by `label`, list of row labels).
    """
    names = [pair[0] for pair in data]
    values = [pair[1] for pair in data]
    return {label: values}, names
import operator
# Top 20 directors by count of billion-dollar movies.
high_grossing_dirs = count_frequency(highest_grossing_movies.director)
revenues, indexes = list_to_dict(sorted(high_grossing_dirs.items(), key=operator.itemgetter(1), reverse=True)[:20], 'revenue')
pd.DataFrame(revenues, index=indexes).plot(kind='bar', figsize=(20, 5))
"""
Explanation: Directors of highest grossing movies
End of explanation
"""
# Top 30 cast members by count of billion-dollar movies.
high_grossing_cast = count_frequency(highest_grossing_movies.cast)
revenues, index = list_to_dict(sorted(high_grossing_cast.items(), key=operator.itemgetter(1), reverse=True)[:30], 'number of movies')
pd.DataFrame(revenues, index=index).plot(kind='bar', figsize=(20, 5))
"""
Explanation: Cast of highest grossing movies
End of explanation
"""
# Top 30 production companies by count of billion-dollar movies.
high_grossing_prod_comps = count_frequency(highest_grossing_movies.production_companies)
revenues, index = list_to_dict(sorted(high_grossing_prod_comps.items(), key=operator.itemgetter(1), reverse=True)[:30]\
    , 'number of movies')
pd.DataFrame(revenues, index=index).plot(kind='bar', figsize=(20, 5))
"""
Explanation: Production companies of highest grossing movies
End of explanation
"""
def grossing(movies, by):
    """
    Collect each movie's adjusted revenue under every entry of column `by`.

    `by` names a column holding lists (e.g. 'director' or 'cast'); the result
    maps each list entry to the revenues of all movies it appears in.
    """
    revenues_per_key = {}
    for _, movie in movies.iterrows():
        revenue = movie.revenue_adj
        for key in movie[by]:
            # setdefault creates the list on first sight of the key.
            revenues_per_key.setdefault(key, []).append(revenue)
    return revenues_per_key
def gross_revenue(data):
    """
    Sum each key's list of revenues into a single cumulative value.

    Returns a new dict with the same keys and the summed revenue as value.
    """
    return {key: np.sum(revenues) for key, revenues in data.items()}
# Top 15 directors ranked by cumulative (inflation-adjusted) gross revenue.
gross_by_dirs = grossing(movies=movies_with_revenue, by='director')
director_gross_revenue = gross_revenue(gross_by_dirs)
top_15_directors = sorted(director_gross_revenue.items(), key=operator.itemgetter(1), reverse=True)[:15]
revenues, index = list_to_dict(top_15_directors, 'director')
pd.DataFrame(data=revenues, index=index).plot(kind='bar', figsize=(15, 9))
"""
Explanation: Highest grossing budget
Research Question 4: Who are top 15 highest grossing directors?
We can see the top 30 highest grossing directors in bar chart below.
It seems Steven Spielberg surpasses other directors in gross revenue.
End of explanation
"""
# Top 15 actors ranked by cumulative (inflation-adjusted) gross revenue.
gross_by_actors = grossing(movies=tmdb_movies, by='cast')
actors_gross_revenue = gross_revenue(gross_by_actors)
top_15_actors = sorted(actors_gross_revenue.items(), key=operator.itemgetter(1), reverse=True)[:15]
revenues, indexes = list_to_dict(top_15_actors, 'actors')
pd.DataFrame(data=revenues, index=indexes).plot(kind='bar', figsize=(15, 9))
"""
Explanation: Research Question 5: Who are top 15 highest grossing actors?
We can find the top 15 actors based on gross revenue as shown in subsequent sections below.
As we can see, Harrison Ford tops the chart with the highest gross revenue.
End of explanation
"""
|
dereneaton/ipyrad | newdocs/API-analysis/cookbook-distance.ipynb | gpl-3.0 | # conda install ipyrad -c bioconda
# conda install toyplot -c eaton-lab (optional)
import ipyrad.analysis as ipa
import toyplot
"""
Explanation: <h2><span style="color:gray">ipyrad-analysis toolkit:</span> distance</h2>
Key features:
Calculate pairwise genetic distances between samples.
Filter SNPs to reduce missing data.
Impute missing data using population allele frequencies.
required software
End of explanation
"""
# the path to your VCF or HDF5 formatted snps file
data = "/home/deren/Downloads/ref_pop2.snps.hdf5"
# group individuals into populations (population name -> sample names)
imap = {
    "virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
    "mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
    "gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
    "bran": ["BJSL25", "BJSB3", "BJVL19"],
    "fusi": ["MXED8", "MXGT4", "TXGR3", "TXMD3"],
    "sagr": ["CUVN10", "CUCA4", "CUSV6", "CUMM5"],
    "oleo": ["CRL0030", "CRL0001", "HNDA09", "BZBB1", "MXSA3017"],
}
# require each SNP to be present in at least 50% of the samples of every group
minmap = {i: 0.5 for i in imap}
"""
Explanation: Short tutorial
Setup input files and params
End of explanation
"""
# load the snp data into distance tool with arguments
from ipyrad.analysis.distance import Distance
dist = Distance(
    data=data,
    imap=imap,
    minmap=minmap,
    mincov=0.5,          # global minimum SNP coverage across all samples
    impute_method="sample",  # impute missing genotypes by sampling group allele frequencies
    subsample_snps=False,
)
dist.run()
"""
Explanation: calculate distances
End of explanation
"""
# save the pairwise distance matrix (a DataFrame) to a CSV file
dist.dists.to_csv("distances.csv")
# show the upper corner
dist.dists.head()
"""
Explanation: save results
End of explanation
"""
# Heatmap of the distance matrix; bottom/top axes hidden, rows labelled
# with the sorted sample names.
toyplot.matrix(
    dist.dists,
    bshow=False,
    tshow=False,
    rlocator=toyplot.locator.Explicit(
        range(len(dist.names)),
        sorted(dist.names),
    ));
"""
Explanation: Draw the matrix
End of explanation
"""
# get list of concatenated names from each group
ordered_names = []
for group in dist.imap.values():
    ordered_names += group
# reorder matrix to match name order (reindex both rows and columns)
ordered_matrix = dist.dists[ordered_names].T[ordered_names]
toyplot.matrix(
    ordered_matrix,
    bshow=False,
    tshow=False,
    rlocator=toyplot.locator.Explicit(
        range(len(ordered_names)),
        ordered_names,
    ));
"""
Explanation: Draw matrix reordered to match groups in imap
End of explanation
"""
|
feststelltaste/software-analytics | notebooks/Checking the modularization of software systems by analyzing co-changing source code files.ipynb | gpl-3.0 | from lib.ozapfdis.git_tc import log_numstat
# Import the git log with per-file change statistics; keep commit hash and file path.
GIT_REPO_DIR = "../../dropover_git/"
git_log = log_numstat(GIT_REPO_DIR)[['sha', 'file']]
git_log.head()
"""
Explanation: Introduction
In my previous blog post, we've seen how we can identify files that change together in one commit.
In this blog post, we take the analysis to an advanced level:
We're using a more robust model for determining the similarity of co-changing source code files
We're checking the existing modularization of a software system and compare it to the change behavior of the development teams
We're creating a visualization that lets us determine the underlying, "hidden" modularization of our software system based on conjoint changes
We discuss the results for a concrete software system in detail (with more systems to come in the upcoming blog posts).
We're using Python and pandas as well as some algorithms from the machine learning library scikit-learn and the visualization libraries matplotlib, seaborn and pygal for these purposes.
The System under Investigation
For this analysis, we use a closed-source project that I developed with some friends of mine. It's called "DropOver", a web application that can manage events with features like events' sites, scheduling, comments, todos, file uploads, mail notifications and so on. The architecture of the software system mirrored the feature-based development process: You could quickly locate where code has to be added or changed because the software system's "screaming architecture". This architecture style lead you to the right place because of the explicit, feature-based modularization that was used for the Java packages/namespaces:
It's also important to know, that we developed the software almost strictly feature-based by feature teams (OK, one developer was one team in our case). Nevertheless, the history of this repository should perfectly fit for our analysis of checking the modularization based on co-changing source code files.
The main goal of our analysis is to see if the modules of the software system were changed independently or if they were code was changed randomly across modules boundaries. If the latter would be the case, we should reorganize the software system or the development teams to let software development activities and the surrounding more naturally fit together.
Idea
We can do this kind of analysis pretty easily by using the version control data of a software system like Git. A version control system tracks each change to a file. If more files are changed within one commit, we can assume that those files somehow have something to do with each other. This could be e. g. a direct dependency because two files depend on each other or a semantic dependency which causes an underlying concepts to change across module boundaries.
In this blog post, we take the idea further: We want to find out the degree of similarity of two co-changing files, making the analysis more robust and reliable on one side, but also enabling a better analysis of bigger software systems on the other side by comparing all files of a software system with each other regarding the co-changing properties.
Data
We use a little helper library for importing the data of our project. It's a simple git log with change statistics for each commit and file (you can see here how to retrieve it if you want to do it manually).
End of explanation
"""
# Keep only Java production code: main sources of the backend, excluding
# the package-info.java marker files.
prod_code = git_log.copy()
prod_code = prod_code[prod_code.file.str.endswith(".java")]
prod_code = prod_code[prod_code.file.str.startswith("backend/src/main")]
prod_code = prod_code[~prod_code.file.str.endswith("package-info.java")]
prod_code.head()
"""
Explanation: In our case, we only want to check the modularization of our software for Java production code. So we just leave the files that are belonging to the main source code. What to keep here exactly is very specific to your own project. With Jupyter and pandas, we can make our decisions for this transparent and thus retraceable.
End of explanation
"""
# Marker column: one "hit" per (commit, file) change, used as pivot value below.
prod_code['hit'] = 1
prod_code.head()
"""
Explanation: Analysis
We want to see which files are changing (almost) together. A good start for this is to create this view onto our dataset with the pivot_table method of the underlying pandas' DataFrame.
But before this, we need a marker column that signals that a commit occurred. We can create an additional column named hit for this easily.
End of explanation
"""
# Files x commits incidence matrix: 1 if the file changed in that commit, else 0.
commit_matrix = prod_code.reset_index().pivot_table(
    index='file',
    columns='sha',
    values='hit',
    fill_value=0)
commit_matrix.iloc[0:5,50:55]
"""
Explanation: Now, we can transform the data as we need it: For the index, we choose the filename, as columns, we choose the unique sha key of a commit. Together with the commit hits as values, we are now able to see which file changes occurred in which commit. Note that the pivoting also change the order of both indexes. They are now sorted alphabetically.
End of explanation
"""
from sklearn.metrics.pairwise import cosine_distances
# Pairwise cosine distance between the files' commit vectors
# (0 = always changed together, 1 = never changed together).
dissimilarity_matrix = cosine_distances(commit_matrix)
dissimilarity_matrix[:5,:5]
"""
Explanation: As already mentioned in a previous blog post, we are now able to look at our problem from a mathematician' s perspective. What we have here now with the commit_matrix is a collection of n-dimensional vectors. Each vector represents a filename and the components/dimensions of such a vector are the commits with either the value 0 or 1.
Calculating similarities between such vectors is a well-known problem with a variety of solutions. In our case, we calculate the distance between the various vectors with the cosines distance metric. The machine learning library scikit-learn provides us with an easy to use implementation.
End of explanation
"""
import pandas as pd
# Wrap the raw matrix in a DataFrame with file names on both axes.
dissimilarity_df = pd.DataFrame(
    dissimilarity_matrix,
    index=commit_matrix.index,
    columns=commit_matrix.index)
dissimilarity_df.iloc[:5,:2]
"""
Explanation: To be able to better understand the result, we add the file names from the commit_matrix as index and column index to the dissimilarity_matrix.
End of explanation
"""
%matplotlib inline
import seaborn as sns
# Overview heatmap; tick labels suppressed (too many files to label).
sns.heatmap(
    dissimilarity_df,
    xticklabels=False,
    yticklabels=False
);
"""
Explanation: Now, we see the result in a better representation: For each file pair, we get the distance of the commit vectors. This means that we have now a distance measure that says how dissimilar two files were changed in respect to each other.
Visualization
Heatmap
To get an overview of the result's data, we can plot the matrix with a little heatmap first.
End of explanation
"""
modules = dissimilarity_df.copy()
# 7th path segment is taken as the module name
# (assumes paths like backend/src/main/java/<pkg>/<app>/<module>/... -- TODO confirm).
modules.index = modules.index.str.split("/").str[6]
modules.index.name = 'module'
modules.columns = modules.index
modules.iloc[25:30,25:30]
"""
Explanation: Because of the alphabetically ordered filenames and the "feature-first" architecture of the software under investigation, we get the first glimpse of how changes within modules are occurring together and which are not.
To get an even better view, we can first extract the module's names with an easy string operation and use this for the indexes.
End of explanation
"""
import matplotlib.pyplot as plt
# Subset (first 180 rows/cols) heatmap with module names on the axes.
plt.figure(figsize=[10,9])
sns.heatmap(modules.iloc[:180,:180]);
"""
Explanation: Then, we can create another heatmap that shows the name of the modules on both axes for further evaluation. We also just take a look at a subset of the data for representational reasons.
End of explanation
"""
from sklearn.manifold import MDS
# uses a fixed seed for random_state for reproducibility
# dissimilarity='precomputed': we feed our own distance matrix, not raw features.
model = MDS(dissimilarity='precomputed', random_state=0)
dissimilarity_2d = model.fit_transform(dissimilarity_df)
dissimilarity_2d[:5]
"""
Explanation: Discussion
Starting at the upper left, we see the "comment" module with a pretty dark area very clearly. This means, that files around this module changed together very often.
If we go to the middle left, we see dark areas between the "comment" module and the "framework" module as well as the "site" module further down. This shows a change dependency between the "comment" module and the other two (I'll explain later, why it is that way).
If we take a look in the middle of the heatmap, we see that the very dark area represents changes of the "mail" module. This module was pretty much changed without touching any other modules. This shows a nice separation of concerns.
For the "scheduling" module, we can also see that the changes occurred mostly cohesive within the module.
Another interesting aspect is the horizontal line within the "comment" region: These files were changed independently from all other files within the module. These files were the code for an additional data storage technology that was added in later versions of the software system. This pattern repeats for all other modules more or less strongly.
With this visualization, we can get a first impression of how good our software architecture fits the real software development activities. In this case, I would say that you can see most clearly that the source code of the modules changed mostly within the module boundaries. But we have to take a look at the changes that occur in other modules as well when changing a particular module. These could be signs of unwanted dependencies and may lead us to an architectural problem.
Multi-dimensional Scaling
We can create another kind of visualization to check
* if the code within the modules is only changed altogether and
* if not, what other modules were changed.
Here, we can help ourselves with a technique called "multi-dimensional scaling" or "MDS" for short. With MDS, we can break down an n-dimensional space to a lower-dimensional space representation. MDS tries to keep the distance proportions of the higher-dimensional space when breaking it down to a lower-dimensional space.
In our case, we can let MDS figure out a 2D representation of our dissimilarity matrix (which is, overall, just a plain multi-dimensional vector space) to see which files get change together. With this, we'll able to see which files are changes together regardless of the modules they belong to.
The machine learning library scikit-learn gives us easy access to the algorithm that we need for this task as well. We just need to say that we have a precomputed dissimilarity matrix when initializing the algorithm and then pass our dissimilarity_df DataFrame to the fit_transform method of the algorithm.
End of explanation
"""
# Quick scatter of the 2D MDS embedding (one point per file).
plt.figure(figsize=(8,8))
x = dissimilarity_2d[:,0]
y = dissimilarity_2d[:,1]
plt.scatter(x, y);
"""
Explanation: The result is a 2D matrix that we can plot with matplotlib to get a first glimpse of the distribution of the calculated distances.
End of explanation
"""
# Attach file names and module names to the 2D coordinates.
dissimilarity_2d_df = pd.DataFrame(
    dissimilarity_2d,
    index=commit_matrix.index,
    columns=["x", "y"])
dissimilarity_2d_df['module'] = dissimilarity_2d_df.index.str.split("/").str[6]
dissimilarity_2d_df.head()
"""
Explanation: With the plot above, we see that the 2D transformation somehow worked. But we can't see
* which filenames are which data points
* how the modules are grouped all together
So we need to enrich the data a little bit more and search for a better, interactive visualization technique.
Let's add the filenames to the matrix as well as nice column names. We, again, add the information about the module of a source code file to the DataFrame.
End of explanation
"""
# Reshape into pygal's expected structure: per module, a list of
# {'label': filename, 'value': (x, y)} records.
plot_data = pd.DataFrame(index=dissimilarity_2d_df['module'])
plot_data['value'] = tuple(zip(dissimilarity_2d_df['x'], dissimilarity_2d_df['y']))
plot_data['label'] = dissimilarity_2d_df.index
plot_data['data'] = plot_data[['label', 'value']].to_dict('records')
plot_dict = plot_data.groupby(plot_data.index).data.apply(list)
plot_dict
"""
Explanation: OK, here comes the ugly part: We have to transform all the data to the format our interactive visualization library pygal needs for its XY chart. We need to
* group the data my modules
* add every distance information
* for each file as well as
* the filename itself
in a specific dictionary-like data structure.
But there is nothing that can hinder us in Python and pandas. So let's do this!
We create a separate DataFrame named plot_data with the module names as index
We join the coordinates x and y into a tuple data structure
We use the filenames from dissimilarity_2d_df's index as labels
We convert both data items to a dictionary
We append each entry for a module to only on module entry
This gives us a new DataFrame with modules as index and per module a list of dictionary-like entries with
* the filenames as labels and
* the coordinates as values.
End of explanation
"""
import pygal
# Interactive XY chart: one series per module, one point per file.
xy_chart = pygal.XY(stroke=False)
# Series.iteritems() was removed in pandas 2.0; items() is the modern
# equivalent. A plain loop replaces the side-effect-only list comprehension.
for module_name, points in plot_dict.items():
    xy_chart.add(module_name, points)
# uncomment to create the interactive chart
# xy_chart.render_in_browser()
xy_chart
"""
Explanation: With this nice little data structure, we can fill pygal's XY chart and create an interactive chart.
End of explanation
"""
|
fisicatyc/Cuantica_Jupyter | vis_int.ipynb | mit | from math import sin, cos, tan, sqrt, log, exp, pi
"""
Explanation: Visualización e interacción
La visualización e interacción es un requerimiento actual para las nuevas metodologías de enseñanza, donde se busca un aprendizaje mucho más visual y que permita, a través de la experimentación, el entendimiento de un fenómeno cuando se cambian ciertas condiciones iniciales.
La ubicación espacial y la manipulación de parámetros en dicha experimentación se puede facilitar con herramientas como estas, que integran el uso de gráficos, animaciones y widgets. Este notebook, define los métodos de visualización e interacción que se usarán en otros notebooks, sobre la componente numérica y conceptual.
Esta separación se hace con el fin de distinguir claramente 3 componentes del proceso, y que faciliten la comprensión de la temática sin requerir que el usuario comprenda los 3 niveles (ya que el código es visible, y esto impactaría en el proceso de seguimiento del tema).
Funciones Matemáticas
Aunque no es parte de la visualización y de la interacción, el manejo de funciones matemáticas es requerido para estas etapas y las posteriores. Por lo que su definición es necesaria desde el principio para no ser redundante en requerir de múltiples invocaciones.
La evaluación de funciones matemáticas puede realizarse por medio del modulo math que hace parte de la biblioteca estandar de Python, o con la biblioteca numpy. Para el conjunto limitado de funciones matemáticas que requerimos y con la premisa de no realizar de formas complejas nuestros códigos, las utilidades de numpy no serán necesarias y con math y el uso de listas será suficiente.
El motivo de tener pocos requerimientos de funciones matemáticas es por el uso de métodos numéricos y no de herramientas análiticas. La idea es mostrar como con esta metodología es posible analizar un conjunto mayor de problemas sin tener que profundizar en una gran cantidad de herramientas matemáticas y así no limitar la discusión de estos temas a conocimientos avanzados de matemáticas, y más bien depender de un conocimiento básico tanto de matemáticas como de programación para el desarrollo de los problemas, y permitir simplemente la interacción en caso de solo usar estos notebooks como un recurso para el estudio conceptual. Por este último fin, se busca que el notebook conceptual posea el mínimo de código, y este se lleve sobre los notebooks de técnicas numéricas y de visualización.
End of explanation
"""
import ipywidgets
# List every name the widget module exports, to survey the available controls.
print(dir(ipywidgets))
"""
Explanation: El conjunto anterior de funciones sólo se indica por mantener una referencia de funciones para cualquier ampliación que se desee realizar sobre este, y para su uso en la creación de potenciales arbitrarios, así como en los casos de ejemplificación con funciones análiticas o para fines de comparación de resultados.
Para la implementación (partiendo de un potencial dado numéricamente), sólo se requiere del uso de sqrt.
El modulo de numpy permitiría extender la aplicación de funciones matemáticas directamente sobre arreglos numéricos, y definir estos arreglos de una forma natural para la matemática, como equivalente a los vectores y matrices a traves de la clase array.
Interacción
Existen multiples mecanismos para interacción con los recursos digitales, definidos de forma casi estándar en su comportamiento a través de distintas plataformas.
Dentro de la definición de los controles gráficos (widgets) incorporados en Jupyter en el módulo ipywidgets, encontramos los siguientes:
End of explanation
"""
from ipywidgets import interact, interactive, fixed, IntSlider, FloatSlider, Button, Text, Box
"""
Explanation: Para nuestro uso, serán de uso principal:
Interacciones: Son mecanismos automáticos para crear controles y asociarlos a una función. interact, interactive.
Deslizadores: Los hay específicos para tipos de datos, y estos son IntSlider y FloatSlider.
Botones: Elementos que permiten ejecutar una acción al presionarlos, Button.
Texto: Permiten el ingreso de texto arbitrario y asociar la ejecución de una acción a su ingreso. Text.
Contenedores: Permiten agrupar en un solo objeto/vista varios controles. Uno de ellos es Box.
End of explanation
"""
from traitlets import link
"""
Explanation: Entre estos controles que se usan, a veces es necesario crear dependencias de sus rangos respecto al rango o propiedad de otro control. Para este fin usamos la función link del módulo traitlets. En este módulo se encuentran otras funciones utiles para manipulación de los controles gráficos.
End of explanation
"""
from IPython.display import clear_output, display, HTML, Latex, Markdown, Math
"""
Explanation: Tambien es necesario el uso de elementos que permitan el formato del documento y visualización de elementos y texto enriquecido, fuera de lo posible con texto plano a punta de print o con las capacidades de MarkDown (Nativo o con extensión). Para esto se puede extender el uso métodos para renderizado HTML y LaTeX.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Visualización
Por visualización entendemos las estrategias de representación gráfica de la información, resultados o modelos. Facilita la lectura rápida de datos mediante codificaciones de colores así como la ubicación espacial de los mismos. La representación gráfica no tiene por qué ser estática, y es ahí donde las animaciones nos permiten representar las variaciones temporales de un sistema de una forma más natural (no como un gráfico respecto a un eje de tiempo, sino vivenciando un gráfico evolucionando en el tiempo).
Para este fin es posible usar diversas bibliotecas existentes en python (en sus versiones 2 y 3), siendo la más común de ellas y robusta, la biblioteca Matplotlib. En el contexto moderno de los navegadores web, es posible integrar de una forma más natural bibliotecas que realizan el almacenamiento de los gráficos en formatos nativos para la web, como lo es el formato de intercambio de datos JSON, facilitando su interacción en el navegador mediante llamados a javascript.
Así, podemos establecer preferencias, como Matplotlib para uso estático principalmente o para uso local, mientras que para interacción web, usar bibliotecas como Bokeh.
Para este caso, sin profundidad en la interacción web, se usará Matplotlib.
End of explanation
"""
def discretizar(funcion, a, b, n):
    """
    Sample `funcion` at n+1 evenly spaced points of the interval [a, b].

    Returns the pair (x, y) with the abscissas and the function values.
    """
    paso = (b - a) / n
    x = []
    y = []
    for i in range(n + 1):
        xi = a + i * paso
        x.append(xi)
        y.append(funcion(xi))
    return x, y
def graficar_funcion(x, f):
    """Plot the sampled function as a solid line."""
    plt.plot(x, f, '-')
def graficar_punto_texto(x, f, texto):
    """Mark the point (x, f) with a dot and annotate it with `texto`, slightly offset."""
    plt.plot(x, f, 'o')
    plt.text(x+.2, f+.2, texto)
def int_raiz_sin(a:(-5.,0., .2), b:(0., 5., .2), k:(0.2, 10., .1), n:(1, 100, 1), N:(0, 10, 1)):
    """Plot sin(k*x) on [a, b] with n+1 samples and mark its N-th root from the left end.

    The (min, max, step) annotations are read by ipywidgets.interact to build sliders.
    """
    f = lambda x: sin(k*x)
    x, y = discretizar(f, a, b, n)
    # Roots of sin(k*x) lie at multiples of pi/k; offset by the first multiple >= a.
    r = pi*(N + int(a*k/pi))/k
    graficar_funcion(x, y)
    graficar_punto_texto(r, 0, 'Raíz')
    plt.show()
interact(int_raiz_sin)
"""
Explanation: Para indicar la graficación no interactiva embebida en el documento usamos la sguiente linea
%matplotlib inline
En caso de requerir una forma interactiva embebida, se usa la linea
%matplotlib notebook
Para nuestro uso básico, todo lo necesario para gráficación se encuentra en el módulo pyplot de Matplotlib. Con él podemos realizar cuadrículas, trazos de curvas de diversos estilos, modificación de ejes, leyendas, adición de anotaciones en el gráfico y llenado de formas (coloreado entre curvas). Pueden consultarse ejemplos de referencia en la galería de Matplotlib y en la lista de ejemplos de la página oficial.
Graficación de funciones
En general nuestro ideal es poder graficar funciones que son representadas por arreglos numéricos. Las funciones continuas en su representación algebraica de discretizan, y es el conjunto de puntos interpolado lo que se ilustra. Antes de discretizar, es conveniente convertir nuestra función en una función evaluable, y asociar la dependencia solo a una variable (para nuestro caso que es 1D).
El proceso de interpolación mencionado se realiza por el paquete de graficación y nosotros solo debemos indicar los puntos que pertenecen a la función.
End of explanation
"""
def raiz_sin(a, b, k, n, N, texto):
    """Plot sin(k*x) on [a, b] (n+1 samples) and label its N-th root with `texto`."""
    f = lambda x: sin(k*x)
    x, y = discretizar(f, a, b, n)
    # Roots of sin(k*x) lie at multiples of pi/k; offset by the first multiple >= a.
    r = pi*(N + int(a*k/pi))/k
    graficar_funcion(x, y)
    graficar_punto_texto(r, 0, texto)
# Explicit widgets: sliders only store parameter values; plotting happens
# on demand when the button is pressed (avoids re-running on every drag).
a = FloatSlider(value= -2.5, min=-5., max= 0., step= .2, description='a')
b= FloatSlider(value = 2.5, min=0., max= 5., step=.2, description='b')
k= FloatSlider(value = 5., min=0.2, max=10., step=.1, description='k')
n= IntSlider(value= 50, min=1, max= 100, step=1, description='n')
N= IntSlider(value=5, min=0, max=10, step=1, description='N')
texto = Text(value='Raíz', description='Texto punto')
Boton_graficar = Button(description='Graficar')
def click_graficar(boton):
    # Button callback: clear the previous plot, then redraw with current values.
    clear_output(wait=True)
    raiz_sin(a.value, b.value, k.value, n.value, N.value, texto.value)
    plt.show()
display(a, b, k, n, N, texto, Boton_graficar)
Boton_graficar.on_click(click_graficar)
"""
Explanation: El bloque anterior de código ilustra el uso de interact como mecanismo para crear controles automaticos que se apliquen a la ejecución de una función. Este permite crear de una forma simple las interacciones cuando no se requiere de personalizar mucho, ni vincular controles y se desea una ejecución automatica con cada variación de parametros. En caso de querer recuperar los valores especificos de los parametros para posterior manipulación se recomienda el uso de interactive o del uso explicito de los controles.
A pesar de la facilidad que ofrece interact e interactive al generar los controles automaticos, esto es poco conveniente cuando se trata de ejecuciones que toman tiempos significativos (que para escalas de una interacción favorable, un tiempo significativo son aquellos mayores a un segundo), ya que cada variación de parametros independiente, o sea, cada deslizador en este caso, al cambiar produce una nueva ejecución, y las nuevas variaciones de parámetros quedan en espera hasta terminar las ejecuciones de las variaciones individuales anteriores.
Es por esto, que puede ser conveniente definir una interacción donde los controles la unica acción que posean es la variación y almacenamiento de valores de los parametros, y sea otro control adicional el designado para indicar el momento de actualizar parametros y ejecutar.
El ejemplo anterior se puede construir usando FloatSlider, IntSlider, Button, Text, Box y display.
End of explanation
"""
def graficar_potencial(x, V_x):
    """Draw the potential V_x over x as a filled barrier down to its minimum value."""
    piso = min(V_x)
    plt.fill_between(x, piso, V_x, facecolor='peru')
"""
Explanation: Graficación de potenciales
Para fines de ilustración y comprensión de los estados ligados del sistema, conviene poder ilustrar las funciones de potencial como barreras físicas. Esta noción gráfica se representa mediante el llenado entre la curva y el eje de referencia para la energía. De esta forma, al unir el gráfico con la referencia del autovalor, será claro que la energía hallada pertenece al intervalo requerido en teoría y que corresponde a un sistema ligado.
La función de graficación del potencial recibe dos listas/arreglos, uno con la información espacial y otro con la evaluación del potencial en dichos puntos. Antes de proceder con el llenado de la representación de la barrera del potencial, se crean los puntos inicial y final con el fin de crear formas cerradas distinguibles para el comando fill.
End of explanation
"""
def potencial(V_0, a, x):
    """Finite square well: 0 inside |x| <= a/2, height V_0 outside."""
    return V_0 if abs(x) > a / 2 else 0
def int_potencial(V_0:(.1, 10., .1), a:(.1, 5, .1), L:(1., 10., .5), N:(10, 200, 10)):
    """Interactive plot of the finite-well potential on N+1 points over [-L/2, L/2].

    The tuple annotations are read by ipywidgets.interact to build sliders.
    """
    paso = L / N
    malla = [-L/2 + j*paso for j in range(N + 1)]
    valores = [potencial(V_0, a, punto) for punto in malla]
    graficar_potencial(malla, valores)
    plt.show()

interact(int_potencial)
"""
Explanation: A continuación se presenta un ejemplo interactivo de graficación del potencial finito. Se inicia con la definición del potencial, la cual se usa para generar un arreglo con la información de la evaluación del potencial en distintos puntos del espacio.
End of explanation
"""
def graficar_autovalor(L, E):
    """Mark the energy level E as a dashed horizontal line spanning [-L/2, L/2]."""
    plt.plot([-L / 2, L / 2], [E, E], '--')
def int_potencial_energia(V_0:(.1, 10., .1), E:(.1, 10., .1), a:(.1, 5, .1), L:(1., 10., .5), N:(10, 200, 10)):
    """Plot the well potential together with the energy level E, labelling the state."""
    paso = L / N
    malla = [-L/2 + j*paso for j in range(N + 1)]
    valores = [potencial(V_0, a, punto) for punto in malla]
    graficar_potencial(malla, valores)
    graficar_autovalor(L, E)
    # A state with energy above the barrier height is unbound.
    etiqueta = 'No ligado' if E > V_0 else 'Ligado'
    plt.text(0, E+0.2, etiqueta)
    plt.show()

interact(int_potencial_energia)
"""
Explanation: Nivel de energía
Para ilustrar adecuadamente la presencia de estados ligados conviene superponer sobre la representación de la función de potencial, la referencia de energía del autovalor del sistema. Para distinguirlo, éste será un trazo discontinuo (no relleno para evitar confusión con el potencial, pero tampoco continuo para distinguirlo de la representación de las funciones de onda).
\begin{eqnarray}
E \leq V_\text{máx},& \qquad \text{Estado ligado}\
E > V_\text{máx},& \qquad \text{Estado no ligado}
\end{eqnarray}
Los estados no ligados son equivalentes a tener particulas libres.
End of explanation
"""
def graficar_autofuncion(x, psi_x, V_max):
    """Plot psi_x rescaled so its largest magnitude equals V_max.

    Rescaling keeps the wavefunction from stretching the axes away from
    the potential/energy curves drawn on the same figure.
    """
    amplitud = max(abs(v) for v in psi_x)
    factor = V_max / amplitud
    plt.plot(x, [v * factor for v in psi_x], '-')
def onda(V_0, E, a, x):
    """Unnormalized even bound-state wavefunction of the finite square well.

    Inside the well (|x| <= a/2) the solution is cos(k*x) with k = sqrt(E);
    outside it decays as A*exp(-k1*|x|) with k1 = sqrt(V_0 - E).  A is fixed
    by continuity at |x| = a/2.  Requires E < V_0 (bound state).
    """
    if abs(x) <= a/2:
        # BUG FIX: was cos(sqrt(E)*x/2), which left psi discontinuous at the
        # well edge; the matching constant A below assumes cos(sqrt(E)*x),
        # consistent with k1 = sqrt(V_0 - E) in the same units.
        return cos(sqrt(E)*x)
    else:
        a2 = a/2
        k1 = sqrt(V_0 - E)
        # Continuity at |x| = a/2:  A*exp(-k1*a2) == cos(sqrt(E)*a2).
        A = cos(sqrt(E)*a2) / exp(-k1*a2)
        signo = abs(x)/x  # sign of x; never 0 here since |x| > a/2
        return A*exp(-signo*k1*x)
def int_potencial_auto_ef(V_0:(5., 20., .1), E:(.1, 20., .1), a:(2.5, 30., .1), L:(10., 100., 5.), N:(10, 200, 10)):
    """Plot potential, energy level and (rescaled) wavefunction on one figure."""
    paso = L / N
    malla = [-L/2 + j*paso for j in range(N + 1)]
    V = [potencial(V_0, a, punto) for punto in malla]
    psi = [onda(V_0, E, a, punto) for punto in malla]
    graficar_potencial(malla, V)
    graficar_autovalor(L, E)
    graficar_autofuncion(malla, psi, V_0)
    etiqueta = 'No ligado' if E > V_0 else 'Ligado'
    plt.text(0, E+0.2, etiqueta)
    plt.show()

interact(int_potencial_auto_ef)
"""
Explanation: Graficación de autofunciones
La visualización de las autofunciones (y su módulo cuadrado), nos permite reconocer visualmente la distribución de probabilidad del sistema e identificar los puntos espaciales más probables para la ubicación de la particula analizada.
Para la correcta visualización, la graficación de la función de onda debe considerar una normalización de escala, no necesariamente al valor de la unidad del eje, pero si como referencia un valor numerico comprendido por los valores máximos de potencial, que corresponden a la parte del gráfico más cercana al margen superior del recuadro de graficación. El no realizar este reescalamiento, podría afectar la visualización del potencial y de la energía, ya que el eje se reajusta a los datos máximos y mínimos.
$$ \psi^{\prime}(x) = \frac{\psi(x)}{\max \psi(x)} V_\text{máx} $$
La graficación de las autofunciones es mediante el comando plot tradicional, y solo tiene de elemento adicional su reescalamiento con base al potencial máximo en la región de interes.
End of explanation
"""
|
ceroytres/ipython-notebooks | Algorithms/Random_Graphs.ipynb | mit | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore') #NetworkX has some deprecation warnings
"""
Explanation: Random Graphs
End of explanation
"""
# Draw one G(n, p) sample for each (n, p) setting in a 2x3 grid of subplots.
params = [(10, 0.1), (10, .5), (10, 0.9), (20, 0.1), (20, .5), (20, 0.9)]
plt.figure(figsize=(15, 10))
for idx, (n, p) in enumerate(params, start=1):
    G = nx.gnp_random_graph(n, p)
    vertex_colors = np.random.rand(n)  # random color per vertex
    plt.subplot(2, 3, idx)
    nx.draw_circular(G, node_color=vertex_colors)
    plt.title("G$(%d,%.2f)$" %(n,p))
plt.suptitle('Sample Random Graphs', fontsize=15)
plt.show()
"""
Explanation: Introduction
A graph $G=(V,E)$ is a collection of vertices $V$ and edges $E$ between the vertices in $V$. Graphs often model interactions such as social networks, a network of computers, links on webpages, or pixels in a image. The set of vertices $V$ often represents a set of objects such as people, computers, and cats. Meanwhile the set of edges $E$ consists of vertex pairs, and usually, vertex pairs encode a relationship between objects. If graph $G$ represents a social network, the vertices could represent people, and the existence of an edge between two people could represent whether they mutually know each other. In fact, there are two types of edges: directed and undirected. An undirected edge represents mutual relation such as friendship in a social network, and a directed edge represents relation that is directional. For example, you have a crush on someone, but they don't have a crush on you. In short, a graph models pairwise relations/interaction(edges) between objects(vertices).
A random graph is a graph whose construction is a result of an underlying random process or distribution over a set of graphs. Random graph can help us model or infer properties of other graphs whose construction is random or appears to be random. Examples of random graphs may include the graph of the internet and a social network. The simplest model is the $G(n,p)$ model ,and it is due to Erdős and Rényi and independently Gilbert [1,2].
The $G(n,p)$ model
The $G(n,p)$ model consist of two parameters where $n$ is the number of vertices, and $p$ is the probability of forming an undirected edge between vertices. During the construction of a random graph, one visits each vertex with probability $p$ one adds an edge. Examples of realization for different parameters are shown below:
End of explanation
"""
# Monte Carlo estimate of P(G(n, p) is fully connected) over a grid of p.
num_samples = 1000
n = 20
num_steps = 51
p = np.linspace(0, 1, num_steps)
prob_connected = np.zeros(num_steps)

for i in range(num_steps):
    for j in range(num_samples):
        G = nx.gnp_random_graph(n, p[i])
        num_connected = nx.number_connected_components(G)
        isFully_connected = float(num_connected == 1)
        # Incremental (running) mean over the j+1 samples drawn so far.
        prob_connected[i] = prob_connected[i] + (isFully_connected - prob_connected[i]) / (j + 1)

plt.figure(figsize=(15, 10))
plt.plot(p, prob_connected)
# BUG FIX: the title hard-coded n=10 while the experiment uses n=20;
# format the actual n into the label instead.
plt.title('Empirical Phase Transition for $G(%d,p)$' % n)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.show()
"""
Explanation: Basic Properties
Phase Transitions
Random graphs undergo a phase transition for certain properties such as connectedness. For a fixed $n$, there exists a threshold $p(n)$ such that for all $p > p(n)$ the random graph has no isolated vertices with high probability.
It can be shown that for $$p(n) = \frac{\log n}{n}$$ the probability of isolated components goes to zero[6].
End of explanation
"""
# Load the SNAP Facebook ego-network edge list, then draw it with a
# spring layout and report its diameter and vertex count in the title.
G = nx.read_edgelist('facebook_combined.txt')
d = nx.diameter(G)
n = len(G.nodes())
vertex_colors = np.random.rand(n)

plt.figure(figsize=(15, 10))
nx.draw_spring(G, node_color=vertex_colors)
plt.title('SNAP Facebook Ego Network with a diameter of %d and %d vertices' %(d,n), fontsize=15)
plt.show()
"""
Explanation: It's a Small World
In a fully connected random graph, the diameter of the graph becomes extremely small relative to the number of vertices. The diameter is the longest shortest path between two nodes. The small world phenomenon was observed in social networks in Milgram's Small World Experiment[3]. Milgram's experiments are commonly referred to in popular culture as the six degrees of separation. The example below shows a Facebook social network from the SNAP dataset:
End of explanation
"""
# Vertex count of the Facebook ego network loaded above.
len(G.nodes())
"""
Explanation: In the social network above, the diameter is 8 and the number of people in the social network is 4039. Although the $G(n,p)$ random graph model has the property of a small diameter, social networks have a property not found in the $G(n,p)$ model. Social networks tend to have a higher clustering coefficient[4] than the $G(n,p)$ model. The clustering coefficient captures the notion of triad closures. In simple terms, your friends are probably friends with your other friends. Other random graph models such as the Watts-Strogatz model have been proposed to deal with this problem of the clustering coefficient [5].
References:
Erdős and A. Rényi, On Random Graphs, Publ. Math. 6, 290 (1959).
Gilbert, Random Graphs, Ann. Math. Stat., 30, 1141 (1959).
Milgram, Stanley (May 1967). "The Small World Problem". Psychology Today. Ziff-Davis Publishing Company.
M. Chiang, Networked Life: 20 Questions and Answers, Cambridge University Press, August 2012.
Watts, D. J.; Strogatz, S. H. (1998). "Collective dynamics of 'small-world' networks" (PDF). Nature. 393 (6684): 440–442.
Jackson, Matthew O. Social and economic networks. Princeton university press, 2010.
End of explanation
"""
|
uber/pyro | tutorial/source/effect_handlers.ipynb | apache-2.0 | import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.poutine.runtime import effectful
pyro.set_rng_seed(101)
"""
Explanation: Poutine: A Guide to Programming with Effect Handlers in Pyro
Note to readers: This tutorial is a guide to the API details of Pyro's effect handling library, Poutine. We recommend readers first orient themselves with the simplified minipyro.py which contains a minimal, readable implementation of Pyro's runtime and the effect handler abstraction described here. Pyro's effect handler library is more general than minipyro's but also contains more layers of indirection; it helps to read them side-by-side.
End of explanation
"""
def scale(guess):
    """Generative model: weight ~ N(guess, 1), measurement ~ N(weight, 0.75)."""
    weight = pyro.sample("weight", dist.Normal(guess, 1.0))
    return pyro.sample("measurement", dist.Normal(weight, 0.75))
"""
Explanation: Introduction
Inference in probabilistic programming involves manipulating or transforming probabilistic programs written as generative models. For example, nearly all approximate inference algorithms require computing the unnormalized joint probability of values of latent and observed variables under a generative model.
Consider the following example model from the introductory inference tutorial:
End of explanation
"""
def make_log_joint(model):
    """Return a function computing model's log-joint given observed data."""
    def _log_joint(cond_data, *args, **kwargs):
        # Fix observed sites to the values in cond_data, record one
        # execution trace, and sum the log-probability of every site.
        conditioned = poutine.condition(model, data=cond_data)
        tr = poutine.trace(conditioned).get_trace(*args, **kwargs)
        return tr.log_prob_sum()
    return _log_joint

scale_log_joint = make_log_joint(scale)
print(scale_log_joint({"measurement": 9.5, "weight": 8.23}, 8.5))
"""
Explanation: This model defines a joint probability distribution over "weight" and "measurement":
$${\sf weight} \, | \, {\sf guess} \sim \cal {\sf Normal}({\sf guess}, 1) $$
$${\sf measurement} \, | \, {\sf guess}, {\sf weight} \sim {\sf Normal}({\sf weight}, 0.75)$$
If we had access to the inputs and outputs of each pyro.sample site, we could compute their log-joint:
python
logp = dist.Normal(guess, 1.0).log_prob(weight).sum() + dist.Normal(weight, 0.75).log_prob(measurement).sum()
However, the way we wrote scale above does not seem to expose these intermediate distribution objects, and rewriting it to return them would be intrusive and would violate the separation of concerns between models and inference algorithms that a probabilistic programming language like Pyro is designed to enforce.
To resolve this conflict and facilitate inference algorithm development, Pyro exposes Poutine, a library of effect handlers, or composable building blocks for examining and modifying the behavior of Pyro programs. Most of Pyro's internals are implemented on top of Poutine.
A first look at Poutine: Pyro's library of algorithmic building blocks
Effect handlers, a common abstraction in the programming languages community, give nonstandard interpretations or side effects to the behavior of particular statements in a programming language, like pyro.sample or pyro.param. For background reading on effect handlers in programming language research, see the optional "References" section at the end of this tutorial.
Rather than reviewing more definitions, let's look at a first example that addresses the problem above: we can compose two existing effect handlers, poutine.condition (which sets output values of pyro.sample statements) and poutine.trace (which records the inputs, distributions, and outputs of pyro.sample statements), to concisely define a new effect handler that computes the log-joint:
End of explanation
"""
from pyro.poutine.trace_messenger import TraceMessenger
from pyro.poutine.condition_messenger import ConditionMessenger

def make_log_joint_2(model):
    """Like make_log_joint, but sums the trace's log-probabilities by hand."""
    def _log_joint(cond_data, *args, **kwargs):
        # Record a trace while fixing observed sites to cond_data.
        with TraceMessenger() as tracer:
            with ConditionMessenger(data=cond_data):
                model(*args, **kwargs)

        logp = 0.
        for name, node in tracer.trace.nodes.items():
            if node["type"] != "sample":
                continue
            if node["is_observed"]:
                # Conditioning must have substituted exactly our value.
                assert node["value"] is cond_data[name]
            logp = logp + node["fn"].log_prob(node["value"]).sum()
        return logp
    return _log_joint

scale_log_joint = make_log_joint_2(scale)
print(scale_log_joint({"measurement": 9.5, "weight": 8.23}, 8.5))
"""
Explanation: That snippet is short, but still somewhat opaque - poutine.condition, poutine.trace, and trace.log_prob_sum are all black boxes. Let's remove a layer of boilerplate from poutine.condition and poutine.trace and explicitly implement what trace.log_prob_sum is doing:
End of explanation
"""
class LogJointMessenger(poutine.messenger.Messenger):
    """Effect handler that accumulates the log-joint of conditioned sample sites.

    Every sample site in the handled model must appear in ``cond_data``;
    each site's value is fixed to the given datum and its log-probability
    is added to ``self.logp``.
    """

    def __init__(self, cond_data):
        self.data = cond_data

    # Messenger already defines __call__; we override it here so the
    # wrapped function returns the accumulated log-joint instead of the
    # model's own return value.
    def __call__(self, fn):
        def wrapped(*args, **kwargs):
            with self:
                fn(*args, **kwargs)
                # Clone before __exit__ resets the accumulator.
                total = self.logp.clone()
            return total
        return wrapped

    def __enter__(self):
        # Reset the accumulator, then let the base class push this
        # handler onto the global effect-handler stack.
        self.logp = torch.tensor(0.)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        # Clear the accumulator; the base class pops us off the stack.
        self.logp = torch.tensor(0.)
        return super().__exit__(exc_type, exc_value, traceback)

    # Called once per pyro.sample site with a dict msg holding the site's
    # name, distribution ("fn"), value, scale and other metadata.
    def _pyro_sample(self, msg):
        # Any unobserved random variable triggers this assertion.
        assert msg["name"] in self.data
        msg["value"] = self.data[msg["name"]]
        # Mark the site observed so no other Messenger overwrites the value.
        msg["is_observed"] = True
        self.logp = self.logp + (msg["scale"] * msg["fn"].log_prob(msg["value"])).sum()
# Use the handler as a context manager...
with LogJointMessenger(cond_data={"measurement": 9.5, "weight": 8.23}) as handler:
    scale(8.5)
print(handler.logp.clone())

# ...or as a higher-order function wrapping the model.
scale_log_joint = LogJointMessenger(cond_data={"measurement": 9.5, "weight": 8.23})(scale)
print(scale_log_joint(8.5))
"""
Explanation: This makes things a little more clear: we can now see that poutine.trace and poutine.condition are wrappers for context managers that presumably communicate with the model through something inside pyro.sample. We can also see that poutine.trace produces a data structure (a Trace) containing a dictionary whose keys are sample site names and values are dictionaries containing the distribution ("fn") and output ("value") at each site, and that the output values at each site are exactly the values specified in data.
Finally, TraceMessenger and ConditionMessenger are Pyro effect handlers, or Messengers: stateful context manager objects that are placed on a global stack and send messages (hence the name) up and down the stack at each effectful operation, like a pyro.sample call. A Messenger is placed at the bottom of the stack when its __enter__ method is called, i.e. when it is used in a "with" statement.
We'll look at this process in more detail later in this tutorial. For a simplified implementation in only a few lines of code, see pyro.contrib.minipyro.
Implementing new effect handlers with the Messenger API
Although it's easiest to build new effect handlers by composing the existing ones in pyro.poutine, implementing a new effect as a pyro.poutine.messenger.Messenger subclass is actually fairly straightforward. Before diving into the API, let's look at another example: a version of our log-joint computation that performs the sum while the model is executing. We'll then review what each part of the example is actually doing.
End of explanation
"""
def log_joint(model=None, cond_data=None):
    """Boilerplate making LogJointMessenger usable as decorator or wrapper.

    With a model, returns the wrapped model; without one, returns the bare
    handler (usable as a context manager or decorator).
    """
    handler = LogJointMessenger(cond_data=cond_data)
    if model is None:
        return handler
    return handler(model)

scale_log_joint = log_joint(scale, cond_data={"measurement": 9.5, "weight": 8.23})
print(scale_log_joint(8.5))
"""
Explanation: A convenient bit of boilerplate that allows the use of LogJointMessenger as a context manager, decorator, or higher-order function is the following. Most of the existing effect handlers in pyro.poutine, including poutine.trace and poutine.condition which we used earlier, are Messengers wrapped this way in pyro.poutine.handlers.
End of explanation
"""
class LogJointMessenger2(poutine.messenger.Messenger):
    """Log-joint accumulator that also supports unobserved (sampled) sites.

    Sites named in ``cond_data`` are conditioned; all other sites keep
    whatever value downstream handlers produce.  Log-probabilities are
    summed in ``_pyro_post_sample``, i.e. only after every handler has
    finished modifying the site's value and distribution.
    """

    def __init__(self, cond_data):
        self.data = cond_data

    def __call__(self, fn):
        def wrapped(*args, **kwargs):
            with self:
                fn(*args, **kwargs)
                # Clone before __exit__ resets the accumulator.
                total = self.logp.clone()
            return total
        return wrapped

    def __enter__(self):
        self.logp = torch.tensor(0.)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        self.logp = torch.tensor(0.)
        return super().__exit__(exc_type, exc_value, traceback)

    def _pyro_sample(self, msg):
        # Condition only the sites we have data for; setting "done" stops
        # any later handler from overwriting the value.
        if msg["name"] in self.data:
            msg["value"] = self.data[msg["name"]]
            msg["done"] = True

    def _pyro_post_sample(self, msg):
        # "done" guarantees value and fn are final at this point.
        assert msg["done"]
        self.logp = self.logp + (msg["scale"] * msg["fn"].log_prob(msg["value"])).sum()
# Accumulate the conditioned log-joint while the model runs under the handler.
with LogJointMessenger2(cond_data={"measurement": 9.5, "weight": 8.23}) as handler:
    scale(8.5)
print(handler.logp)
"""
Explanation: The Messenger API in more detail
Our LogJointMessenger implementation has three important methods: __enter__, __exit__, and _pyro_sample.
__enter__ and __exit__ are special methods needed by any Python context manager. When implementing new Messenger classes, if we override __enter__ and __exit__, we always need to call the base Messenger's __enter__ and __exit__ methods for the new Messenger to be applied correctly.
The last method LogJointMessenger._pyro_sample, is called once at each sample site. It reads and modifies a message, which is a dictionary containing the sample site's name, distribution, sampled or observed value, and other metadata. We'll examine the contents of a message in more detail in the next section.
Instead of _pyro_sample, a generic Messenger actually contains two methods that are called once per operation where side effects are performed:
1. _process_message modifies a message and sends the result to the Messenger just above on the stack
2. _postprocess_message modifies a message and sends the result to the next Messenger down on the stack. It is always called after all active Messengers have had their _process_message method applied to the message.
Although custom Messengers can override _process_message and _postprocess_message, it's convenient to avoid requiring all effect handlers to be aware of all possible effectful operation types. For this reason, by default Messenger._process_message will use msg["type"] to dispatch to a corresponding method Messenger._pyro_<type>, e.g. Messenger._pyro_sample as in LogJointMessenger. Just as exception handling code ignores unhandled exception types, this allows Messengers to simply forward operations they don't know how to handle up to the next Messenger in the stack:
python
class Messenger:
...
def _process_message(self, msg):
method_name = "_pyro_{}".format(msg["type"]) # e.g. _pyro_sample when msg["type"] == "sample"
if hasattr(self, method_name):
getattr(self, method_name)(msg)
...
Interlude: the global Messenger stack
See pyro.contrib.minipyro for an end-to-end implementation of the mechanism in this section.
The order in which Messengers are applied to an operation like a pyro.sample statement is determined by the order in which their __enter__ methods are called. Messenger.__enter__ appends a Messenger to the end (the bottom) of the global handler stack:
```python
class Messenger:
...
# enter pushes a Messenger onto the stack
def enter(self):
...
_PYRO_STACK.append(self)
...
# __exit__ removes a Messenger from the stack
def __exit__(self, ...):
...
assert _PYRO_STACK[-1] is self
_PYRO_STACK.pop()
...
```
pyro.poutine.runtime.apply_stack then traverses the stack twice at each operation, first from bottom to top to apply each _process_message and then from top to bottom to apply each _postprocess_message:
python
def apply_stack(msg): # simplified
for handler in reversed(_PYRO_STACK):
handler._process_message(msg)
...
default_process_message(msg)
...
for handler in _PYRO_STACK:
handler._postprocess_message(msg)
...
return msg
Returning to the LogJointMessenger example
The second method _postprocess_message is necessary because some effects can only be applied after all other effect handlers have had a chance to update the message once. In the case of LogJointMessenger, other effects, like enumeration, may modify a sample site's value or distribution (msg["value"] or msg["fn"]), so we move the log-probability computation to a new method, _pyro_post_sample, which is called by _postprocess_message (via a dispatch mechanism like the one used by _process_message) at each sample site after all active handlers' _pyro_sample methods have been applied:
End of explanation
"""
def monte_carlo_elbo(model, guide, batch, *args, **kwargs):
    """Single-sample Monte Carlo estimate of the negative ELBO.

    ``batch`` is a dict of observed values keyed by sample-site name;
    remaining arguments are forwarded to both model and guide.
    """
    # Fix the observed variables of the model to the values in batch.
    conditioned = poutine.condition(model, data=batch)

    # Draw one sample from the guide, recording every sample site.
    guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)

    # Replay the model against the guide's latent values, recording the
    # distributions that appear at each of the model's sample sites.
    model_trace = poutine.trace(
        poutine.replay(conditioned, trace=guide_trace)
    ).get_trace(*args, **kwargs)

    elbo = 0.
    for name, node in model_trace.nodes.items():
        if node["type"] != "sample":
            continue
        # + log p(x, z) contribution of this site
        elbo = elbo + node["fn"].log_prob(node["value"]).sum()
        if not node["is_observed"]:
            # - log q(z) contribution for latent sites
            elbo = elbo - guide_trace.nodes[name]["fn"].log_prob(node["value"]).sum()
    return -elbo
"""
Explanation: Inside the messages sent by Messengers
As the previous two examples mentioned, the actual messages sent up and down the stack are dictionaries with a particular set of keys. Consider the following sample statement:
python
pyro.sample("x", dist.Bernoulli(0.5), infer={"enumerate": "parallel"}, obs=None)
This sample statement is converted into an initial message before any effects are applied, and each effect handler's _process_message and _postprocess_message may update fields in place or add new fields. We write out the full initial message here for completeness:
python
msg = {
# The following fields contain the name, inputs, function, and output of a site.
# These are generally the only fields you'll need to think about.
"name": "x",
"fn": dist.Bernoulli(0.5),
"value": None, # msg["value"] will eventually contain the value returned by pyro.sample
"is_observed": False, # because obs=None by default; only used by sample sites
"args": (), # positional arguments passed to "fn" when it is called; usually empty for sample sites
"kwargs": {}, # keyword arguments passed to "fn" when it is called; usually empty for sample sites
# This field typically contains metadata needed or stored by a particular inference algorithm
"infer": {"enumerate": "parallel"},
# The remaining fields are generally only used by Pyro's internals,
# or for implementing more advanced effects beyond the scope of this tutorial
"type": "sample", # label used by Messenger._process_message to dispatch, in this case to _pyro_sample
"done": False,
"stop": False,
"scale": torch.tensor(1.), # Multiplicative scale factor that can be applied to each site's log_prob
"mask": None,
"continuation": None,
"cond_indep_stack": (), # Will contain metadata from each pyro.plate enclosing this sample site.
}
Note that when we use poutine.trace or TraceMessenger as in our first two versions of make_log_joint, the contents of msg are exactly the information stored in the trace for each sample and param site.
Implementing inference algorithms with existing effect handlers: examples
It turns out that many inference operations, like our first version of make_log_joint above, have strikingly short implementations in terms of existing effect handlers in pyro.poutine.
Example: Variational inference with a Monte Carlo ELBO
For example, here is an implementation of variational inference with a Monte Carlo ELBO that uses poutine.trace, poutine.condition, and poutine.replay. This is very similar to the simple ELBO in pyro.contrib.minipyro.
End of explanation
"""
def train(model, guide, data):
    """Minimal SVI loop: one optimizer step per batch of data."""
    optimizer = pyro.optim.Adam({})
    for batch in data:
        # Record every pyro.param call made while computing the loss;
        # poutine.block hides all non-param sites from the recording trace.
        with poutine.trace() as param_capture:
            with poutine.block(hide_fn=lambda node: node["type"] != "param"):
                loss = monte_carlo_elbo(model, guide, batch)

        loss.backward()
        params = set(node["value"].unconstrained()
                     for node in param_capture.trace.nodes.values())
        optimizer.step(params)
        pyro.infer.util.zero_grads(params)
"""
Explanation: We use poutine.trace and poutine.block to record pyro.param calls for optimization:
End of explanation
"""
def sequential_discrete_marginal(model, data, site_name="_RETURN"):
    """Exact marginal at ``site_name`` via breadth-first enumeration.

    Enumerates every combination of values of the discrete sample sites in
    ``model`` (conditioned on the dict ``data``), and returns an Empirical
    distribution over the values observed at ``site_name``, weighted by
    each execution's normalized log-joint.
    """
    import queue  # stdlib FIFO queue (replaces the legacy six.moves py2/3 shim)

    q = queue.Queue()        # first-in first-out queue of partial executions
    q.put(poutine.Trace())   # seed the queue with an empty trace

    # As before, fix the values of observed random variables with
    # poutine.condition; data's keys are sample-site names in model.
    conditioned_model = poutine.condition(model, data=data)

    # poutine.queue repeatedly pushes and pops partially completed executions
    # from q, performing breadth-first enumeration over the set of values of
    # all discrete sample sites in model.
    enum_model = poutine.queue(conditioned_model, queue=q)

    # Actually perform the enumeration by repeatedly tracing enum_model,
    # accumulating samples and trace log-probabilities for postprocessing.
    samples, log_weights = [], []
    while not q.empty():
        trace = poutine.trace(enum_model).get_trace()
        samples.append(trace.nodes[site_name]["value"])
        log_weights.append(trace.log_prob_sum())

    # Normalize the log-weights and package everything as a histogram.
    samples = torch.stack(samples, 0)
    log_weights = torch.stack(log_weights, 0)
    log_weights = log_weights - dist.util.logsumexp(log_weights, dim=0)
    return dist.Empirical(samples, log_weights)
"""
Explanation: Example: exact inference via sequential enumeration
Here is an example of a very different inference algorithm--exact inference via enumeration--implemented with pyro.poutine. A complete explanation of this algorithm is beyond the scope of this tutorial and may be found in Chapter 3 of the short online book Design and Implementation of Probabilistic Programming Languages. This example uses poutine.queue, itself implemented using poutine.trace, poutine.replay, and poutine.block, to enumerate over possible values of all discrete variables in a model and compute a marginal distribution over all possible return values or the possible values at a particular sample site:
End of explanation
"""
class LazyValue:
    """A lazily-evaluated expression node: a function plus its arguments.

    ``evaluate()`` recursively forces any LazyValue operands (in fn, args
    or kwargs) and memoizes the result, so each node is computed at most
    once.
    """

    # Sentinel distinguishing "not yet computed" from a computed None.
    _UNSET = object()

    def __init__(self, fn, *args, **kwargs):
        self._expr = (fn, args, kwargs)
        self._value = LazyValue._UNSET

    def __str__(self):
        # Lisp-like rendering of the expression, e.g. "(add 1 2)".
        return "({} {})".format(str(self._expr[0]), " ".join(map(str, self._expr[1])))

    def evaluate(self):
        if self._value is LazyValue._UNSET:
            fn, args, kwargs = self._expr
            # Force any lazy operands first (depth-first).
            fn = fn.evaluate() if isinstance(fn, LazyValue) else fn
            args = tuple(arg.evaluate() if isinstance(arg, LazyValue) else arg
                         for arg in args)
            kwargs = {k: v.evaluate() if isinstance(v, LazyValue) else v
                      for k, v in kwargs.items()}
            # BUG FIX: memoize via a sentinel instead of comparing to None,
            # so a result that is legitimately None is not recomputed (and
            # its side effects not re-run) on every evaluate() call.
            self._value = fn(*args, **kwargs)
        return self._value
"""
Explanation: (Note that sequential_discrete_marginal is very general, but is also quite slow. For high-performance parallel enumeration that applies to a less general class of models, see the enumeration tutorial.)
Example: implementing lazy evaluation with the Messenger API
Now that we've learned more about the internals of Messenger, let's use it to implement a slightly more complicated effect: lazy evaluation. We first define a LazyValue class that we will use to build up a computation graph:
End of explanation
"""
class LazyMessenger(pyro.poutine.messenger.Messenger):
    """Effect handler that makes "apply" and "sample" sites lazy.

    Instead of letting each site's function execute, it stores the function
    and its arguments in a LazyValue computation graph; nothing is computed
    until LazyValue.evaluate() is called.
    """
    def _pyro_sample(self, msg):
        # Setting "done" prevents any later handler (or the default
        # runtime) from executing msg["fn"] eagerly.
        if msg["type"] in ("apply", "sample") and not msg["done"]:
            msg["done"] = True
            msg["value"] = LazyValue(msg["fn"], *msg["args"], **msg["kwargs"])
"""
Explanation: With LazyValue, implementing lazy evaluation as a Messenger compatible with other effect handlers is suprisingly easy. We just make each msg["value"] a LazyValue and introduce a new operation type "apply" for deterministic operations:
End of explanation
"""
# Deterministic primitives wrapped with @effectful(type="apply") so effect
# handlers such as LazyMessenger can intercept them without mistaking them
# for pyro.sample sites.
@effectful(type="apply")
def add(x, y):
    """Effectful addition."""
    return x + y
@effectful(type="apply")
def mul(x, y):
    """Effectful multiplication."""
    return x * y
@effectful(type="apply")
def sigmoid(x):
    """Effectful logistic sigmoid."""
    return torch.sigmoid(x)
@effectful(type="apply")
def normal(loc, scale):
    """Effectful constructor for a Normal distribution object."""
    return dist.Normal(loc, scale)
"""
Explanation: Finally, just like torch.autograd overloads torch tensor operations to record an autograd graph, we need to wrap any operations we'd like to be lazy. We'll use pyro.poutine.runtime.effectful as a decorator to expose these operations to LazyMessenger. effectful constructs a message much like the one above and sends it up and down the effect handler stack, but allows us to set the type (in this case, to "apply" instead of "sample") so that these operations aren't mistaken for sample statements by other effect handlers like TraceMessenger:
End of explanation
"""
def biased_scale(guess):
    # Latent true weight, centered on the operator's guess.
    weight = pyro.sample("weight", normal(guess, 1.))
    # Latent measurement tolerance of the scale.
    tolerance = pyro.sample("tolerance", normal(0., 0.25))
    # Observed measurement: biased location 0.8 * weight + 1 with
    # noise scale sigmoid(tolerance).
    loc = add(mul(weight, 0.8), 1.)
    scale = sigmoid(tolerance)
    return pyro.sample("measurement", normal(loc, scale))
# Run the model under lazy evaluation: v comes back as an unevaluated
# LazyValue computation graph, and evaluate() forces the deferred work.
with LazyMessenger():
    v = biased_scale(8.5)
    print(v)
    print(v.evaluate())
"""
Explanation: Applied to another model:
End of explanation
"""
|
leonarduk/stockmarketview | timeseries-analysis-python/src/main/python/FinanceOps/01B_Better_Long-Term_Stock_Forecasts.ipynb | apache-2.0 | %matplotlib inline
# Imports from Python packages.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
import numpy as np
import os
# Imports from FinanceOps.
from curve_fit import CurveFitReciprocal
from data_keys import *
from data import load_index_data, load_stock_data
from returns import prepare_mean_ann_returns
"""
Explanation: Better Long-Term Stock Forecasts
by Magnus Erik Hvass Pedersen
/ GitHub / Videos on YouTube
Introduction
The previous paper showed a strong predictive relationship between the P/Sales ratio and long-term returns of some individual stocks and the S&P 500 stock-market index.
However, there was a considerable amount of noise in those scatter-plots, because we considered fixed investment periods of exactly 10 years, for example. So even though the P/Sales ratio was a strong predictor for the mispricing at the buy-time, it was impossible to predict the mispricing at the sell-time, because the stock-market could be in a bubble or in a crash 10 years into the future, which would distort the estimated returns.
This paper presents a simple solution, which is to consider the average returns for all investment periods between 7 and 15 years, and then make a scatter-plot of the mean returns versus the P/Sales ratio. This produces incredibly smooth curves for estimating the future long-term returns of the S&P 500 and some individual stocks.
Along with the previous paper, this is a very important discovery and it has implications for many areas of both theoretical and applied finance. It means that the U.S. stock-market as a whole is not "efficient" and does not follow a purely "random walk" in the long-term. It is possible to estimate the future long-term return of the stock-market and some individual stocks from just a single indicator variable.
Python Imports
This Jupyter Notebook is implemented in Python v. 3.6 and requires various packages for numerical computations and plotting. See the installation instructions in the README-file.
End of explanation
"""
# Define the ticker-names for the stocks we consider.
ticker_SP500 = "S&P 500"
ticker_JNJ = "JNJ"
ticker_K = "K"
ticker_PG = "PG"
ticker_WMT = "WMT"
# Load the financial data for the stocks.
df_SP500 = load_index_data(ticker=ticker_SP500)
df_JNJ = load_stock_data(ticker=ticker_JNJ)
df_K = load_stock_data(ticker=ticker_K)
df_PG = load_stock_data(ticker=ticker_PG)
df_WMT = load_stock_data(ticker=ticker_WMT)
"""
Explanation: Load Data
We now load all the financial data we will be using.
End of explanation
"""
def plot_psales(df, ticker, start_date=None):
    """
    Plot the historical P/Sales ratio for a stock or index.

    :param df: Pandas DataFrame with a PSALES column.
    :param ticker: Ticker-name for the stock or index.
    :param start_date: Start-date for the plot (None plots all data).
    :return: Nothing.
    """
    # Slice from start_date onwards, drop missing values, then plot.
    series = df[PSALES]
    series = series[start_date:].dropna()
    plot_title = ticker + " - P/Sales"
    series.plot(title=plot_title, grid=True)
def plot_ann_returns(ticker, df, key=PSALES,
                     min_years=7, max_years=15,
                     use_colors=True):
    """
    Create a single scatter-plot with P/Sales or P/Book
    vs. Mean Annualized Returns for e.g. 7-15 years,
    together with a fitted reciprocal "return curve".

    :param ticker: Ticker-name for the stock or index.
    :param df: Pandas DataFrame containing key and TOTAL_RETURN.
    :param key: Name of data-column to use e.g. PSALES or PBOOK.
    :param min_years: Min number of years for return periods.
    :param max_years: Max number of years for return periods.
    :param use_colors: Boolean whether to use colors in plot.
    :return: Nothing.
    """
    # Prepare the data.
    # x is the P/Sales or P/Book and y is the Mean Ann. Returns.
    x, y = prepare_mean_ann_returns(df=df, key=key,
                                    min_years=min_years,
                                    max_years=max_years)
    # Create a single plot.
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(211)
    # Scatter-plot.
    if use_colors:
        # Give each dot in the scatter-plot a shade of blue
        # according to the date of the data-point.
        ax.scatter(x, y,
                   c=list(range(len(x))), cmap='Blues',
                   alpha=1.0, marker='o')
    else:
        # Use the same color for all dots.
        ax.scatter(x, y, marker='o')
    # First part of the title.
    title1 = "[{0}] {1} vs. {2}-{3} Years Mean Ann. Return"
    title1 = title1.format(ticker, key, min_years, max_years)
    # X-values for plotting fitted curves.
    # Use ~1000 evenly spaced points across the observed x-span.
    # BUGFIX: the step must be the span (x_max - x_min) / 1000,
    # not the ratio (x_max / x_min) / 1000.
    x_min = np.min(x)
    x_max = np.max(x)
    x_range = np.arange(x_min, x_max, (x_max - x_min) / 1000)
    # Plot reciprocal curve-fit.
    curve_fit_reciprocal = CurveFitReciprocal(x=x, y=y)
    y_pred = curve_fit_reciprocal.predict(x=x_range)
    ax.plot(x_range, y_pred, color='red')
    # Title with these curve-fit parameters.
    title2 = "Mean Ann. Return = {0:.1%} / " + key + " + {1:.1%}"
    title2 = title2.format(*curve_fit_reciprocal.params)
    # Combine and set the plot-title.
    title = "\n".join([title1, title2])
    ax.set_title(title)
    # Set axis labels.
    ax.set_xlabel(key)
    ax.set_ylabel("Mean Ann. Return")
    # Convert y-ticks to percentages.
    # We use a custom FuncFormatter because PercentFormatter
    # is inconsistent with string-formatters used elsewhere.
    formatter = FuncFormatter(lambda y, _: '{:.0%}'.format(y))
    ax.yaxis.set_major_formatter(formatter)
    # Show grid.
    ax.grid()
    # Show the plot.
    plt.show()
"""
Explanation: Plotting Functions
These are helper-functions used for making plots.
End of explanation
"""
plot_ann_returns(ticker=ticker_SP500, df=df_SP500, key=PSALES,
min_years=7, max_years=15, use_colors=True)
"""
Explanation: Case Study: S&P 500
The S&P 500 is a stock-market index consisting of the stocks of 500 of the largest companies in USA. The S&P 500 covers about 80% of the whole U.S. stock-market in terms of size so it is useful as a gauge for the entire U.S. stock-market.
We consider the Total Return of the S&P 500 which is what you would get from investing in the S&P 500 and re-investing all dividends back into the S&P 500. We ignore all taxes here.
The following scatter-plot shows the P/Sales ratio versus the Mean Annualized Returns of the S&P 500 for periods between 7 and 15 years.
For each day we calculate the Total Return of the S&P 500 over the next 7-15 years, then we calculate the Mean Annualized Return from those, and then we put a blue dot in the scatter-plot for that date's P/Sales ratio and the Mean Annualized Return we just calculated. This process is continued for all days in the time-series, until we have calculated and plotted the P/Sales vs. Mean Annualized Return for all days.
As can be seen from this scatter-plot, the P/Sales ratio is a very strong predictor for long investment periods between 7-15 years. We call the fitted red curve for the "return curve".
End of explanation
"""
df_SP500[PSALES].dropna().tail(1)
plot_psales(df=df_SP500, ticker=ticker_SP500)
"""
Explanation: We can forecast the future long-term returns using the fitted "return curve" from the scatter-plot above. Towards the end of 2017, the P/Sales ratio was almost 2.2 for the S&P 500, which was about the previous high point of the "Dot-Com" bubble around year 2000.
End of explanation
"""
plot_ann_returns(ticker=ticker_JNJ, df=df_JNJ, key=PSALES,
min_years=7, max_years=15, use_colors=True)
"""
Explanation: So if you purchased the S&P 500 in December 2017 at this P/Sales ratio and will keep the investment for more than 7 years, while reinvesting all dividends during those years (all taxes are ignored), then the formula forecasts an annualized return of about 1.35%:
$$
Annualized\ Return = 14.4\% / (P/Sales) - 5.2\% = 14.4\% / 2.2 - 5.2\% \simeq 1.35\%
$$
The formula cannot predict exactly what will happen in the future, because there might be a stock-market bubble or a crash in any given year. The formula merely predicts an average annualized return for long-term investments of about 7-15 years in the S&P 500.
Case Study: Johnson & Johnson (JNJ)
Now let us consider individual companies instead of a whole stock-market index. The first company we consider is Johnson & Johnson with the ticker symbol JNJ. This is a very large company with over 130.000 employees worldwide that manufacture a wide range of health-care related products.
When we plot the P/Sales ratio versus the mean annualized return for 7-15 year periods, we see that the "return curve" fits quite well although there appears to be a few separate "return curves" for P/Sales ratios roughly between 2 and 3.
The blue shades in the scatter-plot indicate the time of the data-points and suggest that the separate curves belong to different periods of time. More research would be needed to establish why these periods have different "return curves". Perhaps the periods had significantly different profit-margins or sales-growth.
End of explanation
"""
df_JNJ[PSALES].dropna().tail(1)
plot_psales(df=df_JNJ, ticker=ticker_JNJ)
"""
Explanation: Towards the end of 2017 the P/Sales ratio was about 4.9 which is close to the all-time historical highs experienced during the stock-market bubble around year 2000.
End of explanation
"""
plot_ann_returns(ticker=ticker_PG, df=df_PG, key=PSALES,
min_years=7, max_years=15)
"""
Explanation: Using the formula for the fitted "return curve" from the scatter-plot above, we get this forecasted long-term return:
$$
Annualized\ Return \simeq 77.9\% / (P/Sales) - 8.9\%
\simeq 77.9\% / 4.9 - 8.9\% \simeq 7.0\%
$$
So according to this formula, the annualized return of the JNJ stock will be around 7.0% if you own the stock for at least 7 years, when dividends are reinvested and ignoring taxes.
Again there is the caveat that it is impossible to predict whether there will be a stock-market bubble or crash several years into the future, so the forecasted return is an average for 7-15 year investment periods.
Case Study: Procter & Gamble (PG)
Another very large company is Procter & Gamble with the ticker symbol PG, which sells a wide range of consumer products and has almost 100.000 employees.
If we plot the P/Sales ratio versus the mean annualized return we get an incredibly regular curve of data-points. The red line shows a reciprocal curve-fit, which is apparently not the correct formula for this data, as it doesn't fit so well at the ends. You are encouraged to try and find a better curve-fit and a theoretical explanation why your formula is better.
End of explanation
"""
plot_psales(df=df_PG, ticker=ticker_PG)
"""
Explanation: When we plot the historical P/Sales ratio, we see that at the end of 2017 it was around 3.5 which was near its all-time high experienced during the bubble around year 2000.
End of explanation
"""
plot_ann_returns(ticker=ticker_K, df=df_K, key=PSALES,
min_years=7, max_years=15, use_colors=True)
"""
Explanation: Using the fitted reciprocal curve from the scatter-plot above, we get a forecasted return of about 6.1% per year, when dividends are reinvested without taxes:
$$
Annualized\ Return \simeq 24.4\% / (P/Sales) - 0.9\% \simeq
24.4\% / 3.5 - 0.9\% \simeq 6.1\%
$$
But it should again be noted that this formula doesn't fit so well towards the ends of the data, and looking at the scatter-plot suggests a slightly lower return of maybe 5.5%.
Case Study: Kellogg's (K)
The next company is Kellogg's which trades under the ticker symbol K. The company has about 33.000 employees and is especially known for making breakfast cereals.
When we plot the P/Sales ratio versus the mean annualized return it shows a strong trend that higher P/Sales ratios gives lower long-term returns, although the curve-fit is not as good as for the other companies we studied above, especially for lower P/Sales ratios.
The blue shades show the time of the data-points. It can be hard to see in this plot, but for P/Sales ratios between 1.50 and 1.75, there is a "blob" of light-blue data-points well above the fitted red curve. This clearly indicates that the outlying data-points belong to a specific period in time. But we would have to do more research into the financial data for that period, to uncover the reason why the returns are so different.
End of explanation
"""
df_K[PSALES].dropna().mean()
plot_psales(df=df_K, ticker=ticker_K)
"""
Explanation: Towards the end of 2017 the P/Sales ratio was about 1.8 which was actually very close to the historical average.
End of explanation
"""
plot_ann_returns(ticker=ticker_WMT, df=df_WMT, key=PSALES,
min_years=7, max_years=15, use_colors=True)
"""
Explanation: Using the fitted "return curve" from the scatter-plot above with the P/Sales ratio of 1.8 we get the forecasted return:
$$
Annualized\ Return \simeq 27.5\% / (P/Sales) - 6.2\% \simeq
27.5\% / 1.8 - 6.2\% \simeq 9.1\%
$$
So a forecasted return of about 9.1% per year over the next 7-15 years when dividends are reinvested without taxes. That is about 2% (percentage points) higher than the return forecasted for JNJ and 3% higher than forecasted for PG above.
Case Study: Wal-Mart (WMT)
Now let us consider the company Wal-Mart which trades under the ticker symbol WMT. It is an extremely large retail-company with about 2.3 million employees.
If we plot the P/Sales ratio versus the mean annualized return, we see that the red curve fits very poorly. There seems to be several separate trends in the data, and the blue shades indicate that the trends belong to different periods in time. But more research into the company's financial history would be needed to uncover the reason for this, perhaps it is because of significantly different sales-growth, profit margins, etc.
End of explanation
"""
|
vinitsamel/udacitydeeplearning | autoencoder/Convolutional_Autoencoder_Solution.ipynb | mit | %matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
"""
Explanation: Convolutional Autoencoder
Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data.
End of explanation
"""
# Placeholders for the input images and reconstruction targets,
# both shaped (batch, height, width, channels).
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
# Three conv + max-pool stages compress 28x28x1 down to a 4x4x8 code.
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8
### Decoder
# Nearest-neighbor upsampling followed by convolutions (instead of
# transposed convolutions) to avoid checkerboard artifacts.
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
# Raw (pre-sigmoid) reconstruction, fed to the logits-based loss below.
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
# Now 28x28x1
# Sigmoid squashes pixel values into [0, 1] for display.
decoded = tf.nn.sigmoid(logits, name='decoded')
# Per-pixel cross-entropy on logits (numerically stabler than applying
# sigmoid first), averaged into a scalar training cost.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
"""
Explanation: Network Architecture
The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below.
<img src='assets/convolutional_autoencoder.png' width=500px>
Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughly 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data.
What's going on with the decoder
Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see transposed convolution layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but in reverse. A stride in the input layer results in a larger stride in the transposed convolution layer. For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a transposed convolution layer. The TensorFlow API provides us with an easy way to create the layers, tf.nn.conv2d_transpose.
However, transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling.
Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by a factor of 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor.
End of explanation
"""
# Train the autoencoder: each image serves as both input and target
# (identity reconstruction), then visualize test reconstructions.
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Reshape the flat 784-vector back into a 28x28x1 image.
        imgs = batch[0].reshape((-1, 28, 28, 1))
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
                                                         targets_: imgs})
        # NOTE: this prints once per batch, not once per epoch.
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
# Plot 10 test images (top row) above their reconstructions (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
"""
Explanation: Training
As before, here we'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays.
End of explanation
"""
# Denoising autoencoder: same architecture as above but with deeper
# feature maps (32-32-16), since denoising is a harder task.
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x16
### Decoder
# Nearest-neighbor upsample + conv mirrors the encoder back to 28x28.
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
# Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')
# Cross-entropy on logits, averaged to a scalar cost for Adam.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(0.001).minimize(cost)
# Train the denoising autoencoder: noisy images in, clean images out.
sess = tf.Session()
epochs = 100
batch_size = 200
# Sets how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        # Get images from the batch
        imgs = batch[0].reshape((-1, 28, 28, 1))
        # Add random Gaussian noise to the input images
        noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
        # Clip the images to be between 0 and 1 (valid pixel range)
        noisy_imgs = np.clip(noisy_imgs, 0., 1.)
        # Noisy images as inputs, original images as targets
        batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
                                                         targets_: imgs})
        # NOTE: this prints once per batch, not once per epoch.
        print("Epoch: {}/{}...".format(e+1, epochs),
              "Training loss: {:.4f}".format(batch_cost))
"""
Explanation: Denoising
As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practice. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images.
Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before.
Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers.
End of explanation
"""
# Visualize denoising quality: noisy test images (top row) above the
# network's denoised reconstructions (bottom row).
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
# Corrupt the test images with the same noise process used in training.
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
"""
Explanation: Checking out the performance
Here I'm adding noise to the test images and passing them through the autoencoder. It does a surprisingly good job of removing the noise, even though it's sometimes difficult to tell what the original number is.
End of explanation
"""
|
prk327/CoAca | 5__Merging_Concatenating.ipynb | gpl-3.0 | # loading libraries and reading the data
import numpy as np
import pandas as pd
market_df = pd.read_csv("./global_sales_data/market_fact.csv")
customer_df = pd.read_csv("./global_sales_data/cust_dimen.csv")
product_df = pd.read_csv("./global_sales_data/prod_dimen.csv")
shipping_df = pd.read_csv("./global_sales_data/shipping_dimen.csv")
orders_df = pd.read_csv("./global_sales_data/orders_dimen.csv")
"""
Explanation: Merging and Concatenating Dataframes
In this section, you will merge and concatenate multiple dataframes. Merging is one of the most common operations you will do, since data often comes in various files.
In our case, we have sales data of a retail store spread across multiple files. We will now work with all these data files and learn to:
* Merge multiple dataframes using common columns/keys using pd.merge()
* Concatenate dataframes using pd.concat()
Let's first read all the data files.
End of explanation
"""
# Already familiar with market data: Each row is an order
market_df.head()
# Customer dimension table: Each row contains metadata about customers
customer_df.head()
# Product dimension table
product_df.head()
# Shipping metadata
shipping_df.head()
# Orders dimension table
orders_df.head()
"""
Explanation: Merging Dataframes Using pd.merge()
There are five data files:
1. The market_fact table contains the sales data of each order
2. The other 4 files are called 'dimension tables/files' and contain metadata about customers, products, shipping details, order details etc.
If you are familiar with star schemas and data warehouse designs, you will note that we have one fact table and four dimension tables.
End of explanation
"""
# Merging the dataframes (inner join on the shared key).
# Note that Cust_id is the common column/key, which is provided to the 'on' argument.
# how = 'inner' makes sure that only the customer ids present in both dfs are included in the result.
df_1 = pd.merge(market_df, customer_df, how='inner', on='Cust_id')
df_1.head()
# Now, you can subset the orders made by customers from the 'Corporate' segment.
df_1.loc[df_1['Customer_Segment'] == 'CORPORATE', :]
# Example 2: Select all orders from product category = office supplies and from the corporate segment.
# We first need to merge in the product metadata (product_df) on Prod_id.
df_2 = pd.merge(df_1, product_df, how='inner', on='Prod_id')
df_2.head()
# Boolean indexing with & combines both conditions row-wise.
df_2.loc[(df_2['Product_Category']=='OFFICE SUPPLIES') & (df_2['Customer_Segment']=='CORPORATE'),:]
"""
Explanation: Merging Dataframes
Say you want to select all orders and observe the Sales of the customer segment Corporate. Since customer segment details are present in the dataframe customer_df, we will first need to merge it with market_df.
End of explanation
"""
# Merging shipping_df
df_3 = pd.merge(df_2, shipping_df, how='inner', on='Ship_id')
df_3.shape
# Merging the orders table to create a master df
master_df = pd.merge(df_3, orders_df, how='inner', on='Ord_id')
master_df.shape
master_df.head()
"""
Explanation: Similary, you can merge the other dimension tables - shipping_df and orders_df to create a master_df and perform indexing using any column in the master dataframe.
End of explanation
"""
# dataframes having the same columns
df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
'Age': ['34', '31', '22', '33'],
'Gender': ['M', 'M', 'F', 'M']}
)
df2 = pd.DataFrame({'Name': ['Akhil', 'Asha', 'Preeti'],
'Age': ['31', '22', '23'],
'Gender': ['M', 'F', 'F']}
)
df1
df2
# To concatenate them, one on top of the other, you can use pd.concat
# The first argument is a sequence (list) of dataframes
# axis = 0 indicates that we want to concat along the row axis
pd.concat([df1, df2], axis = 0)
# A useful and intuitive alternative to concat along the rows is the append() function
# It concatenates along the rows
df1.append(df2)
"""
Explanation: Similary, you can perform left, right and outer merges (joins) by using the argument how = 'left' / 'right' / 'outer'.
Concatenating Dataframes
Concatenation is much more straightforward than merging. It is used when you have dataframes having the same columns and want to append them (pile one on top of the other), or having the same rows and want to append them side-by-side.
Concatenating Dataframes Having the Same columns
Say you have two dataframes having the same columns, like so:
End of explanation
"""
df1 = pd.DataFrame({'Name': ['Aman', 'Joy', 'Rashmi', 'Saif'],
'Age': ['34', '31', '22', '33'],
'Gender': ['M', 'M', 'F', 'M']}
)
df1
df2 = pd.DataFrame({'School': ['RK Public', 'JSP', 'Carmel Convent', 'St. Paul'],
'Graduation Marks': ['84', '89', '76', '91']}
)
df2
# To join the two dataframes, use axis = 1 to indicate joining along the columns axis
# The join is possible because the corresponding rows have the same indices
pd.concat([df1, df2], axis = 1)
"""
Explanation: Concatenating Dataframes Having the Same Rows
You may also have dataframes having the same rows but different columns (and having no common columns). In this case, you may want to concat them side-by-side. For e.g.:
End of explanation
"""
# Teamwise stats for IPL 2018
IPL_2018 = pd.DataFrame({'IPL Team': ['CSK', 'SRH', 'KKR', 'RR', 'MI', 'RCB', 'KXIP', 'DD'],
'Matches Played': [16, 17, 16, 15, 14, 14, 14, 14],
'Matches Won': [11, 10, 9, 7, 6, 6, 6, 5]}
)
# Set the 'IPL Team' column as the index to perform arithmetic operations on the other rows using the team as reference
IPL_2018.set_index('IPL Team', inplace = True)
IPL_2018
# Similarly, we have the stats for IPL 2017
IPL_2017 = pd.DataFrame({'IPL Team': ['MI', 'RPS', 'KKR', 'SRH', 'KXIP', 'DD', 'GL', 'RCB'],
'Matches Played': [17, 16, 16, 15, 14, 14, 14, 14],
'Matches Won': [12, 10, 9, 8, 7, 6, 4, 3]}
)
IPL_2017.set_index('IPL Team', inplace = True)
IPL_2017
# Simply add the two DFs using the add opearator
Total = IPL_2018 + IPL_2017
Total
"""
Explanation: Note that you can also use the pd.concat() method to merge dataframes using common keys, though here we will not discuss that. For simplicity, we have used the pd.merge() method for database-style merging and pd.concat() for appending dataframes having no common columns.
Performing Arithmetic Operations on two or more dataframes
We can also perform simple arithmetic operations on two or more dataframes. Below are the stats for IPL 2018 and 2017.
End of explanation
"""
# The fill_value argument inside the df.add() function replaces all the NaN values in the two dataframes w.r.t. each other with zero.
Total = IPL_2018.add(IPL_2017, fill_value = 0)
Total
"""
Explanation: Notice that there are a lot of NaN values. This is because some teams which played in IPL 2017 were not present in IPL 2018. In addition, there were also new teams present in IPL 2018. We can handle these NaN values by using df.add() instead of the simple add operator. Let's see how.
End of explanation
"""
# Creating a new column - 'Win Percentage'
Total['Win Percentage'] = Total['Matches Won']/Total['Matches Played']
Total
# Sorting to determine the teams with most number of wins. If the number of wins of two teams are the same, sort by the win percentage.
Total.sort_values(by = (['Matches Won', 'Win Percentage']), ascending = False)
"""
Explanation: Also notice how the resultant dataframe is sorted by the index, i.e. 'IPL Team' alphabetically.
End of explanation
"""
|
erinspace/share_tutorials | 2_Complex_Queries_Basic_Visualization_py3.ipynb | apache-2.0 | # Json library parses JSON from strings or files. The library parses JSON into a Python dictionary or list.
# It can also convert Python dictionaries or lists into JSON strings.
# https://docs.python.org/2.7/library/json.html
import json
# Requests library allows you to send organic, grass-fed HTTP/1.1 requests, no need to manually add query strings
# to your URLs, or to form-encode your POST data. Docs: http://docs.python-requests.org/en/master/
import requests
# This takes the URL and puts it into a variable (so we only need to ever reference this variable,
# and so we don't have to repeat adding this URL when we want to work with the data)
SHARE_API = 'https://staging-share.osf.io/api/search/abstractcreativework/_search'
# Helper that POSTs a JSON-encoded elasticsearch query to a SHARE endpoint
def query_share(url, query):
    """Send *query* (a dict) as a JSON POST body to *url* and return the decoded JSON reply.

    Parameters
    ----------
    url : str
        Full URL of the SHARE search endpoint.
    query : dict
        Elasticsearch-style query body; serialized with json.dumps.
    """
    payload = json.dumps(query)
    response = requests.post(
        url,
        headers={'Content-Type': 'application/json'},
        data=payload,
        # NOTE(review): verify=False disables TLS certificate checking. Tolerable for
        # this staging-server tutorial, but do not copy into production code.
        verify=False,
    )
    return response.json()
# Print the total hit count followed by a 1-based numbered list of result titles
def print_numbered_results(results):
    """Pretty-print an elasticsearch response dict: total hits, then one numbered title per line."""
    page_hits = results['hits']['hits']
    print(
        'There are {} total results and {} results on this page'.format(
            results['hits']['total'],
            len(page_hits)
        )
    )
    print('---------------')
    for number, hit in enumerate(page_hits, start=1):
        print('{}. {}'.format(number, hit['_source']['title']))
"""
Explanation: Complex Queries and Basic Visualization
This notebook will cover how to make more complex queries, using both basic HTTP requests and using sharepa - the SHARE parsing and analysis library.
We'll also go over aggregations, or queries that will return summary statistics about the whole dataset. We'll use those aggregations to make some simple data visualizations using pandas and matplotlib.
Setup
Here we'll define a helper function and specify the SHARE API url that we'll use for querying.
We'll also define another helper function to nicely print out our results.
End of explanation
"""
# We are setting up a query for items in the SHARE dataset that have the keyword "frogs"
# (an elasticsearch "query_string" query; the API returns 10 results per page by default)
basic_query = {
    "query": {
        "query_string": {
            "query": "frogs"
        }
    }
}
#this puts the results of querying SHARE_API with what we outlined in basic_query (frogs)
query_results = query_share(SHARE_API, basic_query)
#print out the numbered list of the results
print_numbered_results(query_results)
"""
Explanation: Complex Queries
Pagination
One request to the SHARE API will return just 10 results by default. To get more results, you can use the "size" parameter in your request, or paginate through the results you get back.
End of explanation
"""
# We are setting up a query for items in the SHARE dataset that have the keyword "frogs" but limiting the
# results to 20 items
basic_query = {
    "query": {
        "query_string": {
            "query": "frogs"
        }
    },
    "size": 20  # overrides elasticsearch's default page size of 10
}
# this puts the results of querying SHARE_API with what we outlined in basic_query (frogs)
query_results = query_share(SHARE_API, basic_query)
# print out the numbered list of the results
print_numbered_results(query_results)
"""
Explanation: We can get more results either by changing the number of results returned, or by paginating through the results.
First, we'll return 20 results by specifying the size in our query.
End of explanation
"""
# We are setting up a query for items in the SHARE dataset that have the keyword "frogs"
basic_query = {
    "query": {
        "query_string": {
            "query": "frogs"
        }
    }
}
# creates a list of the first 10 results from the search query "frogs"
query_results = query_share(SHARE_API, basic_query)
# print the results of the search we made in a numbered list
print_numbered_results(query_results)
# make it visually pretty and readable for us humans
print('------------------------------------------')
print('*** Making another query for the next page ***')
print('*** These next titles will be different! ***')
print('------------------------------------------')
basic_query['from'] = 10 # Add the 'from' parameter (a zero-based offset) to pick up at the next page of results
# creates a list of the next 10 results
query_results = query_share(SHARE_API, basic_query)
# print the results of the search we made in a numbered list
print_numbered_results(query_results)
"""
Explanation: We can also paginate through results by specifying the place to start in all of the results.
End of explanation
"""
# Print a sharepa search's total count and a 1-based numbered listing of the current page
def print_numbered_sharepa_results(search_obj):
    """Execute a sharepa ShareSearch and pretty-print its hits as a numbered list."""
    page = search_obj.execute()
    print(
        'There are {} total results and {} results on this page'.format(
            search_obj.count(),
            len(page.hits)
        )
    )
    print('---------------')
    for number, hit in enumerate(page.hits, start=1):
        print('{}. {}'.format(number, hit['title']))
# Sharepa is a python client for browsing and analyzing SHARE data specifically using elasticsearch querying.
# We can use this to aggregate, graph, and analyze the data.
# Helpful Links:
# https://github.com/CenterForOpenScience/sharepa
# https://pypi.python.org/pypi/sharepa
# here, we import the specific function from Sharepa called ShareSearch and pretty_print
from sharepa import ShareSearch
from sharepa.helpers import pretty_print
# we are creating a new local search!
frogs_search = ShareSearch()
# this sets up what we will actually search for -- keyword "frogs"
frogs_search = frogs_search.query(
'query_string',
query='frogs'
)
# print the results of the search we made for "frogs" keyword in a numbered list
print_numbered_sharepa_results(frogs_search)
# print the 10th - 20th results of the search we made for "frogs" keyword in a numbered list
print_numbered_sharepa_results(frogs_search[10:20])
"""
Explanation: Pagination with sharepa
You can also use sharepa to paginate through all of the results in your query, and to access slices of your query at any time. The ShareSearch object returns a generator that you can use to access all results, using slices.
First, we'll redefine our helper function for nicer printing with data returned from sharepa.
End of explanation
"""
# this aggregates the number of documents that have no tags, per source, using query boolean (not tags) while also
# grabbing all sources in the aggregations: sources below
missing_tags_aggregation = {
    "query": {
        "bool": {
            "must_not": {
                "exists": {
                    "field": "tags"
                }
            }
        }
    },
    "aggregations": {
        "sources": {
            "terms": {
                "field": "sources", # A field where the SHARE source is stored
                "min_doc_count": 0,
                "size": 0 # Will return all sources, regardless if there are results
            }
        }
    }
}
# run the aggregation; the raw response contains per-source bucket counts
results_without_tags = query_share(SHARE_API, missing_tags_aggregation)
# pull out the list of {key, doc_count} buckets, one per source
missing_tags_counts = results_without_tags['aggregations']['sources']['buckets']
# this prints out the number of documents with missing tags, separated by sources
for source in missing_tags_counts:
    print('{} has {} documents without tags'.format(source['key'], source['doc_count'], ))
"""
Explanation: Aggregations
While searching for individual results is useful, sharepa also lets you make aggregation queries that give you results across the entirety of the SHARE dataset at once. This is useful if you're curious about the completeness of data sets.
For example, we can find the number of documents per source that are missing tags.
End of explanation
"""
# this does the same as above, but adds the percentage of documents missing tags from the total number of documents
# per source (a significant_terms aggregation with the "percentage" heuristic)
no_tags_query = {
    "query": {
        "bool": {
            "must_not": {
                "exists": {
                    "field": "tags"
                }
            }
        }
    },
    "aggs": {
        "sources":{
            "significant_terms":{
                "field": "sources", # A field where the SHARE source is stored
                "min_doc_count": 0,
                "size": 0, # Will return all sources, regardless if there are results
                "percentage": {} # This will make the "score" parameter a percentage
            }
        }
    }
}
# run the query against the API; the interesting part is in the aggregations
docs_with_no_tags_results = query_share(SHARE_API, no_tags_query)
# pull out the per-source buckets (each has key, score, doc_count and bg_count)
docs_with_no_tags = docs_with_no_tags_results['aggregations']['sources']['buckets']
# this prints out the number of documents with missing tags, separated by sources, with the percentage that makes up
# the total number of documents for each source (doc_count = matches, bg_count = all docs of that source)
for source in docs_with_no_tags:
    print(
        '{}% (or {}/{}) of documents from {} have no tags'.format(
            format(source['score']*100, '.2f'),
            source['doc_count'],
            source['bg_count'],
            source['key']
        )
    )
"""
Explanation: This information isn't terribly useful if we don't also know how many documents are in each source.
Let's get that information as well, along stats for what percentage of documents from each source are missing titles.
We'll do this with an elasticsearch "sigificant terms" aggregation. We're only interested in results that have 1 document or more, meaning all documents from the other sources have titles.
End of explanation
"""
# yay! creating another new search!
no_language_search = ShareSearch()
# this sets up our search query: all documents with no language field
no_language_search = no_language_search.query(
    'bool',
    must_not={"exists": {"field": "language"}}
)
# attach a significant_terms aggregation so we also get a per-source percentage score
no_language_search.aggs.bucket(
    'sources', # Every aggregation needs a name
    'significant_terms', # There are many kinds of aggregations
    field='sources', # We store the source of a document in its type, so this will aggregate by source
    min_doc_count=1,
    percentage={},
    size=0
)
"""
Explanation: Aggregations with sharepa
You can also use sharepa to do aggregations.
Let's make a sharepa search object that will give us the number of documents per sourcethat don't have language specified.
End of explanation
"""
# print (prettily!) the elasticsearch query body this search *will send* -- not its results
pretty_print(no_language_search.to_dict())
# here we grab the results of items that have no language field + their sources and significant terms
aggregated_results = no_language_search.execute()
# this prints out the percentage of items that don't have a language field
for source in aggregated_results.aggregations['sources']['buckets']:
    print(
        '{}% of documents from {} do not have language'.format(
            format(source['score']*100, '.2f'),
            source['key']
        )
    )
"""
Explanation: We can see which query is actually going to be sent to elasticsearch by printing out the query. This is very similar to the queries we were defining by hand up above.
End of explanation
"""
# creating another new search!
top_tag_search = ShareSearch()
# this sets up our search query with the aggregations
top_tag_search.aggs.bucket(
    'tagsTermFilter', # Every aggregation needs a name
    'terms', # There are many kinds of aggregations
    field='tags', # aggregate over the documents' tags, counting how often each tag occurs
    min_doc_count=1,
    exclude= "of|and|or", # regex of stop-words to drop from the tag buckets
    size=10 # only the 10 most frequent tags
)
# pretty_print(top_tag_search.to_dict())
# this executes the search as we've outlined it above
top_tag_results_executed = top_tag_search.execute()
# this places the results of the search into this dictionary
top_tag_results = top_tag_results_executed.aggregations.tagsTermFilter.to_dict()['buckets']
# this prints out our search results (prettily)
pretty_print(top_tag_results)
"""
Explanation: Top tags
Let's do an elasticsearch query to find out what the most used tags are used in the dataset across all sources.
End of explanation
"""
# Pandas is a python library that is used for data manipulation and analysis -- good for numbers + time series.
# Pandas gives us some extra data structures (arrays are data structures, for example) which is nice
# We are calling Pandas pd by using the "as" -- locally, we know Pandas as pd
# Helpful Links:
# https://en.wikipedia.org/wiki/Pandas_(software)
# http://pandas.pydata.org/
import pandas as pd
# this transforms our results (a list of {key, doc_count} buckets) into a dataframe
top_tags_dataframe = pd.DataFrame(top_tag_results)
# this prints out our dataframe -- looks like a nice table!
# (a bare expression on the last line of a notebook cell is rendered as the cell's output)
top_tags_dataframe
# Matplot lib is a a python 2D plotting library which produces publication quality figures in a variety of hardcopy
# formats and interactive environments across platforms.
# Read more about Matplotlib here: http://matplotlib.org/
from matplotlib import pyplot
# this is used specifically with iPython notebooks to display the matplotlib chart or graph in the notebook
%matplotlib inline
# this takes our results dataframe from the cell above and plots it as a bar chart
# (x='key' is the tag name, y='doc_count' the number of documents carrying it)
top_tags_dataframe.plot(kind='bar', x='key', y='doc_count')
# this prints out the bar chart we just made!
pyplot.show()
"""
Explanation: Basic Plotting
Sharepa has some basic functions to get you started making plots using matplotlib and pandas.
Creating a dataframe from sharepa data
Raw sharepa data is in the same format as elasticsearch results, represented as a nested structure. To convert the data into a format that pandas can recognize, we have to convert it into a dataframe.
Let's take our top tags aggregation, make it into a pandas data frame, and plot a bar graph. Then, we'll plot the results.
End of explanation
"""
# from the SHAREPA library, we import this function to transform a bucket into a dataframe so we can plot it!
from sharepa import bucket_to_dataframe
# creating a new search!
all_results = ShareSearch()
# creating our search query!
all_results = all_results.query(
    'query_string',  # Type of query, will accept a lucene query string
    query='*',  # '*' together with analyze_wildcard matches every document in the index
    analyze_wildcard=True  # This will make elasticsearch pay attention to the asterisk (which matches anything)
)
# Lucene supports fielded data. When performing a search you can either specify a field, or use the default field.
all_results.aggs.bucket(
    'sources',  # Every aggregation needs a name
    'terms',  # There are many kinds of aggregations, terms is a pretty useful one though
    field='sources',  # We store the source of a document in its type, so this will aggregate by source
    size=0,  # These are just to make sure we get numbers for all the sources, to make it easier to combine graphs
    min_doc_count=0
)
# this executes our search!
all_results = all_results.execute()
# this uses that function we imported above to transform our aggregated search into a dataframe so we can plot it!
all_results_frame = bucket_to_dataframe('# documents by source', all_results.aggregations.sources.buckets)
# this sorts the dataframe by the number of documents by source (descending order)
# BUG FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in 0.20;
# sort_values() is the supported equivalent.
all_results_frame_sorted = all_results_frame.sort_values(by='# documents by source', ascending=False)
# this creates a bar chart that displays the first 30 results
all_results_frame_sorted[:30].plot(kind='bar')
"""
Explanation: Complex Queries and Dataframes
Let's plot the number of document that each source has. We'll limit it to the top 30 sources to make sure that the graph is readable. Here we'll use the sharepa helper function bucket_to_dataframe()
End of explanation
"""
# Creating a pie graph using the first 10 items in the data frame with no legend
# (slice [:10] keeps the chart readable; y= selects which column supplies the wedge sizes)
all_results_frame_sorted[:10].plot(kind='pie', y="# documents by source", legend=False)
"""
Explanation: We can choose different types of plots to generate. Here, we'll make a pie chart of the data sources with the top 10 most results.
End of explanation
"""
|
Olsthoorn/TransientGroundwaterFlow | Assignment/VScode/AssJan2019.ipynb | gpl-3.0 | import numpy as np
import matplotlib.pyplot as plt
from scipy.special import exp1 # Theis well function
from scipy.special import erfc
# import the necessary fucntionality
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import exp1 as W # Theis well function
"""
Explanation: Assignment Jan 2019. A building pit next to a river
:author: T.N.Olsthoorn, 2019-01-04, june 2022
Problem statement
A large construction is to be realized next to a river that is in direct contact with the aquifer next to it. The building pit measures 50x50 m and river side is at 400 m distance from the river shore.
Transmissivity and storage coefficient are given: $kD=900\,\mathrm{m^{2}/d}$, $S=0.25$.
The drawdown everywhere in the building pit must be at least 5 m, to be reached within one month of pumping.
The pumping will continue after this month for 5 more months during which the drawdown is to be maintained. However, the pumping can be reduced after the first month. Adjust the pumping once per month, such that at the end of each month the drawdown fulfils the required 5 m.
After 6 months, pumping is stopped, so that the water levels can restore.
Questions
On which two corners of the building pit should you place the two extraction wells to have most effect.
Find the most critical point and make sure that the drawdown is as required at that point.
Show the extraction as a function of time from the start until one year after the stop. Also plot the drawdown at the critical location for this period.
Compute as a function of time the flow from the river into the groundwater system. It is assumed that the groundwater head is initially uniform and equal to the river stage (water level in the river). Do this for the average flow during the 6 months of building-pit operation (ignore the variation in the extraction for simplicity).
How much time is required after stopping until about 90% of the drawdown has disappeared?
After exactly 3 months, the water level in the river rises suddenly by 1 m and stays so during one month, after which it suddenly returns to its original level.
To what extent does this wave affect the water level in the building pit if no measure is taken?
What must be the extraction during this month to guarantee that the building pit fulfills the required 5 m drawdown relative to the original water level? If both effects do not overlap, say so, and explain what you could do as building-pit owner to better counteract the effect of the wave in the river stage on the head below the building pit.
If the river is influenced by sea tide, such that its level fluctuates twice a day between +1 and -1 m relative to the average value. How does this tide influence the required pumping? Is the location of the most critical point still the same?
How much is the delay between the tide in the river and the fluctuation at the critical point in the building pit?
Hints
Work out the assignment in this Jupyter notebook. Take some time to become familiar with it. There is a tremendous amount of help on the internet to get you going. The site github.com/Olsthoorn/TransientGroundwaterFlow holds numerous examples from the syllabus in the form of jupyter notebooks.
Also refer to the notebooks for the second year students of the TUDelft by Mark bakker (search for Bakker exploratory computing to find them).
You will gain some experience with the Notebooks (see their help)
a) with python
b) with numpy
c) with functions in scipy
Make sure your assignment is a self-contained document, that you could also export as html or pdf for sharing to those who do not have python installed.
Modules you will need are imported here
End of explanation
"""
def newfig(title='?', xlabel='?', ylabel='?', xlim=None, ylim=None, xscale=None, yscale=None, figsize=(10, 8),
           fontsize=16):
    """Create one matplotlib Axes with title, axis labels, optional limits/scales, a grid
    and a uniform font size applied to title, labels and tick labels. Returns the Axes.
    """
    named_sizes = ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large']
    assert isinstance(fontsize, int) or fontsize in named_sizes, \
        "fontsize not int and not in [{}]".format(', '.join(named_sizes))
    _, ax = plt.subplots(figsize=figsize)
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # Optional settings are applied only when given, leaving matplotlib defaults otherwise.
    if xlim:
        ax.set_xlim(xlim)
    if ylim:
        ax.set_ylim(ylim)
    if xscale:
        ax.set_xscale(xscale)
    if yscale:
        ax.set_yscale(yscale)
    ax.grid()
    text_items = [ax.title, ax.xaxis.label, ax.yaxis.label]
    text_items += ax.get_xticklabels() + ax.get_yticklabels()
    for text_item in text_items:
        text_item.set_fontsize(fontsize)
    return ax
def newfigs(layout, titles=['?'], xlabels=['?'], ylabels=['?'], xscales=None, yscales=None,
            xlim=None, ylim=None, sharex=None, sharey=None, fontsize=16, figsize=(10, 8)):
    """Create a grid of matplotlib Axes and apply per-axes titles, labels and scales.

    Parameters
    ----------
    layout : tuple (nrows, ncols)
        Shape of the subplot grid.
    titles, xlabels, ylabels : list of str
        One entry per axes (nrows * ncols entries in total).
    xscales, yscales : list or None
        Optional per-axes scale names ('log', ...); None leaves the defaults.
    xlim, ylim : tuple or None
        Axis limits applied to every axes.
    sharex, sharey :
        Passed through to plt.subplots.
    fontsize : int or named matplotlib size
        Applied to titles, axis labels and tick labels of every axes.
    figsize : tuple
        Figure size in inches.

    Returns
    -------
    The axes as returned by plt.subplots (possibly a 2-D array).
    """
    sizes = ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large']
    assert isinstance(fontsize, int) or fontsize in sizes, \
        "fontsize not int and not in [{}]".format(', '.join(sizes))
    # BUG FIX: validate layout *before* it is unpacked -- the original called
    # plt.subplots(*layout, ...) first, so a bad layout never reached the assertion.
    assert isinstance(layout, tuple) and len(layout) == 2, \
        'layout must be a 2-tuple (nrows, ncols) not {}'.format(str(layout))
    n_axes = int(np.prod(layout))
    if xscales is None: xscales = [None for _ in range(n_axes)]
    if yscales is None: yscales = [None for _ in range(n_axes)]
    for items_name, items in zip(['titles', 'xlabels', 'ylabels', 'xscales', 'yscales'],
                                 [titles, xlabels, ylabels, xscales, yscales]):
        # BUG FIX: the original message formatted len(np.prod(layout)), which raised
        # TypeError whenever the assertion actually failed.
        assert len(items) == n_axes, \
            'len({}) == {} != prod(layout) == {}'.format(items_name, len(items), n_axes)
    fig, axs = plt.subplots(*layout, sharex=sharex, sharey=sharey)
    fig.set_size_inches(figsize)
    # BUG FIX: for an (m, n) grid plt.subplots returns a 2-D array (and a bare Axes
    # for (1, 1)); flatten so we iterate over individual axes instead of rows.
    flat_axs = np.atleast_1d(axs).ravel()
    for ax, title, xlabel, ylabel, xscale, yscale in zip(flat_axs, titles, xlabels,
                                                         ylabels, xscales, yscales):
        ax.set_title(title)
        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        if xlim: ax.set_xlim(xlim)
        if ylim: ax.set_ylim(ylim)
        if xscale: ax.set_xscale(xscale)
        if yscale: ax.set_yscale(yscale)
        ax.grid()
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
    return axs
"""
Explanation: Convenient plotting functions
End of explanation
"""
# Aquifer properties and geometry. Coordinate convention: the river runs along y = 0,
# the building pit lies at negative y, between y = -L and y = -L - B.
# NOTE: this `W` shadows the `exp1 as W` alias imported in the first cell; harmless,
# because later cells use exp1 directly.
kD, S = 900, 0.25 # m2/d, (-)
W, B, L = 50., 50., 400
# Corner, edge-midpoint and center points of the building pit; 'crit' is the
# midpoint of the side farthest from the river.
pit = {'ul': {'x':-B/2, 'y':-L},
       'ur': {'x':+B/2, 'y':-L},
       'lr': {'x':+B/2, 'y':-L-B},
       'll': {'x':-B/2, 'y':-L-B},
       'mid': {'x': 0., 'y':-L},
       'ctr' : {'x': 0., 'y':-L-B/2},
       'crit': {'x': 0., 'y':-L-B},
      }
# The wells. This is also the answer to Q1: the extraction wells ('wel') sit on the two
# corners closest to the river; their mirror images ('mir', sign -1) across the river
# line keep the head at y = 0 fixed (image-well method).
wells ={1: {'type': 'wel', 'sign':+1, 'x':-B/2, 'y':-L},
        2: {'type': 'wel', 'sign':+1, 'x':+B/2, 'y':-L},
        3: {'type': 'mir', 'sign':-1, 'x':+B/2, 'y':+L},
        4: {'type': 'mir', 'sign':-1, 'x':-B/2, 'y':+L},
       }
# Monthly pumping regime (per well) and river-stage history for one year.
regime = dict()
regime['dates'] = np.array([np.datetime64(f'2020-{mm:02d}-01') for mm in np.arange(1, 13, dtype=int)])
regime['Q'] = np.array([-1325., -1200., -1150., -1100., -1100., -1100., 0., 0., 0., 0., 0., 0.,]) * 5 # Initially guessed pumping regime.
regime['dQ'] = np.diff(np.hstack((0, regime['Q'])))
regime['stage'] = [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,] # river stage
regime['dstage'] = np.diff(np.hstack((0, regime['stage']))) # river change stage.
tstart = regime['dates'][0]
tsim = np.arange(tstart, tstart + np.timedelta64(366, 'D'))
"""
Explanation: Setup the situation, define the wells and observation point
End of explanation
"""
# Q2/Q3: drawdown at three pit points by superposition of Theis solutions.
ax = newfig(f'Water level at critical points', 'time', 'water level [m]')
for kp in ['mid', 'ctr', 'crit']:
    x0, y0 = pit[kp]['x'], pit[kp]['y']
    s = np.zeros_like(tsim, dtype=float)
    # Superposition in time: every change of the pumping rate (dQ) starts a new
    # Theis drawdown at its change date.
    for dQ, change_date in zip(regime['dQ'], regime['dates']):
        t = (tsim[tsim > change_date] - change_date) / np.timedelta64(1, 'D')
        # Superposition in space: the two real wells plus their mirror wells
        # (sign -1), which keep the head along the river (y = 0) unchanged.
        for kw in wells:
            well = wells[kw]
            dx_, dy_ = x0 - well['x'], y0 - well['y']
            r2 = dx_ ** 2 + dy_ ** 2
            u = r2 * S / (4 * kD * t)
            s[tsim > change_date] += well['sign'] * dQ / (4 * np.pi * kD) * exp1(u)
    ax.plot(tsim, s, label=f'at point pit[{kp}]=({x0:.0f},{y0:.0f})')
if True:
    # Q4: `s` still holds the series of the last loop pass, i.e. the 'crit' point.
    # Find the last date (after pumping stopped) on which the drawdown still exceeds 0.5 m.
    criterion = np.logical_and(tsim > np.datetime64('2020-07-01'), s <= -0.5)
    ts, ss = tsim[criterion][-1], s[criterion][-1]
    ax.plot(ts, ss, 'ro', label='Moment when drawdown becomes less than 0.5 m')
    ax.annotate(f'Drawdown becomes less than 0.5 m\nat t={ts}', xy=(ts, ss), xycoords='data', xytext=(ts, -2), textcoords='data', arrowprops=dict(facecolor='black', linewidth=0.5, shrink=0.1))
ax.legend(loc='center')
# Second y-axis: the stepwise total extraction of the two real wells.
ax1 = ax.twinx()
ax1.set_ylabel('Extraction m3/d')
nwells = np.sum([wells[k]['type']=='wel' for k in wells])
ax1.step(regime['dates'], nwells * regime['Q'], 'r--', lw=2, where='post', label='Extraction')
ax1.legend(loc='lower right')
"""
Explanation: Fulfilling the required drawdown
Q1: The two wells must be placed on the two corners of the building pit closest to the river. But because of the river, we also have to place two mirror wells on the opposite side with the opposite sign. These wells have been defined in the dict 'wells' above.
Q2: The most critical point is the midpoint of the side of the building pit that is farthest from the river.
Q3: After some trial and error, the regime['Q'] establishes the desired steady drawdown after one month of pumping.
Q4: The date at which the drawdown is again less than 0.5 m is shown in the figure with the arrow and the thick dot.
End of explanation
"""
# Q6 (first part): head change in the pit caused by the sudden river-stage steps.
# Each step of size dstage produces s = dstage * erfc(y * sqrt(S / (4 kD t)))
# (1-D sudden-change solution for a half-space bounded by the river).
dstages = regime['dstage'][regime['dstage'] != 0]
change_dates = regime['dates' ][regime['dstage'] != 0]
ax = newfig("Effect of the river stage on points in the building pit", "time", "delta h [m]")
for k in ['mid', 'ctr', 'crit']:
    yp = pit[k]['y']
    s = np.zeros_like(tsim, dtype=float)
    dy = 0 - yp # distance from the river (the river runs along y=0)
    for dstage, change_date in zip(dstages, change_dates):
        t = (tsim[tsim > change_date] -change_date) / np.timedelta64(1, 'D')
        u = dy * np.sqrt(S / (4 * kD * t))
        s[tsim > change_date] += dstage * erfc(u)
    ax.plot(tsim, s, label=f'point {k}, y={yp:.0f}')
ax.legend()
"""
Explanation: River-stage change
Q6: What happens to the groundwater levels due to the change in the river stage?
End of explanation
"""
# Q6 (second part): repeat the drawdown computation including the river-stage wave,
# with the month-4 and month-5 extractions increased to compensate for it.
regime['Q'] = np.array([-1325., -1200., -1150., -1100., -1100., -1100., 0., 0., 0., 0., 0., 0.,]) * 5 # Initially guessed pumping regime.
# Extraction that compensates for the river stage change.
# Set both factors to 1 to see what happens without extra pumping.
regime['Q'][3] *= 1.10
regime['Q'][4] *= 1.05
regime['dQ'] = np.diff(np.hstack((0, regime['Q'])))
ax = newfig(f'Water level at critical points', 'time', 'water level [m]')
for kp in ['mid', 'ctr', 'crit']:
    xp, yp = pit[kp]['x'], pit[kp]['y']
    dy = 0 - yp # distance from the river (the river runs along y=0)
    s = np.zeros_like(tsim, dtype=float)
    # Superposition of Theis drawdowns for every pumping-rate change ...
    for dQ, change_date in zip(regime['dQ'], regime['dates']):
        t = (tsim[tsim > change_date] - change_date) / np.timedelta64(1, 'D')
        for kw in wells:
            well = wells[kw]
            dx_, dy_ = xp - well['x'], yp - well['y']
            r2 = dx_ ** 2 + dy_ ** 2
            u = r2 * S / (4 * kD * t)
            s[tsim > change_date] += well['sign'] * dQ / (4 * np.pi * kD) * exp1(u)
    # ... plus the head change caused by each sudden river-stage step.
    for dstage, change_date in zip(dstages, change_dates):
        t = (tsim[tsim > change_date] - change_date) / np.timedelta64(1, 'D')
        u = dy * np.sqrt(S / (4 * kD * t))
        s[tsim > change_date] += dstage * erfc(u)
    # BUG FIX: the label used x0, y0 -- stale variables left over from a previous
    # cell -- so every curve carried the same coordinates. Use this loop's xp, yp.
    ax.plot(tsim, s, label=f'at point pit[{kp}]=({xp:.0f},{yp:.0f})')
ax.legend(loc='center')  # BUG FIX: curve labels were set but the legend was never drawn
ax1 = ax.twinx()
ax1.set_ylabel('Extraction m3/d')
nwells = np.sum([wells[k]['type']=='wel' for k in wells])
ax1.step(regime['dates'], nwells * regime['Q'], 'r--', lw=2, where='post', label='Extraction')
ax1.legend(loc='lower right')
"""
Explanation: Impact of the month-long wave in the river stage
Q6: Compensate for the effect of the change of the river stage.
We should superimpose this change on the drawdown by the wells and adapt the well regime to compensate for this stage change.
We should generate an extra drawdown of 0.4 m at the end of month 4. For this the regime has to be adapated
End of explanation
"""
# Q7: sinusoidal river tide propagating into the aquifer:
# s(y, t) = A exp(-a y) sin(omega t - a y), with damping coefficient a = sqrt(omega S / (2 kD)).
A, T = 1.0, 0.5 # Amplitude (m), cycle time (d)
omega = 2 * np.pi / T
a = np.sqrt(omega * S / (2 * kD)) # [1/m]; 1/a is the characteristic penetration depth
show_times = np.arange(24, dtype=float) / 24 # 24 snapshots over one day
Y = np.linspace(0, 2 * L, 101) # keep sign positive, use negative Y only in the graph
ax =newfig("Effect of river tide on the adjacent groundwater", "distance from river[m]", "efect of tide")
env = A * np.exp(-a * Y) # exponentially decaying amplitude envelope
for show_time in show_times:
    s = env * np.sin(omega * show_time - a * Y )
    ax.plot(Y, s, label=f'tide at t={show_time * 24:.0f} h')
ax.plot(Y, +env, 'k', lw=2, label='top envelope')
ax.plot(Y, -env, 'k', lw=2, label='bottom env.')
ax.legend(loc='upper right')
plt.show()
"""
Explanation: Impact of river tide
The impact is shown below. The penetration depth of the tide is so small compared to the distance between the river and the building pit, that no measures have to be taken.
Of course, in the case of a lower frequency tide, like two-monthly extra high tides, and storm tides, the penetration depth will be larger and this compensation may become necessary.
The same is true for high river stages during wet periods that persist over longer times than only one month and with much higher values.
End of explanation
"""
# Q5: total flow induced from the river = integral of the specific discharge q_y across
# the river line (y = 0), evaluated by trapezoidal integration over the points Xr.
# (Two stray leftover lines recomputing dx_, dy_, r2 from stale loop variables of a
# previous cell were removed here; they had no effect.)
a = 5
xx = np.logspace(2, np.log10(a * L), 50)
Xr = np.hstack((-xx[::-1], 0, xx))[:, np.newaxis]  # integration points along the river
Yr = np.zeros_like(Xr)  # the river lies at y = 0

qy = np.zeros((len(Yr), len(tsim)))
qx = np.zeros_like(qy)
# Superposition of the transient specific discharge of all (real and mirror) wells.
for dQ, change_date in zip(regime['dQ'], regime['dates']):
    t = (tsim[tsim > change_date] - change_date) / np.timedelta64(1, 'D')
    for k in wells:
        well = wells[k]
        dx_, dy_ = Xr - well['x'], Yr - well['y']
        r2 = dx_ ** 2 + dy_ ** 2
        u = r2 * S / (4 * kD * t)
        qx[:, tsim > change_date] += well['sign'] * dQ / (2 * np.pi) * np.exp(-u) * dx_ / r2
        qy[:, tsim > change_date] += well['sign'] * dQ / (2 * np.pi) * np.exp(-u) * dy_ / r2

# Trapezoidal integration of the specific discharge along the river line.
Qx = np.sum(0.5 * (qx[:-1, :] + qx[1:, :]) * np.diff(Xr, axis=0), axis=0)
# BUG FIX: the original mixed components, averaging qy[:-1] with qx[1:];
# both trapezoid terms must come from qy.
Qy = np.sum(0.5 * (qy[:-1, :] + qy[1:, :]) * np.diff(Xr, axis=0), axis=0)

ax = newfig("Extraction from the river", "time", "Total extraction m3/d")  # typo 'rive' fixed
ax.plot(tsim, Qx, label='Qx m3/d (must be zero)')
ax.plot(tsim, Qy, label='Qy m3/d (= total induced from river)')
nwells = np.sum([wells[k]['type']=='wel' for k in wells])
ax.step(regime['dates'], nwells * regime['Q'], where='post', label='Total extraction by wells')
ax.legend(loc='lower right')
"""
Explanation: How much water is extracted from the river?
$$ Q_r = Q e^{-u} $$
$$ q_r = \frac Q {2 \pi r} e^{-u} $$
$$ q_x = \frac Q {2 \pi} e^{-u} \frac{x - x_w}{r^2} $$
$$ q_y = \frac Q {2 \pi} e^{-u} \frac{y - y_w}{r^2}$$
End of explanation
"""
|
Kaggle/learntools | notebooks/python/raw/ex_6.ipynb | apache-2.0 | from learntools.core import binder; binder.bind(globals())
from learntools.python.ex6 import *
print('Setup complete.')
"""
Explanation: You are almost done with the course. Nice job!
We have a couple more interesting problems for you before you go.
As always, run the setup code below before working on the questions.
End of explanation
"""
a = ""
length = ____
q0.a.check()
"""
Explanation: Let's start with a string lightning round to warm up. What are the lengths of the strings below?
For each of the five strings below, predict what len() would return when passed that string. Use the variable length to record your answer, then run the cell to check whether you were right.
0a.
End of explanation
"""
b = "it's ok"
length = ____
q0.b.check()
"""
Explanation: 0b.
End of explanation
"""
c = 'it\'s ok'
# Predict what len(c) returns (the backslash only escapes the quote) and replace ____ below.
length = ____
q0.c.check()
"""
Explanation: 0c.
End of explanation
"""
d = """hey"""
length = ____
q0.d.check()
"""
Explanation: 0d.
End of explanation
"""
e = '\n'
# Predict what len(e) returns ('\n' is a single escape sequence) and replace ____ below.
length = ____
q0.e.check()
"""
Explanation: 0e.
End of explanation
"""
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code
    """
    # TODO (learner): replace `pass` with your implementation.
    pass

# Check your answer
q1.check()

#%%RM_IF(PROD)%%
# Reference solution, used by the learntools checker; removed from the learner-facing notebook.
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code
    """
    return len(zip_code) == 5 and zip_code.isdigit()

q1.assert_check_passed()

#%%RM_IF(PROD)%%
# Deliberately incomplete solution (accepts e.g. 'abcde'); verifies the checker rejects it.
def is_valid_zip(zip_code):
    """Returns whether the input string is a valid (5 digit) zip code
    """
    return len(zip_code) == 5

q1.assert_check_failed()

#_COMMENT_IF(PROD)_
q1.hint()

#_COMMENT_IF(PROD)_
q1.solution()
"""
Explanation: 1.
There is a saying that "Data scientists spend 80% of their time cleaning data, and 20% of their time complaining about cleaning data." Let's see if you can write a function to help clean US zip code data. Given a string, it should return whether or not that string represents a valid zip code. For our purposes, a valid zip code is any string consisting of exactly 5 digits.
HINT: str has a method that will be useful here. Use help(str) to review a list of string methods.
End of explanation
"""
def word_search(doc_list, keyword):
    """
    Takes a list of documents (each document is a string) and a keyword. 
    Returns list of the index values into the original list for all documents 
    containing the keyword.

    Example:
    doc_list = ["The Learn Python Challenge Casino.", "They bought a car", "Casinoville"]
    >>> word_search(doc_list, 'casino')
    >>> [0]
    """
    # TODO (learner): replace `pass` with your implementation.
    pass

# Check your answer
q2.check()

#_COMMENT_IF(PROD)_
q2.hint()

#_COMMENT_IF(PROD)_
q2.solution()
"""
Explanation: 2.
A researcher has gathered thousands of news articles. But she wants to focus her attention on articles including a specific word. Complete the function below to help her filter her list of articles.
Your function should meet the following criteria:
Do not include documents where the keyword string shows up only as a part of a larger word. For example, if she were looking for the keyword “closed”, you would not include the string “enclosed.”
She does not want you to distinguish upper case from lower case letters. So the phrase “Closed the case.” would be included when the keyword is “closed”
Do not let periods or commas affect what is matched. “It is closed.” would be included when the keyword is “closed”. But you can assume there are no other types of punctuation.
End of explanation
"""
def multi_word_search(doc_list, keywords):
    """
    Takes list of documents (each document is a string) and a list of keywords.
    Returns a dictionary where each key is a keyword, and the value is a list of indices
    (from doc_list) of the documents containing that keyword
    >>> doc_list = ["The Learn Python Challenge Casino.", "They bought a car and a casino", "Casinoville"]
    >>> keywords = ['casino', 'they']
    >>> multi_word_search(doc_list, keywords)
    {'casino': [0, 1], 'they': [1]}
    """
    # TODO(learner): exercise stub -- the intended approach is to call
    # word_search (defined above) once per keyword.
    pass
# Check your answer
q3.check()
#_COMMENT_IF(PROD)_
q3.solution()
"""
Explanation: 3.
Now the researcher wants to supply multiple keywords to search for. Complete the function below to help her.
(You're encouraged to use the word_search function you just wrote when implementing this function. Reusing code in this way makes your programs more robust and readable - and it saves typing!)
End of explanation
"""
|
tpin3694/tpin3694.github.io | python/pandas_data_structures.ipynb | mit | import pandas as pd
"""
Explanation: Title: pandas Data Structures
Slug: pandas_data_structures
Summary: pandas Data Structures
Date: 2016-05-01 12:00
Category: Python
Tags: Data Wrangling
Authors: Chris Albon
Import modules
End of explanation
"""
floodingReports = pd.Series([5, 6, 2, 9, 12])
floodingReports
"""
Explanation: Series 101
Series are one-dimensional arrays (like R's vectors)
Create a series of the number of floodingReports
End of explanation
"""
floodingReports = pd.Series([5, 6, 2, 9, 12], index=['Cochise County', 'Pima County', 'Santa Cruz County', 'Maricopa County', 'Yuma County'])
floodingReports
"""
Explanation: Note that the first column of numbers (0 to 4) is the index.
Set county names to be the index of the floodingReports series
End of explanation
"""
floodingReports['Cochise County']
"""
Explanation: View the number of floodingReports in Cochise County
End of explanation
"""
floodingReports[floodingReports > 6]
"""
Explanation: View the counties with more than 6 flooding reports
End of explanation
"""
# Create a dictionary
fireReports_dict = {'Cochise County': 12, 'Pima County': 342, 'Santa Cruz County': 13, 'Maricopa County': 42, 'Yuma County' : 52}
# Convert the dictionary into a pd.Series, and view it
fireReports = pd.Series(fireReports_dict); fireReports
"""
Explanation: Create a pandas series from a dictionary
Note: when you do this, the dict's keys will become the series's index
End of explanation
"""
fireReports.index = ["Cochice", "Pima", "Santa Cruz", "Maricopa", "Yuma"]
fireReports
"""
Explanation: Change the index of a series to shorter names
End of explanation
"""
data = {'county': ['Cochice', 'Pima', 'Santa Cruz', 'Maricopa', 'Yuma'],
'year': [2012, 2012, 2013, 2014, 2014],
'reports': [4, 24, 31, 2, 3]}
df = pd.DataFrame(data)
df
"""
Explanation: DataFrame 101
DataFrames are like R's Dataframes
Create a dataframe from a dict of equal length lists or numpy arrays
End of explanation
"""
dfColumnOrdered = pd.DataFrame(data, columns=['county', 'year', 'reports'])
dfColumnOrdered
"""
Explanation: Set the order of the columns using the columns attribute
End of explanation
"""
dfColumnOrdered['newsCoverage'] = pd.Series([42.3, 92.1, 12.2, 39.3, 30.2])
dfColumnOrdered
"""
Explanation: Add a column
End of explanation
"""
del dfColumnOrdered['newsCoverage']
dfColumnOrdered
"""
Explanation: Delete a column
End of explanation
"""
dfColumnOrdered.T
"""
Explanation: Transpose the dataframe
End of explanation
"""
|
CalPolyPat/phys202-2015-work | assignments/project/Progress Report.ipynb | mit | import numpy as np
import matplotlib
from matplotlib import pyplot as plt
matplotlib.style.use('ggplot')
import IPython as ipynb
%matplotlib inline
"""
Explanation: An Exploration of Neural Net Capabilities
End of explanation
"""
# Plot the sigmoid response of a neuron with no bias and with a bias of 5.
z = np.linspace(-10, 10, 100)
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot(z, 1/(1+np.exp(-z)));
plt.xlabel("Input to Neuron")  # fixed typo: "Nueron" -> "Neuron"
plt.title("Sigmoid Response with Bias=0")
plt.ylabel("Sigmoid Response");
plt.subplot(1, 2, 2)
# exp(-z+5) == exp(-(z-5)): the firing threshold is shifted to z = 5.
plt.plot(z, 1/(1+np.exp(-z+5)));
plt.xlabel("Input to Neuron")  # fixed typo: "Nueron" -> "Neuron"
plt.title("Sigmoid Response with Bias=5")
plt.ylabel("Sigmoid Response");
"""
Explanation: Abstract
A neural network is a computational analogy to the methods by which humans think. Its design builds upon the idea of a neuron either firing or not firing based on some stimuli and learning whether or not it made the right choice. To allow
for richer results with less complicated networks, boolean response is replaced with a continuous analog, the sigmoid
function. The network learns by taking our definition of how incorrect they are in the form of a so-called cost function and find the most effective way to reduce the function to a minimum, i.e. be the least incorrect. It is ideal to minimize the number of training sessions that must be used to get a maximum accuracy due to computational cost and time. In this
project, the minimum number of training sets to reach a sufficient accuracy will be explored for multiple standard cost functions. As well, a new cost function may be explored along with a method for generating cost functions. And finally,
given a sufficient amount of time, the network will be tested with nonconformant input, in this case, scanned and
partitioned handwritten digits.
Base Question
Does it work?
Does it work well?
The first step in building a neural net is simply understanding and building the base algorithms. There are three things that define a network:
Shape
The shape of a network merely describes how many neurons there are and where they are. There are typically the locations that neurons live in: The Input Layer, The Hidden Layer, and The Output Layer. The Hidden Layer can be composed of more than one layer, but by convention, it is referred to as one layer. The Input Layer is significant because it takes the inputs. It typically does not do any discrimination before passing it along, but there is nothing barring that from occurring. The Output Layer produces a result. In most cases, the result still requires some interpretation, but is in its final form as far as the network is concerned. Each of the layers can have as many neurons as are needed but it is favorable to reduce the number to the bare minimum for both computational reasons and for accuracy.
Weights
Weights live in between individual neurons and dictate how much the decision made by a neuron in the layer before it matters to the next neurons decision. A good analogy might be that Tom(a neuron) has two friends, Sally(a neurette?) and Joe(also a neuron). They are good friends so Tom likes to ask Sally and Joe's opinion about decisions he is about to make. However, Joe is a bit crazy, likes to go out and party, etc. so Tom trusts Sally's opinion a bit more than Joe's. If Tom quantified how much he trusted Sally or Joe, that quantification would be called a weight.
Biases
Biases are tied to each neuron and its decision making process. A bias in the boolean sense acts as a threshold at which point a true is returned. In the continuous generalization of the boolean process, the bias corresponds to the threshold at which point a value above 0.5 is returned. Back to our analogy with Tom and his friends, a bias might constitute how strongly each person feels about their opinion on a subject. So when Tom asks Sally and Joe about their opinion about someone else, call her Julie, Sally responds with a fairly neutral response because she doesn't know Julie, so her bias is around 0. Joe, on the other hand, used to date Julie and they had a bad break up, so he responds quite negatively, and somewhat unintuitively, his bias is very high. (See the graph of the sigmoid function below with zero bias) In other words, he has a very high threshold for speaking positively about Julie.
End of explanation
"""
ipynb.display.Image("http://neuralnetworksanddeeplearning.com/images/tikz11.png")
"""
Explanation: So, how does it work?
There are three core algorithms behind every neural net: Feed Forward, Back Propagation/Error Computation, and Gradient Descent.
Feed Forward
The Feed Forward algorithm could be colloquially called the "Gimme an Answer" algorithm. It sends the inputs through the network and returns the outputs. We can break it down step by step and see what is really going on:
Inputs
Each input value is fed into the corresponding input nueron, that's it. In a more sophisticated network, some inputs could be rejected based on bias criterion, but for now we leave them alone.
Channels
Each input neuron is connected to every neuron in the first hidden layer through a channel, to see this visually, look at the diagram below. Each channel is given a weight that is multiplied by the value passed on by the input neuron and is then summed with all the channels feeding the same neuron and is passed into the hidden layer neuron. The channels can be thought of as pipes allowing water to flow from each input neuron to each hidden layer neuron. The weights in our network represent the diameter of these pipes(is it large or small). As well, pipes converge to a hidden layer neuron and dump all of their water into a basin representing the neuron.
Neurons
Once a value reaches a neuron that is not an input neuron, the value is passed through a sigmoid function similar to those above with the proper bias for that neuron. The sigmoid response is the value that gets passed on to the next layer of neurons.
Repeat
The Channels and Neurons steps are repeated through each layer until the final output is reached.
End of explanation
"""
ipynb.display.Image("http://blog.datumbox.com/wp-content/uploads/2013/10/gradient-descent.png")
"""
Explanation: Back Propagation/Error Computation
Back Propagation is one of the scary buzz words in the world of neural nets, it doesn't have to be so scary. I prefer to call it error computation to be more transparent because, in essence, that is what it does. Let's dig in!
Cost Function
The cost function is a major factor in how your network learns. It defines, numerically, how wrong your network is. The function itself is typically defined by some sort of difference of your networks output to the actual correct answer. Because it is a function of the output, it is also a function of every weight and bias in your network. This means that it could have potentially thousands of independant variables. In its simplest form, a cost function should have some quite definite properties: when the ouput is near the correct answer, the cost function should be near zero, a small change in any single weight or bias should result in a small change in the cost function, and the cost function must be non-negative everywhere.
Error Computation
Through a set of nifty equations which will not be shown here, once you have a cost function and take the gradient with respect to the output of said cost function, you are able to calculate a metric for the error of the output. Through some clever deductions based on the fact that a small change in any independent variable results in a small change in the cost function we can calculate that same metric for each independent variable. (That is the Back Propagation bit) You can then calculate, through further clever deductions, the partial derivative of the cost function with respect to each independent variable. The partial derivative of the cost function with respect to each variable will come in handy for when we do Gradient Descent.
Gradient Descent
Gradient Descent uses the fact that we want to minimize our cost function together with the idea of the gradient as the path of steepest descent.
Down the Mountain
The Gradient Descent uses the gradients we calculated in the Error Computation step and tells us how we should change our variables if we want to reach a minimum in the fastest way possible. The algorithm usess the fact that the gradient with respect to an independent variable represents the component of the vector pointing in the direction of most change in that variables dimension. Because even Euler couldn't imagine a thousand dimensional space, we draw some intuition from the familiar three dimensioanl case. Suppose that you are dropped at a random location on a mountain. Suppose further that you are blind.(or it is so foggy that you can't see anything) How do you find the fastest way to the bottom? Well, the only thing that you can do is sense the slope that seems to be the steepest and walk down it. But you are a mathemetician and have no grasp on estimating things, so you calculate the gradient with respect to your left-right direction and your front-back direction. You see that if you take a half step to the left and a quarter step forward you will move the furthest downwards. Wait! Why just one step? First of all, mountains are complicated surfaces and their slopes change from place to place so continuing to make the same steps may not take you the most downwards, or even downwards at all. Secondly, you are blind!(or it is really foggy) If you start running or jumping down the slope, you may overshoot a minimum and have to stop and turn around. In the actual gradient descent algorithm, the step size is represented by something called the learning rate. A step in the right direction is performed in the algorithm by reducing each individual variable by this learning constant multiplied by the gradient with respect to that particular variable. After doing this thousands of times, we find the local minimums of our cost funtion.
End of explanation
"""
|
kubernetes-client/python | examples/notebooks/create_deployment.ipynb | apache-2.0 | from kubernetes import client, config
"""
Explanation: How to create a Deployment
In this notebook, we show you how to create a Deployment with 3 ReplicaSets. These ReplicaSets are owned by the Deployment and are managed by the Deployment controller. We would also learn how to carry out RollingUpdate and RollBack to new and older versions of the deployment.
End of explanation
"""
config.load_kube_config()
apps_api = client.AppsV1Api()
"""
Explanation: Load config from default location
End of explanation
"""
deployment = client.V1Deployment()
"""
Explanation: Create Deployment object
End of explanation
"""
deployment.api_version = "apps/v1"
deployment.kind = "Deployment"
deployment.metadata = client.V1ObjectMeta(name="nginx-deployment")
"""
Explanation: Fill required Deployment fields (apiVersion, kind, and metadata)
End of explanation
"""
# Build the Deployment .spec with 3 replicas.
# NOTE(review): recent versions of the kubernetes python client require
# `selector` and `template` as constructor arguments to V1DeploymentSpec
# (and a selector matching the pod labels is mandatory for apps/v1) --
# confirm against the installed client version.
spec = client.V1DeploymentSpec()
spec.replicas = 3
"""
Explanation: A Deployment also needs a .spec section
End of explanation
"""
# Pod template: labels here are what a Deployment selector must match.
spec.template = client.V1PodTemplateSpec()
spec.template.metadata = client.V1ObjectMeta(labels={"app": "nginx"})
# NOTE(review): recent kubernetes python clients make `containers` a
# required constructor argument of V1PodSpec (it is filled in below in a
# later cell) -- confirm this instantiates cleanly on the client in use.
spec.template.spec = client.V1PodSpec()
"""
Explanation: Add Pod template in .spec.template section
End of explanation
"""
container = client.V1Container()
container.name="nginx"
container.image="nginx:1.7.9"
container. ports = [client.V1ContainerPort(container_port=80)]
spec.template.spec.containers = [container]
deployment.spec = spec
"""
Explanation: Pod template container description
End of explanation
"""
apps_api.create_namespaced_deployment(namespace="default", body=deployment)
"""
Explanation: Create Deployment
End of explanation
"""
deployment.spec.template.spec.containers[0].image = "nginx:1.9.1"
"""
Explanation: Update container image
End of explanation
"""
apps_api.replace_namespaced_deployment(name="nginx-deployment", namespace="default", body=deployment)
"""
Explanation: Apply update (RollingUpdate)
End of explanation
"""
apps_api.delete_namespaced_deployment(name="nginx-deployment", namespace="default", body=client.V1DeleteOptions(propagation_policy="Foreground", grace_period_seconds=5))
"""
Explanation: Delete Deployment
End of explanation
"""
|
cliburn/sta-663-2017 | scratch/Lecture09.ipynb | mit | %matplotlib inline
import seaborn as sns
sns.set_context('notebook', font_scale=1.5)
"""
Explanation: Machine Learning in Python
End of explanation
"""
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import RidgeClassifierCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
import pandas as pd
"""
Explanation: Illustration
An image classification example
Python-powered
opencv and scikit-image for feature extraction
keras for feature augmentation
scikit-learn for classification
flask for web application
plus some JavaScript for interactive web features
Types of learning
Unsupervised (clustering, density estimation)
Supervised (classification, regression)
Reinforcement (reward and punishment)
Objective of S/L
Predict outcome from features
y = outcome or label
X = vector of features
y = f(X, Θ) + error
Loss function = g(y, f(X, Θ))
Model evaluation
In-sample (training) and out-of-sample (test) errors
Cross validation
Holdout
K-fold
LOOCV
Note: Any step which uses label/outcome information must be included in cross-validation pipeline
S/L training pipeline
Raw data
Extract features from raw data
Normalize/scale features
Select features for use in model
Model selection/evaluation
S/L training process in scikit-learn
Consistent API for scikit-learn classes
fit
transform
predict
fit_transform for transformations
fit_predict for clustering
score for classification and regression
get_params
set_params
Feature extraction
Domain knowledge useful
Consider augmenting with external data sources
More specialized tools
From natural language
From images
From images/video
Image feature augmentation
From audio
Normalize/scale features
Necessary for methods based on measures of distance
Most commonly
Note: Must apply same scaling to training and test data
Feature selection
Note: Include feature selection in a pipeline
Model selection/evaluation
Example
This example is only meant to show the mechanics of using scikit-learn.
End of explanation
"""
iris = pd.read_csv('iris.csv')
iris.head()
sns.pairplot(iris, hue='Species')
pass
"""
Explanation: Get data
End of explanation
"""
# Feature matrix: the first four (numeric measurement) columns as an ndarray.
X = iris.iloc[:, :4].values
# Labels: the 5th column (Species) encoded as integer category codes.
y = iris.iloc[:, 4].astype('category').cat.codes.values
# Peek at the first three rows/labels (notebook display).
X[:3]
y[:3]
"""
Explanation: Feature extraction
Split labels and features as plain numpy arrays
End of explanation
"""
poly = PolynomialFeatures(2)
X_poly = poly.fit_transform(X)
X_poly[:3]
"""
Explanation: Generate polynomial (interaction) features
End of explanation
"""
scaler = StandardScaler()
X_poly_scaled = scaler.fit_transform(X_poly)
X_poly_scaled[:3]
"""
Explanation: Scale features to have zero mean and unit standard deviation
End of explanation
"""
selector = VarianceThreshold(threshold=0.1)
X_new = selector.fit_transform(X_poly_scaled)
X_poly_scaled.shape, X_new.shape
"""
Explanation: Select "useful" features
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X_new, y, random_state=1)
X_train[:3]
y_train[:3]
X_test[:3]
y_test[:3]
"""
Explanation: Split into training and test sets
End of explanation
"""
alphas = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 10]
clf = RidgeClassifierCV(alphas=alphas, cv=5)
clf.fit(X_train, y_train)
"""
Explanation: Train an estimator
End of explanation
"""
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
"""
Explanation: Evaluate estimator
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
pipe = Pipeline([
('polynomaial_features', PolynomialFeatures(2)),
('standard_scalar', StandardScaler()),
('feature_selection', VarianceThreshold(threshold=0.1)),
('classification', clf)
])
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print(classification_report(y_test, y_pred))
"""
Explanation: Putting it all together in a pipeline
End of explanation
"""
params = {'n_estimators': [5, 10, 25], 'max_depth': [1, 3, None]}
rf = RandomForestClassifier()
clf2 = GridSearchCV(rf, params, cv=5, n_jobs=-1)
pipe2 = Pipeline([
('polynomaial_features', PolynomialFeatures(2)),
('feature_selection', VarianceThreshold(threshold=0.1)),
('classification', clf2)
])
pipe2.fit(X_train, y_train)
y_pred2 = pipe2.predict(X_test)
print(classification_report(y_test, y_pred2))
"""
Explanation: Alternative pipeline
End of explanation
"""
classifier = pipe2.named_steps['classification']
classifier.best_params_
"""
Explanation: Getting detailed information from pipeline
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mohc/cmip6/models/sandbox-1/ocnbgchem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'sandbox-1', 'ocnbgchem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: MOHC
Source ID: SANDBOX-1
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:15
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
"""
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnotic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Decribe transport scheme if different than that of ocean model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
"""
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from explicit sediment model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
"""
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
"""
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particules
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
"""
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
"""
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation
"""
|
DistrictDataLabs/intro-to-nltk | NLTK.ipynb | mit | import nltk
nltk.download()
"""
Explanation: Introduction to NLP with NLTK
Natural Language Processing (NLP) is often taught at the academic level from the perspective of computational linguists. However, as data scientists, we have a richer view of the natural language world - unstructured data that by its very nature has latent information that is important to humans. NLP practitioners have benefited from machine learning techniques to unlock meaning from large corpora, and in this class we’ll explore how to do that particularly with Python and with the Natural Language Toolkit (NLTK).
NLTK is an excellent library for machine-learning based NLP, written in Python by experts from both academia and industry. Python allows you to create rich data applications rapidly, iterating on hypotheses. The combination of Python + NLTK means that you can easily add language-aware data products to your larger analytical workflows and applications.
Quick Overview of NLTK
NLTK stands for the Natural Language Toolkit and is written by two eminent computational linguists, Steven Bird (Senior Research Associate of the LDC and professor at the University of Melbourne) and Ewan Klein (Professor of Linguistics at Edinburgh University). NTLK provides a combination of natural language corpora, lexical resources, and example grammars with language processing algorithms, methodologies and demonstrations for a very pythonic "batteries included" view of Natural Language Processing.
As such, NLTK is perfect for research driven (hypothesis driven) workflows for agile data science. Its suite of libraries includes:
tokenization, stemming, and tagging
chunking and parsing
language modeling
classification and clustering
logical semantics
NLTK is a useful pedagogical resource for learning NLP with Python and serves as a starting place for producing production grade code that requires natural language analysis. It is also important to understand what NLTK is not:
Production ready out of the box
Lightweight
Generally applicable
Magic
NLTK provides a variety of tools that can be used to explore the linguistic domain but is not a lightweight dependency that can be easily included in other workflows, especially those that require unit and integration testing or other build processes. This stems from the fact that NLTK includes a lot of added code but also a rich and complete library of corpora that power the built-in algorithms.
The Good parts of NLTK
Preprocessing
segmentation
tokenization
PoS tagging
Word level processing
WordNet
Lemmatization
Stemming
NGrams
Utilities
Tree
FreqDist
ConditionalFreqDist
Streaming CorpusReaders
Classification
Maximum Entropy
Naive Bayes
Decision Tree
Chunking
Named Entity Recognition
Parsers Galore!
The Bad parts of NLTK
Syntactic Parsing
No included grammar (not a black box)
No Feature/Dependency Parsing
No included feature grammar
The sem package
Toy only (lambda-calculus & first order logic)
Lots of extra stuff (heavyweight dependency)
papers, chat programs, alignments, etc.
Knowing the good and the bad parts will help you explore NLTK further - looking into the source code to extract the material you need, then moving that code to production. We will explore NLTK in more detail in the rest of this notebook.
Installing NLTK
This notebook has a few dependencies, most of which can be installed via the python package manger - pip.
Python 2.7 or later (anaconda is ok)
NLTK
The NLTK corpora
The BeautifulSoup library
The gensim libary
Once you have Python and pip installed you can install NLTK as follows:
~$ pip install nltk
~$ pip install matplotlib
~$ pip install beautifulsoup4
~$ pip install gensim
Note that these will also install Numpy and Scipy if they aren't already installed.
To download the corpora, open a python interperter:
End of explanation
"""
moby = nltk.text.Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt'))
"""
Explanation: This will open up a window with which you can download the various corpora and models to a specified location. For now, go ahead and download it all as we will be exploring as much of NLTK as we can. Also take note of the download_directory - you're going to want to know where that is so you can get a detailed look at the corpora that's included. I usually export an environment variable to track this:
~$ export NLTK_DATA=/path/to/nltk_data
Take a moment to explore what is in this directory
Working with Example Corpora
NLTK ships with a variety of corpora, let's use a few of them to do some work. Get access to the text from Moby Dick as follows:
End of explanation
"""
moby.concordance("monstrous", 55, lines=10)
"""
Explanation: The nltk.text.Text class is a wrapper around a sequence of simple (string) tokens - intended only for the initial exploration of text usually via the Python REPL. It has the following methods:
common_contexts
concordance
collocations
count
plot
findall
index
You shouldn't use this class in production level systems, but it is useful to explore (small) snippets of text in a meaningful fashion.
The concordance function performs a search for the given token and then also provides the surrounding context:
End of explanation
"""
print moby.similar("ahab")
austen = nltk.text.Text(nltk.corpus.gutenberg.words('austen-sense.txt'))
print
print austen.similar("monstrous")
"""
Explanation: Given some context surrounding a word, we can discover similar words, e.g. words that occur frequently in the same context and with a similar distribution: Distributional similarity:
End of explanation
"""
moby.common_contexts(["ahab", "starbuck"])
"""
Explanation: As you can see, this takes a bit of time to build the index in memory, one of the reasons it's not suggested to use this class in production code. Now that we can do searching and similarity, find the common contexts of a set of words:
End of explanation
"""
inaugural = nltk.text.Text(nltk.corpus.inaugural.words())
inaugural.dispersion_plot(["citizens", "democracy", "freedom", "duties", "America"])
"""
Explanation: your turn, go ahead and explore similar words and contexts - what does the common context mean?
NLTK also uses matplotlib and pylab to display graphs and charts that can show dispersions and frequency. This is especially interesting for the corpus of inaugural addresses given by U.S. presidents.
End of explanation
"""
# Lists the various corpora and CorpusReader classes in the nltk.corpus module
for name in dir(nltk.corpus):
if name.islower() and not name.startswith('_'): print name
# For a specific corpus, list the fileids that are available:
print nltk.corpus.shakespeare.fileids()
print nltk.corpus.gutenberg.fileids()
print nltk.corpus.stopwords.fileids()
nltk.corpus.stopwords.words('english')
import string
print string.punctuation
"""
Explanation: To explore much of the built in corpus, use the following methods:
End of explanation
"""
corpus = nltk.corpus.brown
print corpus.paras()
print corpus.sents()
print corpus.words()
print corpus.raw()[:200] # Be careful!
"""
Explanation: These corpora export several vital methods:
paras (iterate through each paragraph)
sents (iterate through each sentence)
words (iterate through each word)
raw (get access to the raw text)
End of explanation
"""
reuters = nltk.corpus.reuters # Corpus of news articles
# Unigram frequency distribution over every token in the Reuters corpus.
counts = nltk.FreqDist(reuters.words())
# Vocabulary size: number of distinct word types (unique tokens).
vocab = len(counts.keys())
# Total token count: sum of all per-type frequencies.
words = sum(counts.values())
# Lexical diversity = tokens per type; explicit float() because this is
# Python 2 code, where / on two ints would truncate.
lexdiv = float(words) / float(vocab)
print "Corpus has %i types and %i tokens for a lexical diversity of %0.3f" % (vocab, words, lexdiv)
counts.B()
print counts.most_common(40) # The n most common tokens in the corpus
print counts.max() # The most frequent token in the corpus
print counts.hapaxes()[0:10] # A list of all hapax legomena
counts.freq('stipulate') * 100 # percentage of the corpus for this token
counts.plot(200, cumulative=False)
from itertools import chain
brown = nltk.corpus.brown
categories = brown.categories()
counts = nltk.ConditionalFreqDist(chain(*[[(cat, word) for word in brown.words(categories=cat)] for cat in categories]))
for category, dist in counts.items():
vocab = len(dist.keys())
tokens = sum(dist.values())
lexdiv = float(tokens) / float(vocab)
print "%s: %i types with %i tokens and lexical diveristy of %0.3f" % (category, vocab, tokens, lexdiv)
"""
Explanation: Your turn! Explore some of the text in the available corpora
Frequency Analyses
In statistical machine learning approaches to NLP, the very first thing we need to do is count things - especially the unigrams that appear in the text and their relationships to each other. NLTK provides two very excellent classes to enable these frequency analyses:
FreqDist
ConditionalFreqDist
And these two classes serve as the foundation for most of the probability and statistical analyses that we will conduct.
First we will compute the following:
The count of words
The vocabulary (unique words)
The lexical diversity (the ratio of word count to vocabulary)
End of explanation
"""
for ngram in nltk.ngrams(["The", "bear", "walked", "in", "the", "woods", "at", "midnight"], 5):
print ngram
"""
Explanation: Your turn: compute the conditional frequency distribution of bigrams in a corpus
Hint:
End of explanation
"""
text = u"Medical personnel returning to New York and New Jersey from the Ebola-riddled countries in West Africa will be automatically quarantined if they had direct contact with an infected person, officials announced Friday. New York Gov. Andrew Cuomo (D) and New Jersey Gov. Chris Christie (R) announced the decision at a joint news conference Friday at 7 World Trade Center. “We have to do more,” Cuomo said. “It’s too serious of a situation to leave it to the honor system of compliance.” They said that public-health officials at John F. Kennedy and Newark Liberty international airports, where enhanced screening for Ebola is taking place, would make the determination on who would be quarantined. Anyone who had direct contact with an Ebola patient in Liberia, Sierra Leone or Guinea will be quarantined. In addition, anyone who traveled there but had no such contact would be actively monitored and possibly quarantined, authorities said. This news came a day after a doctor who had treated Ebola patients in Guinea was diagnosed in Manhattan, becoming the fourth person diagnosed with the virus in the United States and the first outside of Dallas. And the decision came not long after a health-care worker who had treated Ebola patients arrived at Newark, one of five airports where people traveling from West Africa to the United States are encountering the stricter screening rules."
for sent in nltk.sent_tokenize(text):
print sent
print
for sent in nltk.sent_tokenize(text):
print list(nltk.wordpunct_tokenize(sent))
print
for sent in nltk.sent_tokenize(text):
print list(nltk.pos_tag(nltk.word_tokenize(sent)))
print
"""
Explanation: Preprocessing Text
NLTK is great at the preprocessing of Raw text - it provides the following tools for dividing text into it's constituent parts:
sent_tokenize: a Punkt sentence tokenizer:
This tokenizer divides a text into a list of sentences, by using an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences. It must be trained on a large collection of plaintext in the target language before it can be used.
However, Punkt is designed to learn parameters (a list of abbreviations, etc.) unsupervised from a corpus similar to the target domain. The pre-packaged models may therefore be unsuitable: use PunktSentenceTokenizer(text) to learn parameters from the given text.
word_tokenize: a Treebank tokenizer
The Treebank tokenizer uses regular expressions to tokenize text as in Penn Treebank. This is the method that is invoked by word_tokenize(). It assumes that the text has already been segmented into sentences, e.g. using sent_tokenize().
pos_tag: a maximum entropy tagger trained on the Penn Treebank
There are several other taggers including (notably) the BrillTagger as well as the BrillTrainer to train your own tagger or tagset.
End of explanation
"""
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem.porter import PorterStemmer
text = list(nltk.word_tokenize("The women running in the fog passed bunnies working as computer scientists."))
snowball = SnowballStemmer('english')
lancaster = LancasterStemmer()
porter = PorterStemmer()
for stemmer in (snowball, lancaster, porter):
stemmed_text = [stemmer.stem(t) for t in text]
print " ".join(stemmed_text)
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
lemmas = [lemmatizer.lemmatize(t) for t in text]
print " ".join(lemmas)
"""
Explanation: All of these taggers work pretty well - but you can (and should train them on your own corpora).
Stemming and Lemmatization
We have an immense number of word forms as you can see from our various counts in the FreqDist above - it is helpful for many applications to normalize these word forms (especially applications like search) into some canonical word for further exploration. In English (and many other languages) - mophological context indicate gender, tense, quantity, etc. but these sublties might not be necessary:
Stemming = chop off affixes to get the root stem of the word:
running --> run
flowers --> flower
geese --> geese
Lemmatization = look up word form in a lexicon to get canonical lemma
women --> woman
foxes --> fox
sheep --> sheep
There are several stemmers available:
- Lancaster (English, newer and aggressive)
- Porter (English, original stemmer)
- Snowball (Many langauges, newest)
The Lemmatizer uses the WordNet lexicon
End of explanation
"""
import string
## Module constants
lemmatizer = WordNetLemmatizer()
stopwords = set(nltk.corpus.stopwords.words('english'))
punctuation = string.punctuation
def normalize(text):
    """Yield normalized tokens from *text*.

    Each token is lowercased and lemmatized; stopwords and punctuation
    are discarded. Implemented as a generator so callers can stream
    tokens without materializing the full list.
    """
    for raw_token in nltk.word_tokenize(text):
        candidate = lemmatizer.lemmatize(raw_token.lower())
        if candidate not in stopwords and candidate not in punctuation:
            yield candidate
print list(normalize("The eagle flies at midnight."))
"""
Explanation: Note that the lemmatizer has to load the WordNet corpus which takes a bit.
Typical normalization of text for use as features in machine learning models looks something like this:
End of explanation
"""
print nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize("John Smith is from the United States of America and works at Microsoft Research Labs")))
"""
Explanation: Named Entity Recognition
NLTK has an excellent MaxEnt backed Named Entity Recognizer that is trained on the Penn Treebank. You can also retrain the chunker if you'd like - the code is very readable to extend it with a Gazette or otherwise.
End of explanation
"""
import os
from nltk.tag import StanfordNERTagger
# change the paths below to point to wherever you unzipped the Stanford NER download file
stanford_root = '/Users/benjamin/Development/stanford-ner-2014-01-04'
stanford_data = os.path.join(stanford_root, 'classifiers/english.all.3class.distsim.crf.ser.gz')
stanford_jar = os.path.join(stanford_root, 'stanford-ner-2014-01-04.jar')
st = StanfordNERTagger(stanford_data, stanford_jar, 'utf-8')
for i in st.tag("John Bengfort is from the United States of America and works at Microsoft Research Labs".split()):
print '[' + i[1] + '] ' + i[0]
"""
Explanation: You can also wrap the Stanford NER system, which many of you are also probably used to using.
End of explanation
"""
|
JarnoRFB/qtpyvis | notebooks/keras/inference.ipynb | mit | model = keras.models.load_model('example_keras_mnist_model.h5')
model.summary()
"""
Explanation: Inference in Keras is rather simple. One just calls the predict method of the loaded model.
End of explanation
"""
dataset = mnist.load_data()
train_data = dataset[0][0] / 255
train_data = train_data[..., np.newaxis].astype('float32')
train_labels = np_utils.to_categorical(dataset[0][1]).astype('float32')
test_data = dataset[1][0] / 255
test_data = test_data[..., np.newaxis].astype('float32')
test_labels = np_utils.to_categorical(dataset[1][1]).astype('float32')
test_data.shape
for i in range(5):
plt.imshow(test_data[i, ..., 0])
plt.show()
"""
Explanation: Loading the dataset and looking at the first five samples of the test data.
End of explanation
"""
softmax_predictions = model.predict(test_data[:5])
softmax_predictions
predictions = np.argmax(softmax_predictions, axis=-1)
predictions
predictions == np.argmax(test_labels[:5], axis=1)
"""
Explanation: Doing the inference
End of explanation
"""
|
ericmjl/Network-Analysis-Made-Simple | archive/3-hubs-and-paths-instructor.ipynb | mit | # Load the sociopatterns network data.
G = cf.load_sociopatterns_network()
# How many nodes and edges are present?
len(G.nodes()), len(G.edges())
"""
Explanation: Load Data
We will load the sociopatterns network data for this notebook. From the Konect website:
End of explanation
"""
# Let's find out the number of neighbors that individual #7 has.
# list(G.neighbors(7))
"""
Explanation: Hubs: How do we evaluate the importance of some individuals in a network?
Within a social network, there will be certain individuals which perform certain important functions. For example, there may be hyper-connected individuals who are connected to many, many more people. They would be of use in the spreading of information. Alternatively, if this were a disease contact network, identifying them would be useful in stopping the spread of diseases. How would one identify these people?
Approach 1: Neighbors
One way we could compute this is to find out the number of people an individual is conencted to. NetworkX let's us do this by giving us a G.neighbors(node) function.
End of explanation
"""
# nx.degree_centrality(G)
# Uncomment the next line to show a truncated version.
list(nx.degree_centrality(G).items())[0:5]
"""
Explanation: API Note: As of NetworkX 2.0, G.neighbors(node) now returns a dict_keyiterator, which means we have to cast them as a list first in order to compute its length.
Exercise
Can you create a ranked list of the importance of each individual, based on the number of neighbors they have? (3 min.)
Hint: One suggested output would be a list of tuples, where the first element in each tuple is the node ID (an integer number), and the second element is the number of neighbors that it has.
Hint: Python's sorted(iterable, key=lambda x:...., reverse=True) function may be of help here.
An alternative:
python
sorted([(n, G.neighbors(n)) for n in G.nodes()],
key=lambda x: len(x[1]), reverse=True)[0:5]
An alternative from @dgerlanc using generator expressions to save on memory materialization cost & time:
python
gen = ((len(list(G.neighbors(x))), x) for x in G.nodes())
sorted(gen, reverse=True)
This answer was raised on GitHub.
Approach 2: Degree Centrality
The number of other nodes that one node is connected to is a measure of its centrality. NetworkX implements a degree centrality, which is defined as the number of neighbors that a node has normalized to the number of individuals it could be connected to in the entire graph. This is accessed by using nx.degree_centrality(G)
End of explanation
"""
# Possible Answers:
fig = plt.figure(0)
# Get a list of degree centrality scores for all of the
# nodes in the graph
degree_centralities = list(
nx.degree_centrality(G).values())
x, y = ecdf(degree_centralities)
# Plot the histogram of degree centralities.
plt.scatter(x, y)
# Set the plot title.
plt.title('Degree Centralities')
fig = plt.figure(1)
neighbors = [len(list(G.neighbors(node))) for node in G.nodes()]
x, y = ecdf(neighbors)
plt.scatter(x, y)
plt.title('Number of Neighbors')
fig = plt.figure(2)
plt.scatter(degree_centralities, neighbors, alpha=0.1)
plt.xlabel('Degree Centralities')
plt.ylabel('Number of Neighbors')
"""
Explanation: If you inspect the dictionary closely, you will find that node 51 is the one that has the highest degree centrality, just as we had measured by counting the number of neighbors.
There are other measures of centrality, namely betweenness centrality, flow centrality and load centrality. You can take a look at their definitions on the NetworkX API docs and their cited references. You can also define your own measures if those don't fit your needs, but that is an advanced topic that won't be dealt with here.
The NetworkX API docs that document the centrality measures are here: http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.centrality.html?highlight=centrality#module-networkx.algorithms.centrality
Exercises
The following exercises are designed to get you familiar with the concept of "distribution of metrics" on a graph.
Can you create an ECDF of the distribution of degree centralities?
Can you create an ECDF of the distribution of number of neighbors?
Can you create a scatterplot of the degree centralities against number of neighbors?
If I have n nodes, then how many possible edges are there in total, assuming self-edges are allowed? What if self-edges are not allowed?
Exercise Time: 8 minutes.
Here is what an ECDF is (https://en.wikipedia.org/wiki/Empirical_distribution_function).
Hint: You may want to use:
ecdf(list_of_values)
to get the empirical CDF x- and y-values for plotting, and
plt.scatter(x_values, y_values)
Hint: You can access the dictionary .keys() and .values() and cast them as a list.
If you know the Matplotlib API, feel free to get fancy :).
End of explanation
"""
c = CircosPlot(G, node_order='order', node_color='order')
c.draw()
plt.savefig('images/sociopatterns.png', dpi=300)
"""
Explanation: Exercise
Before we move on to paths in a network, see if you can use the Circos plot to visualize the network. Order and color the nodes according to the order keyword. (2 min.)
The CircosPlot API needs documentation written; for now, I am providing the following "on-the-spot" docs for you.
To instantiate and draw a CircosPlot:
python
c = CircosPlot(G, node_order='node_key', node_color='node_key')
c.draw()
plt.show() # or plt.savefig(...)
Notes:
'node_key' is a key in the node metadata dictionary that the CircosPlot constructor uses for determining the colour, grouping, and ordering of the nodes.
In the following exercise, you may want to use order, which is already encoded on each node in the graph.
End of explanation
"""
# Test your answer below
def test_path_exists():
    """Smoke test: both node pairs are known to be connected in G."""
    assert path_exists(18, 10, G)
    assert path_exists(22, 51, G)
test_path_exists()
def test_path_does_not_exist(G):
    """A freshly added, isolated node can have no path to any other node."""
    g = G.copy() # so that we do not mutate original graph.
    g.add_node(100000)
    assert not path_exists(18, 100000, g)
test_path_does_not_exist(G)
"""
Explanation: What can you deduce about the structure of the network, based on this visualization?
Nodes are sorted by ID. Nodes are more connected to proximal rather than distal nodes. The data are based on people streaming through an enclosed space, so it makes sense that people are mostly connected to others proximal in order, but occasionally some oddballs stick around.
Paths in a Network
Graph traversal is akin to walking along the graph, node by node, restricted by the edges that connect the nodes. Graph traversal is particularly useful for understanding the local structure (e.g. connectivity, retrieving the exact relationships) of certain portions of the graph and for finding paths that connect two nodes in the network.
Using the synthetic social network, we will figure out how to answer the following questions:
How long will it take for a message to spread through this group of friends? (making some assumptions, of course)
How do we find the shortest path to get from individual A to individual B?
Shortest Path
Let's say we wanted to find the shortest path between two nodes. How would we approach this? One approach is what one would call a breadth-first search (http://en.wikipedia.org/wiki/Breadth-first_search). While not necessarily the fastest, it is the easiest to conceptualize.
The approach is essentially as such:
Begin with a queue of the starting node.
Add the neighbors of that node to the queue.
If destination node is present in the queue, end.
If destination node is not present, proceed.
For each node in the queue:
Remove node from the queue.
Add neighbors of the node to the queue. Check if destination node is present or not.
If destination node is present, end. <!--Credit: @cavaunpeu for finding bug in pseudocode.-->
If destination node is not present, continue.
Exercise
Try implementing this algorithm in a function called path_exists(node1, node2, G). (15 min.)
The function should take in two nodes, node1 and node2, and the graph G that they belong to, and return a Boolean that indicates whether a path exists between those two nodes or not. For convenience, also print out whether a path exists or not between the two nodes.
End of explanation
"""
nx.has_path(G, 400, 1)
"""
Explanation: If you write an algorithm that runs breadth-first, the recursion pattern is likely to follow what we have done above. If you do a depth-first search (i.e. DFS), the recursion pattern is likely to look a bit different. Take it as a challenge exercise to figure out how a DFS looks like.
Meanwhile... thankfully, NetworkX has a function for us to use, titled has_path, so we don't have to implement this on our own. :-) Check it out here.
End of explanation
"""
nx.shortest_path(G, 4, 400)
"""
Explanation: NetworkX also has other shortest path algorithms implemented.
We can build upon these to build our own graph query functions. Let's see if we can trace the shortest path from one node to another.
nx.shortest_path(G, source, target) gives us a list of nodes that exist within one of the shortest paths between the two nodes. (Not all paths are guaranteed to be found.)
End of explanation
"""
# Possible Answer:
def extract_path_edges(G, source, target):
    """Return the subgraph of G induced by the nodes on a shortest path
    between `source` and `target`.

    Raises
    ------
    ValueError
        If no path exists between the two nodes. (ValueError is more
        specific than the bare Exception raised before, yet is still
        caught by any existing `except Exception` handlers.)
    """
    # Guard clause: fail fast when the nodes are not connected.
    if not nx.has_path(G, source, target):
        raise ValueError('Path does not exist between nodes {0} and {1}.'.format(source, target))
    nodes = nx.shortest_path(G, source, target)
    return G.subgraph(nodes)
newG = extract_path_edges(G, 4, 400)
nx.draw(newG, with_labels=True)
"""
Explanation: Incidentally, the node list is in order as well.
Exercise
Write a function that extracts the edges in the shortest path between two nodes and puts them into a new graph, and draws it to the screen. It should also return an error if there is no path between the two nodes. (5 min.)
Hint: You may want to use G.subgraph(iterable_of_nodes) to extract just the nodes and edges of interest from the graph G. You might want to use the following lines of code somewhere:
newG = G.subgraph(nodes_of_interest)
nx.draw(newG)
newG will be comprised of the nodes of interest and the edges that connect them.
End of explanation
"""
# Possible Answer
def extract_neighbor_edges(G, node):
    """Return a new graph containing `node`, its neighbors, and only the
    edges that connect `node` to those neighbors.

    Iterating over G.neighbors(node) is O(deg(node)) instead of the
    previous O(|E|) scan over every edge in the graph; the resulting
    (undirected) graph is identical.
    """
    newG = nx.Graph()
    for neighbor in G.neighbors(node):
        newG.add_edge(node, neighbor)
    return newG
fig = plt.figure(0)
newG = extract_neighbor_edges(G, 23)
nx.draw(newG, with_labels=True)
def extract_neighbor_edges2(G, node):
    """Return a new graph with only the edges between `node` and its neighbors.

    G.has_edge is an O(1) adjacency lookup, unlike the previous
    membership tests against G.edges(), which scanned every edge in
    the graph for each neighbor.
    """
    newG = nx.Graph()
    for neighbor in G.neighbors(node):
        # Neighbors come from the adjacency structure, so the edge is
        # guaranteed to exist; has_edge keeps the defensive check cheap.
        if G.has_edge(node, neighbor) or G.has_edge(neighbor, node):
            newG.add_edge(node, neighbor)
    return newG
fig = plt.figure(1)
newG = extract_neighbor_edges2(G, 19)
nx.draw(newG, with_labels=True)
"""
Explanation: Challenge Exercise (at home)
These exercises below are designed to let you become more familiar with manipulating and visualizing subsets of a graph's nodes.
Write a function that extracts only node, its neighbors, and the edges between that node and its neighbors as a new graph. Then, draw the new graph to screen.
End of explanation
"""
# Possible answer to Question 1:
# All we need here is the length of the path.
def compute_transmission_time(G, source, target):
    """Days for a message to travel from `source` to `target`, where
    the k-th step of the chain takes k days: a path of length L costs
    1 + 2 + ... + L days in total.
    """
    hops = nx.shortest_path_length(G, source, target)
    # Closed form of the arithmetic series 1 + 2 + ... + hops.
    return hops * (hops + 1) // 2
compute_transmission_time(G, 14, 4)
# Possible answer to Question 2:
# We need to know the length of every single shortest path between every pair of nodes.
# If we don't put a source and target into the nx.shortest_path_length(G) function call, then
# we get a dictionary of dictionaries, where all source-->target-->lengths are shown.
lengths = []
times = []
for source, sink_length in nx.shortest_path_length(G):
for sink, length in sink_length.items():
times.append(sum(range(1, length+1)))
lengths.append(length)
plt.figure(0)
plt.bar(list(Counter(lengths).keys()), list(Counter(lengths).values()))
plt.figure(1)
plt.bar(list(Counter(times).keys()), list(Counter(times).values()))
"""
Explanation: Challenge Exercises (at home)
Let's try some other problems that build on the NetworkX API. Refer to the following for the relevant functions:
http://networkx.readthedocs.io/en/networkx-1.11/reference/algorithms.shortest_paths.html
If we want a message to go from one person to another person, and we assume that the message takes 1 day for the initial step and 1 additional day per step in the transmission chain (i.e. the first step takes 1 day, the second step takes 2 days etc.), how long will the message take to spread from any two given individuals? Write a function to compute this.
What is the distribution of message spread times from person to person? What about chain lengths?
End of explanation
"""
btws = nx.betweenness_centrality(G, normalized=False)
plt.bar(list(btws.keys()), list(btws.values()))
"""
Explanation: Hubs Revisited
If a message has to be passed through the network in the shortest time possible, there may be "bottleneck" nodes through which information must always flow through. Such a node has a high betweenness centrality. This is implemented as one of NetworkX's centrality algorithms. Check out the Wikipedia page for a further description.
http://en.wikipedia.org/wiki/Betweenness_centrality
End of explanation
"""
dc = pd.Series(nx.degree_centrality(G))
bc = pd.Series(nx.betweenness_centrality(G))
df = pd.DataFrame({'dc': dc, 'bc': bc})
df.plot(kind='scatter', x='dc', y='bc')
# Possible answer:
deg_centrality = nx.degree_centrality(G)
btw_centrality = nx.betweenness_centrality(G)
deg_cent_sorted = [i[1] for i in sorted(zip(deg_centrality.keys(), deg_centrality.values()))]
btw_cent_sorted = [i[1] for i in sorted(zip(btw_centrality.keys(), btw_centrality.values()))]
plt.scatter(deg_cent_sorted, btw_cent_sorted)
plt.xlabel('degree')
plt.ylabel('betweeness')
plt.title('centrality scatterplot')
"""
Explanation: Exercise
Plot betweeness centrality against degree centrality for the network data. (5 min.)
End of explanation
"""
nx.draw(nx.barbell_graph(5, 1))
"""
Explanation: Think about it...
From the scatter plot, we can see that the dots don't all fall on the same line. Degree centrality and betweenness centrality don't necessarily correlate. Can you think of scenarios where this is true?
What would be the degree centrality and betweenness centrality of the middle connecting node in the barbell graph below?
End of explanation
"""
|
tensorflow/docs-l10n | site/en-snapshot/addons/tutorials/image_ops.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
!pip install -U tensorflow-addons
import tensorflow as tf
import numpy as np
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
"""
Explanation: TensorFlow Addons Image: Operations
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/addons/tutorials/image_ops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/addons/blob/master/docs/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/addons/blob/master/docs/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/addons/docs/tutorials/image_ops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
This notebook will demonstrate how to use some of the image operations in TensorFlow Addons.
Here is the list of image operations you'll be covering in this example:
tfa.image.mean_filter2d
tfa.image.rotate
tfa.image.transform
tfa.image.random_hsv_in_yiq
tfa.image.adjust_hsv_in_yiq
tfa.image.dense_image_warp
tfa.image.euclidean_dist_transform
Setup
End of explanation
"""
img_path = tf.keras.utils.get_file('tensorflow.png','https://tensorflow.org/images/tf_logo.png')
"""
Explanation: Prepare and Inspect Images
Download the images
End of explanation
"""
img_raw = tf.io.read_file(img_path)
img = tf.io.decode_image(img_raw)
img = tf.image.convert_image_dtype(img, tf.float32)
img = tf.image.resize(img, [500,500])
plt.title("TensorFlow Logo with shape {}".format(img.shape))
_ = plt.imshow(img)
"""
Explanation: Inspect the images
TensorFlow Icon
End of explanation
"""
bw_img = 1.0 - tf.image.rgb_to_grayscale(img)
plt.title("Mask image with shape {}".format(bw_img.shape))
_ = plt.imshow(bw_img[...,0], cmap='gray')
"""
Explanation: Make a black and white version
End of explanation
"""
mean = tfa.image.mean_filter2d(img, filter_shape=11)
_ = plt.imshow(mean)
"""
Explanation: Play with tfa.image
Mean filtering
Mean filtering is a filtering technique, which is often used to remove noise from an image or signal. The idea is to run through the image pixel by pixel and replacing it with the average values of neighboring pixels.
End of explanation
"""
rotate = tfa.image.rotate(img, tf.constant(np.pi/8))
_ = plt.imshow(rotate)
"""
Explanation: Rotate
This operation rotates the given image by the angle (in radians) input by the user.
End of explanation
"""
transform = tfa.image.transform(img, [1.0, 1.0, -250, 0.0, 1.0, 0.0, 0.0, 0.0])
_ = plt.imshow(transform)
"""
Explanation: Transform
This operation transforms the given image on the basis of the transform vector given by the user.
End of explanation
"""
delta = 0.5
lower_saturation = 0.1
upper_saturation = 0.9
lower_value = 0.2
upper_value = 0.8
rand_hsvinyiq = tfa.image.random_hsv_in_yiq(img, delta, lower_saturation, upper_saturation, lower_value, upper_value)
_ = plt.imshow(rand_hsvinyiq)
"""
Explanation: Random HSV in YIQ
This operation changes color scale of a given RGB image to YIQ but here delta hue and saturation values are picked randomly from the given range.
End of explanation
"""
delta = 0.5
saturation = 0.3
value = 0.6
adj_hsvinyiq = tfa.image.adjust_hsv_in_yiq(img, delta, saturation, value)
_ = plt.imshow(adj_hsvinyiq)
"""
Explanation: Adjust HSV in YIQ
This operation changes color scale of a given RGB image to YIQ but here instead of choosing randomly, delta hue and saturation values are inputs form the user.
End of explanation
"""
input_img = tf.image.convert_image_dtype(tf.expand_dims(img, 0), tf.dtypes.float32)
flow_shape = [1, input_img.shape[1], input_img.shape[2], 2]
init_flows = np.float32(np.random.normal(size=flow_shape) * 2.0)
dense_img_warp = tfa.image.dense_image_warp(input_img, init_flows)
dense_img_warp = tf.squeeze(dense_img_warp, 0)
_ = plt.imshow(dense_img_warp)
"""
Explanation: Dense Image Warp
This operation is for non-linear warp of any image specified by the flow field of the offset vector (here used random values for example).
End of explanation
"""
gray = tf.image.convert_image_dtype(bw_img,tf.uint8)
# The op expects a batch of images, so add a batch dimension
gray = tf.expand_dims(gray, 0)
eucid = tfa.image.euclidean_dist_transform(gray)
eucid = tf.squeeze(eucid, (0, -1))
_ = plt.imshow(eucid, cmap='gray')
"""
Explanation: Euclidean Distance Transform
This operation updates each pixel value with the Euclidean distance from that foreground pixel to the nearest background pixel.
* Note: It takes only a binary image and produces a transformed image. If a different kind of image is given, it results in an image with a single value
End of explanation
"""
|
tclaudioe/Scientific-Computing | SC2/U2_QuadWorldAll.ipynb | bsd-3-clause | import numpy as np
from matplotlib import pyplot as plt
import math
import time
%matplotlib inline
from ipywidgets import interact
import inspect
"""
Explanation: <center>
<h1> ILI286 - Computación Científica II </h1>
<h2> Integración Numérica </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.23</h2>
</center>
Tabla de Contenidos
Introducción
Configuraciones
Sumas de Riemann
Métodos de Newton-Cotes
Regla del Trapecio
Regla de Simpson
Midpoint
Cuadratura Gaussiana
Análisis de Convergencia
Análisis de Tiempo
Acknowledgements
<div id='intro' />
Introducción
En este notebook estudiaremos métodos numéricos para integrar numéricamente funciones de distinto tipo. Las motivaciones para la utilización de estos métodos son varias, algunas de ellas:
* El cálculo teórico de la antiderivada de una función es un proceso tedioso, incluso para librerías de álgebra simbólica como SymPy.
* Peor que lo anterior, existen funciones que no tienen integral elemental, esto es, aquellas cuya antiderivada no puede ser expresada como una expresión de funciones elementales. Un ejemplo clásico es:
$$ \int e^{-x^2}dx $$
* Como veremos, existen métodos numéricos sumamente precisos, para los cuales el error que se comete debido a las aproximaciones, converge rápidamente a $0$ a medida que se aumenta la cantidad de puntos en la malla utilizada.
Partiendo de la noción geométrica de la integral definida, como aquella cantidad que expresa el área bajo la curva de una función, nacen los distintos métodos que estudiaremos a continuación.
<div id='config' />
Configuraciones
Librerías utilizadas en el notebook
End of explanation
"""
###########################################################################
# General plotting framework
###########################################################################
def plot(f, xbin, ybin, int_val, N, text, figname=''):
    """Plot f as a continuous curve together with the piecewise
    approximation (xbin, ybin) produced by a quadrature rule.

    Parameters
    ----------
    f : callable
        Vectorized function being integrated.
    xbin, ybin : array_like
        Piecewise representation of the quadrature approximation.
    int_val : float
        Approximated value of the integral (displayed as text).
    N : int
        Number of function evaluations used (displayed when N > 1).
    text : str
        LaTeX string describing the integral.
    figname : str, optional
        If non-empty, the figure is also saved to this filename.
    """
    plt.figure(figsize=(12,6))
    n = 201
    # Get a representation of f as a continuous function
    x = np.linspace(xbin.min(), xbin.max(), n)
    y = f(x)
    # Plot the function
    plt.plot(x, y, 'r', lw=2.0)
    # Plot the interpolation
    plt.fill_between(xbin, 0, ybin, alpha=0.25, lw=2.0)
    # Setting the lims (fall back to [0, 1] for degenerate ranges).
    ymin, ymax = y.min(), y.max()
    if abs(ymax-ymin)<1E-6:
        ymin, ymax = 0.0, 1.0
    dy = .1*(ymax-ymin)
    plt.ylim([ymin-dy,ymax+dy])
    xmin, xmax = x.min(), x.max()
    # BUGFIX: derive the x padding from the local x-range instead of the
    # module-level globals `a` and `b` (defined in a *later* cell, so the
    # old code broke when this function was reused standalone).
    if abs(xmax-xmin)<1E-6:
        xmin, xmax = 0.0, 1.0
    dx = .1*(xmax-xmin)
    plt.xlim([xmin-dx,xmax+dx])
    # Do the text
    if N>1:
        text_N = r"$%s \approx %.10f$ (usando %d evaluaciones de $f$)" %(text, int_val, N)
        plt.text(min(x), max(y), text_N, fontsize=18)
    plt.xlabel("x")
    plt.ylabel("y")
    # BUGFIX: `figname` was accepted but silently ignored; honor it by
    # saving the figure before showing it.
    if figname:
        plt.savefig(figname)
    plt.show()
    return
"""
Explanation: La siguiente función nos permitirá graficar y visualizar apropiadamente los resultados.
End of explanation
"""
#limits of integration
a = -1; b = 1
#function to integrate
myfun = lambda x : np.exp(x) #x**2 #1 # x #np.exp(-x)
#number the points in the 1D grid
N = 10
#text to show in the graphs
text= r"\int_{%+.2f}^{%+.2f} e^x dx" %(a,b)
"""
Explanation: Parámetros
Para los experimentos numéricos, consideraremos la función $f(x)$ con dominio $x \in [a,b]$. El número de puntos a utilizar en las mallas equiespaciadas vendrá definido por $N$. Todas las anteriores se definen en la celda siguiente.
End of explanation
"""
###########################################################################
# Riemann Rule
###########################################################################
def riemann(myfun, N, a, b, direction='left', verbose=False, text='', figname=''):
    """Approximate the integral of `myfun` on [a, b] with a Riemann sum.

    Parameters
    ----------
    myfun : callable
        Scalar function to integrate (vectorized internally).
    N : int
        Number of equally spaced bins.
    a, b : float
        Integration limits.
    direction : {'left', 'right'}
        Which endpoint of each bin the function is evaluated at.
    verbose : bool
        If True, plot the approximation (uses the module-level `plot`).
    text, figname : str
        Passed through to `plot` for labelling/saving. (The old default
        `text=text` captured a module-level global at definition time,
        which broke standalone imports; an empty label is now the default.)

    Returns
    -------
    float
        The Riemann-sum approximation of the integral.

    Raises
    ------
    ValueError
        If `direction` is neither 'left' nor 'right'.
    """
    f = np.vectorize(myfun)     # So we can apply it to arrays without trouble
    x = np.linspace(a, b, N+1)  # We want N bins, so N+1 points
    dx = x[1]-x[0]
    # BUGFIX: exact comparison — the old `direction in 'left'` was a
    # substring test and accidentally accepted inputs such as 'l' or 'ef'.
    if direction == 'left':
        points = x[:-1]
    elif direction == 'right':
        points = x[1:]
    else:
        # Raising is safer than printing and silently returning None.
        raise ValueError("Riemann Sum: choose left or right")
    point_values = f(points)
    int_val = np.sum(point_values) * dx
    if verbose:
        xbin = np.vstack([x[:-1], x[1:]]).flatten('F')
        ybin = np.vstack([point_values, point_values]).flatten('F')
        plot(f, xbin, ybin, int_val, N, text, figname)
    return int_val
print('Approximated sum: {0}'.format(riemann(myfun, N, a, b, direction="left",
verbose=True, figname="riemann_left_%d.png"%N)))
print('Approximated sum: {0}'.format(riemann(myfun, N, a, b, direction="right",
verbose=True, figname="riemann_right_%d.png"%N)))
"""
Explanation: <div id='sr' />
Sumas de Riemann
Como primera aproximación hacia la integración numérica, revisaremos la suma que compone la base sobre la cual se define una integral definida: La integral de Riemann. Esta consiste en particionar el dominio de integración $D = [a,b] \rightarrow a = x_0 < x_1 < \cdots < x_{n-1} < x_n = b $, de tal modo que para cada partición $[x_k,x_{k+1}]$ aproximemos el área bajo $f$ por rectángulos, tomando como altura de los rectángulos a $f(c)$ con $c \in [x_k,x_{k+1}]$. Cuando se elige tal $c$ como uno de los extremos de la partición, se da origen a las siguiente dos aproximaciones:
Suma izquierda de Riemann
Eligiendo $c = x_{k}$ (extremo izquierdo) para cada partición, sobre una malla regular de ancho $x_{k+1}-x_{k}=\Delta x$:
\begin{align}
A = \int_a^b f(x) dx & \approx \sum_{k=0}^{n-1} f(x_k) \Delta x
\end{align}
Suma derecha de Riemann
Eligiendo $c = x_{k+1}$ (extremo derecho) para cada partición, sobre una malla regular de ancho $x_{k+1}-x_{k}=\Delta x$:
\begin{align}
A = \int_a^b f(x) dx \approx \sum_{k=1}^{n} f(x_k)\Delta x
\end{align}
El código que se provee a continuación, implementa la integración numérica por sumas de Riemann.
End of explanation
"""
def trapezoid(myfun, N, a, b, verbose=False, text='', figname=''):
    """Approximate the integral of `myfun` on [a, b] with the composite
    trapezoid rule over N equal-width segments.

    Returns the scalar approximation; when `verbose`, also plots it via
    the module-level `plot` helper.
    """
    f = np.vectorize(myfun)        # accept scalar functions transparently
    grid = np.linspace(a, b, N+1)  # N segments -> N+1 nodes
    h = grid[1] - grid[0]
    values = f(grid)
    # Composite rule: endpoints weighted 1, interior nodes weighted 2.
    int_val = 0.5*h*(values[0] + values[-1] + 2.0*np.sum(values[1:-1]))
    if verbose:
        plot(f, grid, values, int_val, N, text, figname)
    return int_val
N = 40
#myfun = lambda x : x**2
print('Approximated sum: {0}'.format(trapezoid(myfun, N, a, b, verbose=True, text=text, figname="trapezoid_%d.png"%N)))
"""
Explanation: <div id='nc' />
Métodos de Newton-Cotes
Los siguientes dos métodos que se presentan a continuación, conforman parte de una familia de métodos llamados de Newton-Cotes. Estos generan una malla equiespaciada para particionar el dominio de integración $[a,b]$.
Todos estos métodos se basan en aproximar la función $f$ por un polinomio de grado $n-1: \ p_{n-1}$, que interpole $n$ puntos de tal malla, y de este modo computar el área bajo la curva de este polinomio (En vez de $f$), lo cual es fácil dado que las integrales de polinomios son también polinomios (de un grado superior).
Las derivaciones de las fórmulas a utilizar pueden ser consultadas en el texto guía: Numerical Analysis, Timothy Sauer.
<div id='rt' />
Regla del Trapecio
El primero y más simple de tales métodos, es utilizar polinomios de grado $1$ que interpolen cada $(x_k,f(x_k))$ y $(x_{k+1},f(x_{k+1}))$. Es fácil notar que como resultado, el área aproximada entre cada dos puntos de la malla, será el área de un trapecio, motivo del nombre de tal regla.
Al particionar el intervalo $[a,b]$ en $m$ segmentos y $m+1$ puntos $a = x_0 < \cdots < x_{m} = b \ \ $, se obtiene el siguiente resultado
\begin{align}
\int_{x_0}^{x_m} f(x) dx = \sum_{i=1}^{m} \int_{x_{i-1}}^{x_{i}} f(x) dx
= \frac{h}{2}\left[f(a) + f(b) + 2\sum_{i=1}^{m-1} f(x_i) \right] - \underbrace{(b-a) \frac{h^2}{12} f''(c)}_{\text{Error term}}
\end{align}
donde $h=(b-a)/m \ $ es el largo de cada subintervalo, y $\ c \in [a, b]$. Dado que $c$ no es conocido, el Error term en la práctica no se toma en cuenta, y por lo mismo constituye el error del método.
End of explanation
"""
def simpsons(myfun, N, a, b, verbose=False, text="", figname=""):
    """Composite Simpson's rule on [a, b] with N segments (N must be even).

    Returns np.nan when N is odd, since Simpson's rule pairs segments.
    """
    f = np.vectorize(myfun)  # allow array evaluation of a scalar function
    if N % 2 == 1:
        if verbose: print("Simpsons rule only applicable to even number of segments")
        return np.nan
    nodes = np.linspace(a, b, N+1)  # N segments -> N+1 nodes
    dx = nodes[1] - nodes[0]
    # Every pair of segments contributes one parabola through
    # (left, middle, right): weight pattern 1, 4, 1.
    left, middle, right = nodes[:-2:2], nodes[1::2], nodes[2::2]
    int_val = np.sum((f(left) + 4*f(middle) + f(right)) * dx / 3)
    if verbose:
        xbin, ybin = simpsons_bins(f, left, middle, right)
        plot(f, xbin, ybin, int_val, N, text, figname)
    return int_val
def simpsons_bins(f, xleft, xmiddle, xright):
    """Sample each interpolating parabola (through the three Simpson
    nodes of a segment pair) on a fine grid, for plotting purposes.

    Returns
    -------
    (xbin, ybin) : two 1-D numpy arrays with the sampled parabolas.
    """
    samples_per_pair = 21
    xs, ys = [], []
    for x0, x1, x2 in zip(xleft, xmiddle, xright):
        grid = np.linspace(x0, x2, samples_per_pair)
        # Lagrange form of the quadratic through (x0,f0), (x1,f1), (x2,f2).
        parabola  = (f(x0)*(grid-x1)*(grid-x2)) / ((x0-x1)*(x0-x2))
        parabola += (f(x1)*(grid-x0)*(grid-x2)) / ((x1-x0)*(x1-x2))
        parabola += (f(x2)*(grid-x0)*(grid-x1)) / ((x2-x0)*(x2-x1))
        xs.extend(list(grid))
        ys.extend(list(parabola))
    return np.array(xs), np.array(ys)
N=4
print('Approximated sum: {0}'.format(simpsons(myfun, N, a, b, verbose=True, text=text, figname="simpsons_%d.png"%N)))
"""
Explanation: <div id='rs' />
Regla de Simpson
La extensión lógica a la regla anterior, es utilizar polinomio de grado $2$ (parábolas) para aproximar la función $f$. Para ello, cada tres puntos $(x_k,f(x_k))$, $(x_{k+1},f(x_{k+1}))$ y $(x_{k+2},f(x_{k+2}))$ una parábola es utilizada para aproximar la función.
Al particionar el intervalo $[a,b]$ en $m$ segmentos y $m+1$ puntos $a = x_0 < \cdots < x_{m} = b \ \ $, con $\ m\ $ par, se obtiene el siguiente resultado:
\begin{align}
\int_{a}^{b} f(x) dx = \frac{h}{3} \left( f(x_0) + \sum_{i=1}^{N} 4 f(x_{2i-1}) + \sum_{i=1}^{N-1} 2 f(x_{2i}) + f(x_N) \right) - \underbrace{(b-a)\frac{h^4}{90} f^{(4)}(c)}_{\text{Error term}}
\end{align}
donde $h=(x_{i+1}-x_i) \ \ $ y $c \in [a, b]$
End of explanation
"""
def midpoint(myfun, N, a, b, verbose=False, text='', figname=''):
    """Composite midpoint rule on [a, b] with N equal subintervals.

    Evaluates the (vectorized) integrand at the center of every subinterval
    and sums center_value * width.  When verbose, draws the rectangle
    approximation via plot().
    """
    f = np.vectorize(myfun)
    edges = np.linspace(a, b, N + 1)   # N subintervals -> N+1 edges
    width = edges[1] - edges[0]
    centers = edges[:-1] + .5 * width
    heights = f(centers)
    approx = sum(heights * width)
    if verbose:
        # Duplicate edges/heights pairwise so plot() draws flat-topped bars.
        xbin = np.vstack([edges[:-1], edges[1:]]).flatten('F')
        ybin = np.vstack([heights, heights]).flatten('F')
        plot(f, xbin, ybin, approx, N, text, figname)
    return approx
# Demo: composite midpoint rule with N=20 subintervals.
N = 20
#myfun = lambda x : x + 1
print('Approximated sum: {0}'.format(midpoint(myfun, N, a, b, verbose=True, text=text, figname="midpoint_%d.png"%N)))
"""
Explanation: <div id='mp' />
Midpoint
Una de las limitaciones de las dos reglas anteriores, es que requieren evaluar $f$ en los extremos del intervalo, y pueden haber casos en donde $f$ no esté bien definida en tales puntos.
La regla del punto medio midpoint para cada dos puntos $(x_k,f(x_k))$ y $(x_{k+1},f(x_{k+1}))$, aproxima la función por una expansión de Taylor de grado $1$, centrada en el punto medio $\displaystyle \left( \frac{x_k+x_{k+1}}{2}, f\left(\frac{x_k+x_{k+1}}{2} \right) \right)$, es decir, aproxima $f$ por un polinomio de grado $1$ (al igual que la regla del trapecio).
Al particionar el intervalo $[a,b]$ en $m$ segmentos y $m+1$ puntos $a = x_0 < \cdots < x_{m} = b \ \ $, se obtiene el siguiente resultado:
\begin{align}
\int_{a}^{b} f(x) dx = \sum_{i=1}^{m} \int_{x_{i-1}}^{x_{i}} f(x) dx \
= \sum_{i=1}^{m} h f(w_i) + \underbrace{\frac{(b-a)}{24} h^2 f''(c)}_{\text{Error term}}
\end{align}
donde $h=(b-a)/m \ \ \ $,
$w_i=\frac{1}{2}(x_{i-1}+x_{i})\ \ \ $
y
$\ \ c \in [a, b]$
End of explanation
"""
def gaussianquad(myfun, N, a, b, verbose=False, text="", figname=""):
    """N-point Gauss-Legendre approximation of the integral of myfun on [a, b]."""
    f = np.vectorize(myfun)   # safe elementwise evaluation on the node array
    nodes, weights = gaussian_nodes_and_weights(N, a, b)
    approx = sum(weights * f(nodes))   # weighted sum of integrand samples
    if verbose:
        xbin, ybin = gaussian_bins(f, nodes, weights)
        plot(f, xbin, ybin, approx, N, text, figname)
    return approx
# Comment: These nodes could be precomputed in advance, here
# they are computed as needed so it will penalize the
# computation time.
def gaussian_nodes_and_weights(N, a, b):
    """Gauss-Legendre nodes and weights on [a, b] via the Golub-Welsch algorithm.

    Builds the symmetric Jacobi matrix of the Legendre recurrence; its
    eigenvalues are the nodes on [-1, 1] and the squared first components of
    the eigenvectors (times 2) are the weights.  Both are then affinely
    mapped to [a, b].

    Fix: the original N==1 branch returned the hard-coded pair ([1], [2])
    regardless of the interval, which is wrong even on [-1, 1] (the one-point
    rule's node is the midpoint, its weight the interval length).
    """
    if N == 1:
        return np.array([0.5 * (a + b)]), np.array([float(b - a)])
    beta = .5 / np.sqrt(1. - (2. * np.arange(1., N)) ** (-2))
    T = np.diag(beta, 1) + np.diag(beta, -1)   # Jacobi matrix (tridiagonal, symmetric)
    D, V = np.linalg.eigh(T)                   # eigenvalues ascending = nodes on [-1, 1]
    x = .5 * ((b - a) * D + b + a)             # affine map [-1, 1] -> [a, b]
    w = 2 * V[0, :] ** 2                       # weights on [-1, 1]
    w = .5 * (b - a) * w                       # rescale weights for [a, b]
    return x, w
def gaussian_bins(f, x, w):
    # Builds step-plot coordinates that visualize each Gaussian node's weight
    # as a bar: bar widths are the weights (accumulated from the left
    # endpoint) and bar heights are f evaluated at the corresponding node.
    # NOTE(review): relies on the *global* `a` for the left endpoint instead
    # of receiving it as a parameter -- confirm `a` matches the interval used
    # to build x and w.
    z = [a] + list(a + w.cumsum())  # bin edges: a, a+w0, a+w0+w1, ...
    xbin = np.vstack([z[:-1], z[1:]]).flatten('F')  # each edge duplicated -> step look
    z = f(x)
    ybin = np.vstack([z[:], z[:]]).flatten('F')     # each height repeated for its bin
    return np.array(xbin), np.array(ybin)
# Demo: Gauss-Legendre quadrature, then inspect the 2-point nodes/weights on [-1, 1].
print('Approximated sum: {0}'.format(gaussianquad(myfun, N, a, b, verbose=True,
                                                  text=text, figname="gaussianquad_%d.png"%N)))
gaussian_nodes_and_weights(2, -1, 1)
"""
Explanation: <div id='cg' />
Cuadratura Gaussiana
Similar a las ideas que propuso Chebyshev para mejorar los método de interpolación, nace la siguiente pregunta:
¿Se podrá mejorar el proceso de integración particionando $[a,b]$ de una forma no equiespaciada?
La respuesta a esta pregunta es sí. De modo análogo a los puntos de Chebyshev, aca podemos seleccionar las raíces de los polinomios de Legendre $p_n(x)$ siguientes:
\begin{align}
p_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} \left[ (x^2 - 1)^n \right]
\end{align}
para luego interpolar $f$ sobre estos puntos, generando un polinomio $p_{n-1}$ sobre el cual realizar la integración.
Este método utiliza la siguiente aproximación, para un intervalo $[-1,1]$:
\begin{align}
\int_{a}^{b} f(x) dx \approx \sum_{i=1}^n w_i f(x_i)
\end{align}
donde los $x_i$ se definen como las raíces del n-ésimo polinomio de Legendre $p_n(x)$, y los $w_i$ se calculan como:
$$
w_i = \int_{-1}^{1}L_{i}(x)dx, \ \ \ i = 1,\dots,n
$$
siendo $L_i(x)$ los conocidos polinomio de la interpolación de Lagrange.
Nota: Para un intervalo arbitrario $[a,b]$ es necesario realizar la transformación lineal correspondiente.
End of explanation
"""
def get_error(quadrature_rule, myfun, Nrange, a, b, true_value):
    """Absolute error of a quadrature rule for each N in Nrange.

    Errors are floored at 1e-16 so they remain visible on log-log plots.
    """
    floor = 1E-16
    errors = []
    for n_segments in Nrange:
        approx = quadrature_rule(myfun, n_segments, a, b)
        err = np.abs(true_value - approx)
        errors.append(err if err >= floor else floor)
    return errors
def set_ylim(ymin, ymax):
    """Grow the current axes' y-limits so they contain at least [ymin, ymax]."""
    current_lo, current_hi = plt.ylim()
    plt.ylim([min(current_lo, ymin), max(current_hi, ymax)])
    return
def convergence(exp_data):
    """Run and plot a convergence study of the four quadrature rules.

    exp_data is a tuple (Nrange, myfun, a, b, true_value).  Produces a 2x2
    figure: the integrand with its shaded integral, absolute error vs N on
    linear and log-log axes, and absolute error vs h = 1/N, with O(N^-2) and
    O(N^-4) reference slopes.
    """
    Nrange = exp_data[0]
    myfun = exp_data[1]
    a = exp_data[2]
    b = exp_data[3]
    true_value = exp_data[4]
    #######################################################
    print('Printing numerical experiment details:')
    print('Nrange: ', Nrange)
    print(inspect.getsource(myfun).replace('\n', ''))
    print('a: ', a)
    print('b: ', b)
    print('true_value: ', true_value)
    #######################################################
    ms = 10  # marker size shared by every series
    f = np.vectorize(myfun) # So we can apply it to arrays without trouble
    # Absolute errors of each rule over Nrange (floored at 1e-16 by get_error).
    e_mp = get_error(midpoint, myfun, Nrange, a, b, true_value)
    e_tr = get_error(trapezoid, myfun, Nrange, a, b, true_value)
    e_sp = get_error(simpsons, myfun, Nrange, a, b, true_value)
    e_gq = get_error(gaussianquad, myfun, Nrange, a, b, true_value)
    #plt.figure(figsize=(12,16))
    fig = plt.figure(figsize=(24,24))
    plt.rcParams.update({'font.size': 22})
    # First plot: integrand and shaded integration region, 10% margin around [a, b].
    ax = plt.subplot(2,2,1)
    dd = 0.1*(b-a)
    x = np.linspace(a-dd, b+dd, 1000)
    plt.plot(x, f(x), 'k', label="f(x)", lw=2.0)
    x = np.linspace(a, b, 1000)
    plt.fill_between(x, f(x), 0, alpha=0.5, label=r"$\int_a^b f(x) dx$")
    plt.xlabel("x")
    plt.ylabel("f(x)")
    ymax = 1.05*plt.ylim()[1]
    plt.ylim([-ymax, ymax])
    plt.grid('on')
    plt.legend(loc="lower left")
    # Second plot: error vs N, linear scale.
    ax = plt.subplot(2,2,2)
    plt.plot(Nrange, e_mp, 'sb', lw=2.0, ms=ms, label="Midpoint")
    plt.plot(Nrange, e_tr, 'or', lw=2.0, ms=ms, label="Trapezoid")
    plt.plot(Nrange, e_sp, '>y', lw=2.0, ms=ms, label="Simpsons")
    plt.plot(Nrange, e_gq, 'Dg', lw=2.0, ms=ms, label="Gaussian Quad")
    set_ylim(-5E-2, 1E-1)
    plt.xlabel("N")
    plt.ylabel("Absolute Error")
    ax.legend(loc='best', bbox_to_anchor=(0.5, 1.00), ncol=1, fancybox=True, shadow=True, numpoints=1)
    plt.grid('on')
    # Third plot: error vs N, log-log, with reference slopes N^-2 and N^-4.
    ax = plt.subplot(2,2,3)
    plt.loglog(Nrange, e_mp, 'sb', ms=ms, lw=2.0, label="Midpoint")
    plt.loglog(Nrange, e_tr, 'or', ms=ms, lw=2.0, label="Trapezoid")
    plt.loglog(Nrange, e_sp, '>y', ms=ms, lw=2.0, label="Simpsons")
    plt.loglog(Nrange, e_gq, 'Dg', ms=ms, lw=2.0, label="Gaussian Quad")
    ax.legend(loc='lower left', ncol=1, fancybox=True, shadow=True, numpoints=1)
    plt.ylim([1E-18, 1E+1])
    N = np.arange(1,101,10)
    #plt.loglog(N, 1./N, '-k', lw=2.0, alpha=0.5)
    plt.loglog(N, 1./N**2, '-k', lw=2.0, alpha=0.5)
    #plt.loglog(N, 1./N**3, '-k', lw=2.0, alpha=0.5)
    plt.loglog(N, 1./N**4, '-k', lw=2.0, alpha=0.5)
    plt.xlabel("N")
    plt.ylabel("Absolute Error")
    plt.xlim([0.9*min(Nrange),1.1*max(Nrange)])
    plt.grid('on')
    #print(Nrange)
    #print(np.log10(e_mp))
    #print(np.log10(e_tr))
    #print(np.log10(e_sp))
    #print(np.log10(e_gq))
    # Forth plot: error vs h = 1/N, log-log, with reference slopes h^2 and h^4.
    ax = plt.subplot(2,2,4)
    h = 1./np.arange(1,101,10)
    #plt.loglog(h, h, '-k', lw=2.0, alpha=0.5)
    plt.loglog(h, h**2, '-k', lw=2.0, alpha=0.5)
    #plt.loglog(h, h**3, '-k', lw=2.0, alpha=0.5)
    plt.loglog(h, h**4, '-k', lw=2.0, alpha=0.5)
    # Plotting Gaussian Quadratupre first but using larger markers
    h = 1./np.array(Nrange)
    plt.loglog(h, e_mp, 'sb', lw=2.0, ms=ms, label="Midpoint")
    plt.loglog(h, e_tr, 'or', lw=2.0, ms=ms, label="Trapezoid")
    plt.loglog(h, e_sp, '>y', lw=2.0, ms=ms, label="Simpsons")
    plt.loglog(h, e_gq, 'Dg', lw=2.0, ms=ms, label="Gaussian Quad")
    ax.legend(loc='lower right', ncol=1, fancybox=True, shadow=True, numpoints=1)
    plt.ylim([1E-18, 1E+1])
    plt.xlabel("h")
    plt.ylabel("Absolute Error")
    plt.grid('on')
    plt.xlim([0.9*min(h),1.1*max(h)])
    plt.show()
    #fig.savefig('myfig.eps', format='eps')
"""
Explanation: Preguntas
El método de Midpoint y el del Trapecio se basan ambos en aproximaciones por polinomio de grado $1$. ¿Cuál considera usted mejor?. Hint: Considere la cantidad de evaluaciones de funciones, y el termino de error.
¿Cuál es la importancia del término $h$ en los Error term de cada método?
¿Cuál es la importancia del orden de la derivada $f^{(k)}(c)$ en los Error term de cada método?
<div id='ac' />
Análisis de Convergencia
Para entender bien el concepto de convergencia, es fundamental entender correctamente dos conceptos:
Grado de Precisión (Degree of Precision). Para un método de integración numérica, corresponde al mayor entero $k$ para el cual todos los polinomios de grado $k$ o menor, son integrados exactamente (sin error). Si recordamos los métodos ya estudiados, el término de error tiene la forma general:
$$
\text{Error term} = C \ h^p \ f^{(q)}(c) \ \ \ \ \text{con} \ \ C \ \text{constante} \ \ \text{y} \ \ p,q \in \mathbb{N}
$$
Dado que para cualquier polinomio de grado $\leq q-1$ se obtiene que $f^{(q)}=0$, entonces el grado de precisión de un tal método, viene determinado por el orden de la derivada, y es $q-1$.
Orden de convergencia. El orden de convergencia está determinado por la otra componente del Error term, esto es, por $h^p$. Recordar que $h$ debe ser pequeño ($h < 1$), y por lo tanto a mayor potencia $p$, menor será el error y mayor el orden del método. El orden de convergencia nos dice cómo decrece el error a medida que se aumenta la cantidad de puntos de la malla.
A continuación compararemos gráficamente la convergencia de estos métodos:
End of explanation
"""
# Catalogue of test integrands: each entry is (Ns, f, a, b, exact_value),
# labelled with the lambda's own source line (via inspect) for the
# interact() dropdown -- which is why the lambdas must stay one-per-line.
list_experiments=[]
###########################
# Function 1: Constant
# All methods are good
Ns = range(4, 11)
x0, x1, x2 = 5., -1., 3.
f = lambda x : 1
a = -1.0
b = +1.0
sol = 2
exp1 = (Ns, f, a, b, sol)
list_experiments.append(('exp1: '+inspect.getsource(f).replace('\n', ''),exp1))
###########################
# Function 2: sin
# All method equal if symmetric interval
# Gaussian quad better if asymmetric interval
Ns = range(4, 100)
f = lambda x : np.sin(x)
a = 0.0
b = +1.0
sol = -np.cos(b)+np.cos(a)
exp2 = (Ns, f, a, b, sol)
list_experiments.append(('exp2: '+inspect.getsource(f).replace('\n', ''),exp2))
###########################
# Function 3: gaussian bell
# gaussian quad outperforms all the other methods
Ns = range(4, 20)
f = lambda x : np.exp(-x**2)
a = -1.0
b = +1.0
sol = 1.4936482656248541 # Specific value for the range [-1,1]
exp3 = (Ns, f, a, b, sol)
list_experiments.append(('exp3: '+inspect.getsource(f).replace('\n', ''),exp3))
###########################
# Function 4: exponential
# gaussian quad outperforms all the other methods
Ns = range(4, 11) #20
f = lambda x : np.exp(x)
a = -1.0
b = +2.0
sol = np.exp(b) - np.exp(a)
exp4 = (Ns, f, a, b, sol)
list_experiments.append(('exp4: '+inspect.getsource(f).replace('\n', ''),exp4))
###########################
# Function 5: logarithm (integrable singularity at 0; a kept slightly positive)
Ns = range(4, 20)
f = lambda x : np.log(np.abs(x))
a = 1e-10
b = +1.0
sol = (b*np.log(b)-b) - (a*np.log(a)-a)#-2.0000000000000000
exp5 = (Ns, f, a, b, sol)
list_experiments.append(('exp5: '+inspect.getsource(f).replace('\n', ''),exp5))
###########################
# Function 6 and true value (sinc; removable singularity at 0)
Ns = range(4, 20)
f = lambda x : np.sin(x)/x #if abs(x)>1e-6 else 1.0
a = -1.0
b = +1.0
sol = 1.8921661407343660
exp6 = (Ns, f, a, b, sol)
list_experiments.append(('exp6: '+inspect.getsource(f).replace('\n', ''),exp6))
###########################
# Function 7 : absolute value
# Midpoint wins
# Do (-1,1) and (-2,1)
Ns = range(4, 20)
f = lambda x : abs(x)
a = -1 #-np.pi
b = +1.0
sol = (a**2+b**2)/2.
exp7 = (Ns, f, a, b, sol)
list_experiments.append(('exp7: '+inspect.getsource(f).replace('\n', ''),exp7))
###########################
# Function 8 : Gaussian
# Midpoint/Traps wins over gaussian
Ns = range(4, 100)
f = lambda x : np.exp(-x**2)
a = -10.0
b = +10.0
sol = np.sqrt(math.pi)
exp8 = (Ns, f, a, b, sol)
list_experiments.append(('exp8: '+inspect.getsource(f).replace('\n', ''),exp8))
###########################
# Function 9 : 1/x^2
# Gaussian wins, but they all degrade if a->0
# Here we should try an adaptative method
Ns = range(4, 20)
f = lambda x : 1.0/(x**2)
a = 1e-4
b = +1.
sol = 1.0/a - 1.0/b
exp9 = (Ns, f, a, b, sol)
list_experiments.append(('exp9: '+inspect.getsource(f).replace('\n', ''),exp9))
###########################
# Function 10 : 1/x^0.5 (integrable singularity at the left endpoint)
Ns = range(4, 20)
f = lambda x : 1.0/(np.sqrt(np.abs(x)))
a = 0.0
b = 1.0
sol = 2.0
exp10 = (Ns, f, a, b, sol)
list_experiments.append(('exp10: '+inspect.getsource(f).replace('\n', ''),exp10))
"""
Explanation: Parámetros
A continuación, se defininen los parámetros a utilizar en nuestros experimentos.
End of explanation
"""
# Interactive widget: choose an experiment and rerun the convergence study.
interact(convergence, exp_data=list_experiments)
"""
Explanation: Test de Convergencia
Finalmente podemos testear la convergencia de nuestros métodos
End of explanation
"""
def timeit(f):
    """Decorator: run *f* 50 times and return the mean wall-clock time in ms.

    The wrapped function's own return value is discarded; callers get the
    average duration instead.
    """
    def timed(*args, **kwargs):
        repetitions = 50
        total_ms = 0.
        for _ in range(repetitions):
            start = time.time()
            f(*args, **kwargs)
            total_ms += (time.time() - start) * 1000.  # seconds -> ms
        return total_ms / repetitions
    return timed
"""
Decorated functions
"""
@timeit
def t_trapezoid(myfun, N, a, b):
    # Timed trapezoid rule (returns mean wall-clock ms, not the integral).
    # Fix: the original called trapezoid(myfunc, ...) -- a typo that timed a
    # call against a *global* name instead of the `myfun` argument.
    return trapezoid(myfun, N, a, b)
@timeit
def t_simpsons(myfun, N, a, b):
    # Timed Simpson rule (returns mean wall-clock ms, not the integral).
    # Fix: the original called simpsons(myfunc, ...) -- a typo that timed a
    # call against a *global* name instead of the `myfun` argument.
    return simpsons(myfun, N, a, b)
@timeit
def t_midpoint(myfun, N, a, b):
    # Timed midpoint rule (returns mean wall-clock ms, not the integral).
    return midpoint(myfun, N, a, b)
@timeit
def t_gaussianquad(myfun, N, a, b):
    # Timed Gaussian quadrature (returns mean wall-clock ms, not the integral).
    # Fix: the original called gaussianquad(myfunc, ...) -- a typo that timed
    # a call against a *global* name instead of the `myfun` argument.
    return gaussianquad(myfun, N, a, b)
def timing(exp_data):
    """Measure and plot the execution time of the four quadrature rules.

    exp_data is a tuple (Nrange, myfun, a, b, true_value).  Produces four
    stacked plots: the integrand, time vs N (linear and log-log), and time
    vs h = 1/N.

    Fix: the original body computed *absolute errors* via get_error() while
    labeling every axis 'Tiempo [ms]'; this version actually times each
    rule, matching the labels.
    """
    import time  # local import: this notebook cell did not import it at top level

    Nrange = exp_data[0]
    myfun = exp_data[1]
    a = exp_data[2]
    b = exp_data[3]
    #######################################################
    print('Printing numerical experiment details:')
    print('Nrange: ', Nrange)
    print(inspect.getsource(myfun).replace('\n', ''))
    print('a: ', a)
    print('b: ', b)
    #######################################################

    def _mean_ms(rule):
        # Mean wall-clock time (ms) of rule(myfun, N, a, b) for each N,
        # averaged over a few repetitions to smooth out timer jitter.
        reps = 5
        samples = []
        for N in Nrange:
            start = time.time()
            for _ in range(reps):
                rule(myfun, N, a, b)
            samples.append((time.time() - start) * 1000. / reps)
        return samples

    ms = 10  # marker size shared by every series
    f = np.vectorize(myfun)  # So we can apply it to arrays without trouble
    t_mp = _mean_ms(midpoint)
    t_tr = _mean_ms(trapezoid)
    t_sp = _mean_ms(simpsons)
    t_gq = _mean_ms(gaussianquad)
    plt.figure(figsize=(12,16))
    # First plot: the integrand and the shaded integration region.
    ax = plt.subplot(4,1,1)
    dd = 0.1*(b-a)
    x = np.linspace(a-dd, b+dd, 1000)
    plt.plot(x, f(x), 'k', label="f(x)", lw=2.0)
    x = np.linspace(a, b, 1000)
    plt.fill_between(x, f(x), 0, alpha=0.5, label=r"$\int_a^b f(x) dx$")
    plt.xlabel("x")
    plt.ylabel("f(x)")
    ymax = 1.05*plt.ylim()[1]
    plt.ylim([-ymax, ymax])
    plt.grid('on')
    plt.legend(loc="lower left")
    # Second plot: time vs N, linear scale.
    ax = plt.subplot(4,1,2)
    plt.plot(Nrange, t_mp, 'sb', lw=2.0, ms=ms, label="Midpoint")
    plt.plot(Nrange, t_tr, 'or', lw=2.0, ms=ms, label="Trapezoid")
    plt.plot(Nrange, t_gq, 'Dg', lw=2.0, ms=ms, label="Gaussian Quad")
    plt.plot(Nrange, t_sp, '>y', lw=2.0, ms=ms, label="Simpsons")
    plt.xlabel("N")
    plt.ylabel("Tiempo [ms]")
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00),
              ncol=4, fancybox=True, shadow=True, numpoints=1)
    plt.grid('on')
    # Third plot: time vs N, log-log.
    plt.subplot(4,1,3)
    plt.loglog(Nrange, t_mp, 'sb', ms=ms, lw=2.0)
    plt.loglog(Nrange, t_tr, 'or', ms=ms, lw=2.0)
    plt.loglog(Nrange, t_sp, '>y', ms=ms, lw=2.0)
    plt.loglog(Nrange, t_gq, 'Dg', ms=ms, lw=2.0)
    plt.xlabel("N")
    plt.ylabel("Tiempo [ms]")
    plt.grid('on')
    # Fourth plot: time vs h = 1/N, log-log.
    plt.subplot(4,1,4)
    h = 1./np.array(Nrange)
    plt.loglog(h, t_gq, 'Dg', lw=2.0, ms=ms)
    plt.loglog(h, t_mp, 'sb', lw=2.0, ms=ms)
    plt.loglog(h, t_tr, 'or', lw=2.0, ms=ms)
    plt.loglog(h, t_sp, '>y', lw=2.0, ms=ms)
    plt.xlabel("h")
    plt.ylabel("Tiempo [ms]")
    plt.grid('on')
    plt.show()
"""
Explanation: <div id='at' />
Análisis de Tiempo
Usamos el siguiente decorador para medir los tiempos de ejecución de las funciones
End of explanation
"""
# Interactive widget: choose an experiment and rerun the timing study.
interact(timing, exp_data=list_experiments);
"""
Explanation: Test de tiempos
Finalmente podemos testear los tiempos de nuestros métodos
End of explanation
"""
|
spulido99/Programacion | Alex/.ipynb_checkpoints/Cancer-checkpoint.ipynb | mit | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
%matplotlib inline
import pandas as pd  # fix: pd was used below but only imported in a later cell

# Two overlapping normal samples: N(10, 6) and N(5, 7), 100 draws each.
n = np.random.normal(10, 6, 100)
n1 = np.random.normal(5, 7, 100)
sns.distplot(n)
sns.distplot(n1)
import matplotlib.pyplot as plt
plt.scatter(n, n1)
# Independent samples: the fitted regression line should be roughly flat.
data = pd.DataFrame({'x': n, 'y': n1})
data.head()
sns.lmplot('x', 'y', data)
# Now a real linear relationship y = 12 + 72*n, plus heavy N(0, 500) noise.
y = 12 + 72*n + np.random.normal(0, 500, 100)
data = pd.DataFrame({'x': n, 'y': y})
sns.lmplot('x', 'y', data)
"""
Explanation: Ejemplo de cómo usar números aleatorios
End of explanation
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set()
%matplotlib inline
"""
Explanation: Trabajo de programación con Carcinoma de Pulmón
Se importan las librerias necesarias para trabajar
End of explanation
"""
# Load the TCGA LUAD (lung adenocarcinoma) RNA-seq table: rows are genes
# ('Hybridization REF'), columns are patient barcodes, values are normalized
# read counts -- TODO confirm against the GDAC file format.
df= pd.read_table('C:/Users/Alex/Documents/eafit/semestres/X semestre/programacion/gdac/LUAD.txt', sep='\t')
# NOTE(review): 'display.mpl_style' and DataFrame.convert_objects() were
# removed in modern pandas; this cell only runs on the old pandas it was
# written for.
pd.set_option('display.mpl_style', 'default')
df=df.convert_objects(convert_dates=True, convert_numeric=True)
df=df.drop([0])  # drop the first data row (secondary header in the file)
df.head()
p1=df['TCGA-05-4244-01A-01R-1107-07']  # expression profile of one patient
p1[:5]
p1 = pd.to_numeric(p1)
type(p1)
sns.distplot(p1)
# Single-gene rows across all patients, selected by gene id.
gen1=df[df['Hybridization REF']=='?|10431']
gen1=gen1.drop('Hybridization REF', 1)
#gen1=gen1.values.astype(float).tolist()
gen2=df[df['Hybridization REF']=='tAKR|389932']
gen2=gen2.drop('Hybridization REF', 1)
#gen2=gen2.values.astype(float).tolist()
sns.distplot(gen1)
sns.distplot(gen2)
df2=df.ix[:,'TCGA-05-4244-01A-01R-1107-07':]  # patient columns only
a=df2.max(axis=0)  # per-patient maximum read count
#a.min
#df['TCGA-05-4244-01A-01R-1107-07'].value_counts()
"""
Explanation: Se llama el archivo con los datos a trabajar, los cuales son una lista de genes normalizados asociados al cancer y un numero n de pacientes a los cuales se les hizo un analisis de expresion de estos genes mediante mRNAseq, por lo que para cada gen y persona se encuentra el número de reads correspondientes.
End of explanation
"""
maximo=df.loc[df.ix[:,1] > 100000 ]
gen_list=maximo['Hybridization REF'].values.tolist()
gen_list
#maximo
sns.boxplot(x="Hybridization REF", y= 'TCGA-05-4389-01A-01R-1206-07', data=maximo, palette="PRGn") #mirar cuantos pacientes presentan un gen mas expresado que otro
"""
Explanation: Se seleccionan los genes que posean mas de cien mil reads
End of explanation
"""
for i in range(len(gen_list)):
name=gen_list[i]
gen1=df[df['Hybridization REF']== name].ix[:,'TCGA-05-4244-01A-01R-1107-07':]
sns.distplot(gen1)
genes=df.loc[df.ix[:,1] > 100000 ]
genes
g = sns.factorplot(x='Hybridization REF', data=genes, kind="count",
palette="BuPu", size=6, aspect=1.5)
g.set_xticklabels(step=2)
genes
maximo = maximo.set_index('Hybridization REF')
maximo1 = maximo.transpose()
maximo1.head()
sns.violinplot(maximo1[['ACTB|60']])
import matplotlib.pyplot as plt
maximo1=maximo.transpose()
gen_list=maximo1.ix[1:].as_matrix()
plt.boxplot(gen_list,1)
gen_list
print(maximo.mean(1))
print (maximo.std(1))
gen2=df[df['Hybridization REF']=='SFTPB|6439']
gen2=gen2.drop('Hybridization REF',1)
gen2=gen2.values.tolist()
plt.boxplot(gen2)
"""
Explanation: Se realiza un grafico de distribuciones de los 5 genes que fueron los que mas reads presentaron
End of explanation
"""
|
lin99/NLPTM-2016 | 4.Docs/quickIntro2NN.ipynb | mit | from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(2, 1, outclass=pybrain.SigmoidLayer)
print net.params
def print_pred2(dataset, network):
df = pd.DataFrame(dataset.data['sample'][:dataset.getLength()],columns=['X', 'Y'])
prediction = np.round(network.activateOnDataset(dataset),3)
df['output'] = pd.DataFrame(prediction)
return df
from pybrain.datasets import UnsupervisedDataSet, SupervisedDataSet
D = UnsupervisedDataSet(2) # define a dataset in pybrain
D.addSample([0,0])
D.addSample([0,1])
D.addSample([1,0])
D.addSample([1,1])
print_pred2(D, net)
"""
Explanation: Quick and Dirty Introduction to Neural Networks
Fabio A. González, Universidad Nacional de Colombia
Artificial Neuron
<img src="http://upload.wikimedia.org/wikipedia/commons/thumb/6/60/ArtificialNeuronModel_english.png/600px-ArtificialNeuronModel_english.png" >
$$o_j^{(n)} = \varphi\left(\sum_{i\; in\; layer (n-1)}w_{ij}o_i^{(n-1)} \right)$$
Step activation function
<img src="https://c.mql5.com/2/4/act1.png" align="middle">
Logistic activation function
$$\varphi(x) = \frac{1}{1 - e^{-(x-b)}}$$
<img width= 300 src="http://upload.wikimedia.org/wikipedia/commons/thumb/b/b5/SigmoidFunction.png/400px-SigmoidFunction.png" align="middle">
Question: How to program an artificial neuron to calculate the and function?
<br>
<table>
<tr>
<th>$X$</th>
<th>$Y$</th>
<th>$X$ and $Y$</th>
</tr>
<tr>
<td>0</td>
<td>0</td>
<td style="text-align:center">0</td>
</tr>
<tr>
<td>0</td>
<td>1</td>
<td style="text-align:center">0</td>
</tr>
<tr>
<td>1</td>
<td>0</td>
<td style="text-align:center">0</td>
</tr>
<tr>
<td>1</td>
<td>1</td>
<td style="text-align:center">1</td>
</tr>
</table>
AND Neural Network
<img width=500 src="2in-neuron.jpg" align="middle">
End of explanation
"""
net.params[:] = [0, 0, 0]
print_pred2(D, net)
"""
Explanation: AND Neural Network
<img width=500 src="2in-neuron.jpg" align="middle">
End of explanation
"""
def plot_nn_prediction(N):
# a function to plot the binary output of a network on the [0,1]x[0,1] space
x_list = np.arange(0.0,1.0,0.025)
y_list = np.arange(1.0,0.0,-0.025)
z = [0.0 if N.activate([x,y])[0] <0.5 else 1.0 for y in y_list for x in x_list]
z = np.array(z)
grid = z.reshape((len(x_list), len(y_list)))
plt.imshow(grid, extent=(x_list.min(), x_list.max(), y_list.min(), y_list.max()),cmap=plt.get_cmap('Greys_r'))
plt.show()
"""
Explanation: Question: How to program an artificial neuron to calculate the xor function?
<br/>
<table>
<tr>
<th>$X$</th>
<th>$Y$</th>
<th>$X$ xor $Y$</th>
</tr>
<tr>
<td>0</td>
<td>0</td>
<td style="text-align:center">0</td>
</tr>
<tr>
<td>0</td>
<td>1</td>
<td style="text-align:center">1</td>
</tr>
<tr>
<td>1</td>
<td>0</td>
<td style="text-align:center">1</td>
</tr>
<tr>
<td>1</td>
<td>1</td>
<td style="text-align:center">0</td>
</tr>
</table>
Plotting the NN Output
End of explanation
"""
net.params[:] = [-30, 20, 20]
plot_nn_prediction(net)
"""
Explanation: Plotting the NN Output
End of explanation
"""
Dtrain = SupervisedDataSet(2,1) # define a dataset in pybrain
Dtrain.addSample([0,0],[0])
Dtrain.addSample([0,1],[1])
Dtrain.addSample([1,0],[1])
Dtrain.addSample([1,1],[0])
from pybrain.supervised.trainers import BackpropTrainer
net = buildNetwork(2, 2, 1, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer)
T = BackpropTrainer(net, learningrate=0.1, momentum=0.9)
T.trainOnDataset(Dtrain, 1000)
print_pred2(D, net)
"""
Explanation: <br/>
<br/>
Answer: It is impossible with only one neuron!
<br/>
<br/>
We need to use more than one neuron....
Multilayer Neural Network
<img src="http://www.cs.nott.ac.uk/~gxk/courses/g5aiai/006neuralnetworks/images/ffnet.jpg">
Learning an XOR NN
End of explanation
"""
plot_nn_prediction(net)
"""
Explanation: XOR NN Output Plot
End of explanation
"""
from pybrain.tools.validation import Validator
validator = Validator()
Dlrrh = SupervisedDataSet(4,4)
Dlrrh.addSample([1,1,0,0],[1,0,0,0])
Dlrrh.addSample([0,1,1,0],[0,0,1,1])
Dlrrh.addSample([0,0,0,1],[0,1,1,0])
df = pd.DataFrame(Dlrrh['input'],columns=['Big Ears', 'Big Teeth', 'Handsome', 'Wrinkled'])
print df.join(pd.DataFrame(Dlrrh['target'],columns=['Scream', 'Hug', 'Food', 'Kiss']))
net = buildNetwork(4, 3, 4, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer)
"""
Explanation: The Little Red Riding Hood Neural Network
<img src="http://themaleharem.com/wp-content/uploads/2014/06/Walter-crane-little-red-riding-hood-meets-the-wolf-in-the-woods.jpg">
LRRH Network Architecture
<img src="lrrh net.jpg">
Training
End of explanation
"""
T = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
scores = []
for i in xrange(1000):
T.trainOnDataset(Dlrrh, 1)
prediction = net.activateOnDataset(Dlrrh)
scores.append(validator.MSE(prediction, Dlrrh.getField('target')))
plt.ylabel('Mean Square Error')
plt.xlabel('Iteration')
plt.plot(scores)
"""
Explanation: Backpropagation
End of explanation
"""
def lrrh_input(vals):
return pd.DataFrame(vals,index=['big ears', 'big teeth', 'handsome', 'wrinkled'], columns=['input'])
def lrrh_output(vals):
return pd.DataFrame(vals,index=['scream', 'hug', 'offer food', 'kiss cheek'], columns=['output'])
in_vals = [1, 1, 0, 0]
lrrh_input(in_vals)
lrrh_output(net.activate(in_vals))
"""
Explanation: Prediction
End of explanation
"""
|
joaoandre/algorithms | intro-python-data-science/week1.ipynb | mit | x = 1
y = 2
x + y
x
"""
Explanation: You are currently looking at version 1.0 of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the Jupyter Notebook FAQ course resource.
The Python Programming Language: Functions
End of explanation
"""
def add_numbers(x, y):
return x + y
add_numbers(1, 2)
"""
Explanation: <br>
add_numbers is a function that takes two numbers and adds them together.
End of explanation
"""
def add_numbers(x, y, z=None):
    """Add two numbers, or three when the optional *z* is given."""
    if z is None:  # idiom fix: compare to None with `is`, not `==`
        return x + y
    else:
        return x + y + z

print(add_numbers(1, 2))
print(add_numbers(1, 2, 3))
"""
Explanation: <br>
add_numbers updated to take an optional 3rd parameter. Using print allows printing of multiple expressions within a single cell.
End of explanation
"""
def add_numbers(x, y, z=None, flag=False):
    """Add two or three numbers; print a message when *flag* is set."""
    if flag:
        print('Flag is true!')
    if z is None:  # idiom fix: identity check for None, not equality
        return x + y
    else:
        return x + y + z

print(add_numbers(1, 2, flag=True))
"""
Explanation: <br>
add_numbers updated to take an optional flag parameter.
End of explanation
"""
def add_numbers(x,y):
return x+y
a = add_numbers
a(1,2)
"""
Explanation: <br>
Assign function add_numbers to variable a.
End of explanation
"""
type('This is a string')
type(None)
type(1)
type(1.0)
type(add_numbers)
"""
Explanation: <br>
The Python Programming Language: Types and Sequences
<br>
Use type to return the object's type.
End of explanation
"""
x = (1, 'a', 2, 'b')
type(x)
"""
Explanation: <br>
Tuples are an immutable data structure (cannot be altered).
End of explanation
"""
x = [1, 'a', 2, 'b']
type(x)
"""
Explanation: <br>
Lists are a mutable data structure.
End of explanation
"""
x.append(3.3)
print(x)
"""
Explanation: <br>
Use append to append an object to a list.
End of explanation
"""
for item in x:
print(item)
"""
Explanation: <br>
This is an example of how to loop through each item in the list.
End of explanation
"""
i=0
while( i != len(x) ):
print(x[i])
i = i + 1
"""
Explanation: <br>
Or using the indexing operator:
End of explanation
"""
[1,2] + [3,4]
"""
Explanation: <br>
Use + to concatenate lists.
End of explanation
"""
[1]*3
"""
Explanation: <br>
Use * to repeat lists.
End of explanation
"""
1 in [1, 2, 3]
"""
Explanation: <br>
Use the in operator to check if something is inside a list.
End of explanation
"""
x = 'This is a string'
print(x[0]) #first character
print(x[0:1]) #first character, but we have explicitly set the end character
print(x[0:2]) #first two characters
"""
Explanation: <br>
Now let's look at strings. Use bracket notation to slice a string.
End of explanation
"""
x[-1]
"""
Explanation: <br>
This will return the last element of the string.
End of explanation
"""
x[-4:-2]
"""
Explanation: <br>
This will return the slice starting from the 4th element from the end and stopping before the 2nd element from the end.
End of explanation
"""
x[:3]
"""
Explanation: <br>
This is a slice from the beginning of the string and stopping before the 3rd element.
End of explanation
"""
x[3:]
firstname = 'Christopher'
lastname = 'Brooks'
print(firstname + ' ' + lastname)
print(firstname*3)
print('Chris' in firstname)
"""
Explanation: <br>
And this is a slice starting from the 3rd element of the string and going all the way to the end.
End of explanation
"""
firstname = 'Christopher Arthur Hansen Brooks'.split(' ')[0] # [0] selects the first element of the list
lastname = 'Christopher Arthur Hansen Brooks'.split(' ')[-1] # [-1] selects the last element of the list
print(firstname)
print(lastname)
"""
Explanation: <br>
split returns a list of all the words in a string, or a list split on a specific character.
End of explanation
"""
'Chris' + 2
'Chris' + str(2)
"""
Explanation: <br>
Make sure you convert objects to strings before concatenating.
End of explanation
"""
x = {'Christopher Brooks': 'brooksch@umich.edu', 'Bill Gates': 'billg@microsoft.com'}
x['Christopher Brooks'] # Retrieve a value by using the indexing operator
x['Kevyn Collins-Thompson'] = None
x['Kevyn Collins-Thompson']
"""
Explanation: <br>
Dictionaries associate keys with values.
End of explanation
"""
for name in x:
print(x[name])
"""
Explanation: <br>
Iterate over all of the keys:
End of explanation
"""
for email in x.values():
print(email)
"""
Explanation: <br>
Iterate over all of the values:
End of explanation
"""
for name, email in x.items():
print(name)
print(email)
"""
Explanation: <br>
Iterate over all of the items in the list:
End of explanation
"""
x = ('Christopher', 'Brooks', 'brooksch@umich.edu')
fname, lname, email = x
fname
lname
"""
Explanation: <br>
You can unpack a sequence into different variables:
End of explanation
"""
x = ('Christopher', 'Brooks', 'brooksch@umich.edu', 'Ann Arbor')
fname, lname, email = x
"""
Explanation: <br>
Make sure the number of values you are unpacking matches the number of variables being assigned.
End of explanation
"""
print('Chris' + 2)
print('Chris' + str(2))
"""
Explanation: <br>
The Python Programming Language: More on Strings
End of explanation
"""
sales_record = {
'price': 3.24,
'num_items': 4,
'person': 'Chris'}
sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}'
print(sales_statement.format(sales_record['person'],
sales_record['num_items'],
sales_record['price'],
sales_record['num_items']*sales_record['price']))
"""
Explanation: <br>
Python has a built in method for convenient string formatting.
End of explanation
"""
import csv
%precision 2
with open('mpg.csv') as csvfile:
mpg = list(csv.DictReader(csvfile))
mpg[:3] # The first three dictionaries in our list.
"""
Explanation: <br>
Reading and Writing CSV files
<br>
Let's import our datafile mpg.csv, which contains fuel economy data for 234 cars.
End of explanation
"""
len(mpg)
"""
Explanation: <br>
csv.Dictreader has read in each row of our csv file as a dictionary. len shows that our list is comprised of 234 dictionaries.
End of explanation
"""
mpg[0].keys()
"""
Explanation: <br>
keys gives us the column names of our csv.
End of explanation
"""
sum(float(d['cty']) for d in mpg) / len(mpg)
"""
Explanation: <br>
This is how to find the average cty fuel economy across all cars. All values in the dictionaries are strings, so we need to convert to float.
End of explanation
"""
sum(float(d['hwy']) for d in mpg) / len(mpg)
"""
Explanation: <br>
Similarly this is how to find the average hwy fuel economy across all cars.
End of explanation
"""
cylinders = set(d['cyl'] for d in mpg)
cylinders
"""
Explanation: <br>
Use set to return the unique values for the number of cylinders the cars in our dataset have.
End of explanation
"""
# Average city mpg per cylinder count: for each cylinder level, sum the
# 'cty' values of matching cars and divide by the number of matches.
# Relies on the globals `cylinders` (unique 'cyl' values) and `mpg`
# (list of row dicts) built in earlier cells.
CtyMpgByCyl = []
for c in cylinders: # iterate over all the cylinder levels
    summpg = 0
    cyltypecount = 0
    for d in mpg: # iterate over all dictionaries
        if d['cyl'] == c: # if the cylinder level type matches,
            summpg += float(d['cty']) # add the cty mpg
            cyltypecount += 1 # increment the count
    CtyMpgByCyl.append((c, summpg / cyltypecount)) # append the tuple ('cylinder', 'avg mpg')
CtyMpgByCyl.sort(key=lambda x: x[0])  # sort by cylinder label (string order)
CtyMpgByCyl
"""
Explanation: <br>
Here's a more complex example where we are grouping the cars by number of cylinder, and finding the average cty mpg for each group.
End of explanation
"""
vehicleclass = set(d['class'] for d in mpg) # what are the class types
vehicleclass
"""
Explanation: <br>
Use set to return the unique values for the class types in our dataset.
End of explanation
"""
# Average highway mpg per vehicle class, sorted ascending by the average.
# Relies on the globals `vehicleclass` (unique 'class' values) and `mpg`.
HwyMpgByClass = []
for t in vehicleclass: # iterate over all the vehicle classes
    summpg = 0
    vclasscount = 0
    for d in mpg: # iterate over all dictionaries
        if d['class'] == t: # if the vehicle class matches,
            summpg += float(d['hwy']) # add the hwy mpg
            vclasscount += 1 # increment the count
    HwyMpgByClass.append((t, summpg / vclasscount)) # append the tuple ('class', 'avg mpg')
HwyMpgByClass.sort(key=lambda x: x[1])  # sort by the computed average
HwyMpgByClass
"""
Explanation: <br>
And here's an example of how to find the average hwy mpg for each class of vehicle in our dataset.
End of explanation
"""
import datetime as dt
import time as tm
"""
Explanation: <br>
The Python Programming Language: Dates and Times
End of explanation
"""
tm.time()
"""
Explanation: <br>
time returns the current time in seconds since the Epoch. (January 1st, 1970)
End of explanation
"""
dtnow = dt.datetime.fromtimestamp(tm.time())
dtnow
"""
Explanation: <br>
Convert the timestamp to datetime.
End of explanation
"""
dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime
"""
Explanation: <br>
Handy datetime attributes:
End of explanation
"""
delta = dt.timedelta(days = 100) # create a timedelta of 100 days
delta
"""
Explanation: <br>
timedelta is a duration expressing the difference between two dates.
End of explanation
"""
today = dt.date.today()
today - delta # the date 100 days ago
today > today-delta # compare dates
"""
Explanation: <br>
date.today returns the current local date.
End of explanation
"""
class Person:
department = 'School of Information' #a class variable
def set_name(self, new_name): #a method
self.name = new_name
def set_location(self, new_location):
self.location = new_location
person = Person()
person.set_name('Christopher Brooks')
person.set_location('Ann Arbor, MI, USA')
print('{} live in {} and works in the department {}'.format(person.name, person.location, person.department))
"""
Explanation: <br>
The Python Programming Language: Objects and map()
<br>
An example of a class in python:
End of explanation
"""
store1 = [10.00, 11.00, 12.34, 2.34]
store2 = [9.00, 11.10, 12.34, 2.01]
cheapest = map(min, store1, store2)
cheapest
"""
Explanation: <br>
Here's an example of mapping the min function between two lists.
End of explanation
"""
for item in cheapest:
print(item)
"""
Explanation: <br>
Now let's iterate through the map object to see the values.
End of explanation
"""
my_function = lambda a, b, c : a + b
my_function(1, 2, 3)
"""
Explanation: <br>
The Python Programming Language: Lambda and List Comprehensions
<br>
Here's an example of lambda that takes in three parameters and adds the first two.
End of explanation
"""
my_list = []
for number in range(0, 1000):
if number % 2 == 0:
my_list.append(number)
my_list
"""
Explanation: <br>
Let's iterate from 0 to 999 and return the even numbers.
End of explanation
"""
my_list = [number for number in range(0,1000) if number % 2 == 0]
my_list
"""
Explanation: <br>
Now the same thing but with list comprehension.
End of explanation
"""
import numpy as np
"""
Explanation: <br>
The Python Programming Language: Numerical Python (NumPy)
End of explanation
"""
mylist = [1, 2, 3]
x = np.array(mylist)
x
"""
Explanation: <br>
Creating Arrays
Create a list and convert it to a numpy array
End of explanation
"""
y = np.array([4, 5, 6])
y
"""
Explanation: <br>
Or just pass in a list directly
End of explanation
"""
m = np.array([[7, 8, 9], [10, 11, 12]])
m
"""
Explanation: <br>
Pass in a list of lists to create a multidimensional array.
End of explanation
"""
m.shape
"""
Explanation: <br>
Use the shape method to find the dimensions of the array. (rows, columns)
End of explanation
"""
n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30
n
"""
Explanation: <br>
arange returns evenly spaced values within a given interval.
End of explanation
"""
n = n.reshape(3, 5) # reshape array to be 3x5
n
"""
Explanation: <br>
reshape returns an array with the same data with a new shape.
End of explanation
"""
o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4
o
"""
Explanation: <br>
linspace returns evenly spaced numbers over a specified interval.
End of explanation
"""
o.resize(3, 3)
o
"""
Explanation: <br>
resize changes the shape and size of array in-place.
End of explanation
"""
np.ones((3, 2))
"""
Explanation: <br>
ones returns a new array of given shape and type, filled with ones.
End of explanation
"""
np.zeros((2, 3))
"""
Explanation: <br>
zeros returns a new array of given shape and type, filled with zeros.
End of explanation
"""
np.eye(3)
"""
Explanation: <br>
eye returns a 2-D array with ones on the diagonal and zeros elsewhere.
End of explanation
"""
np.diag(y)
"""
Explanation: <br>
diag extracts a diagonal or constructs a diagonal array.
End of explanation
"""
np.array([1, 2, 3] * 3)
"""
Explanation: <br>
Create an array using repeating list (or see np.tile)
End of explanation
"""
np.repeat([1, 2, 3], 3)
"""
Explanation: <br>
Repeat elements of an array using repeat.
End of explanation
"""
p = np.ones([2, 3], int)
p
"""
Explanation: <br>
Combining Arrays
End of explanation
"""
np.vstack([p, 2*p])
"""
Explanation: <br>
Use vstack to stack arrays in sequence vertically (row wise).
End of explanation
"""
np.hstack([p, 2*p])
"""
Explanation: <br>
Use hstack to stack arrays in sequence horizontally (column wise).
End of explanation
"""
print(x + y) # elementwise addition     [1 2 3] + [4 5 6] = [5 7 9]
print(x - y) # elementwise subtraction  [1 2 3] - [4 5 6] = [-3 -3 -3]
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise division     [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
print(x**2) # elementwise power         [1 2 3] ^2 = [1 4 9]
"""
Explanation: <br>
Operations
Use +, -, *, / and ** to perform element wise addition, subtraction, multiplication, division and power.
End of explanation
"""
x.dot(y) # dot product 1*4 + 2*5 + 3*6
z = np.array([y, y**2])
print(len(z)) # number of rows of array
"""
Explanation: <br>
Dot Product:
$ \begin{bmatrix}x_1 \ x_2 \ x_3\end{bmatrix}
\cdot
\begin{bmatrix}y_1 \ y_2 \ y_3\end{bmatrix}
= x_1 y_1 + x_2 y_2 + x_3 y_3$
End of explanation
"""
z = np.array([y, y**2])
z
"""
Explanation: <br>
Let's look at transposing arrays. Transposing permutes the dimensions of the array.
End of explanation
"""
z.shape
"""
Explanation: <br>
The shape of array z is (2,3) before transposing.
End of explanation
"""
z.T
"""
Explanation: <br>
Use .T to get the transpose.
End of explanation
"""
z.T.shape
"""
Explanation: <br>
The number of rows has swapped with the number of columns.
End of explanation
"""
z.dtype
"""
Explanation: <br>
Use .dtype to see the data type of the elements in the array.
End of explanation
"""
z = z.astype('f')
z.dtype
"""
Explanation: <br>
Use .astype to cast to a specific type.
End of explanation
"""
a = np.array([-4, -2, 1, 3, 5])
a.sum()
a.max()
a.min()
a.mean()
a.std()
"""
Explanation: <br>
Math Functions
Numpy has many built in math functions that can be performed on arrays.
End of explanation
"""
a.argmax()
a.argmin()
"""
Explanation: <br>
argmax and argmin return the index of the maximum and minimum values in the array.
End of explanation
"""
s = np.arange(13)**2
s
"""
Explanation: <br>
Indexing / Slicing
End of explanation
"""
s[0], s[4], s[-1]
"""
Explanation: <br>
Use bracket notation to get the value at a specific index. Remember that indexing starts at 0.
End of explanation
"""
s[1:5]
"""
Explanation: <br>
Use : to indicate a range. array[start:stop]
Leaving start or stop empty will default to the beginning/end of the array.
End of explanation
"""
s[-4:]
"""
Explanation: <br>
Use negatives to count from the back.
End of explanation
"""
s[-5::-2]
"""
Explanation: <br>
A second : can be used to indicate step-size. array[start:stop:stepsize]
Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.
End of explanation
"""
r = np.arange(36)
r.resize((6, 6))
r
"""
Explanation: <br>
Let's look at a multidimensional array.
End of explanation
"""
r[2, 2]
"""
Explanation: <br>
Use bracket notation to slice: array[row, column]
End of explanation
"""
r[3, 3:6]
"""
Explanation: <br>
And use : to select a range of rows or columns
End of explanation
"""
r[:2, :-1]
"""
Explanation: <br>
Here we are selecting all the rows up to (and not including) row 2, and all the columns up to (and not including) the last column.
End of explanation
"""
r[-1, ::2]
"""
Explanation: <br>
This is a slice of the last row, and only every other element.
End of explanation
"""
r[r > 30]
"""
Explanation: <br>
We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30. (Also see np.where)
End of explanation
"""
r[r > 30] = 30
r
"""
Explanation: <br>
Here we are assigning all values in the array that are greater than 30 to the value of 30.
End of explanation
"""
r2 = r[:3,:3]
r2
"""
Explanation: <br>
Copying Data
Be careful with copying and modifying arrays in NumPy!
r2 is a slice of r
End of explanation
"""
r2[:] = 0
r2
"""
Explanation: <br>
Set this slice's values to zero ([:] selects the entire array)
End of explanation
"""
r
"""
Explanation: <br>
r has also been changed!
End of explanation
"""
r_copy = r.copy()
r_copy
"""
Explanation: <br>
To avoid this, use r.copy to create a copy that will not affect the original array
End of explanation
"""
r_copy[:] = 10
print(r_copy, '\n')
print(r)
"""
Explanation: <br>
Now when r_copy is modified, r will not be changed.
End of explanation
"""
test = np.random.randint(0, 10, (4,3))
test
"""
Explanation: <br>
Iterating Over Arrays
Let's create a new 4 by 3 array of random numbers 0-9.
End of explanation
"""
for row in test:
print(row)
"""
Explanation: <br>
Iterate by row:
End of explanation
"""
for i in range(len(test)):
print(test[i])
"""
Explanation: <br>
Iterate by index:
End of explanation
"""
for i, row in enumerate(test):
print('row', i, 'is', row)
"""
Explanation: <br>
Iterate by row and index:
End of explanation
"""
test2 = test**2
test2
for i, j in zip(test, test2):
print(i,'+',j,'=',i+j)
"""
Explanation: <br>
Use zip to iterate over multiple iterables.
End of explanation
"""
|
mari-linhares/tensorflow-workshop | code_samples/estimators-for-free/estimators_for_free.ipynb | apache-2.0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# our model
import model as m
# tensorflow
import tensorflow as tf
print(tf.__version__) #tested with tf v1.2
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.python.estimator.inputs import numpy_io
# MNIST data
from tensorflow.examples.tutorials.mnist import input_data
# Numpy
import numpy as np
# Enable TensorFlow logs
tf.logging.set_verbosity(tf.logging.INFO)
"""
Explanation: Before start: make sure you deleted the output_dir folder from this path
Some things we get for free by using Estimators
Estimators are a high level abstraction (Interface) that supports all the basic operations you need to support a ML model on top of TensorFlow.
Estimators:
* provide a simple interface for users of canned model architectures: Training, evaluation, prediction, export for serving.
* provide a standard interface for model developers
* drastically reduces the amount of user code required. This avoids bugs and speeds up development significantly.
* enable building production services against a standard interface.
* using experiments abstraction give you free data-parallelism (more here)
In the Estimator's interface includes: Training, evaluation, prediction, export for serving.
Image from Effective TensorFlow for Non-Experts (Google I/O '17)
You can use a already implemented estimator (canned estimator) or implement your own (custom estimator).
This tutorial is not focused on how to build your own estimator, we're using a custom estimator that implements a CNN classifier for MNIST dataset defined in the model.py file, but we're not going into details about how that's implemented.
Here we're going to show how Estimators make your life easier, once you have a estimator model is very simple to change your model and compare results.
Having a look at the code and running the experiment
Dependencies
End of explanation
"""
# Import the MNIST dataset
mnist = input_data.read_data_sets("/tmp/MNIST/", one_hot=True)
x_train = np.reshape(mnist.train.images, (-1, 28, 28, 1))
y_train = mnist.train.labels
x_test = np.reshape(mnist.test.images, (-1, 28, 28, 1))
y_test = mnist.test.labels
"""
Explanation: Getting the data
We're not going into details here
End of explanation
"""
BATCH_SIZE = 128
x_train_dict = {'x': x_train }
train_input_fn = numpy_io.numpy_input_fn(
x_train_dict, y_train, batch_size=BATCH_SIZE,
shuffle=True, num_epochs=None,
queue_capacity=1000, num_threads=4)
x_test_dict = {'x': x_test }
test_input_fn = numpy_io.numpy_input_fn(
x_test_dict, y_test, batch_size=BATCH_SIZE, shuffle=False, num_epochs=1)
"""
Explanation: Defining the input function
If we look at the image above we can see that there're two main parts in the diagram, a input function interacting with data files and the Estimator interacting with the input function and checkpoints.
This means that the estimator doesn't know about data files, it knows about input functions. So if we want to interact with a data set we need to creat an input function that interacts with it, in this example we are creating a input function for the train and test data set.
You can learn more about input functions here
End of explanation
"""
# parameters
LEARNING_RATE = 0.01
STEPS = 1000
# create experiment
def generate_experiment_fn():
    """Build the experiment_fn callable expected by learn_runner.run.

    The returned callable has the (run_config, hparams) signature that
    learn_runner requires; hparams is accepted but ignored here.
    """
    def _experiment_fn(run_config, hparams):
        del hparams  # unused, required by signature.
        # Wrap our custom model function in an Estimator, passing the
        # learning rate through the params dict.
        classifier = tf.estimator.Estimator(
            model_fn=m.get_model(),
            params={"learning_rate": LEARNING_RATE},
            config=run_config)
        # The Experiment knows how to drive the train/eval loops using
        # the module-level input functions defined above.
        return tf.contrib.learn.Experiment(
            classifier,
            train_input_fn=train_input_fn,
            eval_input_fn=test_input_fn,
            train_steps=STEPS)
    return _experiment_fn
"""
Explanation: Creating an experiment
After an experiment is created (by passing an Estimator and inputs for training and evaluation), an Experiment instance knows how to invoke training and eval loops in a sensible fashion for distributed training. More about it here
End of explanation
"""
OUTPUT_DIR = 'output_dir/model1'
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir=OUTPUT_DIR))
"""
Explanation: Run the experiment
End of explanation
"""
STEPS = STEPS + 1000
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir=OUTPUT_DIR))
"""
Explanation: Running a second time
Okay, the model is definitely not good... But, check OUTPUT_DIR path, you'll see that a output_dir folder was created and that there are a lot of files there that were created automatically by TensorFlow!
So, most of these files are actually checkpoints, this means that if we run the experiment again with the same model_dir it will just load the checkpoint and start from there instead of starting all over again!
This means that:
If we have a problem while training you can just restore from where you stopped instead of start all over again
If we didn't train enough we can just continue to train
If you have a big file you can just break it into small files and train for a while with each small file and the model will continue from where it stopped at each time :)
This is all true as long as you use the same model_dir!
So, let's run the experiment again for another 1000 steps to see if we can improve the accuracy. Notice that the first step in this run will actually be step 1001, so we need to change the total number of steps to 2000 (otherwise the experiment will find the checkpoint and conclude that it has already finished training).
End of explanation
"""
LEARNING_RATE = 0.05
OUTPUT_DIR = 'output_dir/model2'
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir=OUTPUT_DIR))
"""
Explanation: Tensorboard
Another thing we get for free is tensorboard.
If you run: tensorboard --logdir=OUTPUT_DIR
You'll see that we get the graph and some scalars, also if you use an embedding layer you'll get an embedding visualization in tensorboard as well!
So, we can make small changes and we'll have an easy (and totally for free) way to compare the models.
Let's make these changes:
1. change the learning rate to 0.05
2. change the OUTPUT_DIR to some path in output_dir/
Change 2 must place the directory inside output_dir/ because then we can run: tensorboard --logdir=output_dir/
And we'll get both models visualized at the same time in tensorboard.
You'll notice that the model will start from step 1, because there's no existing checkpoint in this path.
End of explanation
"""
|
jhprinz/openpathsampling | examples/alanine_dipeptide_mstis/AD_mstis_4_analysis.ipynb | lgpl-2.1 | %matplotlib inline
import matplotlib.pyplot as plt
import openpathsampling as paths
import numpy as np
"""
Explanation: Analyzing the MSTIS simulation
Included in this notebook:
Opening files for analysis
Rates, fluxes, total crossing probabilities, and condition transition probabilities
Per-ensemble properties such as path length distributions and interface crossing probabilities
Move scheme analysis
Replica exchange analysis
Replica move history tree visualization
Replaying the simulation
MORE TO COME! Like free energy projections, path density plots, and more
End of explanation
"""
%%time
storage = paths.AnalysisStorage("ala_mstis_production.nc")
print "PathMovers:", len(storage.pathmovers)
print "Engines:", len(storage.engines)
print "Samples:", len(storage.samples)
print "Ensembles:", len(storage.ensembles)
print "SampleSets:", len(storage.samplesets)
print "Snapshots:", len(storage.snapshots)
print "Trajectories:", len(storage.trajectories)
print "Networks:", len(storage.networks)
%%time
mstis = storage.networks[0]
%%time
for cv in storage.cvs:
print cv.name, cv._store_dict
"""
Explanation: The optimum way to use storage depends on whether you're doing production or analysis. For analysis, you should open the file as an AnalysisStorage object. This makes the analysis much faster.
End of explanation
"""
mstis.hist_args['max_lambda'] = { 'bin_width' : 2, 'bin_range' : (0.0, 90) }
mstis.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 100) }
%%time
mstis.rate_matrix(storage.steps, force=True)
"""
Explanation: Reaction rates
TIS methods are especially good at determining reaction rates, and OPS makes it extremely easy to obtain the rate from a TIS network.
Note that, although you can get the rate directly, it is very important to look at other results of the sampling (illustrated in this notebook and in notebooks referred to herein) in order to check the validity of the rates you obtain.
By default, the built-in analysis calculates histograms the maximum value of some order parameter and the pathlength of every sampled ensemble. You can add other things to this list as well, but you must always specify histogram parameters for these two. The pathlength is in units of frames.
End of explanation
"""
stateA = storage.volumes["A0"]
stateB = storage.volumes["B0"]
stateC = storage.volumes["C0"]
tcp_AB = mstis.transitions[(stateA, stateB)].tcp
tcp_AC = mstis.transitions[(stateA, stateC)].tcp
tcp_BC = mstis.transitions[(stateB, stateC)].tcp
tcp_BA = mstis.transitions[(stateB, stateA)].tcp
tcp_CA = mstis.transitions[(stateC, stateA)].tcp
tcp_CB = mstis.transitions[(stateC, stateB)].tcp
plt.plot(tcp_AB.x, tcp_AB)
plt.plot(tcp_CA.x, tcp_CA)
plt.plot(tcp_BC.x, tcp_BC)
plt.plot(tcp_AC.x, tcp_AC) # same as tcp_AB in MSTIS
"""
Explanation: The self-rates (the rate of returning the to initial state) are undefined, and return not-a-number.
The rate is calcuated according to the formula:
$$k_{AB} = \phi_{A,0} P(B|\lambda_m) \prod_{i=0}^{m-1} P(\lambda_{i+1} | \lambda_i)$$
where $\phi_{A,0}$ is the flux from state A through its innermost interface, $P(B|\lambda_m)$ is the conditional transition probability (the probability that a path which crosses the interface at $\lambda_m$ ends in state B), and $\prod_{i=0}^{m-1} P(\lambda_{i+1} | \lambda_i)$ is the total crossing probability. We can look at each of these terms individually.
Total crossing probability
End of explanation
"""
plt.plot(tcp_AB.x, np.log(tcp_AB))
plt.plot(tcp_CA.x, np.log(tcp_CA))
plt.plot(tcp_BC.x, np.log(tcp_BC))
"""
Explanation: We normally look at these on a log scale:
End of explanation
"""
import pandas as pd
flux_matrix = pd.DataFrame(columns=mstis.states, index=mstis.states)
for state_pair in mstis.transitions:
transition = mstis.transitions[state_pair]
flux_matrix.set_value(state_pair[0], state_pair[1], transition._flux)
flux_matrix
"""
Explanation: Flux
Here we also calculate the flux contribution to each transition. The flux is calculated based on the sampling of the innermost (minus move) interface for each state.
End of explanation
"""
outer_ctp_matrix = pd.DataFrame(columns=mstis.states, index=mstis.states)
for state_pair in mstis.transitions:
transition = mstis.transitions[state_pair]
outer_ctp_matrix.set_value(state_pair[0], state_pair[1], transition.ctp[transition.ensembles[-1]])
outer_ctp_matrix
ctp_by_interface = pd.DataFrame(index=mstis.transitions)
for state_pair in mstis.transitions:
transition = mstis.transitions[state_pair]
for ensemble_i in range(len(transition.ensembles)):
ctp_by_interface.set_value(
state_pair, ensemble_i,
transition.conditional_transition_probability(
storage.steps,
transition.ensembles[ensemble_i]
))
ctp_by_interface
"""
Explanation: Conditional transition probability
End of explanation
"""
hists_A = mstis.transitions[(stateA, stateB)].histograms
hists_B = mstis.transitions[(stateB, stateC)].histograms
hists_C = mstis.transitions[(stateC, stateB)].histograms
"""
Explanation: Path ensemble properties
End of explanation
"""
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['max_lambda']:
normalized = hist['max_lambda'][ens].normalized()
plt.plot(normalized.x, normalized)
# add visualization of the sum
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['max_lambda']:
reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative()
plt.plot(reverse_cumulative.x, reverse_cumulative)
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['max_lambda']:
reverse_cumulative = hist['max_lambda'][ens].reverse_cumulative()
plt.plot(reverse_cumulative.x, np.log(reverse_cumulative))
"""
Explanation: Interface crossing probabilities
We obtain the total crossing probability, shown above, by combining the individual crossing probabilities of each interface ensemble.
End of explanation
"""
for hist in [hists_A, hists_B, hists_C]:
for ens in hist['pathlength']:
normalized = hist['pathlength'][ens].normalized()
plt.plot(normalized.x, normalized)
for ens in hists_A['pathlength']:
normalized = hists_A['pathlength'][ens].normalized()
plt.plot(normalized.x, normalized)
"""
Explanation: Path length histograms
End of explanation
"""
scheme = storage.schemes[0]
scheme.move_summary(storage.steps)
scheme.move_summary(storage.steps, 'shooting')
scheme.move_summary(storage.steps, 'minus')
scheme.move_summary(storage.steps, 'repex')
scheme.move_summary(storage.steps, 'pathreversal')
"""
Explanation: Sampling properties
The properties we illustrated above were properties of the path ensembles. If your path ensembles are sufficiently well-sampled, these will never depend on how you sample them.
But to figure out whether you've done a good job of sampling, you often want to look at properties related to the sampling process. OPS also makes these very easy.
Move scheme analysis
End of explanation
"""
repx_net = paths.ReplicaNetwork(scheme, storage.steps)
"""
Explanation: Replica exchange sampling
See the notebook repex_networks.ipynb for more details on tools to study the convergence of replica exchange. However, a few simple examples are shown here. All of these are analyzed with a separate object, ReplicaNetwork.
End of explanation
"""
repx_net.mixing_matrix()
"""
Explanation: Replica exchange mixing matrix
End of explanation
"""
repxG = paths.ReplicaNetworkGraph(repx_net)
repxG.draw('spring')
"""
Explanation: Replica exchange graph
The mixing matrix tells a story of how well various interfaces are connected to other interfaces. The replica exchange graph is essentially a visualization of the mixing matrix (actually, of the transition matrix -- the mixing matrix is a symmetrized version of the transition matrix).
Note: We're still developing better layout tools to visualize these.
End of explanation
"""
import openpathsampling.visualize as vis
reload(vis)
from IPython.display import SVG
tree = vis.PathTree(
[step for step in storage.steps if not isinstance(step.change, paths.EmptyPathMoveChange)],
vis.ReplicaEvolution(replica=3, accepted=False)
)
tree.options.css['width'] = 'inherit'
SVG(tree.svg())
decorrelated = tree.generator.decorrelated
print "We have " + str(len(decorrelated)) + " decorrelated trajectories."
"""
Explanation: Replica exchange flow
Replica flow is defined as TODO
Flow is designed for calculations where the replica exchange graph is linear, which ours clearly is not. However, we can define the flow over a subset of the interfaces.
Replica move history tree
End of explanation
"""
|
KshitijT/fundamentals_of_interferometry | 1_Radio_Science/1_5_black_body_radiation.ipynb | gpl-2.0 | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
"""
Explanation: Outline
Glossary
1. Radio Science using Interferometric Arrays
Previous: 1.4 Radio regime
Next: 1.6 Synchrotron emission
Section status: <span style="background-color:yellow"> </span>
Import standard modules:
End of explanation
"""
import scipy.constants
from IPython.display import Image
HTML('../style/code_toggle.html')
"""
Explanation: Import section specific modules:
End of explanation
"""
def B_v(T):
    """Return the Planck blackbody spectrum B_nu(T) for temperature T.

    Parameters
    ----------
    T : float
        Blackbody temperature in Kelvin.

    Returns
    -------
    numpy.ndarray
        Specific intensity in Jansky per steradian
        (1 Jy = 1e-26 W m^-2 Hz^-1), evaluated on a 1999-point frequency
        grid (constructed in GHz) scaled so the spectral peak falls at
        roughly 1/4 of the array length.

    Notes
    -----
    Implements the full Planck law
        B_nu(T) = (2 h nu^3 / c^2) / (exp(h nu / k T) - 1)
    rather than Wien's approximation, so the Rayleigh-Jeans tail at low
    frequency is correct.
    """
    # Wien's displacement law (frequency form): nu_max(GHz) = 58.79 * T(K).
    nu_max = 58.79 * T
    freq = np.arange(1, 2000, 1)
    # Scale so that the peak value occurs at ~1/4 the length of the array.
    freq = freq * nu_max / 500.0
    nu_hz = freq * 1e9  # grid is built in GHz; Planck's law needs Hz
    x = scipy.constants.h * nu_hz / (scipy.constants.k * T)
    # np.expm1 gives an accurate exp(x) - 1 for the Planck denominator.
    B = (2.0 * scipy.constants.h / scipy.constants.c**2.0) * nu_hz**3 / np.expm1(x)
    # Change units for plotting to Jansky: 1 Jansky = 10^-26 W/m^2 Hz
    B = B * 10**26
    return B
T = 2.275 #In Kelvin
freq = np.arange(1,2000,1)
freq = freq*58.79*T/500.0
B_CMB = B_v(T)
fig,ax = plt.subplots()
ax.plot(freq,B_CMB)
ax.set_xlabel("Frequency (GHz)")
ax.set_ylabel("Specific Intensity ($Jy/sr$)")
ax.set_title("Blackbody Spectral Distribution")
"""
Explanation: 1.5 Thermal Emission:
There are two main types of continuum emission mechanisms we'll encounter in radio astronomy, thermal and non-thermal. As the names suggest, the thermal emission depends on the temperature of the black body while non-thermal emission depends on some other properties of the emitting body. In the next few sections, we'll have an overview of the emission mechanisms and some examples of each in radio astronomy.
We'll begin the overview with an example of one of the mechanisms of thermal emission, blackbody emission.
1.5.1 Blackbody Emission:<a id='science:sec:blackbody_emission'></a>
Blackbody emission or thermal emission is an important form of radiation in astronomy. It occurs in cases where thermodynamic equilibrium exists between radiation and matter - where radiation and matter interact enough to produce the following expression for the specific intensity:
\begin{equation}
B_{\nu}(T)=\frac{2h\nu^{3}}{c^{2}} \frac{1}{e^{\frac{h\nu}{kT}}-1},
\end{equation}
this is also known as the Planck Spectrum.
From the equation above, it can be inferred that in a given frequency interval, the brightness depends only on the Temperature of the body. Also, one can see that a body at a higher temperature will produce higher brightness at all frequencies - which means that a body at a higher temperature has higher energy output via thermal emission than that of a body at a lower temperature. Yet another consequence is that the frequency at which maximum brightness is produced is directly proportional to the temperature - this is known as Wien's displacement law:
$$\nu_{max} = 58.789 T $$
where frequency is in GHz and temperature is in Kelvin. See, for example, Fig. 1.5.2 ⤵, where the black body spectrum for various temperatures is plotted. The displacement of the frequency at which the peak occurs can be seen clearly.
At high frequencies, i.e. where $h\nu >> kT$, this will reduce to Wien's approximation:
$$B_{\nu}(T)= \frac{2h\nu^{3}}{c^{2}} e^{\frac{-h\nu}{kT}}$$
However, in radio astronomy, a more typical situation is when the frequency of the radiation is so low that $h\nu << kT$. In this case the Planck formula can be approximated by the Rayleigh-Jeans formula:
$$B_{\nu}(T)= \frac{2\nu^{2}}{c^{2}} kT $$
End of explanation
"""
T = 4 #in Kelvin
freq = np.arange(1,2000,1)
freq = freq*58.79*T/500.0
B_2 = B_v(T)
B_3 = B_v(5)
fig,ax = plt.subplots()
ax.plot(freq,B_CMB, label='2.75 K')
ax.plot(freq,B_2, label='4.0 K')
ax.plot(freq,B_3, label='5.0 K')
ax.legend()
ax.set_xlabel("Frequency (GHz)")
ax.set_ylabel("Specific Intensity ($Jy/sr$)")
ax.set_title("Blackbody Spectral Distribution")
"""
Explanation: Figure 1.5.1: Blackbody radiation spectral distribution for Cosmic Microwave Background . <a id='science:fig:bb_distr_cmb'></a>
<span style="background-color:yellow"> LB:IC: consider plotting the Wiens and Rayleigh-Jeans approximation on the same figure. Maybe a slider to vary the temparature?</span>
The above distribution is for a temperature of 2.725 K, approximately the same as that of the Cosmic Microwave Background. If T (the temperature) in the above script is changed to $5700$ K, we would get a plot for the emission from the Sun.
End of explanation
"""
Image(filename='figures/bremsstrahlung_fig.png', width=300)
"""
Explanation: Figure 1.5.2: Blackbody radiation spectral distribution. <a id='science:fig:bb_distr'></a>
As mentioned above, the brightness of the black body radiation depends only on the temperature of the body. This can be used to define a 'brightness temperature' for an object. Simply put, the brightness temperature for an object is given by:
$$T_{b} = I_{\nu} \frac{c^{2}}{2k\nu^{2}}$$
It should be noted that the actual specific intensity of an object is used here even when it does not represent black body emission i.e. the brightness temperature is simply the temperature which the object would have if the specific intensity or brightness was from black body emission. This allows characterization of any object in terms of a single parameter. This idea is used widely in radio astronomy and will be encountered in next chapters of this book.
1.5.2 Thermal Bremsstrahlung Emission :<a id='science:sec:bremsstrahlung_emission'></a>
Thermal Bremsstrahlung radiation is another example of thermal radiation. The word Bremsstrahlung is German, meaning 'braking radiation'. This is because the origin of bremsstrahlung radiation is in accelerating (non-relativistic) particles in electric field. The 'thermal' designation refers to the fact that we are dealing with a collection of particles which are in local thermodynamic equilibrium.
<p class=prerequisites>
<font size=4> <b>Prerequisites</b></font>
<br>
<br>
• <b>Larmor's formula <br><br>
</p>
<span style="background-color:cyan"> LB:RF: Do we derive Larmor's formula somewhere? If so there should be a link to it. Otherwise add link to online resource. </span>
To understand the Bremsstrahlung emission from a collection of charged particles (which is what we see in any astronomical scenario), we need to know first the case of a single accelerating charged particle in an electric field. For such a particle the total power emitted is, according to Larmor's formula, given by:
$$P= \frac{2}{3}\frac{q^{2}\mathbf{a}^{2}}{c^{3}}$$
where $\mathbf{a}$ is the acceleration of the charged particle, q is its charge and c is the speed of light.
End of explanation
"""
Image(filename='figures/orion_HII.png', width=300)
"""
Explanation: Figure 1.5.3: Illustration of symbols in Larmor's formula.
<span style="background-color:yellow"> LB:IC: I had to come up with a caption here but I'm sure we can do better. Give a short description of what the figure is supposed to convey</span>
The acceleration experienced by the charged particle can be estimated by remembering that
$$\mathbf{a} = \frac{\mathbf{F}}{m} = Qq\frac{\mathbf{r}}{mr^3}$$
where Q is the charge of the particle generating the electric field (see figure below). From this, it is apparent that the best emitters of power using this mechanism are the least massive charged particles i.e. electrons. Since the protons have a much larger mass (by a factor of 1000) the total power they emit is negligible compared to the power emitted by electrons in the same electric field. As a result an ion-electron system contributes the most to the total power emitted by the plasma. Note, in particular, that the electrons are not captured by the ions in this system, which is the reason why this type of emission is sometimes called free-free emission.
For radio frequencies, only the perpendicular (to the path) component of the acceleration is important. The emission of the energy from this interaction happens in a pulse, corresponding to the time $t = b/v$, where $v$ is the velocity of the particle. The perpendicular acceleration is:
$$a_{\perp} = \frac{Qq\,cos^{3}\theta}{m_{e}b^2}$$
For example, an electron orbiting an ion with atomic number Z will have a perpendicular acceleration given by
$$a_{\perp} = \frac{Ze^{2}\,cos^{3}\theta}{m_{e}b^2}$$
Plugging this into Larmor's formula gives a total power of
$$P = \frac{2}{3}\frac{Z^2e^{6}cos^{6}\theta}{m_{e}^2 \,b^{4}c^{3}} $$
This is the instantaneous power emitted by the electron through bremsstrahlung radiation. Since the power is emitted in a pulse of period $t \sim b/v$, the emission has a characteristic frequency $\omega_{c}\sim v/b$ below which the power spectrum is flat and above which it falls rapidly. The total energy emitted is given by:
$$E = \int P dt = \frac{2}{3}\frac{Z^2e^{6}}{m_{e}^2 \,b^{4}c^{3}}\int \cos^{6}\theta(t)\,dt $$
The integration over $\theta$ can be carried out by noting that $v = \frac{dx}{dt}$ where $x = b\, \tan\theta$ giving:
$$E = \frac{\pi}{4}\frac{Z^2e^{6}}{m_{e}^2 \,b^{3}c^{3}v} $$
For a collection of particles, the combined spectrum of the bremsstrahlung emission would thus consist of individual contributions, essentially boiling down to integrations over the parameters $v$ and $b$ (assuming, of course, that the collection of particles is more or less homogeneous). A typical scenario is that of ionized clouds, say HII regions, since Hydrogen is one of the most abundant elements in the universe. HII regions are typically formed around young, bright stars.
For such regions, we can use a Maxwell distribution function for the velocities
$$f(v) = \frac{4v^{2}}{\sqrt \pi} (\frac{m}{2kT})^{3/2}\,{\rm exp}(-\frac{mv^{2}}{2kT})$$
Using these equations, it is possible to express the emissivity as
$$\epsilon_{\nu} = \frac{8}{3}\frac{Z^{2}e^{6}}{c^{3}}\frac{N_{i}N_{e}}{m^{2}}(\frac{2m}{\pi \,kT})^{1/2} {\rm ln}\frac{b_{2}}{b_{1}}$$
End of explanation
"""
|
tkzeng/molecular-design-toolkit | moldesign/_notebooks/Example 5. Enthalpic barriers.ipynb | apache-2.0 | import moldesign as mdt
from moldesign import units as u
%matplotlib notebook
from matplotlib.pyplot import *
try: import seaborn # optional, makes graphs look better
except ImportError: pass
u.default.energy = u.kcalpermol # use kcal/mol for energy
"""
Explanation: <span style="float:right">
<a href="http://moldesign.bionano.autodesk.com/" target="_blank" title="About">About</a>
<a href="https://forum.bionano.autodesk.com/c/Molecular-Design-Toolkit" target="_blank" title="Forum">Forum</a>
<a href="https://github.com/autodesk/molecular-design-toolkit/issues" target="_blank" title="Issues">Issues</a>
<a href="http://bionano.autodesk.com/MolecularDesignToolkit/explore.html" target="_blank" title="Tutorials">Tutorials</a>
<a href="http://autodesk.github.io/molecular-design-toolkit/" target="_blank" title="Documentation">Documentation</a></span>
</span>
<br>
<center><h1>Example 5: Calculating torsional barriers with relaxation </h1> </center>
This workflow calculates the enthalpic barrier of a small alkane.
Author: Aaron Virshup, Autodesk Research<br>
Created on: September 23, 2016
Tags: reaction path, constrained minimization, torsion, enthalpic
End of explanation
"""
# Build n-butane from its SMILES string and show the initial 3D structure.
mol = mdt.from_smiles('CCCC')
mol.draw()
# Attach the GAFF forcefield as the energy model, then relax the geometry
# for 40 minimization steps and visualize the minimization trajectory.
mol.set_energy_model(mdt.models.GAFF)
mol.energy_model.configure()
minimization = mol.minimize(nsteps=40)
minimization.draw()
"""
Explanation: Contents
I. Create and minimize the molecule
II. Select the torsional bond
III. Rigid rotation scan
IV. Relaxed rotation scan
V. Plot the potential energy surfaces
VI. Investigate conformational changes
I. Create and minimize the molecule
End of explanation
"""
# Interactive widget: click a bond (the central C-C bond) to select it.
bs = mdt.widgets.BondSelector(mol)
bs
# Monitor (and later drive) the dihedral angle around the selected bond.
twist = mdt.DihedralMonitor(bs.selected_bonds[0])
twist
"""
Explanation: II. Select the torsional bond
Next, we use the BondSelector to pick the bond that we'll rotate around.
End of explanation
"""
# Rigid scan: sweep the dihedral from -150° to 205° in 5° steps and compute
# a single-point energy at each geometry (no relaxation of other coordinates).
angles = np.arange(-150, 210, 5) * u.degree
rigid = mdt.Trajectory(mol)
for angle in angles:
    twist.value = angle
    mol.calculate()
    rigid.new_frame(annotation='angle: %s, energy: %s' % (twist.value.to(u.degrees), mol.potential_energy))
rigid.draw()
# Plot the rigid-rotation potential energy profile.
figure()
plot(angles, rigid.potential_energy)
xlabel(u'dihedral / º'); ylabel('energy / kcal/mol')
xticks(np.arange(-120,211,30))
"""
Explanation: III. Rigid rotation scan
First, we'll perform a simple energy scan, simply by rotating around the bond and calculating the energy at each point.
This gives us only an upper bound on the enthalpic rotation barrier. This is because we keep the molecule rigid, except for the single rotating bond.
End of explanation
"""
# Relaxed scan: constrain the dihedral at each angle, then minimize all other
# degrees of freedom before recording the frame.
# NOTE(review): `print angle,':',` is Python-2 statement syntax; this cell
# will not run under Python 3 without converting it to a print() call.
constraint = twist.constrain()
relaxed = mdt.Trajectory(mol)
for angle in angles:
    print angle,':',
    # add a little random noise to break any spurious symmetry before minimizing
    mol.positions += np.random.random(mol.positions.shape) * 0.01*u.angstrom
    mol.positions -= mol.center_of_mass
    twist.value = angle
    constraint.value = angle
    t = mol.minimize(nsteps=100)
    relaxed.new_frame(annotation='angle: %s, energy: %s' % (twist.value.to(u.degrees), mol.potential_energy))
relaxed.draw()
"""
Explanation: IV. Relaxed rotation scan
Next, we'll get the right barrier (up to the accuracy of the energy model).
Here, we'll rotate around the bond, but then perform a constrained minimization at each rotation point. This will allow all other degrees of freedom to relax, thus finding lower energies at each point along the path.
Note: In order to break any spurious symmetries, this loop also adds a little bit of random noise to each structure before performing the minimization.
End of explanation
"""
# Overlay the rigid and relaxed torsional profiles; the 'error' curve shows
# how much the rigid scan overestimates the energy at each angle.
figure()
plot(angles, rigid.potential_energy, label='rigid')
plot(angles, relaxed.potential_energy, label='relaxed')
plot(angles, abs(rigid.potential_energy - relaxed.potential_energy), label='error')
xlabel(u'dihedral / º'); ylabel('energy / kcal/mol'); legend()
xticks(np.arange(-120,211,30))
"""
Explanation: V. Plot the potential energy surfaces
If you plotted butane's rotation around its central bond, you'll see three stable points: two at about ±60º (the gauche conformations), and one at 180º (the anti conformation).
You will likely see large differences in the energetics of the relaxed and rigid scans; depending on the exact starting conformation, the rigid scan can overestimate the rotation barrier by as much as 5 kcal/mol!
End of explanation
"""
# Mini "app": pick bonds in the selector, then press the interact button to
# plot each selected bond's dihedral angle along the relaxed scan.
from ipywidgets import interact_manual
bs = mdt.widgets.BondSelector(mol)
def show_dihedral():
    """Plot the dihedral of every currently selected bond vs. the scan angle."""
    figure()
    for bond in bs.selected_bonds:
        dihemon = mdt.DihedralMonitor(bond)
        plot(angles, dihemon(relaxed).to(u.degrees), label=str(bond))
    legend(); xlabel(u'central twist / º'); ylabel(u'bond twist / º')
interact_manual(show_dihedral)
bs
"""
Explanation: VI. Investigate conformational changes
This cell illustrates a simple interactive "app" - select the bonds you're interested in, then click the "show_dihedral" button to show their relaxed angles as a function of the central twist during the relaxed scan.
End of explanation
"""
|
sdpython/pyquickhelper | _doc/notebooks/having_a_form_in_a_notebook.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: Having a form in a notebook
Forms in a notebook without storing the values in it, animation with pyquickhelper and matplotlib.
End of explanation
"""
# Render an HTML form; when Ok is clicked the field values are stored in the
# notebook variable named by the third argument ('form1').
from pyquickhelper.ipythonhelper import open_html_form
params = {"module":"", "version":"v..."}
open_html_form(params, "fill the fields", "form1")
# Display the captured values (filled in once the form is submitted).
form1
"""
Explanation: Form
This following trick is inspired from IPython Notebook: Javascript/Python Bi-directional Communication. The code is copy pasted below with some modifications. It was implemented in pyquickhelper.
End of explanation
"""
# A key named "password" is rendered as a masked input field.
from pyquickhelper.ipythonhelper import open_html_form
params= {"login":"", "password":""}
open_html_form(params, "credential", "credential")
credential
"""
Explanation: With a password:
End of explanation
"""
my_address = None
def custom_action(x):
    """Add a 'combined' entry (first + last name) to *x* and return str(x)."""
    full_name = " ".join((x["first_name"], x["last_name"]))
    x["combined"] = full_name
    return str(x)
from pyquickhelper.ipythonhelper import open_html_form
params = { "first_name":"", "last_name":"" }
# hook= is a Python expression evaluated when the Ok button is clicked.
open_html_form (params, title="enter your name", key_save="my_address", hook="custom_action(my_address)")
my_address
"""
Explanation: To excecute an instruction when the button Ok is clicked:
End of explanation
"""
from pyquickhelper.ipythonhelper import StaticInteract, RangeWidget, RadioWidget
def show_fib(N):
    """Return the first *N* Fibonacci numbers as a space-terminated string."""
    terms = []
    current, following = 0, 1
    for _ in range(N):
        terms.append("{0} ".format(current))
        current, following = following, current + following
    return "".join(terms)
# Pre-renders show_fib for every N in 1..100 so the widget works statically.
StaticInteract(show_fib,
               N=RangeWidget(1, 100, default=10))
"""
Explanation: Animated output
This is not maintained anymore. You should use package ipywidget.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
def plot(amplitude, color):
    """Draw a sine wave scaled by *amplitude* in *color*; return the figure."""
    fig, ax = plt.subplots(figsize=(4, 3), subplot_kw={'axisbelow': True})
    ax.grid(color='w', linewidth=2, linestyle='solid')
    xs = np.linspace(0, 10, 1000)
    ys = amplitude * np.sin(xs)
    ax.plot(xs, ys, color=color, lw=5, alpha=0.4)
    ax.set_xlim(0, 10)
    ax.set_ylim(-1.1, 1.1)
    return fig
# Static widget: a figure is generated for every (amplitude, color) combination.
StaticInteract(plot,
               amplitude=RangeWidget(0.1, 0.5, 0.1, default=0.4),
               color=RadioWidget(['blue', 'green', 'red'], default='red'))
"""
Explanation: In order to have a fast display, the function show_lib is called for each possible version. If it is a graph, all possible graphs will be generated.
End of explanation
"""
from IPython.display import display
from ipywidgets import Text
last_name = Text(description="Last Name")
first_name = Text(description="First Name")
display(last_name)
display(first_name)
first_name.value, last_name.value
"""
Explanation: A form with ipywidgets
Not yet ready and the form does not show up in the converted notebook. You need to execute the notebook.
End of explanation
"""
|
OSGeo-live/CesiumWidget | GSOC/notebooks/Projects/CARTOPY/00 Using cartopy with matplotlib.ipynb | apache-2.0 | %matplotlib inline
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# A GeoAxes in the Plate Carree (equirectangular) projection with coastlines.
plt.figure(figsize=(12, 12))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines();
"""
Explanation: Beautifully simple maps
Cartopy has exposed an interface to enable easy map creation using matplotlib. Creating a basic map is as simple as telling matplotlib to use a specific map projection, and then adding some coastlines to the axes:
End of explanation
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
# Mollweide projection with cartopy's built-in background image.
plt.figure(figsize=(12, 12))
ax = plt.axes(projection=ccrs.Mollweide())
ax.stock_img();
"""
Explanation: A list of the available projections to be used with matplotlib can be found on the Cartopy projection list notebook.
The line plt.axes(projection=ccrs.PlateCarree()) sets up a GeoAxes instance which exposes a variety of other map related methods, in the case of the previous example, we used the coastlines() method to add coastlines to the map.
Lets create another map in a different projection, and make use of the stock_img() method to add an underlay image to the map:
End of explanation
"""
%matplotlib inline
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 12))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()
# Endpoint coordinates (longitude, latitude) for New York and Delhi.
ny_lon, ny_lat = -75, 43
delhi_lon, delhi_lat = 77.23, 28.61
# Geodetic transform: the blue line follows the great circle between the cities.
plt.plot([ny_lon, delhi_lon], [ny_lat, delhi_lat],
         color='blue', linewidth=2, marker='o',
         transform=ccrs.Geodetic(),
         )
# PlateCarree transform: the same endpoints joined by a straight line in map coordinates.
plt.plot([ny_lon, delhi_lon], [ny_lat, delhi_lat],
         color='gray', linestyle='--',
         transform=ccrs.PlateCarree(),
         )
plt.text(ny_lon - 3, ny_lat - 12, 'New York',
         horizontalalignment='right',
         transform=ccrs.Geodetic())
plt.text(delhi_lon + 3, delhi_lat - 12, 'Delhi',
         horizontalalignment='left',
         transform=ccrs.Geodetic());
"""
Explanation: Adding data to the map
Once you have the map just the way you want it, data can be added to it in exactly the same way as with normal matplotlib axes. By default, the coordinate system of any data added to a GeoAxes is the same as the coordinate system of the GeoAxes itself, to control which coordinate system that the given data is in, you can add the transform keyword with an appropriate cartopy.crs.CRS instance:
End of explanation
"""
|
dietmarw/EK5312_ElectricalMachines | Chapman/Ch4-Problem_4-03.ipynb | unlicense | %pylab notebook
"""
Explanation: Excercises Electric Machinery Fundamentals
Chapter 4
Problem 4-3
End of explanation
"""
If = 5.0 # field current [A]
PF = 0.9 # power factor
Xs = 2.5 # synchronous reactance per phase [Ohm]
Ra = 0.2 # armature resistance per phase [Ohm]
Zload = 24 * (cos(25/180.0 * pi) + sin(25/180.0 * pi)*1j) # 24 Ω ∠25° per Δ branch
P = 50e6 # generator power [W]
Pf_w = 1.0e6 # friction and windage losses [W]
Pcore = 1.5e6 # core losses [W]
Pstray = 0 # stray losses [W]
n_m = 1800 # mechanical speed [r/min]
"""
Explanation: Description
Assume that the field current of the generator in Problem 4-2 has been adjusted to a value of $5\,A$.
(a)
What will the terminal voltage of this generator be if it is connected to a $\Delta$-connected load with an impedance of $24\,\Omega\angle 25°$?
(b)
Sketch the phasor diagram of this generator.
(c)
What is the efficiency of the generator at these conditions?
(d)
Now assume that another identical $\Delta$-connected load is to be paralleled with the first one.
What happens to the phasor diagram for the generator?
(e)
What is the new terminal voltage after the load has been added?
(f)
What must be done to restore the terminal voltage to its original value?
End of explanation
"""
Vl = 16.5e3 #[V] open-circuit line voltage at If = 5.0 A (from the OCC)
ia = P / (sqrt(3) * Vl) # current magnitude [A]
Ia_angle = -arccos(PF) # lagging power-factor angle [rad]
Ia = ia * (cos(Ia_angle) + sin(Ia_angle)*1j) # armature-current phasor
Ea = Vl / sqrt(3) # internal generated voltage = open-circuit phase voltage [V]
print('Ea = {:.0f} V'.format(Ea))
"""
Explanation: SOLUTION
(a)
If the field current is $5.0 A$, the open-circuit terminal voltage will be about $16,500\,V$, and the open-circuit phase voltage in the generator (and hence $E_A$ ) will be $\frac{16,500\,V}{\sqrt{3}}$ .
End of explanation
"""
# Y-equivalent of the Δ-connected load: Z_Y = Z_Δ / 3.
Z = Zload/3
Z_angle = arctan(Z.imag/Z.real) # load impedance angle [rad]
print('Z = {:.0f} Ω ∠{:.0f}° '.format(abs(Z), Z_angle / pi*180))
"""
Explanation: The load is $\Delta$-connected with three impedances of $24\,\Omega \angle 25^\circ$ . From the Y-$\Delta$ transform, this load is equivalent to a Y-connected load with three impedances of:
End of explanation
"""
# Phase-current magnitude from the per-phase series circuit Ea -> (Ra + jXs) -> Z.
ia = Ea / (abs(Ra + Xs*1j + Z))
print('ia = {:.0f} A'.format(ia))
# Taking the terminal phase voltage as reference, Ia lags it by the load angle.
Ia = ia * (cos(-Z_angle) + sin(-Z_angle)*1j)
Ia_angle = arctan(Ia.imag/Ia.real)
"""
Explanation: The resulting per-phase equivalent circuit is shown below:
<img src="figs/Problem_4-03a.jpg" width="70%">
The magnitude of the phase current flowing in this generator is:
$$I_A = \frac{E_A}{|R_A + jX_S +Z|}$$
End of explanation
"""
# Phase voltage across the load: V_phi = Ia * |Z|.
V_phase = ia * abs(Z)
print('V_phase = {:.0f} V'.format(V_phase))
"""
Explanation: Therefore, the magnitude of the phase voltage is:
$$V_\phi = I_AZ$$
End of explanation
"""
# Terminal (line) voltage: Vt = sqrt(3) * V_phi.
Vt = sqrt(3) * V_phase
print('''
Vt = {:.0f} V
============'''.format(Vt))
"""
Explanation: and the terminal voltage is:
$$V_T = \sqrt{3}V_\phi$$
End of explanation
"""
# Reconstruct the internal generated voltage phasor: EA = V_phi + Ra*Ia + jXs*Ia.
EA = V_phase + Ra*Ia + Xs*1j*Ia
EA_angle = arctan(EA.imag/EA.real) # angle of EA relative to V_phi [rad]
print('EA = {:.0f} V ∠{:.1f}°'.format(abs(EA), EA_angle / pi *180))
"""
Explanation: (b)
Armature current is $I_A = 1004\,A\angle -25°$ , and the phase voltage is $V_\phi = 8032\,V\angle 0°$. Therefore, the internal generated voltage is:
$$\vec{E}A = \vec{V}\phi + R_A\vec{I}_A + jX_S\vec{I}_A$$
End of explanation
"""
# Three-phase output power: P_out = 3 * V_phi * Ia * cos(theta).
Pout = 3 * V_phase * abs(Ia) * cos(Ia_angle)
print('Pout = {:.1f} MW'.format(Pout/1e6))
"""
Explanation: The resulting phasor diagram is shown below (not to scale and with some round-off errors):
<img src="figs/Problem_4-03b.jpg" width="70%">
(c)
The efficiency of the generator under these conditions can be found as follows:
$$P_\text{out} = 3 V_\phi I_A \cos{\theta}$$
End of explanation
"""
# Armature (copper) losses: P_CU = 3 * Ia^2 * Ra.
Pcu = 3 * abs(Ia)**2 * Ra
print('Pcu = {:.1f} kW'.format(Pcu/1e3))
"""
Explanation: $$P_\text{CU} = 3I^2_AR_A$$
End of explanation
"""
Pf_w = 1e6 # [W]
"""
Explanation: $P_\text{F\&W} = 1\,MW$:
End of explanation
"""
Pcore = 1.5e6 # [W]
"""
Explanation: $P_\text{core} = 1.5\,MW$:
End of explanation
"""
Pstray = 0 # [W]
"""
Explanation: $P_\text{stray} \approx 0$ (assumed):
End of explanation
"""
# Input power = output power plus all losses.
Pin = Pout + Pcu + Pf_w + Pcore + Pstray
print('Pin = {:.1f} MW'.format(Pin/1e6))
"""
Explanation: $$P_\text{in} = P_\text{out} + P_\text{CU} + P_\text{F\&W} + P_\text{core} + P_\text{stray}$$
End of explanation
"""
# Efficiency = Pout / Pin.
eta = Pout / Pin
print('''
η = {:.1f} %
=========='''.format(eta*100))
"""
Explanation: $$\eta = \frac{P_\text{out}}{P_\text{in}} \cdot 100\%$$
End of explanation
"""
# Two identical Δ-loads in parallel halve the per-phase impedance.
Znew = Z * 0.5
# Compute the angle from Znew itself (was arctan(Z.imag/Z.real); the value is
# identical because Znew is a positive real multiple of Z, but this is clearer).
Znew_angle = arctan(Znew.imag/Znew.real)
print('Z = {:.0f} Ω ∠{:.0f}° '.format(abs(Znew), Znew_angle /pi*180))
"""
Explanation: (d)
To get the basic idea of what happens, we will ignore the armature resistance for the moment. If the field current and the rotational speed of the generator are constant, then the magnitude of $E_A (= K \phi\omega)$ is constant. The quantity $jX_S \vec{I}_A$ increases in length at the same angle, while the magnitude of $\vec{E}_A$ must remain constant. Therefore, $\vec{E}_A$ “swings” out along the arc of constant magnitude until the new $jX_S \vec{I}_A$ fits exactly between $\vec{V}_\phi$ and $\vec{E}_A$.
<img src="figs/Problem_4-03c.jpg" width="60%">
(e)
The new impedance per phase will be half of the old value, so
End of explanation
"""
# Phase-current magnitude with the doubled (paralleled) load.
ia = Ea / (abs(Ra + Xs*1j + Znew))
print('ia = {:.1f} A'.format(ia))
"""
Explanation: The magnitude of the phase current flowing in this generator is:
$$I_A = \frac{E_A}{|R_A + jX_S +Z_\text{new}|}$$
End of explanation
"""
# New phase voltage across the halved load impedance.
V_phase = ia * abs(Znew)
print('V_phase = {:.1f} V'.format(V_phase))
"""
Explanation: Therefore, the magnitude of the phase voltage is:
$$V_\phi = I_AZ_\text{new}$$
End of explanation
"""
# New terminal voltage; lower than in part (a) because of the added load.
Vt = sqrt(3) * V_phase
print('''
Vt = {:.1f} V
=============='''.format(Vt))
"""
Explanation: and the terminal voltage is:
$$V_T = \sqrt{3}V_\phi$$
End of explanation
"""
|
ipython/ipywidgets | docs/source/examples/Widget Events.ipynb | bsd-3-clause | from __future__ import print_function
"""
Explanation: Index - Back - Next
Widget Events
Special events
End of explanation
"""
import ipywidgets as widgets
# Show the docstring describing how click handlers are registered.
print(widgets.Button.on_click.__doc__)
"""
Explanation: The Button is not used to represent a data type. Instead the button widget is used to handle mouse clicks. The on_click method of the Button can be used to register function to be called when the button is clicked. The doc string of the on_click can be seen below.
End of explanation
"""
from IPython.display import display
button = widgets.Button(description="Click Me!")
display(button)
def on_button_clicked(b):
    """Click handler; *b* is the Button instance that was clicked."""
    print("Button clicked.")
button.on_click(on_button_clicked)
"""
Explanation: Example
Since button clicks are stateless, they are transmitted from the front-end to the back-end using custom messages. By using the on_click method, a button that prints a message when it has been clicked is shown below.
End of explanation
"""
print(widgets.Widget.observe.__doc__)
"""
Explanation: Traitlet events
Widget properties are IPython traitlets and traitlets are eventful. To handle changes, the observe method of the widget can be used to register a callback. The doc string for observe can be seen below.
End of explanation
"""
int_range = widgets.IntSlider()
display(int_range)
def on_value_change(change):
    """Print the slider's new value; *change* is the traitlets change dict."""
    print(change['new'])
# names='value' restricts the callback to changes of the 'value' trait.
int_range.observe(on_value_change, names='value')
"""
Explanation: Signatures
Mentioned in the doc string, the callback registered must have the signature handler(change) where change is a dictionary holding the information about the change.
Using this method, an example of how to output an IntSlider's value as it is changed can be seen below.
End of explanation
"""
# Bidirectional kernel-side link: moving either slider updates the other.
# Renamed 'sliders1' -> 'slider1' for consistency with the caption and the
# widget's description ('Slider 1'); nothing else referenced the old name.
caption = widgets.Label(value='The values of slider1 and slider2 are synchronized')
slider1, slider2 = widgets.IntSlider(description='Slider 1'),\
                   widgets.IntSlider(description='Slider 2')
l = widgets.link((slider1, 'value'), (slider2, 'value'))
display(caption, slider1, slider2)
# Directional link: source drives target1, but not the other way around.
caption = widgets.Label(value='Changes in source values are reflected in target1')
source, target1 = widgets.IntSlider(description='Source'),\
                  widgets.IntSlider(description='Target 1')
dl = widgets.dlink((source, 'value'), (target1, 'value'))
display(caption, source, target1)
"""
Explanation: Linking Widgets
Often, you may want to simply link widget attributes together. Synchronization of attributes can be done in a simpler way than by using bare traitlets events.
Linking traitlets attributes in the kernel
The first method is to use the link and dlink functions from the traitlets module (these two functions are re-exported by the ipywidgets module for convenience). This only works if we are interacting with a live kernel.
End of explanation
"""
# Break both links; the widgets above now move independently.
l.unlink()
dl.unlink()
"""
Explanation: Function traitlets.link and traitlets.dlink return a Link or DLink object. The link can be broken by calling the unlink method.
End of explanation
"""
caption = widgets.Label(value='The values of range1 and range2 are synchronized')
slider = widgets.IntSlider(min=-5, max=5, value=1, description='Slider')
def handle_slider_change(change):
    """Update the caption to say whether the slider's new value is negative."""
    caption.value = 'The slider value is ' + (
        'negative' if change.new < 0 else 'nonnegative'
    )
slider.observe(handle_slider_change, names='value')
display(caption, slider)
"""
Explanation: Registering callbacks to trait changes in the kernel
Since attributes of widgets on the Python side are traitlets, you can register handlers to the change events whenever the model gets updates from the front-end.
The handler passed to observe will be called with one change argument. The change object holds at least a type key and a name key, corresponding respectively to the type of notification and the name of the attribute that triggered the notification.
Other keys may be passed depending on the value of type. In the case where type is change, we also have the following keys:
owner : the HasTraits instance
old : the old value of the modified trait attribute
new : the new value of the modified trait attribute
name : the name of the modified trait attribute.
End of explanation
"""
# jslink: bidirectional link implemented in the browser (works without a kernel).
caption = widgets.Label(value='The values of range1 and range2 are synchronized')
range1, range2 = widgets.IntSlider(description='Range 1'),\
                 widgets.IntSlider(description='Range 2')
l = widgets.jslink((range1, 'value'), (range2, 'value'))
display(caption, range1, range2)
# jsdlink: one-way browser-side link from source_range to target_range1.
caption = widgets.Label(value='Changes in source_range values are reflected in target_range1')
source_range, target_range1 = widgets.IntSlider(description='Source range'),\
                              widgets.IntSlider(description='Target range 1')
dl = widgets.jsdlink((source_range, 'value'), (target_range1, 'value'))
display(caption, source_range, target_range1)
"""
Explanation: Linking widgets attributes from the client side
When synchronizing traitlets attributes, you may experience a lag because of the latency due to the roundtrip to the server side. You can also directly link widget attributes in the browser using the link widgets, in either a unidirectional or a bidirectional fashion.
Javascript links persist when embedding widgets in html web pages without a kernel.
End of explanation
"""
# l.unlink()
# dl.unlink()
"""
Explanation: Function widgets.jslink returns a Link widget. The link can be broken by calling the unlink method.
End of explanation
"""
# Four linked controls: the 'Delayed' pair sends updates only on release/submit
# (continuous_update=False); the 'Continuous' pair sends every intermediate value.
a = widgets.IntSlider(description="Delayed", continuous_update=False)
b = widgets.IntText(description="Delayed", continuous_update=False)
c = widgets.IntSlider(description="Continuous", continuous_update=True)
d = widgets.IntText(description="Continuous", continuous_update=True)
widgets.link((a, 'value'), (b, 'value'))
widgets.link((a, 'value'), (c, 'value'))
widgets.link((a, 'value'), (d, 'value'))
widgets.VBox([a,b,c,d])
"""
Explanation: The difference between linking in the kernel and linking in the client
Linking in the kernel means linking via python. If two sliders are linked in the kernel, when one slider is changed the browser sends a message to the kernel (python in this case) updating the changed slider, the link widget in the kernel then propagates the change to the other slider object in the kernel, and then the other slider's kernel object sends a message to the browser to update the other slider's views in the browser. If the kernel is not running (as in a static web page), then the controls will not be linked.
Linking using jslink (i.e., on the browser side) means constructing the link in Javascript. When one slider is changed, Javascript running in the browser changes the value of the other slider in the browser, without needing to communicate with the kernel at all. If the sliders are attached to kernel objects, each slider will update their kernel-side objects independently.
To see the difference between the two, go to the static version of this page in the ipywidgets documentation and try out the sliders near the bottom. The ones linked in the kernel with link and dlink are no longer linked, but the ones linked in the browser with jslink and jsdlink are still linked.
Continuous updates
Some widgets offer a choice with their continuous_update attribute between continually updating values or only updating values when a user submits the value (for example, by pressing Enter or navigating away from the control). In the next example, we see the "Delayed" controls only transmit their value after the user finishes dragging the slider or submitting the textbox. The "Continuous" controls continually transmit their values as they are changed. Try typing a two-digit number into each of the text boxes, or dragging each of the sliders, to see the difference.
End of explanation
"""
|
bjmorgan/bsym | examples/bsym_examples.ipynb | mit | from bsym import SymmetryOperation
# The identity operation E as a 3x3 permutation matrix.
SymmetryOperation([[ 1, 0, 0 ],
                   [ 0, 1, 0 ],
                   [ 0, 0, 1 ]])
"""
Explanation: bsym – a basic symmetry module
bsym is a basic Python symmetry module. It consists of some core classes that describe configuration vector spaces, their symmetry operations, and specific configurations of objects withing these spaces. The module also contains an interface for working with pymatgen Structure objects, to allow simple generation of disordered symmetry-inequivalent structures from a symmetric parent crystal structure.
API documentation is here.
Configuration Spaces, Symmetry Operations, and Groups
The central object described by bsym is the configuration space. This defines a vector space that can be occupied by other objects. For example; the three points $a, b, c$ defined by an equilateral triangle,
<img src='figures/triangular_configuration_space.pdf'>
which can be described by a length 3 vector:
\begin{pmatrix}a\b\c\end{pmatrix}
If these points can be coloured black or white, then we can define a configuration for each different colouring (0 for white, 1 for black), e.g.
<img src='figures/triangular_configuration_example_1.pdf'>
with the corresponding vector
\begin{pmatrix}1\1\0\end{pmatrix}
A specific configuration therefore defines how objects are distributed within a particular configuration space.
The symmetry relationships between the different vectors in a configuration space are described by symmetry operations. A symmetry operation describes a transformation of a configuration space that leaves it indistinguishable. Each symmetry operation can be describes as a matrix that maps the vectors in a configuration space onto each other, e.g. in the case of the equiateral triangle the simplest symmetry operation is the identity, $E$, which leaves every corner unchanged, and can be represented by the matrix
\begin{equation}
E=\begin{pmatrix}1 & 0 & 0\0 & 1 & 0 \ 0 & 0 & 1\end{pmatrix}
\end{equation}
For this triangular example, there are other symmetry operations, including reflections, $\sigma$ and rotations, $C_n$:
<img src='figures/triangular_example_symmetry_operations.pdf'>
In this example reflection operation, $b$ is mapped to $c$; $b\to c$, and $c$ is mapped to $b$; $b\to c$.
The matrix representation of this symmetry operation is
\begin{equation}
\sigma_\mathrm{a}=\begin{pmatrix}1 & 0 & 0\0 & 0 & 1 \ 0 & 1 & 0\end{pmatrix}
\end{equation}
For the example rotation operation, $a\to b$, $b\to c$, and $c\to a$, with matrix representation
\begin{equation}
C_3=\begin{pmatrix}0 & 0 & 1\ 1 & 0 & 0 \ 0 & 1 & 0\end{pmatrix}
\end{equation}
Using this matrix and vector notation, the effect of a symmetry operation on a specific configuration can be calculated as the matrix product of the symmetry operation matrix and the configuration vector:
<img src='figures/triangular_rotation_operation.pdf'>
In matrix notation this is represented as
\begin{equation}
\begin{pmatrix}0\1\1\end{pmatrix} = \begin{pmatrix}0 & 0 & 1\ 1 & 0 & 0 \ 0 & 1 &
0\end{pmatrix}\begin{pmatrix}1\1\0\end{pmatrix}
\end{equation}
or more compactly
\begin{equation}
c_\mathrm{f} = C_3 c_\mathrm{i}.
\end{equation}
The set of all symmetry operations for a particular configuration space is a group.
For an equilateral triangle this group is the $C_{3v}$ point group, which contains six symmetry operations: the identity, three reflections (each with a mirror plane bisecting the triangle and passing through $a$, $b$, or $c$ respectively) and two rotations (120° clockwise and counterclockwise).
\begin{equation}
C_{3v} = \left{ E, \sigma_\mathrm{a}, \sigma_\mathrm{b}, \sigma_\mathrm{c}, C_3, C_3^\prime \right}
\end{equation}
Modelling this using bsym
The SymmetryOperation class
In bsym, a symmetry operation is represented by an instance of the SymmetryOperation class. A SymmetryOperation instance can be initialised from the matrix representation of the corresponding symmetry operation.
For example, in the trigonal configuration space above, a SymmetryOperation describing the identify, $E$, can be created with
End of explanation
"""
# The same identity operation, labelled at construction time.
SymmetryOperation([[ 1, 0, 0 ],
                   [ 0, 1, 0 ],
                   [ 0, 0, 1 ]], label='E' )
"""
Explanation: Each SymmetryOperation has an optional label attribute. This can be set at records the matrix representation of the symmetry operation and an optional label. We can provide the label when creating a SymmetryOperation:
End of explanation
"""
e = SymmetryOperation([[ 1, 0, 0 ],
                       [ 0, 1, 0 ],
                       [ 0, 0, 1 ]])
# The label can also be assigned after construction.
e.label = 'E'
e
"""
Explanation: or set it afterwards:
End of explanation
"""
# 120° rotation C3: maps a->b, b->c, c->a.
c_3 = SymmetryOperation( [ [ 0, 0, 1 ],
                           [ 1, 0, 0 ],
                           [ 0, 1, 0 ] ], label='C3' )
c_3
"""
Explanation: Or for $C_3$:
End of explanation
"""
# Equivalent construction from the condensed vector notation [2, 3, 1].
c_3_from_vector = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_from_vector
"""
Explanation: Vector representations of symmetry operations
The matrix representation of a symmetry operation is a permutation matrix. Each row maps one position in the corresponding configuration space to one other position. An alternative, condensed, representation for each symmetry operation matrix uses vector notation, where each element gives the row containing 1 in the equivalent matrix column. e.g. for $C_3$ the vector mapping is given by $\left[2,3,1\right]$, corresponding to the mapping $1\to2$, $2\to3$, $3\to1$.
End of explanation
"""
c_3.as_vector()
"""
Explanation: The vector representation of a SymmetryOperation can be accessed using the as_vector() method.
End of explanation
"""
# C3 (clockwise 120°) and its inverse (anticlockwise 120°).
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C3_inv' )
print( c_3, '\n' )
print( c_3_inv, '\n' )
"""
Explanation: Inverting symmetry operations
For every symmetry operation, $A$, there is an inverse operation, $A^{-1}$, such that
\begin{equation}
A \cdot A^{-1}=E.
\end{equation}
For example, the inverse of $C_3$ (clockwise rotation by 120°) is $C_3^\prime$ (anticlockwise rotation by 120°):
End of explanation
"""
c_3 * c_3_inv
"""
Explanation: The product of $C_3$ and $C_3^\prime$ is the identity, $E$.
End of explanation
"""
c_3.invert()
"""
Explanation: <img src="figures/triangular_c3_inversion.pdf" />
c_3_inv can also be generated using the .invert() method
End of explanation
"""
# Two equivalent ways to attach a label to the inverted operation.
c_3.invert( label= 'C3_inv')
c_3.invert().set_label( 'C3_inv' )
"""
Explanation: The resulting SymmetryOperation does not have a label defined. This can be set directly, or by chaining the .set_label() method, e.g.
End of explanation
"""
from bsym import PointGroup
# construct SymmetryOperations for the C_3v group: identity, two rotations,
# and the three reflections (each fixes one vertex and swaps the other two)
e = SymmetryOperation.from_vector( [ 1, 2, 3 ], label='e' )
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C_3' )
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C_3_inv' )
sigma_a = SymmetryOperation.from_vector( [ 1, 3, 2 ], label='S_a' )
sigma_b = SymmetryOperation.from_vector( [ 3, 2, 1 ], label='S_b' )
sigma_c = SymmetryOperation.from_vector( [ 2, 1, 3 ], label='S_c' )
"""
Explanation: The SymmetryGroup class
A SymmetryGroup is a collections of SymmetryOperation objects. A SymmetryGroup is not required to contain all the symmetry operations of a particular configuration space, and therefore is not necessarily a complete mathematical <a href="https://en.wikipedia.org/wiki/Group_(mathematics)#Definition">group</a>.
For convenience bsym has PointGroup and SpaceGroup classes, that are equivalent to the SymmetryGroup parent class.
End of explanation
"""
# Collect all six operations into the C_3v point group.
c3v = PointGroup( [ e, c_3, c_3_inv, sigma_a, sigma_b, sigma_c ] )
c3v
"""
Explanation: <img src="figures/triangular_c3v_symmetry_operations.pdf" />
End of explanation
"""
from bsym import ConfigurationSpace
# Three sites a, b, c with C_3v symmetry.
c = ConfigurationSpace( objects=['a', 'b', 'c' ], symmetry_group=c3v )
c
"""
Explanation: The ConfigurationSpace class
A ConfigurationSpace consists of a set of objects that represent the configuration space vectors, and the SymmetryGroup containing the relevant symmetry operations.
End of explanation
"""
from bsym import Configuration
conf_1 = Configuration( [ 1, 1, 0 ] )
conf_1
"""
Explanation: The Configuration class
A Configuration instance describes a particular configuration, i.e. how a set of objects are arranged within a configuration space. Internally, a Configuration is represented as a vector (as a numpy array).
Each element in a configuration is represented by a single digit non-negative integer.
End of explanation
"""
c1 = Configuration( [ 1, 1, 0 ] )
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ] )
c_3.operate_on( c1 )
c_3 * conf_1
"""
Explanation: The effect of a particular symmetry operation acting on a configuration can now be calculated using the SymmetryOperation.operate_on() method, or by direct multiplication, e.g.
End of explanation
"""
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ] ) # four vector configuration space
"""
Explanation: <img src="figures/triangular_rotation_operation.pdf" />
Finding symmetry-inequivalent permutations.
A common question that comes up when considering the symmetry properties of arrangements of objects is: how many ways can these be arranged that are not equivalent by symmetry?
As a simple example of solving this problem using bsym consider four equivalent sites arranged in a square.
<img src="figures/square_configuration_space.pdf">
End of explanation
"""
c
"""
Explanation: This ConfigurationSpace has been created without a symmetry_group argument. The default behaviour in this case is to create a SymmetryGroup containing only the identity, $E$.
End of explanation
"""
c.unique_configurations( {1:2, 0:2} )
"""
Explanation: We can now calculate all symmetry inequivalent arrangements where two sites are occupied and two are unoccupied, using the unique_configurations() method. This takes as an argument a dict with the numbers of labels to be arranged in the configuration space. Here, we use the labels 1 and 0 to represent occupied and unoccupied sites, respectively, and the distribution of sites is given by { 1:2, 0:2 }.
End of explanation
"""
[ uc.count for uc in c.unique_configurations( {1:2, 0:2} ) ]
"""
Explanation: Because we have not yet taken into account the symmetry of the configuration space, we get
\begin{equation}
\frac{4\times3}{2}
\end{equation}
unique configurations (where the factor of 2 comes from the occupied sites being indistinguishable).
The configurations generated by unique_configurations have a count attribute that records the number of symmetry equivalent configurations of each case:
In this example, each configuration appears once:
End of explanation
"""
# construct point group
e = SymmetryOperation.from_vector( [ 1, 2, 3, 4 ], label='E' )
c4 = SymmetryOperation.from_vector( [ 2, 3, 4, 1 ], label='C4' )
c4_inv = SymmetryOperation.from_vector( [ 4, 1, 2, 3 ], label='C4i' )
c2 = SymmetryOperation.from_vector( [ 3, 4, 1, 2 ], label='C2' )
sigma_x = SymmetryOperation.from_vector( [ 4, 3, 2, 1 ], label='s_x' )
sigma_y = SymmetryOperation.from_vector( [ 2, 1, 4, 3 ], label='s_y' )
sigma_ac = SymmetryOperation.from_vector( [ 1, 4, 3, 2 ], label='s_ac' )
sigma_bd = SymmetryOperation.from_vector( [ 3, 2, 1, 4 ], label='s_bd' )
c4v = PointGroup( [ e, c4, c4_inv, c2, sigma_x, sigma_y, sigma_ac, sigma_bd ] )
# create ConfigurationSpace with the c4v PointGroup.
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ], symmetry_group=c4v )
c
c.unique_configurations( {1:2, 0:2} )
[ uc.count for uc in c.unique_configurations( {1:2, 0:2 } ) ]
"""
Explanation: We can also calculate the result when all symmetry operations of this configuration space are included.
End of explanation
"""
c.unique_configurations( {2:1, 1:1, 0:2} )
[ uc.count for uc in c.unique_configurations( {2:1, 1:1, 0:2 } ) ]
"""
Explanation: Taking symmetry into account, we now only have two unique configurations: either two adjacent sites are occupied (four possible ways), or two diagonal sites are occupied (two possible ways):
<img src="figures/square_unique_configurations.pdf" >
The unique_configurations() method can also handle non-binary site occupations:
End of explanation
"""
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
import numpy as np
# construct a pymatgen Structure instance using the site fractional coordinates
coords = np.array( [ [ 0.0, 0.0, 0.0 ] ] )
atom_list = [ 'Li' ]
lattice = Lattice.from_parameters( a=1.0, b=1.0, c=1.0, alpha=90, beta=90, gamma=90 )
parent_structure = Structure( lattice, atom_list, coords ) * [ 4, 4, 1 ]
parent_structure.cart_coords.round(2)
"""
Explanation: <img src="figures/square_unique_configurations_2.pdf">
Working with crystal structures using pymatgen
One example where the it can be useful to identify symmetry-inequivalent arrangements of objects in a vector space, is when considering the possible arrangements of disordered atoms on a crystal lattice.
To solve this problem for an arbitrary crystal structure, bsym contains an interface to pymatgen that will identify symmetry-inequivalent atom substitutions in a given pymatgen Structure.
As an example, consider a $4\times4$ square-lattice supercell populated by lithium atoms.
End of explanation
"""
from bsym.interface.pymatgen import unique_structure_substitutions
print( unique_structure_substitutions.__doc__ )
"""
Explanation: We can use the bsym.interface.pymatgen.unique_structure_substitutions() function to identify symmetry-inequivalent structures generated by substituting at different sites.
End of explanation
"""
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Na':1, 'Li':15 } )
len( unique_structures )
"""
Explanation: As a trivial example, when substituting one Li atom for Na, we get a single unique structure
End of explanation
"""
na_substituted = unique_structures[0]
"""
Explanation: <img src="figures/pymatgen_example_one_site.pdf">
End of explanation
"""
unique_structures_with_Mg = unique_structure_substitutions( na_substituted, 'Li', { 'Mg':1, 'Li':14 } )
len( unique_structures_with_Mg )
[ s.number_of_equivalent_configurations for s in unique_structures_with_Mg ]
"""
Explanation: This Li$\to$Na substitution breaks the symmetry of the $4\times4$ supercell.
If we now replace a second lithium with a magnesium atom, we generate five symmetry inequivalent structures:
End of explanation
"""
[ s.full_configuration_degeneracy for s in unique_structures_with_Mg ]
"""
Explanation: number_of_equivalent_configurations only lists the number of equivalent configurations found when performing the second substitution, when the list of structures unique_structures_with_Mg was created. The full configuration degeneracy relative to the initial empty 4×4 lattice can be queried using full_configuration_degeneracy.
End of explanation
"""
# Check the squared distances between the Na and Mg sites in these unique structures are [1, 2, 4, 5, 8]
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] )**2 for s in unique_structures_with_Mg ] ) )
"""
Explanation: <img src="figures/pymatgen_example_two_sites.pdf">
End of explanation
"""
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Mg':1, 'Na':1, 'Li':14 } )
len(unique_structures)
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] ) for s in unique_structures ] ) )**2
[ s.number_of_equivalent_configurations for s in unique_structures ]
"""
Explanation: This double substitution can also be done in a single step:
End of explanation
"""
[ s.full_configuration_degeneracy for s in unique_structures ]
"""
Explanation: Because both substitutions were performed in a single step, number_of_equivalent_configurations and full_configuration_degeneracy now contain the same data:
End of explanation
"""
from bsym.interface.pymatgen import ( space_group_symbol_from_structure,
space_group_from_structure,
configuration_space_from_structure )
"""
Explanation: Constructing SpaceGroup and ConfigurationSpace objects using pymatgen
The bsym.interface.pymatgen module contains functions for generating SpaceGroup and ConfigurationSpace objects directly from pymatgen Structure objects.
End of explanation
"""
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.5, 0.5, 0.0 ],
[ 0.0, 0.5, 0.5 ],
[ 0.5, 0.0, 0.5 ] ] )
atom_list = [ 'Li' ] * len( coords )
lattice = Lattice.from_parameters( a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90 )
structure = Structure( lattice, atom_list, coords )
space_group_symbol_from_structure( structure )
space_group_from_structure( structure )
configuration_space_from_structure( structure )
"""
Explanation: Documentation:
space_group_symbol_from_structure
space_group_from_structure
configuration_space_from_structure
End of explanation
"""
a = 3.798 # lattice parameter
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.5, 0.0, 0.0 ],
[ 0.0, 0.5, 0.0 ],
[ 0.0, 0.0, 0.5 ] ] )
atom_list = [ 'Ti', 'X', 'X', 'X' ]
lattice = Lattice.from_parameters( a=a, b=a, c=a, alpha=90, beta=90, gamma=90 )
unit_cell = Structure( lattice, atom_list, coords )
parent_structure = unit_cell * [ 2, 2, 2 ]
unique_structures = unique_structure_substitutions( parent_structure, 'X', { 'O':8, 'F':16 },
show_progress='notebook' )
%load_ext version_information
%version_information bsym, numpy, jupyter, pymatgen, tqdm
"""
Explanation: Progress bars
bsym.ConfigurationSpace.unique_configurations() and bsym.interface.pymatgen.unique_structure_substitutions() both accept optional show_progress arguments, which can be used to display progress bars (using [tqdm](https://tqdm.github.io)).
Setting show_progress=True will give a simple progress bar. If you are running bsym in a Jupyter notebook, setting show_progress="notebook" will give you a progress bar as a notebook widget.
(note, the widget status is not saved with this notebook, and may not display correctly on GitHub or using nbviewer)
In the example below, we find all unique configurations for the pseudo-ReO<sub>3</sub> structured TiOF<sub>2</sub> in a 2×2×2 supercell.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.18/_downloads/a271bc382505fca1eb3f2c32f85b865f/spm_faces_dataset.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 10
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.preprocessing import ICA, create_eog_epochs
from mne import io, combine_evoked
from mne.minimum_norm import make_inverse_operator, apply_inverse
print(__doc__)
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
"""
Explanation: From raw data to dSPM on SPM Faces dataset
Runs a full pipeline using MNE-Python:
- artifact removal
- averaging Epochs
- forward model computation
- source reconstruction using dSPM on the contrast : "faces - scrambled"
<div class="alert alert-info"><h4>Note</h4><p>This example does quite a bit of processing, so even on a
fast machine it can take several minutes to complete.</p></div>
End of explanation
"""
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1, preload=True) # Take first run
# Here to save memory and time we'll downsample heavily -- this is not
# advised for real data as it can effectively jitter events!
raw.resample(120., npad='auto')
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='fir', fir_design='firwin')
events = mne.find_events(raw, stim_channel='UPPT001')
# plot the events to get an idea of the paradigm
mne.viz.plot_events(events, raw.info['sfreq'])
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.6
baseline = None # no baseline as high-pass is applied
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
# Fit ICA, find and remove major artifacts
ica = ICA(n_components=0.95, random_state=0).fit(raw, decim=1, reject=reject)
# compute correlation scores, get bad indices sorted by score
eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on
ica.plot_components(eog_inds) # view topographic sensitivity of components
ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar
ica.plot_overlay(eog_epochs.average()) # inspect artifact removal
ica.apply(epochs) # clean data, default in place
evoked = [epochs[k].average() for k in event_ids]
contrast = combine_evoked(evoked, weights=[-1, 1]) # Faces - scrambled
evoked.append(contrast)
for e in evoked:
e.plot(ylim=dict(mag=[-400, 400]))
plt.show()
# estimate noise covarariance
noise_cov = mne.compute_covariance(epochs, tmax=0, method='shrunk',
rank=None)
"""
Explanation: Load and filter data, set up epochs
End of explanation
"""
# The transformation here was aligned using the dig-montage. It's included in
# the spm_faces dataset and is named SPM_dig_montage.fif.
trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
'raw-trans.fif')
maps = mne.make_field_map(evoked[0], trans_fname, subject='spm',
subjects_dir=subjects_dir, n_jobs=1)
evoked[0].plot_field(maps, time=0.170)
"""
Explanation: Visualize fields on MEG helmet
End of explanation
"""
evoked[0].plot_white(noise_cov)
"""
Explanation: Look at the whitened evoked data
End of explanation
"""
src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
"""
Explanation: Compute forward model
End of explanation
"""
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
loose=0.2, depth=0.8)
# Compute inverse solution on contrast
stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None)
# stc.save('spm_%s_dSPM_inverse' % contrast.comment)
# Plot contrast in 3D with PySurfer if available
brain = stc.plot(hemi='both', subjects_dir=subjects_dir, initial_time=0.170,
views=['ven'], clim={'kind': 'value', 'lims': [3., 6., 9.]})
# brain.save_image('dSPM_map.png')
"""
Explanation: Compute inverse solution
End of explanation
"""
|
aldian/tensorflow | tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
import numpy as np
assert float(tf.__version__[:3]) >= 2.3
"""
Explanation: Post-training integer quantization
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/lite/performance/post_training_integer_quant"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/lite/g3doc/performance/post_training_integer_quant.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
Integer quantization is an optimization strategy that converts 32-bit floating-point numbers (such as weights and activation outputs) to the nearest 8-bit fixed-point numbers. This results in a smaller model and increased inferencing speed, which is valuable for low-power devices such as microcontrollers. This data format is also required by integer-only accelerators such as the Edge TPU.
In this tutorial, you'll train an MNIST model from scratch, convert it into a Tensorflow Lite file, and quantize it using post-training quantization. Finally, you'll check the accuracy of the converted model and compare it to the original float model.
You actually have several options as to how much you want to quantize a model. In this tutorial, you'll perform "full integer quantization," which converts all weights and activation outputs into 8-bit integer data—whereas other strategies may leave some amount of data in floating-point.
To learn more about the various quantization strategies, read about TensorFlow Lite model optimization.
Setup
In order to quantize both the input and output tensors, we need to use APIs added in TensorFlow r2.3:
End of explanation
"""
# Load MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images.astype(np.float32) / 255.0
test_images = test_images.astype(np.float32) / 255.0
# Define the model architecture
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28)),
tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=5,
validation_data=(test_images, test_labels)
)
"""
Explanation: Generate a TensorFlow Model
We'll build a simple model to classify numbers from the MNIST dataset.
This training won't take long because you're training the model for just a 5 epochs, which trains to about ~98% accuracy.
End of explanation
"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
"""
Explanation: Convert to a TensorFlow Lite model
Now you can convert the trained model to TensorFlow Lite format using the TFLiteConverter API, and apply varying degrees of quantization.
Beware that some versions of quantization leave some of the data in float format. So the following sections show each option with increasing amounts of quantization, until we get a model that's entirely int8 or uint8 data. (Notice we duplicate some code in each section so you can see all the quantization steps for each option.)
First, here's a converted model with no quantization:
End of explanation
"""
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model_quant = converter.convert()
"""
Explanation: It's now a TensorFlow Lite model, but it's still using 32-bit float values for all parameter data.
Convert using dynamic range quantization
Now let's enable the default optimizations flag to quantize all fixed parameters (such as weights):
End of explanation
"""
def representative_data_gen():
  """Yield ~100 single-image batches so the converter can calibrate quantization ranges."""
  for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
    # Model has only one input so each data point has one element.
    yield [input_value]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
tflite_model_quant = converter.convert()
"""
Explanation: The model is now a bit smaller with quantized weights, but other variable data is still in float format.
Convert using float fallback quantization
To quantize the variable data (such as model input/output and intermediates between layers), you need to provide a RepresentativeDataset. This is a generator function that provides a set of input data that's large enough to represent typical values. It allows the converter to estimate a dynamic range for all the variable data. (The dataset does not need to be unique compared to the training or evaluation dataset.)
To support multiple inputs, each representative data point is a list and elements in the list are fed to the model according to their indices.
End of explanation
"""
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
"""
Explanation: Now all weights and variable data are quantized, and the model is significantly smaller compared to the original TensorFlow Lite model.
However, to maintain compatibility with applications that traditionally use float model input and output tensors, the TensorFlow Lite Converter leaves the model input and output tensors in float:
End of explanation
"""
def representative_data_gen():
  """Representative dataset: 100 single-image batches drawn from the training set."""
  # Each yielded item is a list because the model has exactly one input tensor.
  for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
    yield [input_value]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
"""
Explanation: That's usually good for compatibility, but it won't be compatible with devices that perform only integer-based operations, such as the Edge TPU.
Additionally, the above process may leave an operation in float format if TensorFlow Lite doesn't include a quantized implementation for that operation. This strategy allows conversion to complete so you have a smaller and more efficient model, but again, it won't be compatible with integer-only hardware. (All ops in this MNIST model have a quantized implementation.)
So to ensure an end-to-end integer-only model, you need a couple more parameters...
Convert using integer-only quantization
To quantize the input and output tensors, and make the converter throw an error if it encounters an operation it cannot quantize, convert the model again with some additional parameters:
End of explanation
"""
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
"""
Explanation: The internal quantization remains the same as above, but you can see the input and output tensors are now integer format:
End of explanation
"""
import pathlib
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
# Save the unquantized/float model:
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# Save the quantized model:
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
"""
Explanation: Now you have an integer quantized model that uses integer data for the model's input and output tensors, so it's compatible with integer-only hardware such as the Edge TPU.
Save the models as files
You'll need a .tflite file to deploy your model on other devices. So let's save the converted models to files and then load them when we run inferences below.
End of explanation
"""
# Helper function to run inference on a TFLite model
def run_tflite_model(tflite_file, test_image_indices):
  """Run the TFLite model at *tflite_file* on the global ``test_images``.

  Args:
    tflite_file: path to a ``.tflite`` model file.
    test_image_indices: iterable of indices into the global ``test_images``.

  Returns:
    A 1-D numpy int array of predicted class labels, one per requested index.
  """
  global test_images

  # Initialize the interpreter
  interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
  interpreter.allocate_tensors()

  input_details = interpreter.get_input_details()[0]
  output_details = interpreter.get_output_details()[0]

  predictions = np.zeros((len(test_image_indices),), dtype=int)
  for i, test_image_index in enumerate(test_image_indices):
    test_image = test_images[test_image_index]

    # Check if the input type is quantized, then rescale input data to uint8
    if input_details['dtype'] == np.uint8:
      input_scale, input_zero_point = input_details["quantization"]
      test_image = test_image / input_scale + input_zero_point

    # Add a batch dimension and cast to the interpreter's expected dtype.
    test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
    interpreter.set_tensor(input_details["index"], test_image)
    interpreter.invoke()
    output = interpreter.get_tensor(output_details["index"])[0]

    # Predicted class is the index of the largest logit.
    predictions[i] = output.argmax()

  return predictions
"""
Explanation: Run the TensorFlow Lite models
Now we'll run inferences using the TensorFlow Lite Interpreter to compare the model accuracies.
First, we need a function that runs inference with a given model and images, and then returns the predictions:
End of explanation
"""
import matplotlib.pylab as plt

# Change this to test a different image
test_image_index = 1

## Helper function to test the models on one image
def test_model(tflite_file, test_image_index, model_type):
  """Classify one test image and plot it with its true and predicted labels."""
  global test_labels

  predicted = run_tflite_model(tflite_file, [test_image_index])

  plt.imshow(test_images[test_image_index])
  title_text = (model_type + " Model \n True:{true}, Predicted:{predict}").format(
      true=str(test_labels[test_image_index]), predict=str(predicted[0]))
  _ = plt.title(title_text)
  plt.grid(False)
"""
Explanation: Test the models on one image
Now we'll compare the performance of the float model and quantized model:
+ tflite_model_file is the original TensorFlow Lite model with floating-point data.
+ tflite_model_quant_file is the last model we converted using integer-only quantization (it uses uint8 data for input and output).
Let's create another function to print our predictions:
End of explanation
"""
test_model(tflite_model_file, test_image_index, model_type="Float")
"""
Explanation: Now test the float model:
End of explanation
"""
test_model(tflite_model_quant_file, test_image_index, model_type="Quantized")
"""
Explanation: And test the quantized model:
End of explanation
"""
# Helper function to evaluate a TFLite model on all images
def evaluate_model(tflite_file, model_type):
  """Run the model over the whole global test set and print its accuracy."""
  global test_images
  global test_labels

  indices = range(test_images.shape[0])
  predicted_labels = run_tflite_model(tflite_file, indices)

  n_correct = np.sum(test_labels == predicted_labels)
  accuracy = (n_correct * 100) / len(test_images)

  print('%s model accuracy is %.4f%% (Number of test samples=%d)' % (
      model_type, accuracy, len(test_images)))
"""
Explanation: Evaluate the models on all images
Now let's run both models using all the test images we loaded at the beginning of this tutorial:
End of explanation
"""
evaluate_model(tflite_model_file, model_type="Float")
"""
Explanation: Evaluate the float model:
End of explanation
"""
evaluate_model(tflite_model_quant_file, model_type="Quantized")
"""
Explanation: Evaluate the quantized model:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ncc/cmip6/models/sandbox-2/ocean.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-2', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: NCC
Source ID: SANDBOX-2
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:25
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatment in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z* vertical coordinate in ocean?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momentum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momentum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momentum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? if so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momentum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momemtum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
*Properties of boundary layer (BL) mixing on tracers in the ocean *
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient, (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
*Properties of boundary layer (BL) mixing on momentum in the ocean *
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
*Properties of interior mixing in the ocean *
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
*Properties of interior mixing on tracers in the ocean *
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
*Properties of interior mixing on momentum in the ocean *
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embeded in the ocean model (instead of levitating) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
JAmarel/QLab | ElectronChargePerMass/DataAnalysis.ipynb | mit | df = pd.read_excel('Data.xlsx', sheetname=None)
df['1000V']
keys = ['1000V','1500V','2000V','2500V','3000V']
xpoints = np.array([df[key]['x(cm)'] for key in keys]) #Same x points at all voltages
#Convert x (cm) to meters
xpoints = xpoints*1e-2
tic_length = df['1000V']['tic length (m)'][0] #Length of ticks on paper
y_uncertainty = np.array(df['1000V']['uncertainty +/- (tic)']) #Eyeball measurement
#convert y_uncertainty from ticks to meters
y_uncertainty = y_uncertainty*tic_length
i_uncertainty = df['1000V']['current uncertainty (A)'][0]
v_uncertainty = df['1000V']['voltage uncertainty (V)'][0]
offsets = np.array([df[key]['offset (tic)'] for key in keys]) #At V=0, the beam wasn't quite through y = 0.
#Convert ticks to meters
offsets = offsets*tic_length
y_up = np.array([df[key]['positive y(tic)'] for key in keys]) #Upward deflection in ticks.
y_down = np.array([df[key]['negative y(tic)'] for key in keys])#Downward
#Convert ticks to meters
y_up = y_up*tic_length
y_down = y_down*tic_length
#Correct for offset
y_up = y_up - offsets
y_down = y_down - offsets
#Average to account for alignment
ypoints = (1/2)*(y_up + abs(y_down))
currents = np.array([df[key]['current (A)'][0] for key in keys]) #Amps
voltages = np.array([df[key]['voltage (V)'][0] for key in keys], dtype=int) #Voltes
"""
Explanation: Read/Sort Data
End of explanation
"""
# Plot the raw deflection data for the second voltage (index 1, the 1500 V
# sheet) with its eyeball y uncertainties; positions converted m -> cm.
plt.figure(figsize=(10,6));
plt.errorbar(xpoints[1]*100,ypoints[1]*100,y_uncertainty[1]*100,linestyle = '',marker = 'o');
plt.xlabel('x (cm)',fontsize=20);
plt.ylabel('y deflection (cm)',fontsize = 20);
plt.xlim(0,12);
plt.xticks(size = 13);
plt.yticks(size = 13);
plt.savefig('Sample')  # written to the working directory (no extension -> matplotlib default format)
"""
Explanation: Plot Data
End of explanation
"""
def myfun(x,a,b,c):
    """Lower branch of a circle of radius ``a`` centered at ``(b, c)``.

    Solves (x - b)^2 + (y - c)^2 = a^2 for the lower half:
    y = c - sqrt(a^2 - (x - b)^2).  Accepts scalars or numpy arrays;
    used as the model function for ``curve_fit``.
    """
    dx = x - b
    return c - np.sqrt(a**2 - dx**2)
# Initial guess [radius a, x-shift b, y-shift c], in meters, for the circle fit.
p0 = [.15, 0, .15]
#Initialize arrays to hold a,b,c for all data
a = np.zeros(len(xpoints)) #Radius
ea = np.zeros(len(xpoints)) #uncertainty
b = np.zeros(len(xpoints)) #x shift
eb = np.zeros(len(xpoints))
c = np.zeros(len(xpoints)) #y shift
ec = np.zeros(len(xpoints))
xlots = np.linspace(.5e-2,11e-2) # need lots of data points for smooth curve
yfit = np.zeros((len(xpoints),xlots.size))
# Fit the lower circle arc to each voltage's deflection data.  The measured y
# uncertainties are passed positionally as curve_fit's `sigma` argument, so the
# fit is weighted by the per-point measurement error.
for i in np.arange(0,len(xpoints)):
    plsq, pcov = curve_fit(myfun, xpoints[i], ypoints[i], p0, y_uncertainty[i]) # curve fit returns p and covariance matrix
    # these give the parameters and the uncertainties
    a[i] = plsq[0]
    ea[i] = np.sqrt(pcov[0,0])  # 1-sigma uncertainties = sqrt of covariance diagonal
    b[i] = plsq[1]
    eb[i] = np.sqrt(pcov[1,1])
    c[i] = plsq[2]
    ec[i] = np.sqrt(pcov[2,2])
    yfit[i] = myfun(xlots,plsq[0],plsq[1],plsq[2]) # use fit results for a, b, c
    print('a = %.3f +/- %.3f' % (plsq[0], np.sqrt(pcov[0,0])))
    print('b = %.3f +/- %.3f' % (plsq[1], np.sqrt(pcov[1,1])))
    print('c = %.3f +/- %.3f' % (plsq[2], np.sqrt(pcov[2,2])))
"""
Explanation: Curve fit to find the radius of curvature
The electron is deflected radially by the field.
The equation of a circle with radius $a$ centered at $(x,y)=(b,c)$ is given by
$$(x-b)^2+(y-c)^2 = a^2$$
Let's rewrite this in terms of $y$,
$$y=-\sqrt{a^2-(x-b)^2}+c$$
We define the function and then want to find the best estimates for $a, b, c$ consistent with our data.
End of explanation
"""
# Overlay the fitted circle arc on the measured deflection data (index 1).
plt.figure(figsize=(10,6));
plt.errorbar(xpoints[1]*100,ypoints[1]*100,y_uncertainty[1]*100,linestyle = '',marker = 'o');
plt.xlabel('x (mm)');  # NOTE: this label (and the ylabel below) is overwritten a few lines down
plt.ylabel('y (mm)');
plt.plot(xlots*100,yfit[1]*100);
plt.title('Least-squares fit to data');
plt.legend(['data','Fit'],loc='best');
plt.xlabel('x (cm)',fontsize=20);
plt.ylabel('y deflection (cm)',fontsize = 20);
plt.xlim(0,12);
plt.xticks(size = 13);
plt.yticks(size = 13);
plt.savefig('Sample')
"""
Explanation: Now we use the fitted parameters in our function to compare with the data.
Least Squares Fit
End of explanation
"""
mu_o = 4*np.pi*1e-7  # permeability of free space (T*m/A)
D = 21.3*1e-2  # Helmholtz coil diameter, meters
D_uncertainty = 1e-3  # +/- 1 mm on the coil diameter
N=124  # turns per coil
acceptedvalue = 1.76*1e11 #C/kg
# e/m = (125/128) * V * D^2 / (mu_0^2 N^2 I^2 r^2); one estimate per voltage,
# using the fitted radius of curvature a[i] as r.
eperm = np.array([(125/128)*voltages[i]*(D**2)/( (mu_o**2)*(N**2)*(currents[i]**2)*(a[i]**2) ) for i in np.arange(0,len(xpoints))])
"""
Explanation: Calculating e/m
From the Lorentz force law,
\begin{equation}
\frac{e}{m} = \frac{2V_a}{B^2 r^2}
\end{equation}
and the magnetic field of a Helmholtz coil,
\begin{equation}
B = \frac{16 {\mu}_o N I}{\sqrt{125}D}
\end{equation}
we calculate the charge per mass in terms of measured quantities,
\begin{equation}
\frac{e}{m} = \frac{125}{128}\frac{V_a D^2}{{{\mu}_o}^2 N^2 I^2 r^2}
\end{equation}
End of explanation
"""
# Plot each voltage's e/m estimate against the accepted value.
plt.scatter(np.arange(0,len(xpoints)),eperm);
xplaces = np.linspace(-.5,5.5,50);
plt.plot(xplaces,np.array([acceptedvalue]*len(xplaces)));
plt.xlim(-.5,4.5);
# BUGFIX: legend labels are assigned in artist-creation order (scatter of
# measured values first, then the accepted-value line), so the original
# ['Accepted Value','Measured Value'] labeled the two artists backwards.
plt.legend(['Measured Value','Accepted Value'],loc='lower left');
plt.ylabel('Charge to Mass Ratio (C/kg)');
plt.xlabel('Accelerating Voltage')
labels = np.array([(str(entry)+'V') for entry in voltages]);
plt.xticks(np.arange(0,len(xpoints)), labels, rotation='vertical');
"""
Explanation: Plot vs Accepted Value
End of explanation
"""
# Spread of the five independent e/m estimates
std = np.std(eperm)
std
# Propagate the measurement uncertainties into each e/m estimate.
# Since e/m = (125/128) V D^2 / (mu_0^2 N^2 I^2 r^2), the relative uncertainty is
#   sqrt( (dV/V)^2 + (2 dI/I)^2 + (2 dr/r)^2 + (2 dD/D)^2 ).
# Fixes relative to the original: a single sqrt (it applied sqrt twice),
# the D term squared like every other term, and the radius term indexed
# per voltage (ea[i]/a[i] instead of always using the first fit's values).
u = np.array([np.sqrt((v_uncertainty/voltages[i])**2 + 4*(i_uncertainty/currents[i])**2 +
              4*(ea[i]/a[i])**2 + 4*(D_uncertainty/D)**2)*eperm[i]
              for i in np.arange(0,len(voltages))])
avg_u = np.mean(u)  # mean single-measurement uncertainty
mean_std = avg_u/np.sqrt(len(xpoints))  # standard error of the mean over the 5 voltages
"""
Explanation: Uncertainty
End of explanation
"""
# Final results figure: each e/m estimate with its propagated error bar,
# compared against the accepted value.
plt.figure(figsize=(10,6));
plt.errorbar(np.arange(0,len(xpoints)),eperm,u, linestyle = '',marker = 'o');
xplaces = np.linspace(-1,5.5,50);
plt.plot(xplaces,np.array([acceptedvalue]*len(xplaces)));
plt.xlim(-1,5);
plt.ylim(0,2.8*1e11)
plt.legend(['Measured Values','Accepted Value'],loc='lower left');
plt.ylabel('Charge to Mass Ratio (C/kg)',size=20);
plt.xlabel('Accelerating Voltage',size=20)
labels = np.array([(str(entry)+'V') for entry in voltages]);
plt.xticks(np.arange(0,len(xpoints)), labels,size=13);
plt.yticks(size=13);
plt.savefig('Results')
np.mean(eperm)
mean_std
"""
Explanation: Results
End of explanation
"""
# Report the mean e/m and its standard error, both scaled by 1e11 C/kg.
print('e/m = (%.3f +/- %.3f)x%s C/kg' % (np.mean(eperm)/1e11, mean_std/1e11,1e11))
acceptedvalue/1e11
"""
Explanation: Final Value
End of explanation
"""
# Alternative analysis: the accelerating voltage is linear in (I*r)^2 with
# slope proportional to e/m, so fit V vs (I*r)^2 through the origin.
plt.figure(figsize=(10,6));
plt.scatter(currents**2,voltages);
plt.ylabel('Accelerating Voltage (V)');
plt.xlabel('Square of Current ($A^2$)');
def myfun(x,qpm): #Where x is (I*r)^2 and qpm = e/m
    ans = qpm*(128/125)*(mu_o**2)*(N**2)*(1/D**2)*x # V as a linear function of (I*r)^2
    return ans
p0 = [acceptedvalue]  # start the one-parameter fit at the accepted e/m
(currents*a)**2
xlots = np.linspace(.01,.035) # need lots of data points for smooth curve
plsq, pcov = curve_fit(myfun, (currents*a)**2, voltages, p0) # curve fit returns p and covariance matrix
# these give the parameter and its uncertainty
epm = plsq[0]
eepm = np.sqrt(pcov[0,0])
yfit = myfun(xlots,epm) # use the fitted slope parameter qpm
print('epm = %.3f +/- %.3f' % (plsq[0]/1e11, np.sqrt(pcov[0,0])/1e11))
epm/acceptedvalue
plt.figure(figsize=(10,6));
plt.scatter((currents*a)**2,voltages);
plt.ylabel('Accelerating Voltage (V)');
plt.xlabel('(Current*Radius) Squared $A^2$$m^2$');
plt.plot(xlots,yfit);
plt.title('Least-squares fit to data');
plt.legend(['data','Fit'],loc='best');
"""
Explanation: Plot Accelerating Voltage vs Square of Current
End of explanation
"""
|
moonbury/pythonanywhere | learn_scipy/7702OS_Chap_01_rev20150118.ipynb | gpl-3.0 | import numpy
import scipy
scores=numpy.array([114, 100, 104, 89, 102, 91, 114, 114, 103, 105, 108, 130, 120, 132, 111, 128, 118, 119, 86,
72, 111, 103, 74, 112, 107, 103, 98, 96, 112, 112, 93])
"""
Explanation: <center><font color=red>Learning SciPy for Numerical and Scientific Computing</font></center>
Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c)2014 Sergio Rojas (srojas@usb.ve) and Erik A Christensen (erikcny@aol.com).
<b><font color='red'> NOTE: This IPython notebook should be read alongside the corresponding chapter in the book, where each piece of code is fully explained. </font></b> <br>
<center> CHAPTER 1: Introduction to SciPy </center>
CHAPTER SUMMARY
In this chapter we'll learn the benefits of using the combination of Python, NumPy, SciPy, and Matplotlib as a programming environment for any scientific endeavor that requires mathematics; in particular, anything related to numerical computations. We'll explore the environment, learn how to download and install the required libraries, use them for some quick computations, and figure out a few good ways to search for help.
What is SciPy?
A few links with documentation that can help to enhance the discussion presented on this section of the book are as follows:
<ul>
The Scipy main site:
<li>
[http://www.scipy.org/](http://www.scipy.org/)
</li><br>
Scipy : high-level scientific computing
<li>
[https://scipy-lectures.github.io/intro/scipy.html](https://scipy-lectures.github.io/intro/scipy.html)
</li><br>
Archives of the SciPy mailing discussion list
<li>
[http://mail.scipy.org/pipermail/scipy-user/](http://mail.scipy.org/pipermail/scipy-user/)
</li>
</ul>
How to install SciPy
<ul>
Installing the SciPy Stack and Scientific Python distributions:
<li>
[http://www.scipy.org/install.html](http://www.scipy.org/install.html)
</li><br>
Building From Source on Linux:
<li>
[http://www.scipy.org/scipylib/building/linux.html](http://www.scipy.org/scipylib/building/linux.html)
</li><br>
Unofficial Windows Binaries for Python Extension Packages:
<li>
[http://www.lfd.uci.edu/~gohlke/pythonlibs/](http://www.lfd.uci.edu/~gohlke/pythonlibs/)
</li><br>
</ul>
SciPy organization
<ul>
An exhaustive list of SciPy modules is available at:
<li>
http://docs.scipy.org/doc/scipy/reference/py-modindex.html
</li><br>
</ul>
End of explanation
"""
# Manually bin the sorted scores into nbins equal-width, half-open intervals
# [left, right); the single maximum value (== the last right edge) is added
# by the final check after the loop.
nbins = 6
sortedscores = numpy.sort(scores)
intervals = numpy.linspace(sortedscores[0],sortedscores[-1],nbins+1);
leftEdges = intervals[0:-1];
rightEdges = intervals[1:];
middleEdges = ( leftEdges + rightEdges ) / 2.0;
j=0
temp=sortedscores[j]
count = numpy.zeros([nbins,1])
i=0
istop = len(leftEdges) - 1
while j<len(sortedscores):
    # Count the run of consecutive sorted values falling in bin i
    while ( ((leftEdges[i] <= temp) & (temp < rightEdges[i])) ):
        count[i] = count[i]+1
        j=j+1
        temp = sortedscores[j]
    if i < istop:
        i=i+1
    else:
        j=j+1
# NOTE(review): this adds one count for the maximum value; if the maximum
# occurred more than once it would be undercounted -- verify for other data.
if temp == rightEdges[i]:
    count[i] = count[i]+1
%matplotlib inline
import matplotlib.pylab as plt
plt.stem(middleEdges, count)
plt.show()
"""
Explanation: <b> <font color=red>What follows generates the <i> stem plot</i> mentioned in the text</font></b>
End of explanation
"""
dir(scores)
# Normal-theory 99% confidence interval for the mean (z = 2.576).
xmean = scipy.mean(scores)
sigma = scipy.std(scores)
n = scipy.size(scores)
print ("({0:0.14f}, {1:0.15f}, {2:0.14f})".format(xmean, xmean - 2.576*sigma/scipy.sqrt(n),
       xmean + 2.576*sigma/scipy.sqrt(n) ))
from scipy import stats
# Bayesian estimates of mean, variance, and std with credible intervals.
result=scipy.stats.bayes_mvs(scores)
"""
Explanation: <b> <font color=red>NOTE</font>: to shorten the output of the next command, click the leftmost mouse button on the left of the output to activate a scrolling window. Do the same to anyother help output which follows after this one </b>
End of explanation
"""
help(scipy.stats.bayes_mvs)
# result[0] holds the mean's point estimate and credible interval.
print(result[0])
"""
Explanation: <b> <font color=red>NOTE</font>: to shorten the output of the next command, click the leftmost mouse button on the left of the output to activate a scrolling window. Do the same to anyother help output which follows after this one </b>
End of explanation
"""
# Different ways of browsing the built-in documentation.
help(scipy.stats.bayes_mvs)
numpy.info('random')
help(numpy.random)
help(scipy.stats)
help(scipy.stats.kurtosis)
"""
Explanation: How to find documentation
<ul>
General SciPy documentation is available at:
<li>
[http://docs.scipy.org/doc/](http://docs.scipy.org/doc/)
</li><br>
General SciPy documentation index is available at:
<li>
[http://docs.scipy.org/doc/scipy/reference/index.html](http://docs.scipy.org/doc/scipy/reference/index.html)
</li><br>
</ul>
End of explanation
"""
# Minimal matplotlib example: one period of sin(x), saved to sine.png.
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (12.0, 8.0)
x=numpy.linspace(0,2*numpy.pi,32)
fig = plt.figure()
plt.plot(x, numpy.sin(x))
fig.savefig('sine.png')
plt.show()
"""
Explanation: Scientific visualization
<ul>
General Matplotlib references:<br>
<li>
[Matplotlib main web site: http://matplotlib.org/](http://matplotlib.org/)
</li><br>
<li>
[Introduction to Matplotlib at: http://scipy-lectures.github.io/matplotlib/matplotlib.html](http://scipy-lectures.github.io/matplotlib/matplotlib.html)
</li><br>
<li>
[Matplotlib example gallery at: http://matplotlib.org/gallery.html ](http://matplotlib.org/gallery.html)
</li><br>
</ul>
<ul>
Other Python-based visualization tools:<br>
<li>
[https://wiki.python.org/moin/NumericAndScientific/Plotting](https://wiki.python.org/moin/NumericAndScientific/Plotting)
</li><br>
<li>
[Plotly: https://plot.ly/](https://plot.ly/)
</li><br>
<li>
[Mayavi: http://code.enthought.com/projects/mayavi/ ](http://code.enthought.com/projects/mayavi/)
</li><br>
</ul>
End of explanation
"""
|
liufuyang/ManagingBigData_MySQL_DukeUniv | week3/MySQL_Exercise_07_Inner_Joins.ipynb | mit | %load_ext sql
%sql mysql://studentuser:studentpw@mysqlserver/dognitiondb
%sql USE dognitiondb
%config SqlMagic.displaylimit=25
"""
Explanation: Copyright Jana Schaich Borg/Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
MySQL Exercise 7: Joining Tables with Inner Joins
Before completing these exercises, I strongly recommend that you watch the video called "What are Joins?" that describe what joins are, and how different types of joins work.
As one of the last building blocks we need to address our Dognition analysis questions, in this lesson we will learn how to combine tables using inner joins.
1. Inner Joins between 2 tables
To begin, load the sql library, connect to the Dognition database, and set the Dognition database as the default.
End of explanation
"""
%%sql
-- Inner join dogs to reviews on both shared keys, aggregate the surprise
-- ratings per owner, keep owners with at least 10 ratings, and list the
-- most-surprised owners first.
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid AND d.user_guid=r.user_guid
GROUP BY d.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
"""
Explanation: Recall that tables in relational databases are linked through primary keys and sometimes other fields that are common to multiple tables (as is the case with our Dognition data set). Our goal when we execute a JOIN or make a joined table is to use those common columns to let the database figure out which rows in one table match up to which rows in another table. Once that mapping is established using at least one common field or column, the database can pull any columns you want out of the mapped, or joined, tables and output the matched data to one common table.
An inner join is a join that outputs only rows that have an exact match in both tables being joined:
<img src="https://duke.box.com/shared/static/xazeqtyq6bjo12ojvgxup4bx0e9qcn5d.jpg" width=400 alt="INNER_JOIN" />
To illustrate how this works, let's find out whether dog owners that are particularly surprised by their dog's performance on Dognition tests tend to own similar breeds (or breed types, or breed groups) of dogs. There are many ways to address this question, but let's start by focusing on the dog owners who provided at least 10 ratings for one or more of their dogs in the ratings table. Of these owners, which 200 owners reported the highest average amount of surprise at their dog's performance, and what was the breed, breed_type, and breed_group of each of these owner's dog?
The surprise ratings are stored in the reviews table. The dog breed information is provided in the dogs table. There are two columns that are common to both tables: user_guid and dog_guid. How do we use the common columns to combine information from the two tables?
To join the tables, you can use a WHERE clause and add a couple of details to the FROM clause so that the database knows from what table each field in your SELECT clause comes.
First, start by adding all the columns we want to examine to the SELECT statement:
mySQL
SELECT dog_guid AS DogID, user_guid AS UserID, AVG(rating) AS AvgRating,
COUNT(rating) AS NumRatings, breed, breed_group, breed_type
then list all the tables from which the fields we are interested in come, separated by commas (with no comma at the end of the list):
mySQL
FROM dogs, reviews
then add the other restrictions:
mySQL
GROUP BY user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
Try running this query and see what happens:
End of explanation
"""
%%sql
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid AND d.user_guid=r.user_guid
GROUP BY d.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
"""
Explanation: You should receive an error message stating that the identity of dog_guid and user_guid in the field list is ambiguous. The reason is that the column title exists in both tables, and MySQL doesn't know which one we want. We have to specify the table name before stating the field name, and separate the two names by a period <mark>(NOTE: read this entire section before deciding whether you want to execute this query)<mark>:
mySQL
SELECT dogs.dog_guid AS DogID, dogs.user_guid AS UserID, AVG(reviews.rating) AS AvgRating,
COUNT(reviews.rating) AS NumRatings, dogs.breed, dogs.breed_group, dogs.breed_type
FROM dogs, reviews
GROUP BY dogs.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
You can also take advantage of aliases so that you don't have to write out the name of the tables each time. Here I will introduce another syntax for aliases that omits the AS completely. In this syntax, the alias is whatever word (or phrase, if you use quotation marks) follows immediately after the field or table name, separated by a space. So we could write:
mySQL
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
GROUP BY d.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
I am tempted to tell you to run this query so that you will see what happens, but instead, I will explain what will happen and let you decide if you want to see what the output looks...and feels...like.
There is nothing built into the database table definitions that can instruct the server how to combine the tables on its own (remember, this is how relational databases save space and remain flexible). Further, the query as written does not tell the database how the two tables are related. As a consequence, rather than match up the two tables according to the values in the user_id and/or dog_id column, the database will do the only thing it knows how to do which is output every single combination of the records in the dogs table with the records in the reviews table. In other words, every single row of the dogs table will get paired with every single row of the reviews table. This is known as a Cartesian product. Not only will it be a heavy burden on the database to output a table that has the full length of one table multiplied times the full length of another (and frustrating to you, because the query would take a very long time to run), the output would be close to useless.
To prevent this from happening, tell the database how to relate the tables in the WHERE clause:
mySQL
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid
GROUP BY d.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
To be very careful and exclude any incorrect dog_guid or user_guid entries, you can include both shared columns in the WHERE clause:
mySQL
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid AND d.user_guid=r.user_guid
GROUP BY d.user_guid
HAVING NumRatings >= 10
ORDER BY AvgRating DESC
LIMIT 200
Try running this query now:
End of explanation
"""
%%sql
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid AND d.user_guid=r.user_guid
GROUP BY d.user_guid
ORDER BY AvgRating DESC
"""
Explanation: The query should execute quickly. This would NOT have been the case if you did not include the WHERE clause to combine the two tables. If you accidentally request a Cartesian product from datasets with billions of rows, you could be waiting for your query output for days (and will probably get in trouble with your database administrator). <mark>So always remember to tell the database how to join your tables!</mark>
Let's examine our joined table a bit further. The joined table outputted by the query above should have 38 rows, despite the fact that we set our LIMIT at 200. The reason for this is that it turns out that a relatively small number of customers provided 10 or more reviews. If you remove the HAVING and LIMIT BY clause from the query, you should end up with 389 rows. Go ahead and try it:
End of explanation
"""
%%sql
SELECT COUNT(DISTINCT dog_guid) AS uniq_dog_guid, COUNT(DISTINCT user_guid) AS uniq_user_guid
FROM reviews
%%sql
SELECT COUNT(DISTINCT dog_guid) AS uniq_dog_guid, COUNT(DISTINCT user_guid) AS uniq_user_guid
FROM dogs
"""
Explanation: It's clear from looking at this output that (A) not many customers provided ratings, and (B) when they did, they usually were not very surprised by their dog's performance. Therefore, these ratings are probably not going to provide a lot of instructive insight into how to improve Dognition's completion rate. However, the ratings table still provides a great opportunity to illustrate the results of different types of joins.
To help prepare us for this:
Questions 1-4: How many unique dog_guids and user_guids are there in the reviews and dogs table independently?
End of explanation
"""
%%sql
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.dog_guid=r.dog_guid
GROUP BY d.user_guid
ORDER BY AvgRating DESC
%%sql
SELECT d.dog_guid AS DogID, d.user_guid AS UserID, AVG(r.rating) AS AvgRating,
COUNT(r.rating) AS NumRatings, d.breed, d.breed_group, d.breed_type
FROM dogs d, reviews r
WHERE d.user_guid=r.user_guid
GROUP BY d.user_guid
ORDER BY AvgRating DESC
"""
Explanation: These counts indicate some important things:
Many customers in both the reviews and the dogs table have multiple dogs
There are many more unique dog_guids and user_guids in the dogs table than the reviews table
There are many more unique dog_guids and user_guids in the reviews table than in the output of our inner join
Let's test one more thing.
Try the inner join query once with just the dog_guid or once with just the user_guid clause in the WHERE statement:
End of explanation
"""
%%sql
SELECT d.user_guid, d.dog_guid, d.breed, d.breed_type, d.breed_group
FROM dogs d, complete_tests t
WHERE d.dog_guid = t.dog_guid AND t.test_name='Yawn Warm-up'
%%sql
show tables
%%sql
Describe complete_tests
"""
Explanation: When you run the query by joining on the dog_guid only, you still get 389 rows in your output. When you run the query by joining on the user_guid only, you get 5586 rows in your output. This means that:
All of the user_guids in the reviews table are in the dogs table
Only 389 of the over 5000 dog_guids in the reviews table are in the dogs table
Perhaps most importantly for our current purposes, these COUNT queries show you that <mark>inner joins only output the data from rows that have equivalent values in both tables being joined.</mark> If you wanted to include all the dog_guids or user_guids in one or both of the tables, you would have to use an outer join, which we will practice in the next lesson.
Try an inner join on your own.
Question 5: How would you extract the user_guid, dog_guid, breed, breed_type, and breed_group for all animals who completed the "Yawn Warm-up" game (you should get 20,845 rows if you join on dog_guid only)?
End of explanation
"""
%%sql
-- Golden Retrievers with at least one completed test: join all three tables
-- through the dogs table (complete_tests.user_guid is NULL in this dataset).
SELECT DISTINCT u.user_guid, u.membership_type, d.dog_guid
FROM users u, dogs d, complete_tests t
WHERE
    d.dog_guid = t.dog_guid
    AND u.user_guid = d.user_guid
    AND d.breed = 'Golden Retriever'
%%sql
show tables
%%sql
Describe complete_tests
%%sql
Describe users
%%sql
Describe dogs
%%sql
SELECT count(breed)
from dogs
where breed = 'Golden Retriever'
limit 5
%%sql
SELECT distinct d.dog_guid
FROM dogs d, complete_tests t
WHERE
d.dog_guid = t.dog_guid
AND d.breed = 'Golden Retriever'
"""
Explanation: 2. Joining More than 2 Tables
In theory, you can join as many tables together as you want or need. To join multiple tables you take the same approach as we took when we were joining two tables together: list all the fields you want to extract in the SELECT statement, specify which table they came from in the SELECT statement, list all the tables from which you will need to extract the fields in the FROM statement, and then tell the database how to connect the tables in the WHERE statement.
To extract the user_guid, user's state of residence, user's zip code, dog_guid, breed, breed_type, and breed_group for all animals who completed the "Yawn Warm-up" game, you might be tempted to query:
mySQL
SELECT c.user_guid AS UserID, u.state, u.zip, d.dog_guid AS DogID, d.breed, d.breed_type, d.breed_group
FROM dogs d, complete_tests c, users u
WHERE d.dog_guid=c.dog_guid
AND c.user_guid=u.user_guid
AND c.test_name="Yawn Warm-up";
This query focuses the relationships primarily on the complete_tests table. However, it turns out that our Dognition dataset has only NULL values in the user_guid column of the complete_tests table. If you were to execute the query above, you would not get an error message, but your output would have 0 rows. However, the power of relational databases will come in handy here. You can use the dogs table to link the complete_tests and users table (pay attention to the difference between the WHERE statement in this query vs. the WHERE statement in the query above):
mySQL
SELECT d.user_guid AS UserID, u.state, u.zip, d.dog_guid AS DogID, d.breed, d.breed_type, d.breed_group
FROM dogs d, complete_tests c, users u
WHERE d.dog_guid=c.dog_guid
AND d.user_guid=u.user_guid
AND c.test_name="Yawn Warm-up";
Of note, joins are very resource intensive, so try not to join unnecessarily. In general, the more joins you have to execute, the slower your query performance will be.
Question 6: How would you extract the user_guid, membership_type, and dog_guid of all the golden retrievers who completed at least 1 Dognition test (you should get 711 rows)?
End of explanation
"""
%%sql
SELECT COUNT(distinct d.dog_guid)
FROM users u, dogs d
WHERE
u.user_guid = d.user_guid
AND d.breed = 'Golden Retriever'
AND u.state = 'NC'
%%sql
SELECT COUNT(distinct d.dog_guid)
FROM users u, dogs d
WHERE
u.user_guid = d.user_guid
AND d.breed = 'Golden Retriever'
GROUP BY u.state
HAVING u.state='NC'
"""
Explanation: Practice inner joining your own tables!
Question 7: How many unique Golden Retrievers who live in North Carolina are there in the Dognition database (you should get 30)?
End of explanation
"""
%%sql
SELECT COUNT(DISTINCT u.user_guid)
FROM users u, reviews r
WHERE u.user_guid = r.user_guid
GROUP BY u.membership_type
LIMIT 5;
"""
Explanation: Question 8: How many unique customers within each membership type provided reviews (there should be 3208 in the membership type with the greatest number of customers, and 18 in the membership type with the fewest number of customers)?
End of explanation
"""
%%sql
-- Breeds ranked by volume of site-activity records (non-NULL script_detail_id).
SELECT d.breed, COUNT(s.script_detail_id) AS site_activity_amount
FROM dogs d, site_activities s
WHERE d.dog_guid = s.dog_guid
    AND s.script_detail_id IS NOT NULL
GROUP BY d.breed
ORDER BY site_activity_amount DESC
LIMIT 0, 5
%%sql
Describe site_activities
"""
Explanation: Question 9: For which 3 dog breeds do we have the greatest amount of site_activity data, (as defined by non-NULL values in script_detail_id)(your answers should be "Mixed", "Labrador Retriever", and "Labrador Retriever-Golden Retriever Mix"?
End of explanation
"""
|
superliaoyong/plist-forsource | python 第四课课件 一.ipynb | apache-2.0 | import array
a = array.array('i', range(10))
# 数据类型必须统一
a[1] = 's'
a
import numpy as np
"""
Explanation: 人生苦短,我用python
python第四课
课程安排
1、numpy
2、pandas
3、matplotlib
numpy
数组跟列表,列表可以存储任意类型的数据,而数组只能存储一种类型数据
End of explanation
"""
# Build an ndarray from an existing Python list.
a_list = list(range(10))
b = np.array(a_list)
type(b)
"""
Explanation: 从原有列表转换为数组
End of explanation
"""
a = np.zeros(10, dtype=int)
print(type(a))
# inspect the element dtype
a.dtype
a = np.zeros((4,4), dtype=int)
print(type(a))
# inspect the element dtype
print(a.dtype)
a
np.ones((4,4), dtype=float)
np.full((3,3), 3.14)
a
# *_like helpers copy the shape (and by default the dtype) of an existing array
np.zeros_like(a)
np.ones_like(a)
np.full_like(a, 4.12, dtype=float)
"""
Explanation: 生成数组
End of explanation
"""
# NOTE: the stdlib `random` module is never imported earlier in this notebook,
# so the two calls below would raise NameError without this import.
import random

# Python's built-in random: one scalar at a time
print(random.randint(5,10))
print(random.random())
# NumPy's random: whole arrays in a single call
np.random.random((3,3))
# used very often
np.random.randint(0,10, (5,5))
"""
Explanation: random
End of explanation
"""
list(range(0,10,2))
np.arange(0,3,2)
# used often
np.linspace(0, 3, 10)
# n x n identity matrix
np.eye(5)
"""
Explanation: 范围取值
End of explanation
"""
# element access in a nested list
var = [[1,2,3], [3,4,5], [5,6,7]]
var[0][0]
# element access in an ndarray
a = np.array(var)
a[-1][0]
a
# these two access styles are equivalent
a[2, 0], a[2][0]
# array slicing
a[:2, :2]
# NOT equivalent to the line above (second [:2] slices rows again)
a[:2][:2]
"""
Explanation: | Data type | Description |
|:---------------|:-------------|
| bool_ | Boolean (True or False) stored as a byte |
| int_ | Default integer type (same as C long; normally either int64 or int32)|
| intc | Identical to C int (normally int32 or int64)|
| intp | Integer used for indexing (same as C ssize_t; normally either int32 or int64)|
| int8 | Byte (-128 to 127)|
| int16 | Integer (-32768 to 32767)|
| int32 | Integer (-2147483648 to 2147483647)|
| int64 | Integer (-9223372036854775808 to 9223372036854775807)|
| uint8 | Unsigned integer (0 to 255)|
| uint16 | Unsigned integer (0 to 65535)|
| uint32 | Unsigned integer (0 to 4294967295)|
| uint64 | Unsigned integer (0 to 18446744073709551615)|
| float_ | Shorthand for float64.|
| float16 | Half precision float: sign bit, 5 bits exponent, 10 bits mantissa|
| float32 | Single precision float: sign bit, 8 bits exponent, 23 bits mantissa|
| float64 | Double precision float: sign bit, 11 bits exponent, 52 bits mantissa|
| complex_ | Shorthand for complex128.|
| complex64 | Complex number, represented by two 32-bit floats|
| complex128| Complex number, represented by two 64-bit floats|
访问数组中元素
End of explanation
"""
a
# number of dimensions
print(a.ndim)
# shape (size along each axis)
print(a.shape)
# total number of elements
print(a.size)
# element dtype
print(a.dtype)
# bytes per element
print(a.itemsize)
# total bytes of the data buffer
print(a.nbytes)
"""
Explanation: 数组属性
End of explanation
"""
# Arithmetic with a scalar broadcasts element-wise.
a = np.array(list(range(10)))
a
print(a + 10)
print(a - 10)
print(a * 100)
a = np.full((3,3), 1.0, dtype=float)
a + 10 # equivalent to np.add(a, 10)
"""
Explanation: 运算
End of explanation
"""
# Universal functions apply element-wise: sin over [0, pi].
a = np.linspace(0, np.pi, 5)
b = np.sin(a)
print(a)
print(b)
"""
Explanation: | Operator | Equivalent ufunc | Description |
|---------------|---------------------|---------------------------------------|
|+ |np.add |Addition (e.g., 1 + 1 = 2) |
|- |np.subtract |Subtraction (e.g., 3 - 2 = 1) |
|- |np.negative |Unary negation (e.g., -2) |
|* |np.multiply |Multiplication (e.g., 2 * 3 = 6) |
|/ |np.divide |Division (e.g., 3 / 2 = 1.5) |
|// |np.floor_divide |Floor division (e.g., 3 // 2 = 1) |
|** |np.power |Exponentiation (e.g., 2 ** 3 = 8) |
|% |np.mod |Modulus/remainder (e.g., 9 % 4 = 1)|
End of explanation
"""
# summation
print(sum([1,2,3,4,5,6]))
# 1-D array sum (builtin sum works)
a = np.full(10, 2.3)
print(sum(a))
# multi-dimensional sum: builtin sum only reduces the first axis
a = np.array([[1,2],[3,4]])
print(sum(a))
# np.sum reduces over all elements by default
np.sum(a)
np.sum(a, axis=1)
np.max(a, axis=1)
n = np.random.rand(10000)
"""
Explanation: 统计类型
End of explanation
"""
# Compare builtin sum vs np.sum on a 10k-element array.
%timeit sum(n)
%timeit np.sum(n)
"""
Explanation: notebook使用小技巧
%timeit 代码 ; 此方法来判断程序的执行效率
End of explanation
"""
# Comparison operators also broadcast element-wise, yielding boolean arrays.
a = np.array(range(10))
a
a > 3
a != 3
a == a
"""
Explanation: 由上代码可已看出np.sum的执行效率高,推荐使用
比较
End of explanation
"""
# Reduce a boolean array to a single truth value.
np.all(a>-1)
np.any(a>-1)
"""
Explanation: | Operator | Equivalent ufunc || Operator | Equivalent ufunc |
|---------------|---------------------||---------------|---------------------|
|== |np.equal ||!= |np.not_equal |
|< |np.less ||<= |np.less_equal |
|> |np.greater ||>= |np.greater_equal |
End of explanation
"""
# reshape returns a new view with the same 20 elements rearranged as 4x5.
a = np.full((2,10), 1, dtype=float)
a
a.reshape(4, 5)
"""
Explanation: 变形
End of explanation
"""
l = [
    [1,2,3],
    [34,12,4],
    [32,2,33]
]
a = np.array(l)
a
# np.sort returns a sorted copy (row-wise by default)
np.sort(a)
# ndarray.sort sorts in place; axis=0 sorts each column
a.sort(axis=0)
a
"""
Explanation: 排序
End of explanation
"""
a = np.array([1, 2, 3])
b = np.array([[0, 2, 4], [1, 3, 5]])
# concatenate along axis 0 (stack vertically)
np.concatenate([b,b,b], axis=0)
# concatenate along axis 1 (stack horizontally)
np.concatenate([b,b,b], axis=1)
"""
Explanation: 拼接
End of explanation
"""
|
Raag079/self-driving-car | Term01-Computer-Vision-and-Deep-Learning/Labs/02-CarND-TensorFlow-Lab/.ipynb_checkpoints/lab-checkpoint.ipynb | mit | import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of letters from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
def download(url, file):
    """
    Download file from <url>, skipping the fetch if it is already on disk.
    :param url: URL to file
    :param file: Local file path
    """
    # Guard clause: an existing local copy means there is nothing to do.
    if os.path.isfile(file):
        return
    print('Downloading ' + file + '...')
    urlretrieve(url, file)
    print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file.

    :param file: The zip file to extract the data from
    :return: (features, labels) -- flattened float32 pixel arrays and the
             class letter taken from the first character of each file name
    """
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress Bar
        filenames_pbar = tqdm(zipf.namelist(), unit='files')
        # Get features and labels from all files
        for filename in filenames_pbar:
            # Check if the file is a directory
            if not filename.endswith('/'):
                with zipf.open(filename) as image_file:
                    image = Image.open(image_file)
                    image.load()
                    # Load image data as 1 dimensional array
                    # We're using float32 to save on memory space
                    feature = np.array(image, dtype=np.float32).flatten()
                    # Get the letter from the filename.  This is the letter of the image.
                    label = os.path.split(filename)[1][0]
                    features.append(feature)
                    labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
# NOTE(review): sklearn's resample draws samples (with replacement by default) -- confirm this is intended
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
# The guard flag makes this cell safe to re-run without double-encoding.
if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation (5% held out)
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features,
    train_labels,
    test_size=0.05,
    random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access; skipped when the cache file already exists
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        with open('notMNIST.pickle', 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
print('Data cached in pickle file.')
"""
Explanation: <img src="image/mean_variance.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data cached by the previous cell
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    pickle_data = pickle.load(f)
    train_features = pickle_data['train_dataset']
    train_labels = pickle_data['train_labels']
    valid_features = pickle_data['valid_dataset']
    valid_labels = pickle_data['valid_labels']
    test_features = pickle_data['test_dataset']
    test_labels = pickle_data['test_labels']
    del pickle_data  # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
features_count = 784  # 28x28 pixels per flattened grayscale image
labels_count = 10     # one class per letter A-J

# Placeholder tensors for image batches and one-hot labels (batch size left open),
# matching the shape/dtype asserts in the do-not-modify test cases below.
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])

# Weights drawn from a truncated normal distribution; biases start at zero
# (the scaffold below asserts biases are all zeros).
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.initialize_all_variables()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: <img src="image/weight_biases.png" style="height: 60%;width: 60%; position: relative; right: 10%">
Problem 2
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# Best parameters across the three configurations described in Problem 3
# (Configuration 3: batch size 100, learning rate 0.2, largest epoch option).
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/learn_rate_tune.png" style="height: 60%;width: 60%">
Problem 3
Below are 3 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best acccuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Batch Size:
* 2000
* 1000
* 500
* 300
* 50
* Learning Rate: 0.01
Configuration 2
* Epochs: 1
* Batch Size: 100
* Learning Rate:
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
Configuration 3
* Epochs:
* 1
* 2
* 3
* 4
* 5
* Batch Size: 100
* Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
# Best parameters carried over from Problem 3
# (batch size 100, learning rate 0.2, 5 epochs).
epochs = 5
batch_size = 100
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Test
Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
End of explanation
"""
|
rizar/attention-lvcsr | libs/Theano/doc/library/d3viz/index.ipynb | mit | import theano as th
import theano.tensor as T
import numpy as np
"""
Explanation: d3viz: Interactive visualization of Theano compute graphs
Requirements
d3viz requires the pydot package, which can be installed with pip:
pip install pydot
Overview
d3viz extends Theano’s printing module to interactively visualize compute graphs. Instead of creating a static picture, it creates an HTML file, which can be opened with current web-browsers. d3viz allows
to zoom to different regions and to move graphs via drag and drop,
to position nodes both manually and automatically,
to retrieve additional information about nodes and edges such as their data type or definition in the source code,
to edit node labels,
to visualizing profiling information, and
to explore nested graphs such as OpFromGraph nodes.
End of explanation
"""
# MLP sizes: 1000 samples, 100 input features, 50 hidden units, 10 classes
ninputs = 1000
nfeatures = 100
noutputs = 10
nhiddens = 50
rng = np.random.RandomState(0)
x = T.dmatrix('x')
# hidden layer: sigmoid(x . wh + bh)
wh = th.shared(rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
bh = th.shared(np.zeros(nhiddens), borrow=True)
h = T.nnet.sigmoid(T.dot(x, wh) + bh)
# softmax output layer over the 10 classes
wy = th.shared(rng.normal(0, 1, (nhiddens, noutputs)))
by = th.shared(np.zeros(noutputs), borrow=True)
y = T.nnet.softmax(T.dot(h, wy) + by)
predict = th.function([x], y)
"""
Explanation: As an example, consider the following multilayer perceptron with one hidden layer and a softmax output layer.
End of explanation
"""
from theano.printing import pydotprint
import os
if not os.path.exists('examples'):
    os.makedirs('examples')
# Render the compute graph of `predict` to a static PNG and display it inline
pydotprint(predict, 'examples/mlp.png')
from IPython.display import Image
Image('examples/mlp.png', width='80%')
"""
Explanation: The function predict outputs the probability of 10 classes. You can visualize it with pydotprint as follows:
End of explanation
"""
import theano.d3viz as d3v
# Write an interactive HTML visualization of the same compute graph
d3v.d3viz(predict, 'examples/mlp.html')
"""
Explanation: To visualize it interactively, import the d3viz function from the d3viz module, which can be called as before:
End of explanation
"""
# Recompile with profiling enabled, then run once on random data so that
# per-node timings are recorded and can be shown in the visualization
predict_profiled = th.function([x], y, profile=True)
x_val = rng.normal(0, 1, (ninputs, nfeatures))
y_val = predict_profiled(x_val)
d3v.d3viz(predict_profiled, 'examples/mlp2.html')
"""
Explanation: Open visualization!
When you open the output file mlp.html in your web-browser, you will see an interactive visualization of the compute graph. You can move the whole graph or single nodes via drag and drop, and zoom via the mouse wheel. When you move the mouse cursor over a node, a window will pop up that displays detailed information about the node, such as its data type or definition in the source code. When you left-click on a node and select Edit, you can change the predefined node label. If you are dealing with a complex graph with many nodes, the default node layout may not be perfect. In this case, you can press the Release node button in the top-left corner to automatically arrange nodes. To reset nodes to their default position, press the Reset nodes button.
Profiling
Theano allows function profiling via the profile=True flag. After at least one function call, the compute time of each node can be printed in text form with debugprint. However, analyzing complex graphs in this way can be cumbersome.
d3viz can visualize the same timing information graphically, and hence help to spot bottlenecks in the compute graph more easily! To begin with, we will redefine the predict function, this time by using profile=True flag. Afterwards, we capture the runtime on random data:
End of explanation
"""
# Convert the Theano function to a pydot graph, then export it via Graphviz
formatter = d3v.formatting.PyDotFormatter()
pydot_graph = formatter(predict_profiled)
pydot_graph.write_png('examples/mlp2.png');
pydot_graph.write_pdf('examples/mlp2.pdf');
Image('./examples/mlp2.png')
"""
Explanation: Open visualization!
When you open the HTML file in your browser, you will find an additional Toggle profile colors button in the menu bar. By clicking on it, nodes will be colored by their compute time, where red corresponds to a high compute time. You can read out the exact timing information of a node by moving the cursor over it.
Different output formats
Internally, d3viz represents a compute graph in the Graphviz DOT language, using the pydot package, and defines a front-end based on the d3.js library to visualize it. However, any other Graphviz front-end can be used, which allows to export graphs to different formats.
End of explanation
"""
x, y, z = T.scalars('xyz')
e = T.nnet.sigmoid((x + y + z)**2)
# Wrap the expression as a reusable op; it is applied twice below with
# different argument orders, producing two OpFromGraph nodes in the graph
op = th.OpFromGraph([x, y, z], [e])
e2 = op(x, y, z) + op(z, y, x)
f = th.function([x, y, z], e2)
d3v.d3viz(f, 'examples/ofg.html')
"""
Explanation: Here, we used the PyDotFormatter class to convert the compute graph into a pydot graph, and created a PNG and PDF file. You can find all output formats supported by Graphviz here.
OpFromGraph nodes
An OpFromGraph node defines a new operation, which can be called with different inputs at different places in the compute graph. Each OpFromGraph node defines a nested graph, which will be visualized accordingly by d3viz.
End of explanation
"""
x, y, z = T.scalars('xyz')
e = x * y
op = th.OpFromGraph([x, y], [e])
e2 = op(x, y) + z
# op2 contains op, demonstrating nested-graph rendering in d3viz
op2 = th.OpFromGraph([x, y, z], [e2])
e3 = op2(x, y, z) + z
f = th.function([x, y, z], [e3])
d3v.d3viz(f, 'examples/ofg2.html')
"""
Explanation: Open visualization!
In this example, an operation with three inputs is defined, which is used to build a function that calls this operations twice, each time with different input arguments.
In the d3viz visualization, you will find two OpFromGraph nodes, which correspond to the two OpFromGraph calls. When you double click on one of them, the nested graph appears with the correct mapping of its input arguments. You can move it around by drag and drop in the shaded area, and close it again by double-click.
An OpFromGraph operation can be composed of further OpFromGraph operations, which will be visualized as nested graphs as you can see in the following example.
End of explanation
"""
|
darkomen/TFG | medidas/12082015/Análisis de datos Ensayo 2.ipynb | cc0-1.0 | #Importamos las librerías utilizadas
import numpy as np
import pandas as pd
import seaborn as sns
#Mostramos las versiones usadas de cada librerías
print ("Numpy v{}".format(np.__version__))
print ("Pandas v{}".format(pd.__version__))
print ("Seaborn v{}".format(sns.__version__))
#Abrimos el fichero csv con los datos de la muestra
datos = pd.read_csv('ensayo2.CSV')
%pylab inline
#Almacenamos en una lista las columnas del fichero con las que vamos a trabajar
columns = ['Diametro X','Diametro Y', 'RPM TRAC']
#Mostramos un resumen de los datos obtenidoss
datos[columns].describe()
#datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']]
"""
Explanation: Análisis de los datos obtenidos
Uso de ipython para el análsis y muestra de los datos obtenidos durante la producción.Se implementa un regulador experto. Los datos analizados son del día 12 de Agosto del 2015
Los datos del experimento:
* Hora de inicio: 12:00
* Hora final : 12:30
* Filamento extruido: 425cm
* $T: 150ºC$
* $V_{min} tractora: 1.5 mm/s$
* $V_{max} tractora: 3.4 mm/s$
* Los incrementos de velocidades en las reglas del sistema experto son distintas:
* En los casos 3 a 6 se pasa de un incremento de velocidad de +1 a un incremento de +2.
End of explanation
"""
# Plot both diameters with the quality band [1.65, 1.85] mm marked in red.
# NOTE(review): DataFrame.ix was removed in modern pandas -- .loc is the replacement.
datos.ix[:, "Diametro X":"Diametro Y"].plot(figsize=(16,10),ylim=(0.5,3)).hlines([1.85,1.65],0,3500,colors='r')
#datos['RPM TRAC'].plot(secondary_y='RPM TRAC')
datos.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: Representamos ambos diámetro y la velocidad de la tractora en la misma gráfica
End of explanation
"""
# X vs Y diameter: points off the diagonal indicate a non-round filament
plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.')
"""
Explanation: Con esta tercera aproximación se ha conseguido estabilizar los datos y reducir la desviación estandar, sin embargo, la medía del filamento y de la velocidad de tracción ha disminuido también.
Como tercera aproximación, vamos a modificar los incrementos en los que el diámetro se encuentra entre $1.80mm$ y $1.70 mm$, en sentido de subida. (casos 3 y 5) el sentido de bajada se mantendrá con incrementos de +1.
Se ha detectado también que el eje de giro de la tractora está algo suelto. Se va a apretar para el siguiente ensayo.
Comparativa de Diametro X frente a Diametro Y para ver el ratio del filamento
End of explanation
"""
# Keep only the samples where BOTH diameters are >= 0.9 mm;
# lower readings are treated as sensor errors and discarded
datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)]
#datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes')
"""
Explanation: Filtrado de datos
Las muestras tomadas con $d_x < 0.9$ o $d_y < 0.9$ las asumimos como error del sensor, por ello las filtramos de las muestras tomadas (conservamos solo aquellas con ambos diámetros $\geq 0.9$).
End of explanation
"""
# Same X/Y scatter, now restricted to the filtered samples
plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.')
"""
Explanation: Representación de X/Y
End of explanation
"""
# X/Y ratio: a value of 1.0 means a perfectly round filament
ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y']
ratio.describe()
# NOTE(review): pd.rolling_mean / pd.rolling_std were removed in pandas >= 0.18;
# the modern equivalents are ratio.rolling(50).mean() / ratio.rolling(50).std()
rolling_mean = pd.rolling_mean(ratio, 50)
rolling_std = pd.rolling_std(ratio, 50)
rolling_mean.plot(figsize=(12,6))
# plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5)
ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5))
"""
Explanation: Analizamos datos del ratio
End of explanation
"""
# Quality thresholds: the target diameter band is [1.65, 1.85] mm
Th_u = 1.85
Th_d = 1.65
# Samples where either diameter falls outside the band are violations
data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) |
                    (datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)]
data_violations.describe()
data_violations.plot(subplots=True, figsize=(12,12))
"""
Explanation: Límites de calidad
Calculamos el número de veces que traspasamos unos límites de calidad.
$Th^+ = 1.85$ and $Th^- = 1.65$
End of explanation
"""
|
DestrinStorm/deep-learning | dcgan-svhn/DCGAN.ipynb | mit | %matplotlib inline
import pickle as pkl
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
!mkdir data
"""
Explanation: Deep Convolutional GANs
In this notebook, you'll build a GAN using convolutional layers in the generator and discriminator. This is called a Deep Convolutional GAN, or DCGAN for short. The DCGAN architecture was first explored last year and has seen impressive results in generating new images, you can read the original paper here.
You'll be training DCGAN on the Street View House Numbers (SVHN) dataset. These are color images of house numbers collected from Google street view. SVHN images are in color and much more variable than MNIST.
So, we'll need a deeper and more powerful network. This is accomplished through using convolutional layers in the discriminator and generator. It's also necessary to use batch normalization to get the convolutional networks to train. The only real changes compared to what you saw previously are in the generator and discriminator, otherwise the rest of the implementation is the same.
End of explanation
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
data_dir = 'data/'
if not isdir(data_dir):
    raise Exception("Data directory doesn't exist!")
class DLProgress(tqdm):
    """tqdm progress bar wired into urlretrieve's reporthook callback."""
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve reports (blocks transferred so far, block size, total size)
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
# Download each SVHN .mat file only if it is not already present
if not isfile(data_dir + "train_32x32.mat"):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:
        urlretrieve(
            'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
            data_dir + 'train_32x32.mat',
            pbar.hook)
if not isfile(data_dir + "test_32x32.mat"):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Testing Set') as pbar:
        urlretrieve(
            'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
            data_dir + 'test_32x32.mat',
            pbar.hook)
"""
Explanation: Getting the data
Here you can download the SVHN dataset. Run the cell above and it'll download to your machine.
End of explanation
"""
# Load the Matlab-format SVHN files into dicts with keys 'X' (images) and 'y' (labels)
trainset = loadmat(data_dir + 'train_32x32.mat')
testset = loadmat(data_dir + 'test_32x32.mat')
"""
Explanation: These SVHN files are .mat files typically used with Matlab. However, we can load them in with scipy.io.loadmat which we imported above.
End of explanation
"""
# Show 36 random training images in a 6x6 grid; samples are indexed on axis 3
idx = np.random.randint(0, trainset['X'].shape[3], size=36)
fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),)
for ii, ax in zip(idx, axes.flatten()):
    ax.imshow(trainset['X'][:,:,:,ii], aspect='equal')
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
plt.subplots_adjust(wspace=0, hspace=0)
"""
Explanation: Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real images we'll pass to the discriminator and what the generator will eventually fake.
End of explanation
"""
def scale(x, feature_range=(-1, 1)):
    """Rescale pixel data onto feature_range, assuming a source maximum of 255."""
    # First map the data onto [0, 1] ...
    unit = (x - x.min()) / (255 - x.min())
    # ... then stretch and shift it onto [lo, hi].
    lo, hi = feature_range
    return unit * (hi - lo) + lo
class Dataset:
    """Hold SVHN train/test/validation splits and serve scaled mini-batches."""

    def __init__(self, train, test, val_frac=0.5, shuffle=False, scale_func=None):
        """
        :param train: dict with 'X' (H, W, C, N image stack) and 'y' labels for training
        :param test: dict in the same format; split into test/validation by val_frac
        :param val_frac: fraction of the test set held out for validation
        :param shuffle: reshuffle the training data at the start of each epoch
        :param scale_func: optional pixel-scaling function; defaults to `scale`
        """
        split_idx = int(len(test['y'])*(1 - val_frac))
        self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:]
        self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:]
        self.train_x, self.train_y = train['X'], train['y']
        # The .mat files store images as (H, W, C, N); move the sample axis first
        self.train_x = np.rollaxis(self.train_x, 3)
        self.valid_x = np.rollaxis(self.valid_x, 3)
        self.test_x = np.rollaxis(self.test_x, 3)
        self.scaler = scale if scale_func is None else scale_func
        self.shuffle = shuffle

    def batches(self, batch_size):
        """Yield (scaled images, labels) mini-batches over the training set."""
        if self.shuffle:
            # BUG FIX: previously indexed the notebook-global `dataset`
            # (np.arange(len(dataset.train_x))) instead of `self`.
            idx = np.arange(len(self.train_x))
            np.random.shuffle(idx)
            self.train_x = self.train_x[idx]
            self.train_y = self.train_y[idx]
        for ii in range(0, len(self.train_y), batch_size):
            x = self.train_x[ii:ii+batch_size]
            y = self.train_y[ii:ii+batch_size]
            yield self.scaler(x), y
"""
Explanation: Here we need to do a bit of preprocessing and getting the images into a form where we can pass batches to the network. First off, we need to rescale the images to a range of -1 to 1, since the output of our generator is also in that range. We also have a set of test and validation images which could be used if we're trying to identify the numbers in the images.
End of explanation
"""
def model_inputs(real_dim, z_dim):
    """
    Return (input_real, input_z) float32 placeholders for the GAN.

    :param real_dim: shape of one real image, e.g. (32, 32, 3)
    :param z_dim: length of the noise vector
    """
    real_shape = (None, *real_dim)
    input_real = tf.placeholder(tf.float32, real_shape, name='input_real')
    input_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return input_real, input_z
"""
Explanation: Network Inputs
Here, just creating some placeholders like normal.
End of explanation
"""
def generator(z, output_dim, reuse=False, alpha=0.2, training=True):
    """
    Build the DCGAN generator: noise vector -> 32x32x`output_dim` image.

    :param z: Noise input tensor of shape (batch, z_dim)
    :param output_dim: Number of channels of the generated image (3 for RGB)
    :param reuse: Reuse the 'generator' variable scope when True
    :param alpha: Leak coefficient of the leaky-ReLU activations
    :param training: Passed to batch norm so statistics update only in training
    :return: tanh output tensor of shape (batch, 32, 32, output_dim), values in [-1, 1]
    """
    with tf.variable_scope('generator', reuse=reuse):
        # First fully connected layer
        x1 = tf.layers.dense(z, 4*4*512)
        # Reshape it to start the convolutional stack
        x1 = tf.reshape(x1, (-1, 4, 4, 512))
        x1 = tf.layers.batch_normalization(x1, training=training)
        x1 = tf.maximum(alpha * x1, x1)  # leaky ReLU
        # 4x4x512 now
        x2 = tf.layers.conv2d_transpose(x1, 256, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = tf.maximum(alpha * x2, x2)
        # 8x8x256 now
        x3 = tf.layers.conv2d_transpose(x2, 128, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = tf.maximum(alpha * x3, x3)
        # 16x16x128 now
        # Output layer
        logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
        # 32x32x3 now
        out = tf.tanh(logits)
        return out
"""
Explanation: Generator
Here you'll build the generator network. The input will be our noise vector z as before. Also as before, the output will be a $tanh$ output, but this time with size 32x32 which is the size of our SVHN images.
What's new here is we'll use convolutional layers to create our new images. The first layer is a fully connected layer which is reshaped into a deep and narrow layer, something like 4x4x1024 as in the original DCGAN paper. Then we use batch normalization and a leaky ReLU activation. Next is a transposed convolution where typically you'd halve the depth and double the width and height of the previous layer. Again, we use batch normalization and leaky ReLU. For each of these layers, the general scheme is convolution > batch norm > leaky ReLU.
You keep stacking layers up like this until you get the final transposed convolution layer with shape 32x32x3. Below is the archicture used in the original DCGAN paper:
Note that the final layer here is 64x64x3, while for our SVHN dataset, we only want it to be 32x32x3.
End of explanation
"""
def discriminator(x, reuse=False, alpha=0.2):
    """
    Build the DCGAN discriminator: 32x32x3 image -> real/fake probability.

    :param x: Input image tensor of shape (batch, 32, 32, 3)
    :param reuse: Reuse the 'discriminator' variable scope when True
    :param alpha: Leak coefficient of the leaky-ReLU activations
    :return: (sigmoid output, raw logits), each of shape (batch, 1)
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Input layer is 32x32x3; no batch norm on the first conv layer
        x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
        relu1 = tf.maximum(alpha * x1, x1)
        # 16x16x64
        x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')
        bn2 = tf.layers.batch_normalization(x2, training=True)
        relu2 = tf.maximum(alpha * bn2, bn2)
        # 8x8x128
        x3 = tf.layers.conv2d(relu2, 256, 5, strides=2, padding='same')
        bn3 = tf.layers.batch_normalization(x3, training=True)
        relu3 = tf.maximum(alpha * bn3, bn3)
        # 4x4x256
        # Flatten it before the single-logit dense output layer
        flat = tf.reshape(relu3, (-1, 4*4*256))
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)
        return out, logits
"""
Explanation: Discriminator
Here you'll build the discriminator. This is basically just a convolutional classifier like you've build before. The input to the discriminator are 32x32x3 tensors/images. You'll want a few convolutional layers, then a fully connected layer for the output. As before, we want a sigmoid output, and you'll need to return the logits as well. For the depths of the convolutional layers I suggest starting with 16, 32, 64 filters in the first layer, then double the depth as you add layers. Note that in the DCGAN paper, they did all the downsampling using only strided convolutional layers with no maxpool layers.
You'll also want to use batch normalization with tf.layers.batch_normalization on each layer except the first convolutional and output layers. Again, each layer should look something like convolution > batch norm > leaky ReLU.
Note: in this project, your batch normalization layers will always use batch statistics. (That is, always set training to True.) That's because we are only interested in using the discriminator to help train the generator. However, if you wanted to use the discriminator for inference later, then you would need to set the training parameter appropriately.
End of explanation
"""
def model_loss(input_real, input_z, output_dim, alpha=0.2):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param output_dim: The number of channels in the output image
    :param alpha: leaky-ReLU slope passed through to the networks
    :return: A tuple of (discriminator loss, generator loss)
    """
    fake_images = generator(input_z, output_dim, alpha=alpha)
    d_out_real, d_logits_real = discriminator(input_real, alpha=alpha)
    d_out_fake, d_logits_fake = discriminator(fake_images, reuse=True, alpha=alpha)

    def _mean_sigmoid_loss(logits, labels):
        # Mean sigmoid cross-entropy against the given target labels
        return tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))

    # Discriminator: real images labeled 1, generated images labeled 0
    d_loss = (_mean_sigmoid_loss(d_logits_real, tf.ones_like(d_out_real))
              + _mean_sigmoid_loss(d_logits_fake, tf.zeros_like(d_out_fake)))
    # Generator: tries to make the discriminator output 1 on fakes
    g_loss = _mean_sigmoid_loss(d_logits_fake, tf.ones_like(d_out_fake))

    return d_loss, g_loss
"""
Explanation: Model Loss
Calculating the loss like before, nothing new here.
End of explanation
"""
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
    # Partition trainable variables by the scope prefix they were created under
    d_vars, g_vars = [], []
    for var in tf.trainable_variables():
        if var.name.startswith('discriminator'):
            d_vars.append(var)
        elif var.name.startswith('generator'):
            g_vars.append(var)

    # The control_dependencies block makes the batch-norm moving statistics
    # (collected in UPDATE_OPS) run alongside each training step
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)

    return d_train_opt, g_train_opt
"""
Explanation: Optimizers
Not much new here, but notice how the train operations are wrapped in a with tf.control_dependencies block so the batch normalization layers can update their population statistics.
End of explanation
"""
class GAN:
    """Bundle the GAN's input placeholders, losses, and training ops.

    Packaging the graph nodes in one object makes the network easy to pass
    around (e.g. to the `train` function).
    """
    def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5):
        """
        :param real_size: (height, width, channels) of the real images
        :param z_size: dimensionality of the generator's noise input
        :param learning_rate: Adam learning rate for both networks
        :param alpha: leaky-ReLU slope used in both networks
        :param beta1: Adam first-moment decay rate
        """
        tf.reset_default_graph()

        self.input_real, self.input_z = model_inputs(real_size, z_size)

        # Bug fix: alpha was previously hard-coded to 0.2 here, silently
        # ignoring the constructor's `alpha` argument.
        self.d_loss, self.g_loss = model_loss(self.input_real, self.input_z,
                                              real_size[2], alpha=alpha)

        self.d_opt, self.g_opt = model_opt(self.d_loss, self.g_loss, learning_rate, beta1)
"""
Explanation: Building the model
Here we can use the functions we defined above to build the model as a class. This will make it easier to move the network around in our code since the nodes and operations in the graph are packaged in one object.
End of explanation
"""
def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
    """Display a grid of generated images from one training snapshot.

    :param epoch: index into `samples` (e.g. -1 for the most recent snapshot)
    :param samples: list of arrays of generated images, one entry per snapshot
    :param nrows: number of grid rows
    :param ncols: number of grid columns
    :param figsize: matplotlib figure size
    :return: the (figure, axes) pair
    """
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
                             sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale each image to the displayable 0-255 uint8 range
        img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
        # NOTE(review): 'box-forced' was removed in matplotlib 3.x; newer
        # versions require 'box' here — confirm the target matplotlib version.
        ax.set_adjustable('box-forced')
        # Fixed: dropped the unused `im = ...` binding of imshow's return value
        ax.imshow(img, aspect='equal')
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
"""
Explanation: Here is a function for displaying generated images.
End of explanation
"""
def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)):
    # Train the GAN: for each minibatch, update the discriminator then the
    # generator, periodically printing losses and showing generated samples.
    # Returns (losses, samples) for later inspection.
    # NOTE(review): relies on module-level names `z_size`, `generator`, and
    # `pkl` (presumably `import pickle as pkl`) being defined elsewhere in the
    # notebook — confirm.
    saver = tf.train.Saver()
    # Fixed noise vectors so displayed samples are comparable across steps
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for x, y in dataset.batches(batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

                # Run optimizers
                # input_real is fed to g_opt too: the generator doesn't use it,
                # but the control_dependencies block in model_opt requires it
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % print_every == 0:
                    # At the end of each epoch, get the losses and print them out
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    # training=False so batch norm uses population statistics
                    # rather than batch statistics for the displayed samples
                    gen_samples = sess.run(
                                   generator(net.input_z, 3, reuse=True, training=False),
                                   feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1, samples, 6, 12, figsize=figsize)
                    plt.show()

        # Persist the trained model and the history of generated samples
        saver.save(sess, './checkpoints/generator.ckpt')

    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    return losses, samples
"""
Explanation: And another function we can use to train our network. Notice when we call generator to create the samples to display, we set training to False. That's so the batch normalization layers will use the population statistics rather than the batch statistics. Also notice that we set the net.input_real placeholder when we run the generator's optimizer. The generator doesn't actually use it, but we'd get an error without it because of the tf.control_dependencies block we created in model_opt.
End of explanation
"""
# Hyperparameters — GANs are sensitive to these; see the DCGAN paper for the
# values the authors used.
real_size = (32,32,3)
z_size = 100
learning_rate = 0.0002
batch_size = 128
epochs = 25
alpha = 0.2
beta1 = 0.5

# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)

dataset = Dataset(trainset, testset)

losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))

# Plot discriminator and generator losses over training.
# NOTE(review): this plotting block and the view_samples call each appear
# twice below — presumably duplicated notebook cells; the second run simply
# redraws the same figures.
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()

fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()

_ = view_samples(-1, samples, 6, 12, figsize=(10,5))

_ = view_samples(-1, samples, 6, 12, figsize=(10,5))
"""
Explanation: Hyperparameters
GANs are very sensitive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read the DCGAN paper to see what worked for them.
End of explanation
"""
|
datacommonsorg/api-python | notebooks/intro_data_science/Introduction_to_Clustering.ipynb | apache-2.0 | !pip install datacommons --upgrade --quiet
!pip install datacommons_pandas --upgrade --quiet
import datacommons
import datacommons_pandas
import numpy as np
import pandas as pd
# for visualization
import matplotlib.pyplot as plt
import seaborn as sns
# for clustering
from sklearn.cluster import KMeans
"""
Explanation: <a href="https://colab.research.google.com/github/datacommonsorg/api-python/blob/master/notebooks/intro_data_science/Introduction_to_Clustering.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Copyright 2022 Google LLC.
SPDX-License-Identifier: Apache-2.0
Introduction to Clustering
Previously, we've seen ways to develop models for supervised learning, or where we have labels, categories, or some other ground-truth values we can use to learn relationships in our data from. But what if we're interested in unsupervised learning? That is, if we have some data without matching labels, is there a way to elucidate patterns inherent in the data itself, without knowing what those relationships are a priori?
One common unsupervised learning technique is clustering, sometimes also known as cluster analysis. The idea behind clustering is to divide your datapoints into groups (clusters), such that data points in each group are more similar to each other than to datapoints in other clusters. Clustering is used in many fields for data exploration.
In this assignment, we'll build some intuition through case studies where you'll apply clustering.
Learning Goals
Understand what clustering is and does
See how clustering can be used to find patterns in the data
Build intuition into how clustering works
Get some hands-on experience clustering real-world data
Part 0: A Brief Primer on K-Means
There are many different algorithms for clustering data. For this assignment, we'll be using the k-means algorithm, one of the simplest and most popular clustering algorithms out there.
Most notably, in k-means, you need to specify how many clusters the algorithm should divide your data into, so the number of clusters you select is a tunable hyperparameter. Then, the k-means algorithm attempts to create that many clusters by minimizing the euclidean distance between points in the same cluster.
Don't worry too much about implementation details right now, the goal of this assignment is to build intuition about clustering. We'll look more into the mechanics of how things work in part 2. If you're interested in the details, you can take a look at the wikipedia entry on k-means in the meantime.
Run the following code blocks to install and load all the packages you'll need for this assignment.
End of explanation
"""
# load dataset
from keras.datasets import fashion_mnist
# NOTE: this rebinds the name `fashion_mnist` from the imported module to the
# training-image array; the test split is discarded.
(fashion_mnist, labels), _ = fashion_mnist.load_data()
# limit to first 2000 images so runtimes are reasonable
fashion_mnist = fashion_mnist[:2000, :, :]

# Show the first 25 images as a sample
print(f"Loaded {fashion_mnist.shape[0]} images. The first 25 are:")
sns.set(rc={'figure.figsize':(11,8)})
for i in range(25):
    # define subplot (5x5 grid)
    plt.subplot(5, 5, i+1)
    # plot raw pixel data
    plt.imshow(fashion_mnist[i], cmap=plt.get_cmap('gray'))
    plt.axis("off")
plt.show()
"""
Explanation: Part 1: Images - Building Intuition with FashionMNIST
We'll start exploring clustering with a simple, intuitive case: clustering the FashionMNIST dataset.
The FashionMNIST dataset is a collection of over 60,000 (28x28) greyscale images of various clothing items (e.g. shoes, shirts, bags) that's often used as a nice toy dataset in computer vision circles. While this particular dataset already has labels for each image, we'll be working with the images only. That is, we'll see what patterns in FashionMNIST we can recover without any labels.
Let's start by loading the dataset and viewing some sample images.
End of explanation
"""
# squash images into 1D
# Each 28x28 image becomes a 784-element row vector, the 1D feature format
# KMeans expects.
fashion_data = fashion_mnist.reshape((-1, 28*28))
"""
Explanation: To cluster the images, we'll need to convert the images into a format we can pass into our KMeans model, which expects 1D feature vectors. For this assignment, we'll just flatten our image. This is akin to cutting each image up into rows, and concatenating the rows end-to-end to form one long, skinny image.
Note that this is a rather naive way to vectorize our images, and there are definitely better ways to represent images for clustering. We'll stick with this for now to keep things simple.
End of explanation
"""
# set number of clusters
k = 2 #Change me!

# cluster the images (each row of fashion_data is one flattened 28x28 image)
kmeans = KMeans(n_clusters=k).fit(fashion_data)

# create a dictionary of clusters to images
# Maps cluster index -> list of 28x28 images assigned to that cluster.
clusters = {n:[] for n in range(k)}
for i in range(fashion_data.shape[0]):
    key = kmeans.labels_[i]
    # un-flatten the row vector back to a 2D image for display
    value = fashion_data[i,:].reshape(1, 28, 28).squeeze()
    clusters[key].append(value)

# display images from each cluster
sns.set(rc={'figure.figsize':(6,4)})
for cluster_num, images in clusters.items():
    print(f'Cluster {cluster_num} contains {len(images)} images. The first 25 are:')
    for i in range(min(25, len(images))):
        # define subplot (5x5 grid)
        plt.subplot(5, 5, i+1)
        # plot raw pixel data
        plt.imshow(images[i], cmap=plt.get_cmap('gray'))
        plt.axis("off")
    # show the figure
    plt.show()
"""
Explanation: Now run the code box below using different numbers of clusters, and note how the clustering results change.
End of explanation
"""
# Data Commons IDs (DCIDs) of the ~100 countries to cluster
dcids = [
    'country/AGO', 'country/ALB', 'country/ARG', 'country/ARM', 'country/AUS',
    'country/AZE', 'country/BDI', 'country/BGD', 'country/BGR', 'country/BIH',
    'country/BLZ', 'country/BOL', 'country/BRA', 'country/BTN', 'country/BWA',
    'country/CAN', 'country/CHL', 'country/CHN', 'country/CMR', 'country/COD',
    'country/COG', 'country/COL', 'country/CRI', 'country/CZE', 'country/DOM',
    'country/DZA', 'country/ECU', 'country/EGY', 'country/ETH', 'country/FJI',
    'country/GAB', 'country/GEO', 'country/GHA', 'country/GTM', 'country/GUY',
    'country/HND', 'country/IDN', 'country/IND', 'country/IRN', 'country/JAM',
    'country/JOR', 'country/JPN', 'country/KAZ', 'country/KEN', 'country/KGZ',
    'country/KIR', 'country/KOR', 'country/LAO', 'country/LBN', 'country/LCA',
    'country/LSO', 'country/MAR', 'country/MDA', 'country/MDG', 'country/MEX',
    'country/MKD', 'country/MLI', 'country/MMR', 'country/MNE', 'country/MNG',
    'country/MOZ', 'country/MRT', 'country/MWI', 'country/MYS', 'country/NAM',
    'country/NER', 'country/NGA', 'country/NIC', 'country/NPL', 'country/PAK',
    'country/PAN', 'country/PER', 'country/PHL', 'country/PNG', 'country/PRY',
    'country/ROU', 'country/RWA', 'country/SDN', 'country/SLV', 'country/SRB',
    'country/SWZ', 'country/SYR', 'country/THA', 'country/TJK', 'country/TKM',
    'country/TLS', 'country/TON', 'country/TTO', 'country/TUN', 'country/TUR',
    'country/TZA', 'country/UGA', 'country/UKR', 'country/USA', 'country/UZB',
    'country/VNM', 'country/VUT', 'country/WSM', 'country/YEM', 'country/ZAF',
    'country/ZMB', 'country/ZWE'
]

# Statistical variables to fetch for each country (one feature column each)
stat_vars_to_query = [
    "Amount_Emissions_CarbonDioxide_PerCapita",
    "LifeExpectancy_Person",
    "Count_Person_IsInternetUser_PerCapita",
    "GrowthRate_Count_Person",
    "Count_Person_Upto4Years_Overweight_AsFractionOf_Count_Person_Upto4Years",
    "GiniIndex_EconomicActivity",
    "Count_Product_MobileCellularSubscription_AsFractionOf_Count_Person",
    "Amount_EconomicActivity_GrossDomesticProduction_Nominal_PerCapita",
    "FertilityRate_Person_Female",
    "Count_Death_AsAFractionOfCount_Person"
]

# Build one row per country, one column per statistical variable
df = datacommons_pandas.build_multivariate_dataframe(dcids,stat_vars_to_query)

# swap index for names: replace DCIDs with human-readable country names
df.insert(0, 'name', df.index.map(datacommons.get_property_values(df.index, 'name')).str[0])
df.set_index('name', drop=True, inplace=True)

display(df)
"""
Explanation: 1.1) Effect of number of clusters
Q1: Try the settings k=2, k=10, and k=15. What value of k results in clusters that are most meaningful?
A1: We should expect k=10 to work best out of the options given. (FashionMNIST had 10 categories). Students might rightfully point out that the "best" k can depend on what you're looking for (e.g. if k=2 is just shoes vs non-shoes, this might be the right level of nuance for your application!)
Q2: What happens if k is too small? What happens if k is too large?
A2: Too small and the clusters will still look heterogenous. Too large and we'll have clusters that seem to represent the same object (e.g. multiple clusters of shirts).
1.2) What traits get elucidated?
Q3: Let's go back to k=2. What features of the images does KMeans seem to be using to split the data into two groups? Why do you think this was the feature used?
A3: k=2 tends to cluster based on image intensity (e.g. dark vs light images). Because k-means uses euclidean distance between pixel values, high on average and low on average pixel values would be the most distinguishing feature at the k=2 level.
Q4: Now compare k=10 with k=8. Which items or traits group together in k=8 compared to k=10?
A4: Students should notice that items that look similar (e.g. bag with similar area and shading to shirts) will group first with those items that are most similar.
1.3) Extension Question
Q5 (Extension): Imagine you have a set of 1 million unlabeled images that look just like these fashion MNIST images, and you want to generate categorical labels for each of the images. However, labeling each of those 1 million images is too time intensive and costly.
Devise a strategy for how you could use clustering to help you label each of the images.
A5: Cluster the 1 million images. Label each of the clusters, and assign every image the label corresponding to its cluster
This is semi-supervised learning!
Part 2: Real-World Data
Now that we've built up some intuition on what k-means does, let's try clustering on some more complex, real world data. We've provided a list of the DCIDs of just over 100 different countries around the world, along with a list of DCIDs statistical variables to start out with.
In case the DCID names are unclear, we'll be looking at the following statistics for each country:
* CO2 emissions per capita
* Life expectancy
* Number of internet users per capita
* Population growth rate
* Percentage of population that is overweight
* Gini Index (A measure of economic inequality)
* Percentage of population with mobile phone subscriptions
* Gross domestic product per captia
* Fertility rate
* Number of deaths per year, normalized by population
Run the following code boxes to load and cluster the data associated with each country.
End of explanation
"""
# normalize the values
# Z-score standardization: subtract each column's mean and divide by its
# standard deviation, so no single feature dominates the euclidean distances.
mean_df = df.mean()
std_df = df.std()
normalized_df = ((df-mean_df)/std_df)
display(normalized_df)
"""
Explanation: 2.1) The Importance of Normalization
Scroll through the dataframe generated by the codebox above, and take note of the scales of each of the features. They can vary drastically in magnitude! Let's normalize our data before clustering.
Q1: What do you think would happen if we didn't normalize our data before clustering?
A1:Generally the features with higher magnitudes will end up dominating the clustering.
End of explanation
"""
# Clustering using K-means
n_clusters = 3
kmeans_model = KMeans(n_clusters).fit(normalized_df)
# One row per country with its assigned cluster label
labels_df = pd.DataFrame(data=np.transpose(kmeans_model.labels_), index=normalized_df.index, columns=['cluster'])

# list countries in each cluster:
for i in range(n_clusters):
    print(f'Countries in Cluster {i}:')
    print(labels_df.index[labels_df['cluster']==i].tolist())
    # cluster size
    print(len(labels_df.index[labels_df['cluster']==i].tolist()))
"""
Explanation: 2.2) Interpreting Clusters
Now let's cluster our data! Once again, play around with $k$ to see how the results change.
End of explanation
"""
# Get centroids of each cluster.
for i in range(n_clusters):
    print(f'\nCluster {i}:')
    # display non-normalized mean values
    # (clustering was done on normalized data; raw means are easier to read)
    mean_to_display = df[labels_df['cluster']==i].mean()
    display(mean_to_display)
"""
Explanation: Analyzing Centroids
What characterizes each of the clusters?
It can be a little hard to tell what characteristics each cluster has latched on to from cluster membership alone. One way to characterize clusters is to look at the centroids, which are the average values of each cluster. One can think of the centroids as describing the average group member.
Run the following code box to display the values of the centroids of each cluster. Note: We're displaying the non-normalized values for better interpretability. The clustering was still performed on normalized values.
End of explanation
"""
# get normalized values
# Build one row per cluster holding its centroid in normalized units.
normalized_means = []
for i in range(n_clusters):
    # calculate normalized values for the next part
    mean_normalized = normalized_df[labels_df['cluster']==i].mean()
    normalized_means.append(mean_normalized.to_frame().transpose())
normalized_means_df = pd.concat(normalized_means)

# For better visualization, we'll multiply the following features by -1
# so that a higher value is associated with more development.
features_to_flip = [
    "GiniIndex_EconomicActivity",
    "GrowthRate_Count_Person",
    "FertilityRate_Person_Female"
]
for column in features_to_flip:
    normalized_means_df[column] *= -1

# Plot heatmap
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.set(font_scale = 1.5)
# center=0 anchors the neutral color at the global (normalized) mean
ax = sns.heatmap(normalized_means_df.to_numpy(), linewidth=0.5, xticklabels=normalized_means_df.columns, center=0)
ax.set_ylabel("Cluster")
plt.show()
"""
Explanation: Visualizing Centroids
These values can be difficult to compare on their own. One good way to visualize cluster centroids is by using a color-coded heatmap of normalized values. Use the code box below to generate such a heat map.
End of explanation
"""
# Held-out countries (Haiti, Germany, Iraq) that were not used for clustering
dcids_holdout = ['country/HTI','country/DEU', 'country/IRQ']

# get values for each holdout
df_holdout = datacommons_pandas.build_multivariate_dataframe(dcids_holdout,stat_vars_to_query)
df_holdout = df_holdout.dropna()

# swap index for names (DCIDs -> human-readable country names)
df_holdout.insert(0, 'name', df_holdout.index.map(datacommons.get_property_values(df_holdout.index, 'name')).str[0])
df_holdout.set_index('name', drop=True, inplace=True)

# show values
display(df_holdout)

# normalized version for clustering later
# Use the training set's mean/std so holdouts are on the same scale as the
# existing clusters
normalized_df_holdout = ((df_holdout-mean_df)/std_df)
"""
Explanation: For Q2-Q4, please answer using k=3.
Q2: What does each cluster seem to represent?
Now, here's a list of 3 held-out countries (i.e., countries that weren't part of the original countries we used to create the clusters).
Germany
Haiti
Iraq
Q3: For each of the countries listed above, make a prediction for which cluster the country would be a member of.
Run the code box below to get feature values for each country that can help you answer Q3.
End of explanation
"""
# find which cluster centroid is closest
# Assign each held-out country to the cluster whose centroid is nearest in
# euclidean distance over the normalized feature space.
for country in df_holdout.index:
    country_data = normalized_df_holdout.loc[country].to_numpy()
    country_data = country_data[np.newaxis, :]  # shape (1, n_features) for broadcasting
    difference = normalized_means_df.to_numpy() - country_data
    distance = np.linalg.norm(difference,axis=1)  # one distance per centroid
    cluster_membership = np.argmin(distance, axis=0)
    print(f"{country} belongs to cluster {cluster_membership}")
"""
Explanation: Now, let's see where these countries would actually have clustered.
End of explanation
"""
|
jornvdent/WUR-Geo-Scripting-Course | Lesson 9/Excercise 9.ipynb | gpl-3.0 | from osgeo import ogr
from osgeo import osr
import os
# Verify that this GDAL/OGR build includes the ESRI Shapefile driver before
# attempting to create one. (Python 2 print syntax.)
driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName( driverName )
if drv is None:
    print "%s driver not available.\n" % driverName
else:
    print "%s driver IS available.\n" % driverName
"""
Explanation: Solution Exercise 9 Team Hadochi
Jorn van der Ent
Michiel Voermans
23 January 2017
Load modules and check for presence of the ESRI Shapefile driver
End of explanation
"""
# All shapefile/KML output below is written relative to the 'data' directory
os.chdir('./data')
"""
Explanation: Set working directory to 'data'
End of explanation
"""
# Interactively collect a layer name and a list of WGS 84 point coordinates
# from the user (Python 2 raw_input).
# NOTE(review): the prompts label x as latitude and y as longitude, but OGR
# point geometries use x=longitude, y=latitude; the Amsterdam example below
# (lon ~4.897, lat ~52.378) suggests the prompt labels are swapped — confirm.
layername = raw_input("Name of Layer: ")
pointnumber = raw_input("How many points do you want to insert? ")
pointcoordinates = []
for number in range(1, (int(pointnumber)+1)):
    x = raw_input(("What is the Latitude (WGS 84) of Point %s ? " % str(number)))
    y = raw_input(("What is the Longitude (WGS 84) of Point %s ? " % str(number)))
    pointcoordinates += [(float(x), float(y))]

# e.g.:
# pointcoordinates =[(4.897070, 52.377956), (5.104480, 52.092876)]
"""
Explanation: Interactive input system
End of explanation
"""
# Set filename for the output shapefile
fn = layername + ".shp"
ds = drv.CreateDataSource(fn)

# Set spatial reference
# EPSG:4326 = WGS 84 geographic coordinates, matching the user input
spatialReference = osr.SpatialReference()
spatialReference.ImportFromEPSG(4326)

## Create Layer (point geometry type)
layer=ds.CreateLayer(layername, spatialReference, ogr.wkbPoint)

# Get layer Definition (field/geometry schema used to build features)
layerDefinition = layer.GetLayerDefn()

# Write one point feature per user-supplied coordinate pair
for pointcoord in pointcoordinates:
    ## Create a point
    point = ogr.Geometry(ogr.wkbPoint)
    ## SetPoint(self, int point, double x, double y, double z = 0)
    point.SetPoint(0, pointcoord[0], pointcoord[1])
    ## Feature is defined from properties of the layer:e.g:
    feature = ogr.Feature(layerDefinition)
    ## Lets add the points to the feature
    feature.SetGeometry(point)
    ## Lets store the feature in a layer
    layer.CreateFeature(feature)

# Flush the data source to disk and release the handle
ds.Destroy()
"""
Explanation: Create shape file from input
End of explanation
"""
# Convert the shapefile to KML via the ogr2ogr command-line tool,
# reprojecting to CRS:84 (lon/lat WGS 84) as KML requires.
# NOTE(review): os.system's exit status is discarded, so a failure (e.g.
# ogr2ogr not on PATH) would go unnoticed.
bashcommand = 'ogr2ogr -f KML -t_srs crs:84 points.kml points.shp'
os.system(bashcommand)
"""
Explanation: Convert shapefile to KML with bash
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.