code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
# %matplotlib inline
import nltk
from io import StringIO
import collections as co
from datetime import datetime
import datetime as dt
import matplotlib.pyplot as plt
# Load the review dataset; Latin-1 encoding handles non-UTF-8 bytes in the file.
df = pd.read_csv('Replaced.csv',encoding='ISO-8859-1')
df.head()
# #### Total number of ratings
print('In all, there are ',df['rating'].nunique(),'types of ratings in the dataset: ',df['rating'].unique())
# #### Total number of products
print('In all, there are ',df['name'].nunique(),'products in the dataset: ',df['name'].unique())
# ***How rating is distributed***
plt.figure(figsize=(12,8))
# Pie chart of rating frequencies, labelled with percentages.
df.rating.value_counts().plot(kind='pie',autopct='%1.1f%%')
plt.title('Number of appearances in dataset')
plt.show()
# **Year-wise Distribution of reviews**
# Parse review dates; unparseable entries become NaT rather than raising.
df['reviewsdate'] = pd.to_datetime(df['reviewsdate'], errors='coerce')
# NOTE: this overwrites the full date with just the year (int/NaN) in place.
df['reviewsdate']=df.reviewsdate.dt.year
df[:9]
def lineplot(x_data, y_data, x_label="", y_label="", title=""):
    """Draw a single line of y_data against x_data on a fresh axes.

    All labels default to empty strings; the figure is created but not shown.
    """
    _, ax = plt.subplots()
    # Fixed styling: 2pt line, fully opaque, light-blue color.
    ax.plot(x_data, y_data, lw=2, color='#539caf', alpha=1)
    ax.set_title(title)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
print('The Ratings along with their occurence in every year:')
# Count of (year, rating) combinations.
df.groupby((['reviewsdate','rating'])).size()
print("The Year-wise distribution of products")
# Number of reviews per year, most frequent first.
year_no_of_shows=df["reviewsdate"].value_counts().sort_values(ascending=False)
plt.figure(figsize=(12,4))
year_no_of_shows.plot(title='Years with the number of products sold',kind="bar")
# ***Review Level***
# Keep only rows that actually have a review title.
df1=df[df['title'].notnull()]
# #### Removing rows with no values
# Concatenate all titles into one string via an in-memory buffer.
string=StringIO()
df1['title'].apply(lambda x: string.write(x))
x=string.getvalue()
string.close()
x=x.lower()
x=x.split()
nltk.download('words')
nltk.download('stopwords')
# Counters are used here purely as fast membership sets.
words = co.Counter(nltk.corpus.words.words())
stopWords =co.Counter( nltk.corpus.stopwords.words() )
# Keep only real English words that are not stopwords.
x=[i for i in x if i in words and i not in stopWords]
string=" ".join(x)
c = co.Counter(x)
# ***Most Common Words in reviews***
most_common_10=c.most_common(10)
print('The 10 Most Common Words in reviews are: ')
most_common_10
# **Popular Products**
# Word cloud of product names; size of a word reflects its frequency.
# NOTE(review): " ".join(df['name']) assumes no missing names — verify,
# a NaN in 'name' would raise a TypeError here.
wordcloud1 = WordCloud(
    background_color='black',
    width=8000,
    height=5000,
    relative_scaling = 1.0
).generate(" ".join(df['name']))
plt.imshow(wordcloud1)
plt.axis('off')
plt.show()
|
.ipynb_checkpoints/Part 1 - Data Visualization-checkpoint.ipynb
|
# # 14.7. Creating a route planner for a road network
import io
import zipfile
import requests
import networkx as nx
import numpy as np
import pandas as pd
import json
import smopy
import matplotlib.pyplot as plt
# %matplotlib inline
# Download and extract the California primary/secondary roads shapefile.
url = ('https://github.com/ipython-books/'
       'cookbook-2nd-data/blob/master/'
       'road.zip?raw=true')
r = io.BytesIO(requests.get(url).content)
zipfile.ZipFile(r).extractall('data')
# NOTE(review): nx.read_shp and nx.connected_component_subgraphs were removed
# in newer networkx releases — this cell requires an older networkx; confirm
# the pinned version before running.
g = nx.read_shp('data/tl_2013_06_prisecroads.shp')
# Keep only the largest connected component of the undirected road graph.
sgs = list(nx.connected_component_subgraphs(
    g.to_undirected()))
i = np.argmax([len(sg) for sg in sgs])
sg = sgs[i]
len(sg)
# Start (Monterey) and end (Los Angeles) positions as (lat, lon).
pos0 = (36.6026, -121.9026)
pos1 = (34.0569, -118.2427)
def get_path(n0, n1):
    """Return the array of (lon, lat) points along the road edge (n0, n1).

    n0 and n1 must be adjacent nodes in the graph `sg`; the edge stores its
    geometry as a GeoJSON string under the 'Json' key.
    """
    edge = sg[n0][n1]
    coords = json.loads(edge['Json'])['coordinates']
    return np.array(coords)
# +
# from https://stackoverflow.com/a/8859667/1595060
# Mean Earth radius in kilometres.
EARTH_R = 6372.8
def geocalc(lat0, lon0, lat1, lon1):
    """Return the great-circle distance (km) between two geographic points.

    Uses the numerically stable arctan2 (Vincenty-style) formulation; inputs
    are in degrees and may be scalars or numpy arrays (broadcast together).
    """
    lat0, lon0, lat1, lon1 = (np.radians(v) for v in (lat0, lon0, lat1, lon1))
    dlon = lon0 - lon1
    cos0, sin0 = np.cos(lat0), np.sin(lat0)
    cos1, sin1 = np.cos(lat1), np.sin(lat1)
    # Numerator and denominator of tan(central angle).
    num = np.sqrt((cos1 * np.sin(dlon)) ** 2 +
                  (cos0 * sin1 - sin0 * cos1 * np.cos(dlon)) ** 2)
    den = sin0 * sin1 + cos0 * cos1 * np.cos(dlon)
    return EARTH_R * np.arctan2(num, den)
# -
def get_path_length(path):
    """Return the total length (km) of a polyline of (lon, lat) points."""
    lon, lat = path[:, 0], path[:, 1]
    # Sum great-circle distances between consecutive points.
    return np.sum(geocalc(lat[1:], lon[1:], lat[:-1], lon[:-1]))
# Compute the length of the road segments.
# Compute the length of the road segments.
for n0, n1 in sg.edges:
    path = get_path(n0, n1)
    distance = get_path_length(path)
    # Store the length as an edge attribute for shortest-path weighting.
    sg.edges[n0, n1]['distance'] = distance
nodes = np.array(sg.nodes())
# Get the closest nodes in the graph.
# Nodes are (lon, lat) while pos0/pos1 are (lat, lon), hence the ::-1 flip.
pos0_i = np.argmin(
    np.sum((nodes[:, ::-1] - pos0)**2, axis=1))
pos1_i = np.argmin(
    np.sum((nodes[:, ::-1] - pos1)**2, axis=1))
# Compute the shortest path, weighted by segment length in km.
path = nx.shortest_path(
    sg,
    source=tuple(nodes[pos0_i]),
    target=tuple(nodes[pos1_i]),
    weight='distance')
len(path)
# + podoc={"output_text": "Output"}
# One row per traversed edge, with road name/type and length.
roads = pd.DataFrame(
    [sg.edges[path[i], path[i + 1]]
     for i in range(len(path) - 1)],
    columns=['FULLNAME', 'MTFCC',
             'RTTYP', 'distance'])
roads
m = smopy.Map(pos0, pos1, z=7, margin=.1)
def get_full_path(path):
    """Return the concatenated (lon, lat) positions along a node path.

    Each edge's point array may be stored in either direction; segments are
    flipped so consecutive pieces join end-to-start before stacking.
    """
    p_list = []
    curp = None  # endpoint of the previously appended segment
    for i in range(len(path) - 1):
        p = get_path(path[i], path[i + 1])
        if curp is None:
            curp = p
        # Reverse the segment if its start is farther from the current
        # endpoint than its end is.
        if (np.sum((p[0] - curp) ** 2) >
                np.sum((p[-1] - curp) ** 2)):
            p = p[::-1, :]
        p_list.append(p)
        curp = p[-1]
    return np.vstack(p_list)
# Full (lon, lat) polyline of the route, then projected to map pixels.
linepath = get_full_path(path)
x, y = m.to_pixels(linepath[:, 1], linepath[:, 0])
# + podoc={"output_text": "<matplotlib.figure.Figure at 0x14957cc0>"}
ax = m.show_mpl(figsize=(8, 8))
# Plot the itinerary.
ax.plot(x, y, '-k', lw=3)
# Mark our two positions: blue = start, red = destination.
ax.plot(x[0], y[0], 'ob', ms=20)
ax.plot(x[-1], y[-1], 'or', ms=20)
|
chapter14_graphgeo/07_gps.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import numba
import numba.cffi_support
import importlib
import sunode
from sunode import lambdify
import sympy as sy
import matplotlib.pyplot as plt
# Structured dtype for the ODE state: biomass X and substrate S.
state_dtype = np.dtype([('X', (np.float64, ())), ('S', (np.float64, ()))], align=True)
state_ndtype = numba.from_dtype(state_dtype)
# +
# User-supplied data passed through CVODE to the RHS: parameters we
# differentiate with respect to ('deriv_params') and fixed ones ('params').
user_dtype = np.dtype(
    [
        ('deriv_params', [
            ('mu_max', (np.float64, ())),
            ('Y_XS', (np.float64, ())),
        ]),
        ('params', [
            ('K_S', (np.float64, ())),
        ]),
    ],
    align=True
)
user_ndtype = numba.from_dtype(user_dtype)
user_ndtype
user_ndtype_p = numba.types.CPointer(user_ndtype)
# A single structured scalar (record) holding the parameter values.
user_data = np.zeros((1,), dtype=user_dtype)[0]
user_data['deriv_params']['mu_max'] = 0.4
user_data['deriv_params']['Y_XS'] = 0.5
user_data['params']['K_S'] = 0.02
user_data
# -
# log scale
@numba.njit
def rhs(out_state, t, state, deriv_params, params, section):
    """Monod growth RHS with state stored in log-space.

    Here state.X/state.S are log-transformed, so np.exp recovers the raw
    values and the derivatives written are d(log X)/dt and d(log S)/dt.
    NOTE: this definition is immediately shadowed by the linear-space `rhs`
    in the next cell; only one is active at a time.
    """
    mu_max = deriv_params.mu_max
    Y_XS = deriv_params.Y_XS
    K_S = params.K_S
    # dXdt = mu_max / (S + K_S) in raw variables, shared by both equations.
    dXdt = mu_max / (np.exp(state.S) + K_S)
    out_state.S = -1 / Y_XS * dXdt * np.exp(state.X)
    out_state.X = dXdt * np.exp(state.S)
    return 0
@numba.njit
def rhs(out_state, t, state, deriv_params, params, section):
    """Monod growth RHS in linear (untransformed) variables.

    dX/dt = mu_max * X * S / (S + K_S); substrate is consumed at rate
    dS/dt = -dX/dt / Y_XS. Returns 0 to signal success to CVODE.
    """
    mu_max = deriv_params.mu_max
    Y_XS = deriv_params.Y_XS
    K_S = params.K_S
    dXdt = mu_max * state.X * state.S / (state.S + K_S)
    out_state.S = -1 / Y_XS * dXdt
    out_state.X = dXdt
    return 0
def sympy_func(t, state, grad_params, params, section):
    """Placeholder symbolic RHS: returns zero derivatives for S and X.

    The state entries are read (but unused) to mirror the shape of the
    real right-hand-side functions above.
    """
    S = state['S']
    X = state['X']
    return dict(S=0, X=0)
def make_numba(sympy_func, arg_dtypes, out_dtypes):
    """Unimplemented stub: intended to compile a sympy RHS to numba."""
    ...
# API design sketch for a fluent solver-builder interface.
# NOTE(review): neither `solver` nor `numba_rhs` is defined anywhere in this
# notebook — executing this cell raises NameError; it documents intent only.
(
    solver()
    .coords({
        'time': np.linspace(0, 10)
    })
    .states({
        'S': (),
        'X': (),
    })
    .deriv_params({
        'mu_max': (),
        'Y_XS': (),
    })
    .params({
        'K_S': (),
    })
    .rhs(numba_rhs, backend='numba')
)
# Grab the cffi handle and compiled CVODES library from sunode.
ffi = sunode._cvodes.ffi
lib = sunode._cvodes.lib
# Teach numba about the cffi module and the opaque SUNDIALS handle types so
# they can be passed through @numba.cfunc callbacks.
# NOTE(review): numba.cffi_support was removed in newer numba releases
# (moved to numba.core.typing.cffi_utils) — confirm the pinned version.
numba.cffi_support.register_module(sunode._cvodes)
numba.cffi_support.register_type(
    ffi.typeof('N_Vector').item,
    numba.types.Opaque('N_Vector'))
numba.cffi_support.register_type(
    ffi.typeof('SUNMatrix').item,
    numba.types.Opaque('SUNMatrix'))
# Two state variables (X and S).
ndim = 2
y0 = sunode.empty_vector(ndim)
y0.data[:] = 0
abstol = sunode.empty_vector(ndim)
abstol.data[:] = [1e-8, 1e-8]
reltol = 1e-8
# BDF multistep method (stiff solver).
ode = lib.CVodeCreate(lib.CV_BDF)
#ode = lib.CVodeCreate(lib.CV_ADAMS)
# Raw pointer to the parameter record, handed to CVODE as user data.
user_data_p = ffi.cast('void *', ffi.addressof(ffi.from_buffer(user_data.data)))
# +
# Bind the accessor once so the cfunc body uses a fast local reference.
N_VGetArrayPointer_Serial = lib.N_VGetArrayPointer_Serial
func_type = numba.cffi_support.map_type(ffi.typeof('CVRhsFn'))
# Replace the trailing void* user-data argument with our typed pointer.
func_type = func_type.return_type(*(func_type.args[:-1] + (user_ndtype_p,)))
func_type
@numba.cfunc(func_type)
def rhs_wrapper(t, y_, out_, user_data_):
    """C-callable shim: unpack N_Vectors into records and call `rhs`."""
    y_ptr = N_VGetArrayPointer_Serial(y_)
    out_ptr = N_VGetArrayPointer_Serial(out_)
    y = numba.carray(y_ptr, (ndim,))
    out = numba.carray(out_ptr, (ndim,))
    # Reinterpret the flat float arrays as a single (X, S) record each.
    y = y.view(state_dtype)[0]
    out = out.view(state_dtype)[0]
    user_data = numba.carray(user_data_, (1,))[0]
    #grad_vars = user_data.grad_vars
    #p = user_data.p
    section = 0
    return rhs(out, t, y, user_data.deriv_params, user_data.params, section)
    #return rhs(t, y, out, user_data)
# -
# Initialize the integrator with the compiled RHS callback; prints the
# CVODE return flag (0 on success).
print(lib.CVodeInit(ode, rhs_wrapper.cffi, 0, y0.c_ptr))
#lib.CVodeSVtolerances(ode, reltol, abstol.c_ptr)
lib.CVodeSStolerances(ode, 1e-8, 1e-8)
# Dense direct linear solver for the 2x2 Jacobian system.
A = lib.SUNDenseMatrix(ndim, ndim)
linsolver = lib.SUNLinSol_Dense(y0.c_ptr, A)
lib.CVodeSetLinearSolver(ode, linsolver, A)
# +
#lib.CVodeSetJacFn(ode, Jac.cffi)
# -
lib.CVodeSetUserData(ode, user_data_p)
# Sensitivity vectors: one per differentiated parameter, zero-initialized.
nparam = 2
yS = lib.N_VCloneVectorArray(nparam, y0.c_ptr)
vecs = [sunode.basic.Vector(yS[i]) for i in range(nparam)]
for vec in vecs:
    vec.data[:] = 0
np.array([vec.data for vec in vecs])
# +
# Sensitivity RHS callback. NOTE(review): this cell is effectively dead code —
# the CVodeSensInit call below is commented out, `user_data.p` does not exist
# in user_dtype, and `df_dp_comp` / `jacobian_prod` are undefined here.
func_type = numba.cffi_support.map_type(ffi.typeof('CVSensRhsFn'))
args = list(func_type.args)
# Swap the void* user-data slot for our typed record pointer.
args[-3] = user_ndtype_p
func_type = func_type.return_type(*args)
func_type
@numba.cfunc(func_type)
def sens_rhs(n_params, t, y_, ydot_, yS_, out_, user_data_, tmp1_, tmp2_):
    """Compute d(yS_i)/dt = J @ yS_i + df/dp_i for each parameter i."""
    y_ptr = N_VGetArrayPointer_Serial(y_)
    y = numba.carray(y_ptr, (ndim,))
    y_dot_ptr = N_VGetArrayPointer_Serial(ydot_)
    ydot = numba.carray(y_dot_ptr, (ndim,))
    # Wrap each output / sensitivity N_Vector as a numpy view.
    out = []
    for i in range(n_params):
        out_i_ptr = N_VGetArrayPointer_Serial(out_[i])
        out_i = numba.carray(out_i_ptr, (ndim,))
        out.append(out_i)
    yS = []
    for i in range(n_params):
        yS_i_ptr = N_VGetArrayPointer_Serial(yS_[i])
        yS_i = numba.carray(yS_i_ptr, (ndim,))
        yS.append(yS_i)
    user_data = numba.carray(user_data_, (1,))[0]
    p = user_data.p  # NOTE(review): no 'p' field exists — would fail if run
    df_dp = df_dp_comp((), p.reshape((-1, 1)), y.reshape((-1, 1)))
    for i in range(n_params):
        s = yS[i]
        ySdot = out[i]
        jacobian_prod(t, s, y, None, ySdot, user_data)
        ySdot[:] += df_dp[:, i]
    return 0
# +
#assert 0 == lib.CVodeSensInit(ode, nparam, lib.CV_STAGGERED, sens_rhs.cffi, yS)
#assert 0 == lib.CVodeSensInit(ode, nparam, lib.CV_STAGGERED, ffi.cast('void*', 0), yS)
#assert 0 == lib.CVodeSensEEtolerances(ode)
#assert 0 == lib.CVodeSetSensErrCon(ode, 1)
# +
#pbar = np.ones(nparam)
#NULL_D = ffi.cast('double *', 0)
#NULL_I = ffi.cast('int *', 0)
#pbar_p = ffi.cast('double *', ffi.addressof(ffi.from_buffer(pbar.data)))
#p = ffi.cast('double *', ffi.addressof(ffi.from_buffer(user_data['deriv_params'])))
# +
#lib.CVodeSetSensParams(ode, p, pbar_p, NULL_I)
# -
# Output slot CVODE writes the reached time into.
time_p = ffi.new('double*')
time_p[0] = 0.
# +
tvals = np.linspace(0, 20, 200)
y_vals = []
sens_vals = []
#user_data['p'][...] = [0.4, 0.02, 0.5]
# -
#start = np.log([0.05, 20])
# Initial condition: X = 0.05, S = 20 (linear scale).
start = np.array([0.05, 20])
# Preallocated output buffer: one row of state per output time.
y_vals = np.empty((len(tvals), len(start)))
# +
# #%%timeit
# Bind library functions to module-level names so the njit'd solver below
# resolves them without attribute lookups.
CVodeReInit = lib.CVodeReInit
CVode = lib.CVode
CVodeGetSens = lib.CVodeGetSens
y0_ptr = y0.c_ptr
y0_data = y0.data
sens_data = tuple(vec.data for vec in vecs)
@numba.njit()
def solve(ode, y0_ptr, time_p, y0_data, start, t0, y_vals):
    """Integrate the ODE from t0 over the module-level `tvals` grid.

    Writes the state at each output time into row i of `y_vals` (modified in
    place). Closes over `tvals`, `CVode`, `CVodeReInit`, `CVodeGetSens`,
    `lib` and `yS` from module scope.
    """
    y0_data[:] = start
    # Reset the integrator to the new initial condition.
    CVodeReInit(ode, t0, y0_ptr)
    for i, t in enumerate(tvals):
        # CV_NORMAL steps internally until time t is reached; retry while the
        # solver reports a recoverable failure (-1).
        retval = -1
        while retval == -1:
            retval = CVode(ode, t, y0_ptr, time_p, lib.CV_NORMAL)
        y_vals[i, :] = y0_data
        # Pull the current sensitivity vectors into yS as a side effect.
        CVodeGetSens(ode, time_p, yS)
solve(ode, y0_ptr, time_p, y0_data, start, 0., y_vals)
# -
import xarray as xr
import pandas as pd
# %timeit xr.DataArray(y_vals)
# View the (ntime, 2) float buffer as a 1-D record array of (X, S).
data = y_vals.view(state_dtype)[:, 0]
# %timeit pd.DataFrame.from_records(y_vals.view(state_dtype)[:, 0])
state_dtype.fields.keys()
state_dtype['X'].fields
# One Dataset variable per state field, indexed by time.
data_xr = xr.Dataset({name: (('time'), data[name]) for name in state_dtype.fields})
xr.Dataset({name: (('time'), data[name]) for name in state_dtype.fields}).X.plot.line()
xr.Dataset({name: (('time'), data[name]) for name in state_dtype.fields}).S.plot.line()
data_xr.to_dataframe().plot()
data_xr
# %timeit xr.Dataset({name: data[name] for name in state_dtype.fields})
# %%prun -stime
# Profiling loop: repeated record-array -> DataFrame -> xarray conversion.
for _ in range(1000):
    pd.DataFrame.from_records(y_vals.view(state_dtype)[:, 0]).to_xarray()
user_data.reshape((1,))
# State trajectories (X then S) over the output grid.
plt.plot(tvals, y_vals[:, 0])
plt.plot(tvals, y_vals[:, 1])
# Exponentiated trajectories — only meaningful for the log-space RHS variant.
plt.plot(tvals, np.exp(np.array(y_vals)[:, 0]))
plt.plot(tvals, np.exp(np.array(y_vals)[:, 1]))
# NOTE(review): sens_vals is still the empty list assigned above — these
# sensitivity plots fail unless the commented-out sensitivity code is enabled.
plt.plot(tvals, np.array(sens_vals)[:, 0, 0])
plt.plot(tvals, np.array(sens_vals)[:, 0, 1])
plt.plot(tvals, np.array(sens_vals)[:, 0, 2])
plt.plot(tvals, np.array(sens_vals)[:, 1, 0])
plt.plot(tvals, np.array(sens_vals)[:, 1, 1])
plt.plot(tvals, np.array(sens_vals)[:, 1, 2])
# Query solver counters: RHS evaluations, then internal steps.
num_evals_p = ffi.new('long*')
num_evals_p[0] = 0
lib.CVodeGetNumRhsEvals(ode, num_evals_p)
num_evals_p[0]
lib.CVodeGetNumSteps(ode, num_evals_p)
num_evals_p[0]
time_p[0]
from scipy import integrate
def rhs_(t, y):
    """Adapter giving `rhs` the (t, y) -> dy/dt signature scipy expects.

    NOTE(review): this calls rhs(t, y, out, user_data), but `rhs` above is
    defined as rhs(out_state, t, state, deriv_params, params, section) —
    the argument order/count does not match; likely stale code, confirm.
    """
    out = np.empty_like(y)
    assert rhs(t, y, out, user_data) == 0
    return out
def jac_(t, y):
    """Jacobian adapter for scipy's BDF method.

    NOTE(review): `jacobian` is not defined anywhere in this notebook.
    """
    out = np.empty((ndim, ndim))
    assert jacobian(t, y, None, out, user_data) == 0
    return out
from scipy import integrate
# %%timeit
# Reference solve with scipy's BDF for timing comparison against CVODE.
out = integrate.solve_ivp(
    rhs_,
    jac=jac_,
    t_span=(0, tvals.max()),
    t_eval=tvals, y0=np.array([0.5, 0.6]), method='BDF',
    rtol=reltol, atol=abstol.data.copy())
# Repeat outside the timing magic so `out` is available below.
out = integrate.solve_ivp(
    rhs_,
    jac=jac_,
    t_span=(0, tvals.max()),
    t_eval=tvals, y0=np.array([0.5, 0.6]), method='BDF',
    rtol=reltol, atol=abstol.data.copy())
# +
# Overlay scipy (solid) and CVODE (dash-dot) trajectories.
plt.plot(tvals, out.y[0, :])
plt.plot(tvals, out.y[1, :])
plt.plot(tvals, np.array(y_vals)[:, 0], '.-.')
plt.plot(tvals, np.array(y_vals)[:, 1], '.-.')
# -
out
|
notebooks/pysundials-demo-sens-analytical.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import panel as pn
pn.extension('terminal')
# When developing applications that are to be used by multiple users and which may process a lot of data it is important to ensure the application is well optimized. Additionally complex applications may have very complex callbacks which are difficult to trace and debug. In this user guide section we will walk you through some of the best practices to debug your applications and profile your application to maximize performance.
# ## Caching
#
# The Panel architecture ensures that multiple user sessions can run in the same process and therefore have access to the same global state. This means that we can cache data in Panel's global `state` object, either by directly assigning to the `pn.state.cache` dictionary object or by using the `pn.state.as_cached` helper function.
#
# To assign to the cache manually, simply put the data load or expensive calculation in an `if`/`else` block which checks whether the custom key is already present:
#
# ```python
# if 'data' in pn.state.cache:
# data = pn.state.cache['data']
# else:
# pn.state.cache['data'] = data = ... # Load some data or perform an expensive computation
# ```
#
# The `as_cached` helper function on the other hand allows providing a custom key and a function and automatically caching the return value. If provided the `args` and `kwargs` will also be hashed making it easy to cache (or memoize) on the arguments to the function:
#
# ```python
# def load_data(*args, **kwargs):
# return ... # Load some data
#
# data = pn.state.as_cached('data', load_data, *args, **kwargs)
# ```
#
# The first time the app is loaded the data will be cached and subsequent sessions will simply look up the data in the cache, speeding up the process of rendering. If you want to warm up the cache before the first user visits the application you can also provide the `--warm` argument to the `panel serve` command, which will ensure the application is initialized once on launch.
# ## Concurrent processing
#
# When deploying a Panel application to be accessed by multiple users they will often access the same server simultaneously. To maintain responsiveness of the application when multiple users are interacting with it at the same time there are multiple approaches to concurrency, each with their own drawbacks and advantages:
#
# 1. `Load balancing`: A load balancer launches multiple instances of the Panel application and distributes network traffic between them. This ensures that the load is distributed across multiple servers but also requires a lot of configuration and resources.
# 2. `Multiple processes`: Launches multiple processes on a single machine, effectively load balancing across the processes. Much simpler to set up than a load balancer but you are limited by the compute and memory resources on one machine.
# 3. `Threading`: Attempts to distribute processing across multiple threads. Effectiveness depends on the operations being performed, I/O bound and CPU bound operations that release the GIL can easily be made concurrent in this way.
# 4. `AsyncIO`: Allows asynchronously processing I/O bound operations. Effective for many concurrent I/O operations but requires rewriting your application and callbacks to make use of `async`/`await` paradigm.
#
# ### Scaling across processes
#
# Both load balancing and starting multiple processes effectively spin up multiple copies of the same application and distribute the load across the processes. This results in duplication and therefore significantly higher overhead (basically scaling linearly with the number of processes). In applications where you are relying on global state (e.g. the `pn.state.cache`) this can introduce significant challenges to ensure that application state stays synchronized.
#
# #### Load balancing
#
# Setting up load balancing is a huge topic dependent on the precise system you are using so we won't go into any specific implementation here. In most cases you set up a reverse proxy (like NGINX) to distribute the load across multiple application servers. If you are using a system like Kubernetes it will also handle spinning up the servers for you and can even do so dynamically depending on the amount of concurrent users to ensure that you are not wasting resources when there are fewer users.
#
# <figure>
# <img src="https://www.nginx.com/wp-content/uploads/2014/07/what-is-load-balancing-diagram-NGINX-1024x518.png" width="768"></img>
# <figcaption>Diagram showing concept of load balancing (NGINX)</figcaption>
# </figure>
#
# Load balancing is the most complex approach to set up but is guaranteed to improve concurrent usage of your application since different users are not contending for access to the same process or even necessarily the same physical compute and memory resources. At the same time it is more wasteful of resources since it potentially occupies multiple machines and since each process is isolated there is no sharing of cached data or global state.
#
# #### Multiple processes
#
# Launching a Panel application on multiple processes is effectively a simpler version of load balancing with many of the same advantages and drawbacks. One major advantage is that it is easy to set up, when deploying your application with `panel serve` simply configure `--num-procs N`, where N is the number of processes. Generally choose an `N` that is no larger than the number of processors on your machine. This still uses significantly more resources since each process has the same overhead and all processes will be contending for the same memory and compute resources. However if your application is single-threaded and you have sufficient memory this is a simple way to make your application scale.
#
# ### Scaling within a single process
#
# Threading and async are both approaches to speed up processing in Python using concurrency in a single Python process. Since we can't provide a complete primer on either threading or asynchronous processing here, if you are not familiar with these concepts we recommend reading up on them before continuing. Read about [threading in Python here](https://realpython.com/intro-to-python-threading/) and [AsyncIO here](https://realpython.com/async-io-python/).
#
# When to use which approach cannot be answered easily and is never completely clear cut. As a general guide, however, `asyncio` can scale almost arbitrarily allowing you to perform thousands or even millions of IO bound operations concurrently, while threading limits you to the number of available threads. In practice this may never actually become relevant so the other main differences are that `async` coroutines are significantly more lightweight but that you have to carefully consider accessing shared objects across threads. Using `async` coroutines makes it very clear where concurrency occurs and therefore can make it easier to avoid race conditions and avoid having to think about locking a thread to access shared objects. However, in some situations threading can also be useful for CPU intensive operations where the operation being executed [releases the GIL](https://realpython.com/python-gil/), this includes many NumPy, Pandas and Numba functions.
#
# ### Threading
#
# Using threading in Panel can either be enabled manually, e.g. by managing your own thread pool and dispatching concurrent tasks to it, or it can be managed by Panel itself by setting the `config.nthreads` parameter (or equivalently by setting it with `pn.extension(nthreads=...)`). This will start a `ThreadPoolExecutor` with the specified number of threads (or if set to `0` it will set the number of threads based on your system, i.e. `min(32, os.cpu_count() + 4)`).
#
# Whenever an event is generated or a periodic callback fires Panel will then automatically dispatch the event to the executor. An event in this case refers to any action generated on the frontend such as the manipulation of a widget by a user or the interaction with a plot. If you are launching an application with `panel serve` you should configure this option on the CLI by setting `--num-threads`.
#
# To demonstrate the effect of enabling threading take this example below:
#
# ```python
# import panel as pn
#
# pn.extension(nthreads=2)
#
# def button_click(event):
#     print(f'Button clicked for the {event.new}th time.')
#     time.sleep(2) # Simulate long running operation
#     print(f'Finished processing {event.new}th click.')
#
# button = pn.widgets.Button(name='Click me!')
#
# button.on_click(button_click)
# ```
#
# When we click the button twice successively in a single-threaded context we will see the following output:
#
# ```
# > Button clicked for the 1th time.
# ... 2 second wait
# > Finished processing 1th click.
# > Button clicked for the 2th time.
# ... 2 second wait
# > Finished processing 2th click.
# ```
#
# In a threaded context on the other hand the two clicks will be processed concurrently:
#
# ```
# > Button clicked for the 1th time.
# > Button clicked for the 2th time.
# ... 2 second wait
# > Finished processing 1th click.
# > Finished processing 2th click.
# ```
#
# ### AsyncIO
#
# When using Python>=3.8 you can use async callbacks wherever you would ordinarily use a regular synchronous function. For instance you can use `pn.bind` on an async function:
# +
import aiohttp
widget = pn.widgets.IntSlider(start=0, end=10)
async def get_img(index):
    """Fetch placeholder image `index` and wrap the bytes in a JPG pane."""
    async with aiohttp.ClientSession() as session:
        async with session.get(f"https://picsum.photos/800/300?image={index}") as resp:
            return pn.pane.JPG(await resp.read())
# Bind the async function to the slider; Panel re-runs it on value changes.
pn.Column(widget, pn.bind(get_img, widget))
# -
# In this example Panel will invoke the function and update the output when the function returns while leaving the process unblocked for the duration of the `aiohttp` request.
#
# Similarly you can attach asynchronous callbacks using `.param.watch`:
# +
widget = pn.widgets.IntSlider(start=0, end=10)
image = pn.pane.JPG()
async def update_img(event):
    """Async watcher: fetch the image for the new slider value and set it."""
    async with aiohttp.ClientSession() as session:
        async with session.get(f"https://picsum.photos/800/300?image={event.new}") as resp:
            image.object = await resp.read()
widget.param.watch(update_img, 'value')
# Trigger once so the pane is populated on first render.
widget.param.trigger('value')
pn.Column(widget, image)
# -
# In this example Param will await the asynchronous function and the image will be updated when the request completes.
# ## Admin Panel
#
# The `/admin` panel provides an overview of the current application and provides tools for debugging and profiling. It can be enabled by passing the ``--admin`` argument to the `panel serve` command.
# ### Overview
#
# The overview page provides some details about currently active sessions, running versions and resource usage (if `psutil` is installed).
#
# <img src="../assets/admin_overview.png" width="70%"></img>
# ### Launch Profiler
#
# The launch profiler profiles the execution time of the initialization of a particular application. It can be enabled by setting a profiler using the commandline ``--profiler`` option. Available profilers include:
#
# - [`pyinstrument`](https://pyinstrument.readthedocs.io): A statistical profiler with nice visual output
# - [`snakeviz`](https://jiffyclub.github.io/snakeviz/): SnakeViz is a browser based graphical viewer for the output of Python’s cProfile module and an alternative to using the standard library pstats module.
#
# Once enabled the launch profiler will profile each application separately and provide the profiler output generated by the selected profiling engine.
#
# <img src="../assets/launch_profiler.png" width="80%"></img>
# ### User profiling
#
# In addition to profiling the launch step of an application it is often also important to get insight into the interactive performance of an application. For that reason Panel also provides the `pn.io.profile` decorator that can be added to any callback and will report the profiling results in the `/admin` panel. The `profile` helper takes two arguments, the name to record the profiling results under and the profiling `engine` to use.
#
# ```python
# @pn.io.profile('clustering', engine='snakeviz')
# def get_clustering(event):
#     # some expensive calculation
#     ...
#
# widget.param.watch(get_clustering, 'value')
# ```
#
# <img src="../assets/user_profiling.png" width="80%"></img>
# The user profiling may also be used in an interactive session, e.g. we might decorate a simple callback with the `profile` decorator:
# +
import time
slider = pn.widgets.FloatSlider(name='Test')
@pn.depends(slider)
@pn.io.profile('formatting')
def format_value(value):
    """Slow formatter; the sleep makes the profiled time visible."""
    time.sleep(1)
    return f'Value: {value+1}'
pn.Row(slider, format_value)
# -
# Then we can request the named profile 'formatting' using the `pn.state.get_profile` function:
pn.state.get_profile('formatting')
# ### Logs
#
# The Logs page provides a detailed breakdown of the user interaction with the application. Additionally users may also log to this logger using the `pn.state.log` function, e.g. in this example we log the arguments to the clustering function:
#
# ```python
# def get_clusters(x, y, n_clusters):
# pn.state.log(f'clustering {x!r} vs {y!r} into {n_clusters} clusters.')
# ...
# return ...
# ```
#
#
#
# <img src="../assets/admin_logs.png" width="80%"></img>
#
#
# The logging terminal may also be used interactively, however you have to ensure that the 'terminal' extension is loaded with `pn.extension('terminal')`. If the extension is initialized it can be rendered by accessing it on `pn.state.log_terminal`:
# +
slider = pn.widgets.FloatSlider(name='Test')
@pn.depends(slider)
def format_value(value):
    """Formatter that also logs each invocation to the admin log terminal."""
    pn.state.log(f'formatting value {value}')
    return f'Value: {value+1}'
# Render the widget, the bound output, and the live log terminal together.
pn.Column(
    pn.Row(slider, format_value),
    pn.state.log_terminal,
    sizing_mode='stretch_both'
)
|
examples/user_guide/Performance_and_Debugging.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Timetable
# **09:15 - 09:30** $\quad$ Update materials
# **09:30 - 10:30** $\quad$ Exercise 04: _More Lists_
# **10:30 - 11:00** $\quad$ Input: _Functions_
# **11:00 - 11:15** $\quad$ Coffee break
# **11:15 - 12:00** $\quad$ Exercise 05: _Functions_
#
# **12:00 - 13:00** $\quad$ Lunch break
#
# **13:00 - 13:30** $\quad$ Input: _Standard Libraries_
# **13:30 - 14:30** $\quad$ Exercise 06: _Standard Libraries_
# **14:30 - 14:45** $\quad$ Input: _Matrixes_
# **14:45 - 15:00** $\quad$ Coffee break
# **15:00 - 15:15** $\quad$ Input: _Plots_
# **15:15 - 15:45** $\quad$ Exercise 07: _Matrixes_
# **15:15 - 15:45** $\quad$ Exercise 08: _Plots_
# **15:45 - 16:00** $\quad$ Input: _Examples_
# **16:00 - 16:15** $\quad$ Evaluation & Feedback
# <a name="top"></a>Overview: Functions
# ===
# * [Functions](#funktionen)
# * [Anatomy of a function](#anatomie)
# * [Syntax](#syntax)
# * [Namespaces](#namespaces)
# * [Return values](#return)
# * [Examples](#beispiele)
#
#
# * [Arguments](#argumente)
# * [Positional arguments](#positional)
# * [Default arguments](#default)
# * [Keyword arguments](#keyword)
# * [Advantages of functions](#vorteile)
#
#
# * [Exercises 05: Functions](#uebung05)
# **Learning Goals:** After this lecture, you will
# * know why using functions is a good idea
# * be able to create your own functions
# * understand how you can make your functions extremely versatile using default and keyword arguments
# <a name="funktionen"></a>Functions
# ===
# We often want to execute the same action in our code multiple times. Of course, we can simply copy & paste the code for that action, but this quickly gets confusing. Additionally, this can lead to very complicated code, if we need to slightly change how things get executed in every copy.
# The way to solve this problem (in nearly every programming language) is using _functions_.
# <a name="anatomie"></a>Anatomy of a function
# ===
# We already got to know quite a few functions during the course until now.
# Every time we used a command followed by round brackets, such as ```print()```, we were really using a function.
# Now, let's define what a function is and how it works.
# * Functions are a defined set of actions with a specific name.
# * We use a function by _calling_ it, which means writing its name, followed by round brackets.
# * Functions can take variables as _arguments_ and manipulate them.
# * Functions can _return_ variables as output to the rest of the program.
# ##### A simple function
# Minimal function-call example: print displays its argument.
word = 'hello'
print(word)
# * ```print``` is the name of the function
# * "word" is the variable we give to the function, the _argument_
# * the function then does its job: displaying the _argument_ in the command line
# ##### Another example
# +
# sum() adds all elements of the list and returns the total.
numbers = [1, 2, 3, 4]
summe = sum(numbers)
print(summe)
# -
# * ```sum``` is the name of the function
# * "numbers" is the _argument_ we give to the function
# * this function sums all elements of the argument
#
# **IMPORTANT:** we can see that _not_ every variable can be an argument for every function - summing over a string doesn't make any sense, for example:
# Deliberate error for teaching: sum() cannot add strings -> TypeError.
sum(['a','b'])
# If the argument we gave the function does not work with the action of the function, the computer throws a **TypeError**, which tells us that the type was wrong.
# [top](#top)
# <a name="syntax"></a>Syntax
# ---
# Until now we only used built-in Python functions, with predefined actions and arguments.
# However, we can also define and write our own functions!
#
# It is commonplace during programming to use functions for actions we need to use multiple times.
# There is no need to define everything yourself though - why would you write your own ```sum``` function for example?
# To define a new function we use the ```def``` keyword:
# +
# this is the header of the function
# in which we define its name and
# the arguments it takes
def my_function(argument):
    # this is the body of the function
    # in which the actions are defined:
    # raise the argument to the third power, then display it
    cube = argument ** 3
    print(cube)
def square(argument):
    """Display the square of *argument* in the command line."""
    print(argument ** 2)
# thus we have two functions:
# one which takes an argument, cubes it and prints the cube
# and one which does the same for the square
# -
square(10)
my_function(10)
my_function(3)
blubb = 10
my_function(blubb)
# It is important that you stick to the syntax shown here! Just as with loops and if-else statements, you need to close the control part of the function with a colon and indent the body of the function to separate it from the normal code.
# Defining a function does not produce any output, it only allows us to use the function in our code.
# To use it, we have to _call_ a function just as we did with the built-in functions
x = 3
# call the function we defined with x as an argument
my_function(x)
# **Important:** executing the function does not change the value of the argument:
# +
# set the value of y
y = 4
# call our function with y as the argument
my_function(y)
# check whether the value of y changed
print('Value of y: {}'.format(y))
# -
# [top](#top)
# <a name="namespaces"></a>Namespaces
# ---
# **IMPORTANT:** within the function we defined the variable 'cube'. This variable is only defined _locally_ and thus not known to the program in general! Let's check this:
# +
# the function is here as a reminder, since it is already defined from the code above
# 'a' lives in the global namespace, so it is visible everywhere
a = 10
def my_function(argument):
    """Print the cube of *argument* (uses the function-local variable 'cube')."""
    cube = argument ** 3
    print(cube)
# the global variable 'a' is accessible here
print('a: {}'.format(a))
my_function(4)
# 'cube' exists only inside my_function's namespace,
# so this line deliberately raises a NameError
print(cube)
# -
# We call the code within the function its _body_. All variables defined _within_ a function body are in the function's namespace, and are known **only** within the function, not outside of it.
#
# Python knows different namespaces:
# * the global namespace, which is known everywhere
# * the namespace of functions
#
# The arguments we define for a function are also known within it (and with exactly the name we defined them as). So, in my_function the variable 'argument' is known and we can use it in its body.
# [top](#top)
# <a name="return"></a>Return values
# ---
# Sometimes we might want to use a variable from within the function outside of it. This basically means moving the variable from the function namespace to the global one. The normal way to do this is by using a _return value_, which we can do by using the ```return``` keyword:
# we slightly changed the function from before
# instead of displaying cube in the command line,
# it now returns the value of cube to the code
def my_function(argument):
    """Cube *argument* and hand the result back to the caller."""
    cube = argument ** 3
    # here, we return cube (instead of printing it)
    return cube
# As before, we have to call the function, to test it:
# +
# define a variable
x = 10
# pass the variable to a function,
# and save its return value in a variable
what_we_got_back = my_function(x)
# check the result
print(what_we_got_back)
# -
# [top](#top)
# <a name="beispiele"></a>Examples
# ---
# Here are some more examples to illustrate the use of functions:
# +
# a simple polynomial
def f(x):
    """Evaluate the polynomial 2*x**2 + 4*x + 10 at *x* and return the value."""
    # the result is returned directly, without
    # saving it to an intermediate variable first
    return 2 * x**2 + 4 * x + 10
x = 10
calculate = f(x)
print(calculate)
# +
# a function that could write emails
def print_greeting(person):
    """Return the opening lines of an application email to Prof *person*."""
    # any value can be returned directly; it does not
    # need to live in a variable first
    template = 'Dear Prof {},\nI would like to do a PhD with you.\n'
    return template.format(person)
email_body = 'very convincing text'
statement1 = print_greeting('Bodenschatz') + email_body
statement2 = print_greeting('Tilgner')
statement3 = print_greeting('Grubmueller')
# -
print(statement1)
print(statement2)
print(statement3)
# +
# a function, which prints a help text
def print_help():
    """Display a cheat sheet of Jupyter keyboard shortcuts.

    Takes no arguments and returns nothing. The leading whitespace of the
    shortcut lines is part of the triple-quoted string literal.
    """
    # note: the function does not have an argument
    print('''*** important keyboard shortcuts: ***
    edit mode - ENTER
    command mode - ESC
    cell to markdown - m
    cell to code - y''')
print_help()
# -
# [top](#top)
# <a name="Argumente"></a>Arguments
# ===
# So far we have passed one or zero arguments to each function we used.
# Of course, we can use more arguments than just one - in fact, there is no limit to the number we can use!
# +
# a function with two arguments
def lunch(main, side):
    """Announce today's lunch: *main* served with *side*."""
    menu_line = 'today for lunch there is {} with {} as a side'.format(main, side)
    print(menu_line)
# +
lunch('schnitzel','potatoes')
lunch('curry','rice')
# this function does accept numbers as arguments
# the result doesn't make a lot of sense though...
lunch(3, 'apple')
# -
lunch('potatoes','schnitzel')
# <a name="positional"></a>Positional arguments
# ---
# If we pass more than one argument to a function, the order of arguments is very important! The function will interpret the arguments in the order they are defined in its header:
lunch('rice', 'schnitzel')
# +
# This function takes name, surname and age of a person
# and then prints a short description of the person.
def describe_person(first_name, surname, age):
    """Print a short description of a person.

    Note: title() capitalises the first letter of each name.
    """
    name_fields = [
        ("First name", first_name.title()),
        ("Surname", surname.title()),
    ]
    for label, value in name_fields:
        print("{}: {}".format(label, value))
    print("Age: {}\n".format(age))
describe_person('jana', 'lasser', 26)
describe_person('nina', 'merz', 32)
describe_person('simon', 'mueller', 68)
# -
# When we call the function, the arguments get assigned to the variables according to their order:
# * position 1 $\rightarrow$ first_name
# * position 2 $\rightarrow$ surname
# * position 3 $\rightarrow$ age
# Thus, if we pass (jana, lasser, 26) to the function, the result we get is right:
# * First name: Jana
# * Surname: Lasser
# * Age: 26
# However, if we pass (lasser, jana, 26) instead, we get a wrong result:
# * First name: Lasser
# * Surname: Jana
# * Age: 26
# It gets even worse, if we pass the wrong type of variable, since not all actions of the function have to make sense for each type of variable:
describe_person(26, 'jana', 'lasser')
# [top](#top)
# <a name="default"></a>Default arguments
# ---
# Sometimes, we want to write a function, which we do not _need_ to pass an argument to, but _can_.
# For example, we might want to execute a type of default action, unless we specify something else. To do this, we can define _default values_:
# +
# This function displays a message to "name"
# if "name" is given, if not, the message is
# addressed to 'everyone' instead (default case)
def thank_you(name='everyone'):
    """Print a word of thanks, addressed to *name* (default: 'everyone')."""
    message = "\nYou are doing good work, {}!".format(name)
    print(message)
thank_you('Bianca')
thank_you('Katrin')
thank_you()
# -
# Of course we can mix positional arguments and default values.
#
# **IMPORTANT:** in the header, positional arguments without default values go **before** positional arguments with default values!
# +
# a function that exponentiates an argument
# if no exponent is given, two is used as a default
def power(base, exponent=2):
    """Return *base* raised to *exponent* (defaults to squaring)."""
    result = base ** exponent
    return result
# we pass both base and exponent
print(power(2,4))
# we pass only the base, 2 is used for the exponent automatically
print(power(2))
result = (power(2,8) + 3)*4
print(result)
# -
power()
# ##### Excursus: None
# In nearly every programming language, there is a value that represents 'nothing'. This value is neither 0 nor **False**. It is neither a number nor a boolean value, but a special object representing 'nothing'.
#
# In Python this value is **None**. It is used if something explicitly _is not defined_. Be aware that the truth value of **None** is **False**, while every other object (number, string, list) has a truth value of **True**. This allows us to easily check the existence of something.
# [top](#top)
# <a name="keyword"></a>Keyword arguments
# ---
# We've just seen _positional arguments_. They are called _positional_, because their position matters.
#
# There is another type of argument: _keyword arguments_. Keyword arguments are called that way, because they explicitly specify to what variable they belong. Keyword arguments are often given default values (which can also be **none**), which means that if you call a function, you only need to use those keyword arguments that you want to use.
#
# One example of using keyword-arguments is allowing to use more information, without requiring it.
# Let's say we want to augment our describe_person function to allow for mother tongue, age and day of death. However, we don't want to use all of these possibilities all the time, so we use keyword-arguments:
# +
# Function header with positional and keyword-arguments
def describe_person(first_name, surname, age=None,
                    mother_tongue=None, date_of_death=None):
    """Print a short profile of a person.

    Parameters
    ----------
    first_name, surname : str
        Required; always printed.
    age : int, optional
    mother_tongue : str, optional
    date_of_death : int, optional
        Each optional field is printed only when it was supplied.
    """
    # this part is not optional
    print("First name: {}".format(first_name))
    print("Surname: {}".format(surname))
    # optional information: compare against None explicitly, so that
    # falsy-but-valid values (e.g. age=0) are still printed
    if age is not None:
        print("Age: {}".format(age))
    if mother_tongue is not None:
        # typo fixed: "tonuge" -> "tongue"
        print("Mother tongue: {}".format(mother_tongue))
    if date_of_death is not None:
        print("Date of death: {}".format(date_of_death))
    # trailing blank line separates consecutive profiles
    print("")
describe_person('torsten', 'eckstein')
describe_person('jana', 'lasser', mother_tongue='German')
describe_person('adele', 'goldberg', age=68, mother_tongue='Hebrew')
describe_person('michael', 'jackson', mother_tongue='English', date_of_death=2009)
# -
# [top](#top)
# <a name="vorteile"></a>Advantages of functions
# ---
# The approach of structuring programs by using functions is called _procedural programming_. It is a style of writing code (called _programming paradigm_) just like _object-oriented programming_.
#
# Advantages of procedural programming:
# * Instructions are written only once, packaged into a function and then can get reused, wherever they are needed $\rightarrow$ this saves time.
# * Having all instructions in one place makes it easier to write error free code.
# * Changes have only to be implemented in one place and work in the entire code.
# * Functions can make the code simpler and easier to read - important aspects!
# [top](#top)
# <a name="uebung05"></a>Exercise 05: Functions
# ===
# 1. **Functions**
# 1. Write a function that accepts two numbers as arguments and displays their sum, difference, product, and quotient to the command line.
# 2. Write a function that accepts a list of numbers as an argument and returns the sum of the squares of the numbers in the list.
# 3. **(Optional)** Write a function that accepts a list of numbers as an argument, then sorts and returns the list. Hint: you *may* use the min() function.
# 2. **Arguments**
# 1. Write a function that accepts five arguments (x, a, b, c, d), calculates the polynomial
# $f(x) = ax^3 + bx^2 + cx + d$
# with them and returns the value of $f$.
# 2. Experiment with the function and try different polynomials.
# 3. Modify the function so that the arguments a, b, c have default values.
# 4. **(Optional)** add a keyword argument "display" to the function.
# * If display = False the function acts as before.
# * If display = True the function displays the values of the coefficients neatly in the command line.
# [top](#top)
|
05-functions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Benchmark Algo on a simple problem
# Minimizing the sinus between 0 and 2pi leads to 3pi / 2
# https://stackoverflow.com/questions/53217771/jupyter-making-3d-matplotlib-graphs-extremely-small
# %matplotlib inline
import sys
sys.path.append("../")
from base import benchmark_simple
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # This import has side effects required for the kwarg projection='3d' in the call to fig.add_subplot
# +
optimization_problem = [
{
"name": "x",
"category": "uniform",
"search_space": {
"low": 0,
"high": 2 * np.pi,
}
},
{
"name": "y",
"category": "uniform",
"search_space": {
"low": 0,
"high": 2 * np.pi,
}
},
]
def function_to_optimize(x, y):
    """Benchmark objective: cos(x) + cos(y) + cos(2*x + 1)."""
    first = np.cos(x)
    second = np.cos(y)
    coupling = np.cos(2 * x + 1)
    return first + second + coupling
# evaluate the objective on a 200x200 grid over [0, 2*pi]^2
x = np.linspace(0, 2 * np.pi, 200)
y = np.linspace(0, 2 * np.pi, 200)
X, Y = np.meshgrid(x, y)
# flatten the grid and compute the objective at every grid point
zs = np.array([function_to_optimize(x, y) for x, y in zip(np.ravel(X), np.ravel(Y))])
# the grid point with the smallest objective value serves as the reference optimum
x_target, y_target = list(zip(np.ravel(X), np.ravel(Y)))[np.argmin(zs)]
target = {"x": x_target, "y": y_target}
# +
# plot target function
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection='3d')
x = np.linspace(0, 2 * np.pi, 100)
y = np.linspace(0, 2 * np.pi, 100)
X, Y = np.meshgrid(x, y)
zs = np.array([function_to_optimize(x, y) for x, y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
ax.plot_surface(X, Y, Z, alpha=0.5)
ax.plot([target["x"]], [target["y"]], [function_to_optimize(**target)], color="r", marker="o", alpha=1, lw=3)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('Function value')
plt.show()
# +
seeds = list(range(30))
methods = ["random", "parzen_estimator"]
number_of_evaluations = [5, 10, 20, 30, 35, 40, 45, 50, 55, 60, 70, 80, 100]
# -
results = benchmark_simple(
function_to_optimize=function_to_optimize,
optimization_problem=optimization_problem,
target=target,
methods=methods,
number_of_evaluations=number_of_evaluations,
seeds=seeds,
parallel=True,
)
plt.figure(figsize=(16, 8))
for method in methods:
plt.errorbar(
x=number_of_evaluations,
y=[results[method][number_of_evaluation]["mean"] for number_of_evaluation in number_of_evaluations],
yerr=[results[method][number_of_evaluation]["std"] for number_of_evaluation in number_of_evaluations],
label=method,
alpha=0.7
)
plt.legend()
plt.yscale("log")
# +
from matplotlib import animation, rc
from IPython.display import HTML
from benderopt import minimize
def get_animation(number_of_evaluation=70, seed=None):
    """Build a 3-D animation comparing the sampling behaviour of each optimizer.

    One subplot per entry of the global ``methods`` list; frame i shows the
    first i evaluated points on top of the objective surface (globals X, Y, Z)
    together with the reference optimum (global ``target``).

    NOTE(review): relies on module-level globals (methods, X, Y, Z,
    optimization_problem, target) defined in earlier cells.
    """
    lines = []
    samples_list = []
    fig = plt.figure(figsize=(14, 18))
    for k, method in enumerate(methods):
        # run the optimizer; debug=True makes minimize return every sample tried
        samples_tmp = minimize(
            f=function_to_optimize,
            optimization_problem=optimization_problem,
            optimizer_type=method,
            number_of_evaluation=number_of_evaluation,
            seed=seed,
            debug=True
        )
        # rows: x, y, loss — one column per evaluated sample
        samples = np.array([[sample["x"], sample["y"], function_to_optimize(**sample)]
                            for sample in samples_tmp]).T
        samples_list.append(samples)
        ax = fig.add_subplot(len(methods), 1, k + 1, projection="3d")
        ax.set_title(method, fontsize=32, y=1.12)
        # start the scatter with only the first sample; animate() extends it
        line, = ax.plot(samples[0, :1], samples[1, :1], samples[2, :1], lw=2, marker="o", color="g", alpha=0.65, linestyle="")
        lines.append(line)
        ax.plot_surface(X, Y, Z, alpha=0.4, label=method)
        # red marker: the reference optimum found earlier by grid search
        ax.plot([target["x"]], [target["y"]], [function_to_optimize(**target)], color="r", marker="o", alpha=1, lw=3)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel("loss")
    # close the figure so the static version is not displayed in the notebook
    plt.close()
    def animate(i):
        # reveal one more sample per frame, for every subplot in lockstep
        for line, samples in zip(lines, samples_list):
            line.set_data(samples[0:2, :i])
            line.set_3d_properties(samples[2, :i])
        return lines
    anim = animation.FuncAnimation(fig, animate,
                                   frames=number_of_evaluation, interval=200, blit=True)
    return anim
# -
anim = get_animation(number_of_evaluation=200, seed=13)
HTML(anim.to_html5_video())
|
benchmark/benchmark_sinus2D.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Restricted spectral approximation experiment (Section 5.1)
#
# The script exhaustively compares different coarsening schemes with respect to their ability to preserve the action of the combinatorial Laplacian matrix $L$ for every vector $ x \in U_K$, where the latter is the principal eigenspace of size $K$.
#
# The code accompanies paper [Graph reduction with spectral and cut guarantees](http://www.jmlr.org/papers/volume20/18-680/18-680.pdf) by <NAME> published at JMLR/2019 ([bibtex](http://www.jmlr.org/papers/v20/18-680.bib)).
#
# This work was kindly supported by the Swiss National Science Foundation (grant number PZ00P2 179981).
#
# 15 March 2019
#
# [<NAME>](https://andreasloukas.blog)
#
# [](https://zenodo.org/badge/latestdoi/175851068)
#
# Released under the Apache license 2.0
# !pip install networkx
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# +
from graph_coarsening.coarsening_utils import *
import graph_coarsening.graph_lib as graph_lib
import graph_coarsening.graph_utils as graph_utils
import numpy as np
import scipy as sp
from scipy import io
from scipy.linalg import circulant
import time
import os
import matplotlib
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
import pygsp as gsp
gsp.plotting.BACKEND = 'matplotlib'
# +
# Parameters
graphs = ['yeast','minnesota', 'bunny', 'airfoil']
methods = ['heavy_edge', 'variation_edges', 'variation_neighborhoods', 'algebraic_JC', 'affinity_GS', 'kron']
K_all = np.array([10,40], dtype=np.int32)
r_all = np.linspace(0.1, 0.9, 17, dtype=np.float32)
print('k: ', K_all, '\nr: ', r_all)
# -
# ### The actual experiment code (this will take long)
# If one needs to just see the results, skip running this part.
# +
# flip rerun_all to True to recompute everything from scratch;
# rewrite_results additionally overwrites the saved .npz result files
rerun_all = False
rewrite_results = False
if rerun_all:
    algorithm = 'greedy'
    max_levels = 20
    n_methods = len(methods)
    n_graphs = len(graphs)
    for graphIdx, graph in enumerate(graphs):
        N = 4000 # this is only an upper bound (the actual size depends on the graph)
        G = graph_lib.real(N, graph)
        N = G.N
        if N<100: continue
        # precompute spectrum needed for metrics
        if K_all[-1] > N/2:
            # small graph: dense eigendecomposition of the Laplacian
            [Uk,lk] = eig(G.L)
        else:
            # large graph: spectral shift so that eigsh's largest eigenpairs
            # of (offset*I - L) correspond to the smallest eigenpairs of L
            offset = 2*max(G.dw)
            T = offset*sp.sparse.eye(G.N, format='csc') - G.L
            lk, Uk = sp.sparse.linalg.eigsh(T, k=K_all[-1], which='LM', tol=1e-6)
            lk = (offset-lk)[::-1]
            Uk = Uk[:,::-1]
        # result tensors indexed by (method, subspace size K, reduction ratio r)
        subspace = np.zeros((n_methods, len(K_all), len(r_all)))
        failed = np.zeros((n_methods, len(K_all), len(r_all)))
        ratio = np.zeros((n_methods, len(K_all), len(r_all)))
        for KIdx, K in enumerate(K_all):
            print('{} {}| K:{:2.0f}'.format(graph, N, K))
            for rIdx,r in enumerate(r_all):
                n_target = int(np.floor(N*(1-r)))
                if K > n_target:
                    print('Warning: K={}>n_target={}. skipping'.format(K, n_target))
                    continue # K = n_target
                for methodIdx,method in enumerate(methods):
                    # algorithm is not deterministic: run a few times
                    if method == 'kron':
                        # kron is run only at the first K; its metrics are
                        # spread over all K values at once via the inner loop
                        if KIdx == 0:
                            n_iterations = 2
                            n_failed = 0
                            r_min = 1.0
                            for iteration in range(n_iterations):
                                Gc, iG = kron_coarsening(G, r=r, m=None)
                                metrics = kron_quality(iG, Gc, kmax=K_all[-1], Uk=Uk[:,:K_all[-1]], lk=lk[:K_all[-1]])
                                if metrics['failed']: n_failed += 1
                                else:
                                    r_min = min(r_min, metrics['r'])
                                    for iKIdx, iK in enumerate(K_all):
                                        subspace[ methodIdx, iKIdx, rIdx] += metrics['error_subspace'][iK-1]
                            # average over the successful runs only
                            subspace[ methodIdx, :, rIdx] /= (n_iterations-n_failed)
                            # flag a failure when the achieved reduction falls clearly short
                            failed[ methodIdx, :, rIdx] = 1 if (r_min < r - 0.05) else 0
                            ratio[ methodIdx, :, rIdx] = r_min
                            if np.abs(r_min - r) > 0.02: print('Warning: ratio={} instead of {} for {}'.format(r_min, r, method))
                    else:
                        # the remaining coarsening schemes: a single run per setting
                        C, Gc, Call, Gall = coarsen(G, K=K, r=r, max_levels=max_levels, method=method, algorithm=algorithm, Uk=Uk[:,:K], lk=lk[:K])
                        metrics = coarsening_quality(G, C, kmax=K, Uk=Uk[:,:K], lk=lk[:K])
                        subspace[ methodIdx, KIdx, rIdx] = metrics['error_subspace'][-1]
                        failed[methodIdx, KIdx, rIdx] = 1 if (metrics['r'] < r - 0.05) else 0
                        ratio[methodIdx, KIdx, rIdx] = metrics['r']
                        if np.abs(metrics['r'] - r) > 0.02:
                            print('Warning: ratio={} instead of {} for {}'.format(metrics['r'], r, method))
        if rewrite_results:
            # persist per-graph results for the plotting cells further below
            filepath = os.path.join('..', 'results', 'experiment_approximation_'+ graph +'.npz')
            print('.. saving to "' + filepath + '"')
            np.savez(filepath, methods=methods, K_all=K_all, r_all=r_all, subspace=subspace, failed=failed)
print('done!')
# -
# ### Figure 2 of the paper
# Plot $\epsilon$ as a function of $r$ for a few different graphs (for $K=10,40$)
# +
matplotlib.rcParams.update({'font.size': 25})
from matplotlib import cm
colors = [ cm.ocean(x) for x in np.linspace(0, 0.95, len(methods)+1) ]
colors[1] = [0.8,0,0]
colors[-2] = (np.array([127, 77, 34])/255).tolist()
size = 2.7*2.8;
graphs = ['yeast','airfoil', 'minnesota', 'bunny']
n_graphs = len(graphs)
print('The figures are drawn in the following in order:')
for KIdx in [0,1]:
for graphIdx, graph in enumerate(graphs):
# load results
filepath = os.path.join('..', 'results', 'experiment_approximation_'+ graph +'.npz')
data = np.load(filepath)
methods, K_all, r_all, subspace, failed = data['methods'], data['K_all'], data['r_all'], data['subspace'], data['failed']
K = K_all[KIdx]
fig, axes = plt.subplots(1, 1, figsize=(1.618*size, size)); # 1/1.618
for methodIdx,method in reversed(list(enumerate(methods))):
lineWidth = 1.5; marker = 's'
method = method.replace('_', ' ')
if method == 'heavy edge':
method = 'heavy edge'
cIdx, line, marker = 0, ':', 's'
elif 'variation edges' in method:
method = 'local var. (edges)'
cIdx, line, marker, lineWidth = 2, '-', 'o', 1.5
elif (method == 'variation neighborhoods') or (method == 'variation neighborhood'):
method = 'local var. (neigh)'
cIdx, line, marker, lineWidth = 1, '-', 'o', 1.5
elif 'algebraic' in method:
method = 'algebraic dist.'
cIdx, line = 3, ':'
elif 'affinity' in method:
method = 'affinity'
cIdx, line = 4, ':'
elif method == 'kron':
method = 'kron'
cIdx, line, marker = 5, '--', 'x'
else:
continue
style = line + marker
color = colors[cIdx]
tmp = subspace[methodIdx,KIdx,:]
tmp[tmp==0] = np.NaN # requested k was larger than n_target
tmp[failed[methodIdx,KIdx,:]==1] = np.NaN # there was a failure to reach the given ratio
axes.plot(r_all, tmp, style, label='{}'.format(method), color=color, lineWidth=lineWidth, markersize=7)
axes.set_xlabel('reduction $r$')
axes.set_ylabel('$\epsilon$')
loc, lab = [0.1,0.3,0.5,0.7,0.9], []
for i,iloc in enumerate(loc): lab.append(matplotlib.text.Text(0,0,'{:2d}%'.format(int(iloc*100))))
plt.xticks(loc, lab)
axes.set_yscale('log')
if K == 10:
axes.set_ylim([0.0002, 50])
else:
axes.set_ylim([0.0002, 50])
axes.set_xlim([0.05, .95])
legend0 = axes.legend(fontsize=21.2, loc='lower right', edgecolor=[1,1,1])
axes.plot([0.05, 0.95], [1,1], 'k:', lineWidth=0.5)
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
fig.tight_layout()
print('* experiment_approximation_'+ graph + '_K=' + str(K))
filepath = os.path.join('..', 'results', 'experiment_approximation_'+ graph + '_K=' + str(K) + '.pdf')
# fig.savefig(filepath)
# -
|
examples/experiment_approximation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="eXlW6p2OSnNe" colab_type="text"
# ## ML Challenges
#
# This notebook includes various code snippets mentioned in the first chapter of our Machine Learning Design Patterns book.
# + id="4WeoSZ4MTv6d" colab_type="code" colab={}
import pandas as pd
import tensorflow as tf
from sklearn.utils import shuffle
from google.cloud import bigquery
# + [markdown] id="XesQj5QcTepp" colab_type="text"
# ### Repeatability
#
# Because of the inherent randomness in ML, there are additional measures required to ensure repeatability and reproducibility between training and evaluation runs.
# + id="0pDreGYoSkPz" colab_type="code" colab={}
# Setting a random seed in TensorFlow
# Do this before you run training to ensure reproducible evaluation metrics
# You can use whatever value you'd like for the seed
tf.random.set_seed(2)
# + [markdown] id="-Khu4PaGUS74" colab_type="text"
# You also need to consider randomness when preparing your training, test, and validation datasets. To ensure consistency, prepare a shuffled dataset before training by setting a random seed value.
#
# First, let's look at an example without shuffling. We'll grab some data from the NOAA storms public dataset in BigQuery. You'll need a Google Cloud account to run the cells that use this dataset.
# + id="QJ48sMDHWI_b" colab_type="code" colab={}
from google.colab import auth
auth.authenticate_user()
# + [markdown] id="LfmHA0LvZNVA" colab_type="text"
# Replace `your-cloud-project` below with the name of your Google Cloud project.
# + id="fUaKDisOWM9u" colab_type="code" colab={}
# %%bigquery storms_df --project your-cloud-project
SELECT
*
FROM
`bigquery-public-data.noaa_historic_severe_storms.storms_*`
LIMIT 1000
# + [markdown] id="vGGdKpsvXBxt" colab_type="text"
# Run the cell below multiple times, and notice that the order of the data changes each time.
# + id="7RhuiAsqWh13" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="8971fdc3-9010-427e-ec40-3e4e25e59052"
storms_df = shuffle(storms_df)
storms_df.head()
# + [markdown] id="jbfrylA_XGTE" colab_type="text"
# Next, repeat the above but set a random seed. Note that the data order stays the same even when run multiple times.
#
#
# + id="U5a4WF88XFyk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="88e33fee-4e10-4d8e-f5c8-0a096ced8d5e"
shuffled_df = shuffle(storms_df, random_state=2)
shuffled_df.head()
# + [markdown] id="g_5dEbgmT8yX" colab_type="text"
# ### Data drift
#
# It's important to analyze how data is changing over time to ensure your ML models are trained on accurate data. To demonstrate this, we'll use the same NOAA storms dataset as above with a slightly different query.
#
# Let's look at how the number of reported storms has increased over time.
# + id="LC2ozP3PYT1i" colab_type="code" colab={}
# %%bigquery storm_trends --project your-cloud-project
SELECT
SUBSTR(CAST(event_begin_time AS string), 1, 4) AS year,
COUNT(*) AS num_storms
FROM
`bigquery-public-data.noaa_historic_severe_storms.storms_*`
GROUP BY
year
ORDER BY
year ASC
# + id="u-6l9cqoYXFM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="4d19bbef-56ca-45f4-9b7d-11cb2b35a8c6"
storm_trends.head()
# + [markdown] id="8iPDWrM_Yv3Y" colab_type="text"
# As seen below, training a model on data before 2000 to predict storms now would result in incorrect predictions.
# + id="k3MA7oScYaDl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="ad788e12-eeea-4dec-8ff5-a01aae20a7bf"
storm_trends.plot(title='Storm trends over time', x='year', y='num_storms')
# + [markdown] id="zj8SOtBNZcKG" colab_type="text"
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
01_need_for_design_patterns/ml_challenges.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Multiple Linear Regression Model
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
import pandas as pd
from sklearn.metrics import mean_squared_error
from math import sqrt
# Load CSV and columns
# read the housing data set
df = pd.read_csv("Housing.csv")
df.head()
# target: the sale price, reshaped to an (n, 1) column vector for sklearn
Y = df['price']
Y=Y.values
Y=Y.reshape(len(Y),1)
print(Y)
# candidate feature columns
X = df[['lotsize','bedrooms','bathrms','stories','garagepl']]
print(type(X))
print(X)
from sklearn.preprocessing import MinMaxScaler
# NOTE(review): the CSV is read a second time here — presumably a leftover
# from iterating in the notebook; the data is the same
df=pd.read_csv('Housing.csv')
df.head()
# scale 'lotsize' into [0, 1]; the other features are kept as raw counts
lot=df[['lotsize']]
scaler = MinMaxScaler(feature_range=(0, 1))
rescaledlot = scaler.fit_transform(lot)
df['rlot']=rescaledlot
# final feature matrix: rescaled lot size plus the raw count features
X = df[['rlot','bedrooms','bathrms','stories','garagepl']]
X.head()
# +
# Split the data into training/testing sets
# (the last 46 rows serve as the hold-out test set)
X_train = X[:-46]
X_test = X[-46:]
# Split the targets into training/testing sets
Y_train = Y[:-46]
Y_test = Y[-46:]
# +
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
Y_pred=regr.predict(X_test)
# -
# report root-mean-squared error on the hold-out set
rms = sqrt(mean_squared_error(Y_test, Y_pred))
print(rms)
#using PCA
from sklearn import decomposition
# project all five features onto their first principal component
pca = decomposition.PCA(n_components=1)
x=df[['lotsize','bedrooms','bathrms','stories','garagepl']]
principalComponents = pca.fit_transform(x)
print(principalComponents)
df['pca1']=principalComponents
# use the single PCA component as the only regressor,
# reshaped to an (n, 1) column vector for sklearn
x=df['pca1']
x=x.values
x=x.reshape(len(x),1)
print(x)
# +
# Split the data into training/testing sets
# (same 46-row hold-out split as for the full-feature model)
x_train = x[:-46]
x_test = x[-46:]
# Split the targets into training/testing sets
Y_train = Y[:-46]
Y_test = Y[-46:]
# +
# Create linear regression object
regr1 = linear_model.LinearRegression()
# Train the model using the training sets
regr1.fit(x_train, Y_train)
Y_pred=regr1.predict(x_test)
# -
# report RMSE for the PCA-based model, comparable to the value above
rms = sqrt(mean_squared_error(Y_test, Y_pred))
print(rms)
|
Sem4/DataAnalytics/DA Lab - students copy/Regression/LR-Housing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pratikshitvas/GDP-prediction_mutiple_models/blob/main/GDP_Dataset_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="90f3bd87"
import pandas as pd
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="856dfc2e" outputId="209d65bb-dffd-4e7d-dd00-09b0cead4b0b"
data = pd.read_csv('edited_Copy of PANEL data for regression (GDP, X and M as Variables).csv')
data.head()
# + id="478d9756"
data.COUNTRY = [s.strip() for s in data.COUNTRY]
# + id="af1063cd"
country_names = list(data.COUNTRY.unique())
#country_names
# + colab={"base_uri": "https://localhost:8080/", "height": 286} id="8b871ec1" outputId="4a6f4d45-50f1-4ae0-bf09-53fe58fc2863"
sns.heatmap(data.corr(), annot=data.corr())
# + id="6219296b"
def item_counter(df, split_features, unique_attributes):
    """Split *df* into one sub-frame per value of the column *split_features*.

    Returns a list with one DataFrame for each entry of *unique_attributes*,
    containing the rows whose *split_features* column equals that entry.
    """
    return [df.loc[df[split_features] == attribute]
            for attribute in unique_attributes]
# + id="5f42d1c4"
country_wize_list = item_counter(data,'COUNTRY',country_names)
#print('\n\n'.join('{}' for _ in range(len(country_wize_list))).format(*country_wize_list))
# + colab={"base_uri": "https://localhost:8080/"} id="df965799" outputId="b34bf210-9577-4d08-ffb4-f37e2d05ed52"
print(len(country_wize_list),len(country_names))
# + colab={"base_uri": "https://localhost:8080/"} id="b45587fc" outputId="de910946-6804-40f1-f2b1-683b0e5d8924"
type(country_wize_list[0])
# + [markdown] id="hbErgUhwUjZ6"
# **linear regression**
# + id="2fb95ef4"
# fit one linear model per country, predicting IMPORTS from the other columns
# NOTE(review): the nested loop scans every sub-frame for every country name
# (quadratic in the number of countries); j.COUNTRY.unique() is a one-element
# array here, so the == i comparison effectively matches the country name
dictionary = {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','EXPORTS','GDP']],j.IMPORTS)
            dictionary[i] = model
# + id="4bd83ffa"
a = dictionary['Afghanistan']
# + colab={"base_uri": "https://localhost:8080/"} id="86817476" outputId="89c0f126-2148-4ed8-b465-2f1f8e1738d3"
a.predict([[1,2020,6000.27,13000]])
# + colab={"base_uri": "https://localhost:8080/"} id="a0552885" outputId="8013739d-f702-4e1f-dd60-d047ee84490b"
from sklearn.metrics import r2_score
# Per-country in-sample R^2 of the IMPORTS models.
for i in dictionary.keys():
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            prediction = dictionary[i].predict(j[['COUNTRY ID','YEAR','EXPORTS','GDP']])
            # r2_score expects (y_true, y_pred); the original call had the
            # arguments swapped, which changes the score because R^2 is not
            # symmetric in its arguments.
            print(r2_score(j.IMPORTS, prediction))
# + [markdown] id="7T40DkgJVy_Q"
# **PRIDECTING imports USING ONLY GDP**
# + id="25bc0077"
# Per-country models predicting IMPORTS from GDP only (plus id/year).
dictionary_1 = {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','GDP']],j.IMPORTS)
            dictionary_1[i] = model
# + id="TOK-7xKoVmmK"
a = dictionary_1['Afghanistan']
# + colab={"base_uri": "https://localhost:8080/"} id="cGM__DYhVrVq" outputId="bb65255b-0db1-437c-ac91-fab72b467544"
# Predict imports from a hypothetical 2020 GDP value.
a.predict([[1,2020,600.27]])
# + [markdown] id="SCbFjkPbV2Gz"
# **PREDICTING IMPORTS using exports**
# + id="hxTZFJUTV6JI"
# Per-country models predicting IMPORTS from EXPORTS (plus id/year).
dictionary_2= {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','EXPORTS']],j.IMPORTS)
            dictionary_2[i] = model
# + id="M-DPAEEpWCqw"
a = dictionary_2['Afghanistan']
# + colab={"base_uri": "https://localhost:8080/"} id="kBFlLL6YWEcO" outputId="07e9cb8d-1cc0-4b42-9a57-45b0121a5687"
# Predict imports from a hypothetical 1997 exports value.
a.predict([[1,1997,400.27]])
# + [markdown] id="XM6TjwAepWaV"
# **PREDICTING EXPORTS**
# + id="i25oshOkpaS4"
# Per-country models predicting EXPORTS from IMPORTS and GDP.
dictionary_E= {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','IMPORTS','GDP']],j.EXPORTS)
            dictionary_E[i] = model
# + id="PA6yWjjJpy6u"
a = dictionary_E['Afghanistan']
# + colab={"base_uri": "https://localhost:8080/"} id="5rgxyRTvp0F2" outputId="c13e0b8a-f80f-4aad-a157-a93db0de2c90"
# Predict exports for a hypothetical 2020 observation.
a.predict([[1,2020,6000.27,13000]])
# + [markdown] id="W66qxUFBp84X"
# **EXPORTS USING GDP ONLY**
# + id="HN8hxcppp2gF"
# Per-country models predicting EXPORTS from GDP only.
dictionary_G = {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','GDP']],j.EXPORTS)
            dictionary_G[i] = model
# + id="-iXRslcQqTDV"
a = dictionary_G['Afghanistan']
# + colab={"base_uri": "https://localhost:8080/"} id="Yo52f023qY0Q" outputId="1e5f6b3c-3fa2-4b1b-e7b3-4345cffa00f2"
# Predict exports from a hypothetical 2020 GDP value.
a.predict([[1,2020,13000]])
# + [markdown] id="1iITgKmCqm0H"
# **EXPORTS USING IMPORTS**
# + id="OhjW_uljqqqp"
# Per-country models predicting EXPORTS from IMPORTS.
dictionary_I = {}
for i in country_names:
    for j in country_wize_list:
        if j.COUNTRY.unique() == i:
            model = LinearRegression().fit(j[['COUNTRY ID','YEAR','IMPORTS']],j.EXPORTS)
            dictionary_I[i] = model
# + colab={"base_uri": "https://localhost:8080/"} id="roCdFx99vc1j" outputId="ec609b74-590b-4da5-b5dd-3bab715a3206"
# Bug fix: the original cell looked up dictionary_G (EXPORTS-from-GDP)
# instead of the EXPORTS-from-IMPORTS models built just above.
a = dictionary_I['Afghanistan']
a.predict([[1,2020,300]])
# + colab={"base_uri": "https://localhost:8080/"} id="m2hIfdsn0n8O" outputId="4270cc69-f573-48da-8164-5c7b0e717db5"
# NOTE(review): `prediction` and `j` here are leftovers from the last
# iteration of the R^2 loop above (which predicted IMPORTS for the final
# country). Evaluating that prediction against j.EXPORTS is almost
# certainly not the intended comparison — confirm before trusting these.
print('MAE:', metrics.mean_absolute_error(prediction,j.EXPORTS))
print('RMSE:', np.sqrt(metrics.mean_squared_error(prediction,j.EXPORTS)))
print('R2_Score: ', metrics.r2_score(prediction,j.EXPORTS))
# + [markdown] id="Mo-Ly6m7Q5po"
# ***SVM(EXPORTS)***
# + colab={"base_uri": "https://localhost:8080/"} id="ZzBkjcFbQ9Dx" outputId="0eb96c5f-3e4e-4b88-d85f-ff40361a2911"
data.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="TjuY5hOzR8ZG" outputId="ea1c087f-5d83-4258-b587-52d17b65f3c8"
data['COUNTRY'].value_counts()
# + id="3LcrFCunSvNx"
# Single-feature regression setup: predict EXPORTS from GDP alone.
x = data['GDP']
y = data['EXPORTS']
# + colab={"base_uri": "https://localhost:8080/"} id="l6_sTTxbSZWs" outputId="1b077ce3-a262-405c-fb44-0d6bfb5d2272"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.2, random_state=4)
x_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="WyS_RumrXo2G" outputId="8840f25b-a867-410f-fed1-bee8508c4b63"
x_test.shape
# + colab={"base_uri": "https://localhost:8080/"} id="JsBV9_p6Y7L5" outputId="2a5828b9-1922-4207-f3c7-b76ccd2f5a1e"
y_train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="dBM-pYwmYb4y" outputId="46b65172-bf2a-46cd-8385-72654d40b44e"
from sklearn import svm
# NOTE(review): this SVC (a classifier) is constructed but never assigned
# or fitted — the cell only echoes its repr; the actual model used below
# is an SVR.
svm.SVC(kernel='linear', gamma='auto', C=2)
# + colab={"base_uri": "https://localhost:8080/"} id="UrKBxuAIgOEn" outputId="b55bbd68-ba17-4ecb-b5ea-83fa5d16ed75"
x_train[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="a2aszz2mgYPX" outputId="198fa48e-754b-44d7-a8a6-9a9cdbb07f52"
y_train[:5]
# + colab={"base_uri": "https://localhost:8080/"} id="j3E66N-ykhmJ" outputId="7327e351-ab9b-47e6-c639-f3e3939fb10b"
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
# NOTE(review): this SVR is also constructed and discarded; the fitted
# model below uses a sigmoid kernel instead.
svm.SVR(kernel='linear', gamma='scale', C=2)
# + colab={"base_uri": "https://localhost:8080/"} id="NiKT29jlkkBR" outputId="cd0a8205-e252-42f9-85a3-d697fd145b74"
# Fit an unoptimized sigmoid-kernel SVR on the single GDP feature
# (reshaped to the (n_samples, 1) shape sklearn expects).
regressor=SVR(kernel='sigmoid',gamma = 'scale').fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1))
# + id="wtU8Sd3Kcsvy"
# + id="us4NmxWQmHU-"
svm_pred = regressor.predict(np.array(x_test).reshape(-1,1))
# + colab={"base_uri": "https://localhost:8080/"} id="_aeJLhrzMayP" outputId="dffaa454-5e21-4695-90ac-4cfb43feacf1"
print(svm_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="M6bt1GZTmbp1" outputId="e994dd99-1e67-408f-83c3-5a7cbff08f25"
# Test-set error metrics for the unoptimized SVR.
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, svm_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, svm_pred)))
print('R2_Score: ', metrics.r2_score(y_test, svm_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="16vVsXVkMqlH" outputId="41269cba-bf9e-4362-bec3-81fc48f165cc"
# Predicted vs. true scatter for the unoptimized SVR.
fig = plt.figure(figsize=(12, 6))
plt.scatter(y_test,svm_pred, linewidths=2, edgecolors='b', color='coral')
plt.xlabel('True GDP')
plt.ylabel('EXPORTS Predictions')
plt.title('Unoptimized SVM prediction Performance (with scaling)')
plt.grid()
plt.show()
# + [markdown] id="hFYcMMBwMLUY"
# **using RBF**
# + colab={"base_uri": "https://localhost:8080/"} id="g4OEZ_FcuuOe" outputId="fcc2927e-a69e-4a06-a527-9e61dd72e61a"
# Grid-search an RBF-kernel SVR over C and gamma (5-fold CV by default).
param_grid = {'C': [1, 10, 100], 'gamma': [0.01,0.001,0.0001], 'kernel': ['rbf']}
grid = GridSearchCV(SVR(),param_grid,refit=True,verbose=3)
grid.fit(np.array(x_train).reshape(-1,1),np.array(y_train).reshape(-1,1))
# + id="4KfFSKTyOMpq"
grid.best_params_
grid.best_estimator_
# Predictions from the best (refit) estimator.
grid_predictions = grid.predict(np.array(x_test).reshape(-1,1))
# + colab={"base_uri": "https://localhost:8080/"} id="w8DF0GQVOfGa" outputId="75727e83-5ee3-4abe-9c37-d715d46a12c7"
print('MAE:', metrics.mean_absolute_error(y_test, grid_predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, grid_predictions)))
print('R2_Score: ', metrics.r2_score(y_test, grid_predictions))
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="KB0voOHWOkqd" outputId="9f4c90ec-0cc1-45b9-bc1a-05c527a6b0b2"
# Predicted vs. true scatter for the tuned SVR.
fig = plt.figure(figsize=(12, 6))
plt.scatter(y_test,grid_predictions,color='coral', linewidths=2, edgecolors='k')
plt.xlabel('GDP')
plt.ylabel('Predictions')
plt.title('Optimized SVM prediction Performance')
plt.grid()
plt.show()
# + [markdown] id="phIkpA8z-LOa"
# **random forest**
# + id="EB3r_E77_O2X"
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
# + id="DpOQlWzqP_ac"
# Fresh split (different random_state than the SVM section) for the
# random-forest experiments: EXPORTS from GDP.
y = data['EXPORTS']
X = data['GDP']
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101)
# + id="A_MTwFDcQU53"
# Standardize the GDP feature for the second random forest.
sc_X = StandardScaler()
x1_train = sc_X.fit_transform(np.array(x_train).reshape(-1,1))
# Use transform (not fit_transform) on the test set: the scaler must be
# fitted on training data only, otherwise test statistics leak into the
# preprocessing and train/test features are scaled inconsistently.
x1_test = sc_X.transform(np.array(x_test).reshape(-1,1))
y1_train = y_train
y1_test = y_test
# + colab={"base_uri": "https://localhost:8080/"} id="lq34IbhKRGhL" outputId="dcbd20b9-4af4-42fc-b600-8467e3f66b0e"
# Two identical forests: rf1 on raw GDP, rf2 on the standardized feature.
rf1 = RandomForestRegressor(random_state=101, n_estimators=200)
rf2 = RandomForestRegressor(random_state=101, n_estimators=200)
rf1.fit((np.array(x_train).reshape(-1,1)), (np.array(y_train).reshape(-1,1)))
rf2.fit(x1_train, y1_train)
# + id="Eu6S0jq8R7Xd"
rf1_pred = rf1.predict(np.array(x_test).reshape(-1,1))
rf2_pred = rf2.predict(x1_test)
# + colab={"base_uri": "https://localhost:8080/"} id="3K8mH3BsSOjN" outputId="9638cf5f-89a8-4f4e-9577-de8b481cf4cb"
# Compare unscaled vs. scaled-feature forests on the test split.
print('Random Forest Performance:')
print('MAE:', metrics.mean_absolute_error(y_test, rf1_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, rf1_pred)))
print('R2_Score: ', metrics.r2_score(y_test, rf1_pred))
print('\nselected features, No scaling:')
print('MAE:', metrics.mean_absolute_error(y1_test, rf2_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y1_test, rf2_pred)))
print('R2_Score: ', metrics.r2_score(y1_test, rf2_pred))
# + [markdown] id="-W6Pzq9tSfmz"
# optimization
# + id="j-YmHt9YShBH"
# Hyper-parameter grid for the random forest.
# NOTE(review): max_features='auto' is deprecated/removed in newer
# scikit-learn versions — presumably 'sqrt' alone (or 1.0) is intended;
# verify against the installed sklearn version.
rf_param_grid = {'max_features': ['sqrt', 'auto'],
                 'min_samples_leaf': [1, 3, 5],
                 'n_estimators': [100, 500, 1000],
                 'bootstrap': [False, True]}
# + id="SoLo2pQNSnHE"
rf_grid = GridSearchCV(estimator= RandomForestRegressor(), param_grid = rf_param_grid, n_jobs=-1, verbose=0)
# + colab={"base_uri": "https://localhost:8080/"} id="mP9hXr3iSrmu" outputId="dcfd5596-88d3-4f10-c25a-7a5a3db83b20"
rf_grid.fit((np.array(x_train).reshape(-1,1)),(np.array(y_train).reshape(-1,1)))
# + id="wHzEzFB6TCjU"
rf_grid.best_params_
rf_grid.best_estimator_
rf_grid_predictions = rf_grid.predict(np.array(x_test).reshape(-1,1))
# + id="gR9JQ_RDglP1" colab={"base_uri": "https://localhost:8080/"} outputId="2bfff549-d1c7-4336-ea83-eb6ea41e5212"
# Test-set error metrics for the tuned forest.
print('MAE:', metrics.mean_absolute_error(y_test, rf_grid_predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, rf_grid_predictions)))
print('R2_Score: ', metrics.r2_score(y_test, rf_grid_predictions))
# + id="CoZ6cSskg0tO" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="5d618722-785e-43c9-c2e9-bfa7e7b146b0"
# Predicted vs. true scatter for the tuned forest.
fig = plt.figure(figsize=(12, 6))
plt.scatter(y_test,rf_grid_predictions, linewidths=2, edgecolors='b', color='coral')
plt.xlabel('True GDP')
plt.ylabel('Predictions')
plt.title('Optimized Random Forest prediction Performance')
plt.grid()
plt.show()
# + [markdown] id="_Hj9t5Ph1wAO"
# **COMPARISION B/W THE DIFF. MODELS**
# + id="z3HjzD111t2a"
# Compare the tuned SVM and tuned Random Forest predictions against the
# true test targets on one scatter plot, as the "COMPARISION B/W THE
# DIFF. MODELS" heading announces. (The original `sns.lmplot(x='')` was
# an incomplete call — lmplot requires a `data` frame — and raised.)
fig = plt.figure(figsize=(12, 6))
plt.scatter(y_test, grid_predictions, color='coral', label='SVM (RBF, tuned)')
plt.scatter(y_test, rf_grid_predictions, color='steelblue', label='Random Forest (tuned)')
plt.xlabel('True EXPORTS')
plt.ylabel('Predictions')
plt.title('Comparison between the different models')
plt.legend()
plt.grid()
plt.show()
# --- end of GDP_Dataset_model.ipynb; next notebook follows ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: v3
# language: python
# name: v-jpt-3
# ---
# # Testing cosmogan
# Aug 25, 2020
#
# Borrowing pieces of code from :
#
# - https://github.com/pytorch/tutorials/blob/11569e0db3599ac214b03e01956c2971b02c64ce/beginner_source/dcgan_faces_tutorial.py
# - https://github.com/exalearn/epiCorvid/tree/master/cGAN
# +
import os
import random
import logging
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
#from torchsummary import summary
from torch.utils.data import DataLoader, TensorDataset
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
# import torch.fft
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
import argparse
import time
from datetime import datetime
import glob
import pickle
import yaml
import collections
import socket
import shutil
# # Import modules from other files
# from utils import *
# from spec_loss import *
# -
# %matplotlib widget
# ## Modules
# +
### Transformation functions for image pixel values
def f_transform(x):
    """Map raw pixel values to the open interval (-1, 1).

    Inverse of :func:`f_invtransform`.
    """
    scaled = (2. * x) / (x + 4.)
    return scaled - 1.
def f_invtransform(s):
    """Invert :func:`f_transform`, mapping (-1, 1) back to pixel values."""
    return (4. * (1. + s)) / (1. - s)
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN-style weight init, applied via ``net.apply(weights_init)``.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias. Other layer types are left untouched.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Generator Code
class View(nn.Module):
    """Reshape layer: an ``nn.Module`` wrapper around ``Tensor.view``.

    Lets a fixed reshape sit inside an ``nn.Sequential``.
    """

    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        # *shape* may contain -1 for an inferred dimension.
        return x.view(*self.shape)
class Generator(nn.Module):
    """DCGAN-style generator: latent vector -> image batch in (-1, 1).

    Hyper-parameters (ngpu, nz, nc, ngf, kernel_size, stride, g_padding)
    are read from the gdict config dictionary.
    """
    def __init__(self, gdict):
        super(Generator, self).__init__()
        ## Define new variables from dict
        keys=['ngpu','nz','nc','ngf','kernel_size','stride','g_padding']
        ngpu, nz,nc,ngf,kernel_size,stride,g_padding=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
        self.main = nn.Sequential(
            # nn.ConvTranspose2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode)
            # Project the latent vector to a flat 8x8 feature-map stack.
            nn.Linear(nz,nc*ngf*8*8*8),# 32768
            # NOTE(review): BatchNorm2d here receives the 2D output of the
            # Linear layer, and nc channels rather than the Linear width —
            # BatchNorm2d normally requires 4D input. Confirm this runs on
            # the intended torch version / input shape.
            nn.BatchNorm2d(nc,eps=1e-05, momentum=0.9, affine=True),
            nn.ReLU(inplace=True),
            # Reshape flat features to (ngf*8, 8, 8) maps.
            View(shape=[-1,ngf*8,8,8]),
            nn.ConvTranspose2d(ngf * 8, ngf * 4, kernel_size, stride, g_padding, output_padding=1, bias=False),
            nn.BatchNorm2d(ngf*4,eps=1e-05, momentum=0.9, affine=True),
            nn.ReLU(inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d( ngf * 4, ngf * 2, kernel_size, stride, g_padding, 1, bias=False),
            nn.BatchNorm2d(ngf*2,eps=1e-05, momentum=0.9, affine=True),
            nn.ReLU(inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d( ngf * 2, ngf, kernel_size, stride, g_padding, 1, bias=False),
            nn.BatchNorm2d(ngf,eps=1e-05, momentum=0.9, affine=True),
            nn.ReLU(inplace=True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( ngf, nc, kernel_size, stride,g_padding, 1, bias=False),
            nn.Tanh()  # outputs in (-1, 1), matching f_transform's range
        )
    def forward(self, ip):
        return self.main(ip)
class Discriminator(nn.Module):
    """DCGAN-style discriminator.

    forward() returns a list: the output of each Conv layer followed by
    the final logit. The intermediate activations feed the
    feature-matching loss (f_FM_loss); callers read the logit as
    ``output[-1]``.
    """
    def __init__(self, gdict):
        super(Discriminator, self).__init__()
        ## Define new variables from dict
        keys=['ngpu','nz','nc','ndf','kernel_size','stride','d_padding']
        ngpu, nz,nc,ndf,kernel_size,stride,d_padding=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            # nn.Conv2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode)
            nn.Conv2d(nc, ndf,kernel_size, stride, d_padding, bias=True),
            nn.BatchNorm2d(ndf,eps=1e-05, momentum=0.9, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, kernel_size, stride, d_padding, bias=True),
            nn.BatchNorm2d(ndf * 2,eps=1e-05, momentum=0.9, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size, stride, d_padding, bias=True),
            nn.BatchNorm2d(ndf * 4,eps=1e-05, momentum=0.9, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, kernel_size, stride, d_padding, bias=True),
            nn.BatchNorm2d(ndf * 8,eps=1e-05, momentum=0.9, affine=True),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Flatten(),
            nn.Linear(nc*ndf*8*8*8, 1)
            # nn.Sigmoid()  # omitted: raw logits are returned
        )
    def forward(self, ip):
        # Run each sub-module by hand so intermediate activations can be
        # collected for the feature-matching loss.
        results=[ip]
        lst_idx=[]
        for i,submodel in enumerate(self.main.children()):
            mid_output=submodel(results[-1])
            results.append(mid_output)
            ## Select indices in list corresponding to output of Conv layers
            if submodel.__class__.__name__.startswith('Conv'):
                lst_idx.append(i)
        # NOTE(review): FMloss is hard-coded True, so the else branch is
        # currently dead; flip it to return only the final logit.
        FMloss=True
        if FMloss:
            ans=[results[1:][i] for i in lst_idx + [-1]]
        else :
            ans=results[-1]
        return ans
def f_gen_images(gdict,netG,optimizerG,ip_fname,op_loc,op_strg='inf_img_',op_size=500):
    '''Generate images for best saved models.
    Arguments: gdict, netG, optimizerG,
    ip_fname: name of input checkpoint file
    op_loc: output directory prefix for the saved .npy file
    op_strg: [string name for output file]
    op_size: Number of images to generate
    Loads generator weights from the checkpoint, samples op_size latent
    vectors and saves the generated batch as a numpy array.
    '''
    nz,device=gdict['nz'],gdict['device']
    try:  # handling cpu vs gpu: map_location needed when no GPU is present
        if torch.cuda.is_available(): checkpoint=torch.load(ip_fname)
        else: checkpoint=torch.load(ip_fname,map_location=torch.device('cpu'))
    except Exception as e:
        # Best-effort: a missing/unreadable checkpoint skips generation.
        print(e)
        print("skipping generation of images for ",ip_fname)
        return
    ## Load checkpoint (DataParallel stores weights under .module)
    if gdict['multi-gpu']:
        netG.module.load_state_dict(checkpoint['G_state'])
    else:
        netG.load_state_dict(checkpoint['G_state'])
    ## Load other stuff
    iters=checkpoint['iters']
    epoch=checkpoint['epoch']
    optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
    # Generate batch of latent vectors
    noise = torch.randn(op_size, 1, 1, nz, device=device)
    # Generate fake image batch with G
    netG.eval()  ## This is required before running inference
    with torch.no_grad():  ## This is important. fails without it for multi-gpu
        gen = netG(noise)
        gen_images=gen.detach().cpu().numpy()[:,:,:,:]
    print(gen_images.shape)
    op_fname='%s_epoch-%s_step-%s.npy'%(op_strg,epoch,iters)
    np.save(op_loc+op_fname,gen_images)
    print("Image saved in ",op_fname)
def f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc):
    ''' Checkpoint model.

    Saves epoch/iteration counters, the best chi metrics so far, and the
    state dicts of both networks and optimizers to save_loc. For
    DataParallel models the wrapped module's state dict is saved so the
    checkpoint stays loadable without the wrapper.
    '''
    if gdict['multi-gpu']:  ## Dataparallel
        torch.save({'epoch':epoch,'iters':iters,'best_chi1':best_chi1,'best_chi2':best_chi2,
                    'G_state':netG.module.state_dict(),'D_state':netD.module.state_dict(),'optimizerG_state_dict':optimizerG.state_dict(),
                    'optimizerD_state_dict':optimizerD.state_dict()}, save_loc)
    else :
        torch.save({'epoch':epoch,'iters':iters,'best_chi1':best_chi1,'best_chi2':best_chi2,
                    'G_state':netG.state_dict(),'D_state':netD.state_dict(),'optimizerG_state_dict':optimizerG.state_dict(),
                    'optimizerD_state_dict':optimizerD.state_dict()}, save_loc)
def f_load_checkpoint(ip_fname,netG,netD,optimizerG,optimizerD,gdict):
    ''' Load saved checkpoint into the given networks and optimizers.
    Also loads step, epoch, best_chi1, best_chi2.
    Leaves both networks in train() mode. Raises SystemError if the
    checkpoint cannot be read.
    '''
    try:
        checkpoint=torch.load(ip_fname)
    except Exception as e:
        print(e)
        print("skipping generation of images for ",ip_fname)
        raise SystemError
    ## Load checkpoint (DataParallel stores weights under .module)
    if gdict['multi-gpu']:
        netG.module.load_state_dict(checkpoint['G_state'])
        netD.module.load_state_dict(checkpoint['D_state'])
    else:
        netG.load_state_dict(checkpoint['G_state'])
        netD.load_state_dict(checkpoint['D_state'])
    optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])
    optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])
    iters=checkpoint['iters']
    epoch=checkpoint['epoch']
    best_chi1=checkpoint['best_chi1']
    best_chi2=checkpoint['best_chi2']
    netG.train()
    netD.train()
    return iters,epoch,best_chi1,best_chi2
# +
####################
### Pytorch code ###
####################
def f_torch_radial_profile(img, center=(None,None)):
    '''Radially average a 2D image about *center*.

    Uses torch.bincount over integer radii, which breaks backprop, so
    this version is kept only for reference. Returns the profile with
    the first and last radial bins dropped.
    '''
    # Coordinate grids over the image.
    yy, xx = torch.meshgrid(torch.arange(0, img.shape[0]), torch.arange(0, img.shape[1]))
    if center[0] is None and center[1] is None:
        # Default to the geometric centre of the grid.
        center = torch.Tensor([(xx.max() - xx.min()) / 2.0, (yy.max() - yy.min()) / 2.0])
    # Integer radial distance of each pixel from the centre.
    rad = torch.sqrt((xx - center[0]) ** 2 + (yy - center[1]) ** 2)
    rad = rad.int()
    # Per-radius sum of pixel values and per-radius pixel count.
    sums = torch.bincount(torch.reshape(rad, (-1,)),
                          weights=torch.reshape(img, (-1,)).type(torch.DoubleTensor))
    counts = torch.bincount(torch.reshape(rad, (-1,)))
    profile = sums / counts
    return profile[1:-1]
def f_torch_get_azimuthalAverage_with_batch(image, center=None):  ### Not used in this code.
    """
    Calculate the azimuthally averaged radial profile. Only use if you need to combine batches.
    image - a (batch, channel, height, width) tensor
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    source: https://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    """
    batch, channel, height, width = image.shape
    # Create a grid of points with x and y coordinates
    y, x = np.indices([height,width])
    if not center:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    # Get the radial coordinate for every grid point. Array has the shape of image
    r = torch.tensor(np.hypot(x - center[0], y - center[1]))
    # Get sorted radii
    ind = torch.argsort(torch.reshape(r, (batch, channel,-1)))
    r_sorted = torch.gather(torch.reshape(r, (batch, channel, -1,)),2, ind)
    i_sorted = torch.gather(torch.reshape(image, (batch, channel, -1,)),2, ind)
    # Get the integer part of the radii (bin size = 1)
    r_int=r_sorted.to(torch.int32)
    # Find all pixels that fall within each radial bin.
    deltar = r_int[:,:,1:] - r_int[:,:,:-1]  # Assumes all radii represented
    rind = torch.reshape(torch.where(deltar)[2], (batch, -1))  # location of changes in radius
    rind=torch.unsqueeze(rind,1)
    nr = (rind[:,:,1:] - rind[:,:,:-1]).type(torch.float)  # number of pixels per radius bin
    # Cumulative sum to figure out sums for each radius bin
    csum = torch.cumsum(i_sorted, axis=-1)
    # Difference of cumulative sums at bin edges = per-bin sums.
    tbin = torch.gather(csum, 2, rind[:,:,1:]) - torch.gather(csum, 2, rind[:,:,:-1])
    radial_prof = tbin / nr
    return radial_prof
def f_get_rad(img):
    '''Precompute the radius map and flat sort order for azimuthal averaging.

    Returns (r, ind): *r* is an (H, W) tensor of pixel distances from the
    image centre, *ind* the argsort of the flattened radii. Both are
    detached so they can be cached and reused across
    f_torch_get_azimuthalAverage calls.
    '''
    height, width = img.shape[-2:]
    # Pixel coordinate grids.
    yy, xx = np.indices([height, width])
    # Geometric centre of the grid (including fractional pixels).
    centre = np.array([(xx.max() - xx.min()) / 2.0, (yy.max() - yy.min()) / 2.0])
    radii = torch.tensor(np.hypot(xx - centre[0], yy - centre[1]))
    order = torch.argsort(torch.reshape(radii, (-1,)))
    return radii.detach(), order.detach()
def f_torch_get_azimuthalAverage(image,r,ind):
    """
    Calculate the azimuthally averaged radial profile of a 2D image
    (here: of the 2D power spectrum).
    image - The 2D image
    r, ind - precomputed radius map and flat sort order from f_get_rad;
             precomputing avoids re-doing the argsort on every call.
    source: https://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
    """
    # (The commented block below is the original self-contained version
    # that recomputed r and ind on every call.)
    # height, width = image.shape
    # y, x = np.indices([height,width])
    # center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    # r = torch.tensor(np.hypot(x - center[0], y - center[1]))
    # ind = torch.argsort(torch.reshape(r, (-1,)))
    # Sort radii and pixel values with the same permutation.
    r_sorted = torch.gather(torch.reshape(r, ( -1,)),0, ind)
    i_sorted = torch.gather(torch.reshape(image, ( -1,)),0, ind)
    # Get the integer part of the radii (bin size = 1)
    r_int=r_sorted.to(torch.int32)
    # Find all pixels that fall within each radial bin.
    deltar = r_int[1:] - r_int[:-1]  # Assumes all radii represented
    rind = torch.reshape(torch.where(deltar)[0], (-1,))  # location of changes in radius
    nr = (rind[1:] - rind[:-1]).type(torch.float)  # number of pixels per radius bin
    # Cumulative sum to figure out sums for each radius bin
    csum = torch.cumsum(i_sorted, axis=-1)
    # Difference of cumulative sums at bin edges = per-bin sums.
    tbin = torch.gather(csum, 0, rind[1:]) - torch.gather(csum, 0, rind[:-1])
    radial_prof = tbin / nr
    return radial_prof
def f_torch_fftshift(real, imag):
    """fftshift both the real and imaginary parts of a spectrum.

    Rolls every dimension by half its size, moving the zero-frequency
    component to the centre (torch equivalent of np.fft.fftshift).
    """
    for axis in range(len(real.size())):
        real = torch.roll(real, dims=axis, shifts=real.size(axis) // 2)
        imag = torch.roll(imag, dims=axis, shifts=imag.size(axis) // 2)
    return real, imag
def f_torch_compute_spectrum(arr,r,ind):
    '''Radially averaged power spectrum of a 2D field.

    arr : 2D image tensor; r, ind : precomputed radius map and sort order
    from f_get_rad. The field is normalised by GLOBAL_MEAN before the FFT.
    '''
    GLOBAL_MEAN=1.0
    arr=(arr-GLOBAL_MEAN)/(GLOBAL_MEAN)
    # torch.rfft was removed in PyTorch 1.8; torch.fft.fftn over the last
    # two dims is the equivalent of the old
    # torch.rfft(arr, signal_ndim=2, onesided=False).
    y1=torch.fft.fftn(arr,dim=(-2,-1))
    real,imag=f_torch_fftshift(y1.real,y1.imag)
    y2=real**2+imag**2  ## Power: squared magnitude of each complex value
    z1=f_torch_get_azimuthalAverage(y2,r,ind)  ## Compute radial profile
    return z1
def f_torch_compute_batch_spectrum(arr, r, ind):
    """Apply f_torch_compute_spectrum to each image in a batch and stack."""
    spectra = [f_torch_compute_spectrum(img, r, ind) for img in arr]
    return torch.stack(spectra)
def f_torch_image_spectrum(x,num_channels,r,ind):
    '''
    Per-channel mean and spread of the radial power spectrum of a batch.
    Data has to be in the form (batch,channel,x,y).
    Returns (mean, sdev), each stacked over channels.
    NOTE(review): despite the name, sdev currently holds the *variance*
    (torch.var); the std variants are commented out below. Confirm which
    is intended before comparing against reference spectra.
    '''
    mean=[[] for i in range(num_channels)]
    sdev=[[] for i in range(num_channels)]
    for i in range(num_channels):
        arr=x[:,i,:,:]
        batch_pk=f_torch_compute_batch_spectrum(arr,r,ind)
        mean[i]=torch.mean(batch_pk,axis=0)
        # sdev[i]=torch.std(batch_pk,axis=0)/np.sqrt(batch_pk.shape[0])
        # sdev[i]=torch.std(batch_pk,axis=0)
        sdev[i]=torch.var(batch_pk,axis=0)
    mean=torch.stack(mean)
    sdev=torch.stack(sdev)
    return mean,sdev
def f_compute_hist(data, bins):
    """Histogram *data* into *bins* bins, normalised to sum to *bins*.

    Falls back to a zero histogram if torch.histc fails, printing the
    error instead of raising (best-effort behaviour for the training loop).
    """
    try:
        hist = torch.histc(data, bins=bins)
        ## A kind of normalization of histograms: divide by total sum
        return (hist * bins) / torch.sum(hist)
    except Exception as err:
        print(err)
        return torch.zeros(bins)
### Losses
def loss_spectrum(spec_mean, spec_mean_ref, spec_std, spec_std_ref, image_size, lambda_spec_mean, lambda_spec_var):
    ''' Loss function for the spectrum: log-MSE of the mean plus log-MSE
    of the spread, each with its own coupling.

    Only the first image_size/2 radial bins enter the loss (the physical
    half of the spectrum). The first axis is the channel index; multiple
    channels are averaged over together with the bins.
    '''
    cutoff = int(image_size / 2)  ### use only N/2 indices for loss calc.
    mean_term = torch.log(torch.mean(torch.pow(spec_mean[:, :cutoff] - spec_mean_ref[:, :cutoff], 2)))
    var_term = torch.log(torch.mean(torch.pow(spec_std[:, :cutoff] - spec_std_ref[:, :cutoff], 2)))
    total = lambda_spec_mean * mean_term + lambda_spec_var * var_term
    if torch.isnan(var_term).any():
        print("spec loss with nan", total)
    return total
def loss_hist(hist_sample, hist_ref):
    """Log of the mean squared difference between two histograms."""
    weight = 1.0  # fixed coupling for the histogram loss
    return weight * torch.log(torch.mean(torch.pow(hist_sample - hist_ref, 2)))
def f_FM_loss(real_output,fake_output,lambda_fm,gdict):
    '''
    Module to implement Feature-Matching loss. Reads all but the last
    element of the Discriminator output list and penalises the squared
    difference of activation means, scaled by lambda_fm.
    NOTE(review): real_output[:-1][0] selects only the FIRST intermediate
    activation and the zip then iterates over its leading (batch) axis —
    presumably the intent was to iterate over all feature maps
    (real_output[:-1]); confirm before relying on this loss.
    '''
    FM=torch.Tensor([0.0]).to(gdict['device'])
    for i,j in zip(real_output[:-1][0],fake_output[:-1][0]):
        real_mean=torch.mean(i)
        fake_mean=torch.mean(j)
        # clone() keeps the accumulation autograd-safe.
        FM=FM.clone()+torch.sum(torch.square(real_mean-fake_mean))
    return lambda_fm*FM
def f_gp_loss(grads, l=1.0):
    '''
    Gradient-penalty loss: batch mean of the summed squared gradients of
    D with respect to its (image-shaped) input, scaled by *l*.
    '''
    per_sample = torch.sum(torch.square(grads), dim=[1, 2, 3])
    return l * torch.mean(per_sample)
# -
# ## Train loop
# +
def f_train_loop(dataloader,metrics_df,gdict,fixed_noise,mean_spec_val,sdev_spec_val,hist_val,r,ind):
    ''' Train epochs.

    NOTE(review): relies on the module-level globals netG, netD,
    criterion, optimizerG and optimizerD being defined before the call.
    Writes per-step metrics into metrics_df and checkpoints models and
    sample images under gdict['save_dir'].
    '''
    print("Inside train loop")
    ## Define new variables from dict
    keys=['image_size','start_epoch','epochs','iters','best_chi1','best_chi2','save_dir','device','flip_prob','nz','batch_size','bns']
    image_size,start_epoch,epochs,iters,best_chi1,best_chi2,save_dir,device,flip_prob,nz,batchsize,bns=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
    for epoch in range(start_epoch,epochs):
        t_epoch_start=time.time()
        for count, data in enumerate(dataloader):
            ####### Train GAN ########
            netG.train(); netD.train();  ### Need to add these after inference and before training
            tme1=time.time()
            ### Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            netD.zero_grad()
            real_cpu = data[0].to(device)
            real_cpu.requires_grad=True  # needed for the gradient-penalty term
            b_size = real_cpu.size(0)
            real_label = torch.full((b_size,), 1, device=device,dtype=float)
            fake_label = torch.full((b_size,), 0, device=device,dtype=float)
            g_label = torch.full((b_size,), 1, device=device,dtype=float)  ## No flipping for Generator labels
            # Flip labels with probability flip_prob (label noise for D).
            for idx in np.random.choice(np.arange(b_size),size=int(np.ceil(b_size*flip_prob))):
                real_label[idx]=0; fake_label[idx]=1
            # Generate fake image batch with G
            noise = torch.randn(b_size, 1, 1, nz, device=device)
            fake = netG(noise)
            # Forward pass real batch through D; [-1] is the logit.
            real_output = netD(real_cpu)
            errD_real = criterion(real_output[-1].view(-1), real_label.float())
            errD_real.backward(retain_graph=True)
            D_x = real_output[-1].mean().item()
            # Forward pass fake batch through D
            fake_output = netD(fake.detach())  # The detach is important
            errD_fake = criterion(fake_output[-1].view(-1), fake_label.float())
            errD_fake.backward(retain_graph=True)
            D_G_z1 = fake_output[-1].mean().item()
            errD = errD_real + errD_fake
            if gdict['lambda_gp']:  ## Add gradient - penalty loss
                grads=torch.autograd.grad(outputs=real_output[-1],inputs=real_cpu,grad_outputs=torch.ones_like(real_output[-1]),allow_unused=False,create_graph=True)[0]
                gp_loss=f_gp_loss(grads,gdict['lambda_gp'])
                # NOTE(review): gp_loss is added to errD only after the
                # backward() calls above, so it is logged but never
                # backpropagated into the D update — confirm intent.
                errD = errD + gp_loss
            else:
                gp_loss=torch.Tensor([np.nan])
            optimizerD.step()
            ### Update G network: maximize log(D(G(z)))
            netG.zero_grad()
            output = netD(fake)
            errG_adv = criterion(output[-1].view(-1), g_label.float())
            # Histogram pixel intensity loss
            hist_gen=f_compute_hist(fake,bins=bns)
            hist_loss=loss_hist(hist_gen,hist_val.to(device))
            # Add spectral loss (on back-transformed pixel values)
            mean,sdev=f_torch_image_spectrum(f_invtransform(fake),1,r.to(device),ind.to(device))
            spec_loss=loss_spectrum(mean,mean_spec_val.to(device),sdev,sdev_spec_val.to(device),image_size,gdict['lambda_spec_mean'],gdict['lambda_spec_var'])
            errG=errG_adv
            if gdict['lambda_spec_mean']: errG = errG+ spec_loss
            if gdict['lambda_fm']:  ## Add feature matching loss
                fm_loss=f_FM_loss(real_output,fake_output,gdict['lambda_fm'],gdict)
                errG= errG+ fm_loss
            else:
                fm_loss=torch.Tensor([np.nan])
            # Abort on NaN loss rather than training on garbage.
            if torch.isnan(errG).any():
                logging.info(errG)
                raise SystemError
            # Calculate gradients for G
            errG.backward(retain_graph=True)
            D_G_z2 = output[-1].mean().item()
            ### Implement Gradient clipping
            if gdict['grad_clip']:
                nn.utils.clip_grad_norm_(netG.parameters(),gdict['grad_clip'])
                nn.utils.clip_grad_norm_(netD.parameters(),gdict['grad_clip'])
            optimizerG.step()
            tme2=time.time()
            ####### Store metrics ######## (rank 0 only)
            if gdict['world_rank']==0:
                if ((count % gdict['checkpoint_size'] == 0)):
                    logging.info('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_adv: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                          % (epoch, epochs, count, len(dataloader), errD.item(), errG_adv.item(),errG.item(), D_x, D_G_z1, D_G_z2)),
                    logging.info("Spec loss: %s,\t hist loss: %s"%(spec_loss.item(),hist_loss.item())),
                    logging.info("Training time for step %s : %s"%(iters, tme2-tme1))
                # Save metrics
                cols=['step','epoch','Dreal','Dfake','Dfull','G_adv','G_full','spec_loss','hist_loss','fm_loss','gp_loss','D(x)','D_G_z1','D_G_z2','time']
                vals=[iters,epoch,errD_real.item(),errD_fake.item(),errD.item(),errG_adv.item(),errG.item(),spec_loss.item(),hist_loss.item(),fm_loss.item(),gp_loss.item(),D_x,D_G_z1,D_G_z2,tme2-tme1]
                for col,val in zip(cols,vals): metrics_df.loc[iters,col]=val
                ### Checkpoint the best model
                checkpoint=True
                iters += 1  ### Model has been updated, so update iters before saving metrics and model.
                ### Compute validation metrics for updated model
                netG.eval()
                with torch.no_grad():
                    #fake = netG(fixed_noise).detach().cpu()
                    fake = netG(fixed_noise)
                    hist_gen=f_compute_hist(fake,bins=bns)
                    hist_chi=loss_hist(hist_gen,hist_val.to(device))
                    mean,sdev=f_torch_image_spectrum(f_invtransform(fake),1,r.to(device),ind.to(device))
                    spec_chi=loss_spectrum(mean,mean_spec_val.to(device),sdev,sdev_spec_val.to(device),image_size,gdict['lambda_spec_mean'],gdict['lambda_spec_var'])
                # Storing chi for next step
                for col,val in zip(['spec_chi','hist_chi'],[spec_chi.item(),hist_chi.item()]): metrics_df.loc[iters,col]=val
                # Checkpoint model for continuing run
                if count == len(dataloader)-1:  ## Check point at last step of epoch
                    f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_last.tar')
                if (checkpoint and (epoch > 1)):  # Choose best models by metric
                    if hist_chi< best_chi1:
                        f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_best_hist.tar')
                        best_chi1=hist_chi.item()
                        logging.info("Saving best hist model at epoch %s, step %s."%(epoch,iters))
                    if spec_chi< best_chi2:
                        f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_best_spec.tar')
                        best_chi2=spec_chi.item()
                        logging.info("Saving best spec model at epoch %s, step %s"%(epoch,iters))
                    if iters in gdict['save_steps_list']:
                        f_save_checkpoint(gdict,epoch,iters,best_chi1,best_chi2,netG,netD,optimizerG,optimizerD,save_loc=save_dir+'/models/checkpoint_{0}.tar'.format(iters))
                        logging.info("Saving given-step at epoch %s, step %s."%(epoch,iters))
                # Save G's output on fixed_noise
                if ((iters % gdict['checkpoint_size'] == 0) or ((epoch == epochs-1) and (count == len(dataloader)-1))):
                    netG.eval()
                    with torch.no_grad():
                        fake = netG(fixed_noise).detach().cpu()
                        img_arr=np.array(fake[:,:,:,:])
                        fname='gen_img_epoch-%s_step-%s'%(epoch,iters)
                        np.save(save_dir+'/images/'+fname,img_arr)
        t_epoch_end=time.time()
        if gdict['world_rank']==0:
            logging.info("Time taken for epoch %s, count %s: %s for rank %s"%(epoch,count,t_epoch_end-t_epoch_start,gdict['world_rank']))
            # Save Metrics to file after each epoch
            metrics_df.to_pickle(save_dir+'/df_metrics.pkle')
    logging.info("best chis: {0}, {1}".format(best_chi1,best_chi2))
# -
# ## Start
# +
### Setup modules ###
def f_manual_add_argparse():
    """Build a fixed argument namespace for interactive (Jupyter) runs.

    Mirrors the CLI options of f_parse_args so the rest of the script can
    consume identical attributes regardless of how it was launched.
    """
    ns = argparse.Namespace(
        config='config_2dgan.yaml',
        mode='fresh',
        ip_fldr='',
        local_rank=0,
    )
    # To resume a checkpointed run from a notebook, set instead:
    #   ns.mode = 'continue'
    #   ns.ip_fldr = '<path to the previous run folder>'
    return ns
def f_parse_args():
    """Parse command line arguments. Only for .py file (scripts, not notebooks).

    Returns an argparse.Namespace with: config, mode, ip_fldr, local_rank.
    """
    parser = argparse.ArgumentParser(description="Run script to train GAN using pytorch",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_arg = parser.add_argument
    # BUGFIX: the help text for --config was a copy-paste of --mode's help.
    add_arg('--config', '-cfile', type=str, default='config_2dgan.yaml',
            help='Path to the YAML configuration file')
    add_arg('--mode', '-m', type=str, choices=['fresh', 'continue'], default='fresh',
            help='Whether to start fresh run or continue previous run')
    add_arg('--ip_fldr', '-ip', type=str, default='',
            help='The input folder for resuming a checkpointed run')
    add_arg("--local_rank", default=0, type=int,
            help='Local rank of GPU on node. Using for pytorch DDP. ')
    return parser.parse_args()
def try_barrier(rank):
    """Attempt a distributed barrier but ignore any failure.

    Used in Distributed Data Parallel to loosely synchronize ranks; if the
    process group is not (or not yet) initialized, the error is swallowed so
    single-process runs keep working.

    Parameters
    ----------
    rank : int
        World rank of this process (only used for the trace print).
    """
    import torch.distributed as dist  # local import keeps the helper self-contained
    print('BAR %d' % rank)
    try:
        dist.barrier()
    except Exception:
        # BUGFIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary errors should be ignored here.
        pass
def f_init_gdict(args, gdict):
    """Populate the global run dictionary from the YAML config plus CLI args.

    Note: the incoming ``gdict`` argument is discarded and replaced by the
    'parameters' section of the config file; callers must use the return value.
    """
    # Read the configuration file named on the command line.
    with open(args.config) as fh:
        cfg = yaml.load(fh, Loader=yaml.SafeLoader)
    gdict = cfg['parameters']
    # Carry selected command-line options into the run dictionary.
    arg_map = vars(args)
    for key in ['mode', 'config', 'ip_fldr']:
        gdict[key] = arg_map[key]
    # Gradient-penalty loss is incompatible with pytorch DDP in this code path.
    if gdict['distributed']:
        assert not gdict['lambda_gp'], "GP couplings is %s. Cannot use Gradient penalty loss in pytorch DDP"%(gdict['lambda_gp'])
    return gdict
def f_sample_data(ip_tensor, rank=0, num_ranks=1, batch_size=None):
    '''
    Module to load part of dataset depending on world_rank.

    Parameters
    ----------
    ip_tensor : torch.Tensor
        Full dataset; the first dimension indexes samples.
    rank : int
        World rank of this process; selects which contiguous shard to load.
    num_ranks : int
        Total number of ranks; the data is split into this many equal shards
        (remainder samples at the end are dropped).
    batch_size : int, optional
        Batch size for the DataLoader. Defaults to the global
        ``gdict['batch_size']`` for backward compatibility with existing callers.

    Raises SystemExit if the batch size exceeds the per-rank shard size.
    '''
    if batch_size is None:
        batch_size = gdict['batch_size']
    data_size = ip_tensor.shape[0]
    size = data_size // num_ranks
    print("Using data indices %s-%s for rank %s" % (rank * (size), (rank + 1) * size, rank))
    dataset = TensorDataset(ip_tensor[rank * (size):(rank + 1) * size])
    ###
    if batch_size > size:
        # BUGFIX: the original format string had one placeholder for two values,
        # raising TypeError, and the message said "less than" backwards.
        print("Caution: batchsize %s is greater than samples per GPU %s." % (batch_size, size))
        raise SystemExit
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)
    return data_loader
def f_load_data_precompute(gdict):
    """Load the training images, shard them across ranks, and precompute the
    validation statistics (power-spectrum mean/std and pixel histogram) used
    by the spectral and histogram loss terms.

    Returns: (data_loader, mean_spec_val, sdev_spec_val, hist_val, r, ind)
    """
    #################################
    ####### Read data and precompute ######
    # mmap + copy: materialize only the first num_imgs samples from disk.
    # NOTE(review): transpose(0,1,2,3) is an identity permutation -- presumably a
    # placeholder for other data layouts; confirm before removing.
    img=np.load(gdict['ip_fname'],mmap_mode='r')[:gdict['num_imgs']].transpose(0,1,2,3).copy()
    t_img=torch.from_numpy(img)
    print("%s, %s"%(img.shape,t_img.shape))
    #     dataset=TensorDataset(t_img)
    #     data_loader=DataLoader(dataset,batch_size=gdict['batch_size'],shuffle=True,num_workers=0,drop_last=True)
    # Each rank gets its own contiguous shard of the data.
    data_loader=f_sample_data(t_img,gdict['world_rank'],gdict['world_size'])
    print("Size of dataset for GPU %s : %s"%(gdict['world_rank'],len(data_loader.dataset)))
    # Precompute metrics with validation data for computing losses
    with torch.no_grad():
        # The last 3000 samples of the input file serve as the validation set.
        val_img=np.load(gdict['ip_fname'],mmap_mode='r')[-3000:].transpose(0,1,2,3).copy()
        t_val_img=torch.from_numpy(val_img).to(gdict['device'])
        # Precompute radial coordinates
        r,ind=f_get_rad(img)
        r=r.to(gdict['device']); ind=ind.to(gdict['device'])
        # Stored mean and std of spectrum for full input data once
        mean_spec_val,sdev_spec_val=f_torch_image_spectrum(f_invtransform(t_val_img),1,r,ind)
        hist_val=f_compute_hist(t_val_img,bins=gdict['bns'])
        # Free the large arrays immediately; only the summary statistics are kept.
        del val_img; del t_val_img; del img; del t_img
    return data_loader,mean_spec_val,sdev_spec_val,hist_val,r,ind
def f_init_GAN(gdict,print_model=False):
    """Build generator/discriminator, wrap for multi-GPU, and create optimizers.

    For mode=='fresh' training starts from scratch; for mode=='continue' the
    latest checkpoint is loaded into the freshly built networks and optimizers.
    Adds best_chi1/best_chi2/iters/start_epoch to gdict as a side effect.

    Returns: (netG, netD, criterion, optimizerD, optimizerG)
    """
    # Define Models
    logging.info("Building GAN networks")
    # Create Generator
    netG = Generator(gdict).to(gdict['device'])
    netG.apply(weights_init)
    # Create Discriminator
    netD = Discriminator(gdict).to(gdict['device'])
    netD.apply(weights_init)
    if print_model:
        if gdict['world_rank']==0:
            print(netG)
            # summary(netG,(1,1,64))
            print(netD)
            # summary(netD,(1,128,128))
            print("Number of GPUs used %s"%(gdict['ngpu']))
    if (gdict['multi-gpu']):
        if not gdict['distributed']:
            netG = nn.DataParallel(netG, list(range(gdict['ngpu'])))
            netD = nn.DataParallel(netD, list(range(gdict['ngpu'])))
        else:
            netG=DistributedDataParallel(netG,device_ids=[gdict['local_rank']],output_device=[gdict['local_rank']])
            netD=DistributedDataParallel(netD,device_ids=[gdict['local_rank']],output_device=[gdict['local_rank']])
    #### Initialize networks ####
    # criterion = nn.BCELoss()
    # Logits-based loss: the discriminator's last layer has no sigmoid.
    criterion = nn.BCEWithLogitsLoss()
    # BUGFIX: the optimizers were created only in the 'fresh' branch, so
    # mode=='continue' hit an UnboundLocalError when passing them to
    # f_load_checkpoint. They must exist in both modes.
    optimizerD = optim.Adam(netD.parameters(), lr=gdict['learn_rate'], betas=(gdict['beta1'], 0.999),eps=1e-7)
    optimizerG = optim.Adam(netG.parameters(), lr=gdict['learn_rate'], betas=(gdict['beta1'], 0.999),eps=1e-7)
    if gdict['mode']=='fresh':
        ### Initialize variables
        iters,start_epoch,best_chi1,best_chi2=0,0,1e10,1e10
    ### Load network weights for continuing run
    elif gdict['mode']=='continue':
        iters,start_epoch,best_chi1,best_chi2=f_load_checkpoint(gdict['save_dir']+'/models/checkpoint_last.tar',netG,netD,optimizerG,optimizerD,gdict)
        logging.info("Continuing existing run. Loading checkpoint with epoch {0} and step {1}".format(start_epoch,iters))
        start_epoch+=1 ## Start with the next epoch
    ## Add to gdict
    for key,val in zip(['best_chi1','best_chi2','iters','start_epoch'],[best_chi1,best_chi2,iters,start_epoch]): gdict[key]=val
    return netG,netD,criterion,optimizerD,optimizerG
def f_setup(gdict,log):
    '''
    Set up directories, Initialize random seeds, add GPU info, add logging info.

    gdict is mutated in place (device, ranks, save_dir, ...). When log is True,
    logging is routed to both stdout and a file inside the run directory.
    NOTE(review): also reads the module-level ``args`` (local_rank, ip_fldr)
    defined by the main script, rather than taking it as a parameter.
    '''
    torch.backends.cudnn.benchmark=True
    #     torch.autograd.set_detect_anomaly(True)
    ## Special declarations
    gdict['ngpu']=torch.cuda.device_count()
    gdict['device']=torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
    gdict['multi-gpu']=True if (gdict['device'].type == 'cuda') and (gdict['ngpu'] > 1) else False
    ########################
    ###### Set up Distributed Data parallel ######
    if gdict['distributed']:
        gdict['local_rank']=args.local_rank
        gdict['world_size']=int(os.environ['WORLD_SIZE'])
        print(args.local_rank,torch.cuda.device_count(),gdict['world_size'])
        #         raise SystemExit
        torch.cuda.set_device(args.local_rank) ## Very important
        dist.init_process_group(backend='nccl', init_method="env://")
        gdict['world_rank']= dist.get_rank()
        print("World size %s, world rank %s, local rank %s, hostname %s\n"%(gdict['world_size'],gdict['world_rank'],gdict['local_rank'],socket.gethostname()))
        device = torch.cuda.current_device()  # NOTE(review): assigned but never used
        # Divide batch size by number of GPUs
        #         gdict['batch_size']=gdict['batch_size']//gdict['world_size']
    else:
        gdict['world_size'],gdict['world_rank'],gdict['local_rank']=1,0,0
    ########################
    ###### Set up directories #######
    ### sync up so that time is the same for each GPU for DDP
    if gdict['mode']=='fresh':
        ### Create prefix for foldername
        if gdict['world_rank']==0: ### For rank=0, create directory name string and make directories
            dt_strg=datetime.now().strftime('%Y%m%d_%H%M%S') ## time format
            dt_lst=[int(i) for i in dt_strg.split('_')] # List storing day and time
            dt_tnsr=torch.Tensor(dt_lst).long().to(gdict['device']) ## Create list to pass to other GPUs
        else: dt_tnsr=torch.Tensor([0,0]).long().to(gdict['device'])
        ### Pass directory name to other ranks
        # Rank 0's timestamp is broadcast so every rank builds the same save_dir.
        if gdict['distributed']: dist.broadcast(dt_tnsr, src=0)
        gdict['save_dir']=gdict['op_loc']+str(int(dt_tnsr[0]))+'_'+str(int(dt_tnsr[1]))+'_'+gdict['run_suffix']
        if gdict['world_rank']==0: # Create directories for rank 0
            ### Create directories
            if not os.path.exists(gdict['save_dir']):
                os.makedirs(gdict['save_dir']+'/models')
                os.makedirs(gdict['save_dir']+'/images')
                shutil.copy(gdict['config'],gdict['save_dir'])
    elif gdict['mode']=='continue': ## For checkpointed runs
        gdict['save_dir']=args.ip_fldr
        ### Read loss data
        # NOTE(review): metrics_dict is loaded here but never used or returned --
        # confirm whether the training loop was meant to resume from it.
        with open (gdict['save_dir']+'df_metrics.pkle','rb') as f:
            metrics_dict=pickle.load(f)
    ########################
    ### Initialize random seed
    manualSeed = np.random.randint(1, 10000) if gdict['seed']=='random' else int(gdict['seed'])
    #     print("Seed",manualSeed,gdict['world_rank'])
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    torch.cuda.manual_seed_all(manualSeed)
    if gdict['deterministic']:
        logging.info("Running with deterministic sequence. Performance will be slower")
        torch.backends.cudnn.deterministic=True
        #         torch.backends.cudnn.enabled = False
        torch.backends.cudnn.benchmark = False
    ########################
    if log:
        ### Write all logging.info statements to stdout and log file
        logfile=gdict['save_dir']+'/log.log'
        if gdict['world_rank']==0:
            logging.basicConfig(level=logging.DEBUG, filename=logfile, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
            Lg = logging.getLogger()
            Lg.setLevel(logging.DEBUG)
            lg_handler_file = logging.FileHandler(logfile)
            lg_handler_stdout = logging.StreamHandler(sys.stdout)
            Lg.addHandler(lg_handler_file)
            Lg.addHandler(lg_handler_stdout)
            logging.info('Args: {0}'.format(args))
            logging.info('Start: %s'%(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        if gdict['distributed']: try_barrier(gdict['world_rank'])
        # Non-zero ranks log to the same file (no stdout handler).
        if gdict['world_rank']!=0:
            logging.basicConfig(level=logging.DEBUG, filename=logfile, filemode="a+", format="%(asctime)-15s %(levelname)-8s %(message)s")
# -
# ## Main
# +
#########################
### Main code #######
#########################
if __name__=="__main__":
    # Toggle for interactive use: True takes arguments from
    # f_manual_add_argparse() instead of the command line.
    #     jpt=False
    jpt=True ##(different for jupyter notebook)
    t0=time.time()
    args=f_parse_args() if not jpt else f_manual_add_argparse()
    #################################
    ### Set up global dictionary###
    gdict={}
    gdict=f_init_gdict(args,gdict)
    #     gdict['num_imgs']=1200
    if jpt: ## override for jpt nbks
        gdict['num_imgs']=4000
        gdict['run_suffix']='nb_test'
    # Directories, seeds, DDP and logging (file logging disabled in notebooks).
    f_setup(gdict,log=(not jpt))
    ## Build GAN
    netG,netD,criterion,optimizerD,optimizerG=f_init_GAN(gdict,print_model=True)
    # Fixed latent vectors, reused at every checkpoint to visualize G's progress.
    fixed_noise = torch.randn(gdict['batch_size'], 1, 1, gdict['nz'], device=gdict['device']) #Latent vectors to view G progress
    if gdict['distributed']: try_barrier(gdict['world_rank'])
    ## Load data and precompute
    dataloader,mean_spec_val,sdev_spec_val,hist_val,r,ind=f_load_data_precompute(gdict)
    #################################
    ########## Train loop and save metrics and images ######
    ### Set up metrics dataframe
    cols=['step','epoch','Dreal','Dfake','Dfull','G_adv','G_full','spec_loss','hist_loss','spec_chi','hist_chi','gp_loss','fm_loss','D(x)','D_G_z1','D_G_z2','time']
    metrics_df=pd.DataFrame(columns=cols)
    if gdict['distributed']: try_barrier(gdict['world_rank'])
    logging.info("Starting Training Loop...")
    f_train_loop(dataloader,metrics_df,gdict,fixed_noise,mean_spec_val,sdev_spec_val,hist_val,r,ind)
    if gdict['world_rank']==0: ## Generate images for best saved models ######
        op_loc=gdict['save_dir']+'/images/'
        ip_fname=gdict['save_dir']+'/models/checkpoint_best_spec.tar'
        f_gen_images(gdict,netG,optimizerG,ip_fname,op_loc,op_strg='best_spec',op_size=200)
        ip_fname=gdict['save_dir']+'/models/checkpoint_best_hist.tar'
        f_gen_images(gdict,netG,optimizerG,ip_fname,op_loc,op_strg='best_hist',op_size=200)
    tf=time.time()
    logging.info("Total time %s"%(tf-t0))
    logging.info('End: %s'%(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
# -
# metrics_df.plot('step','time')
metrics_df
# +
# gdict
# -
# ### Debug
# Feature matching loss
# +
# class Generator(nn.Module):
# def __init__(self, gdict):
# super(Generator, self).__init__()
# ## Define new variables from dict
# keys=['ngpu','nz','nc','ngf','kernel_size','stride','g_padding']
# ngpu, nz,nc,ngf,kernel_size,stride,g_padding=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
# self.main = nn.Sequential(
# # nn.ConvTranspose2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode)
# nn.Linear(nz,nc*ngf*8*8*8),# 32768
# nn.BatchNorm2d(nc,eps=1e-05, momentum=0.9, affine=True),
# nn.ReLU(inplace=True),
# View(shape=[-1,ngf*8,8,8]),
# nn.ConvTranspose2d(ngf * 8, ngf * 4, kernel_size, stride, g_padding, output_padding=1, bias=False),
# nn.BatchNorm2d(ngf*4,eps=1e-05, momentum=0.9, affine=True),
# nn.ReLU(inplace=True),
# # state size. (ngf*4) x 8 x 8
# nn.ConvTranspose2d( ngf * 4, ngf * 2, kernel_size, stride, g_padding, 1, bias=False),
# nn.BatchNorm2d(ngf*2,eps=1e-05, momentum=0.9, affine=True),
# nn.ReLU(inplace=True),
# # state size. (ngf*2) x 16 x 16
# nn.ConvTranspose2d( ngf * 2, ngf, kernel_size, stride, g_padding, 1, bias=False),
# nn.BatchNorm2d(ngf,eps=1e-05, momentum=0.9, affine=True),
# nn.ReLU(inplace=True),
# # state size. (ngf) x 32 x 32
# nn.ConvTranspose2d( ngf, nc, kernel_size, stride,g_padding, 1, bias=False),
# nn.Tanh()
# )
# def forward(self, ip):
# return self.main(ip)
# class Discriminator(nn.Module):
# def __init__(self, gdict):
# super(Discriminator, self).__init__()
# ## Define new variables from dict
# keys=['ngpu','nz','nc','ndf','kernel_size','stride','d_padding']
# ngpu, nz,nc,ndf,kernel_size,stride,d_padding=list(collections.OrderedDict({key:gdict[key] for key in keys}).values())
# self.main = nn.Sequential(
# # input is (nc) x 64 x 64
# # nn.Conv2d(in_channels, out_channels, kernel_size,stride,padding,output_padding,groups,bias, Dilation,padding_mode)
# nn.Conv2d(nc, ndf,kernel_size, stride, d_padding, bias=True),
# nn.BatchNorm2d(ndf,eps=1e-05, momentum=0.9, affine=True),
# nn.LeakyReLU(0.2, inplace=True),
# # state size. (ndf) x 32 x 32
# nn.Conv2d(ndf, ndf * 2, kernel_size, stride, d_padding, bias=True),
# nn.BatchNorm2d(ndf * 2,eps=1e-05, momentum=0.9, affine=True),
# nn.LeakyReLU(0.2, inplace=True),
# # state size. (ndf*2) x 16 x 16
# nn.Conv2d(ndf * 2, ndf * 4, kernel_size, stride, d_padding, bias=True),
# nn.BatchNorm2d(ndf * 4,eps=1e-05, momentum=0.9, affine=True),
# nn.LeakyReLU(0.2, inplace=True),
# # state size. (ndf*4) x 8 x 8
# nn.Conv2d(ndf * 4, ndf * 8, kernel_size, stride, d_padding, bias=True),
# nn.BatchNorm2d(ndf * 8,eps=1e-05, momentum=0.9, affine=True),
# nn.LeakyReLU(0.2, inplace=True),
# # state size. (ndf*8) x 4 x 4
# nn.Flatten(),
# nn.Linear(nc*ndf*8*8*8, 1)
# # nn.Sigmoid()
# )
# def forward(self, ip):
# # print(ip.shape)
# results=[ip]
# lst_idx=[]
# for i,submodel in enumerate(self.main.children()):
# mid_output=submodel(results[-1])
# results.append(mid_output)
# ## Select indices in list corresponding to output of Conv layers
# if submodel.__class__.__name__.startswith('Conv'):
# # print(submodel.__class__.__name__)
# # print(mid_output.shape)
# lst_idx.append(i)
# FMloss=True
# if FMloss:
# ans=[results[1:][i] for i in lst_idx + [-1]]
# else :
# ans=results[-1]
# return ans
# +
# netG = Generator(gdict).to(gdict['device'])
# netG.apply(weights_init)
# # # # print(netG)
# # summary(netG,(1,1,64))
# # Create Discriminator
# netD = Discriminator(gdict).to(gdict['device'])
# netD.apply(weights_init)
# # print(netD)
# summary(netD,(1,128,128))
# +
# noise = torch.randn(gdict['batchsize'], 1, 1, gdict['nz'], device=gdict['device'])
# fake = netG(noise)
# # Forward pass real batch through D
# output = netD(fake)
# print([i.shape for i in output])
# +
####### Read data and precompute ######
# Debug cell: load the full training tensor and build a single (unsharded) DataLoader.
img=np.load(gdict['ip_fname'],mmap_mode='r')[:gdict['num_imgs']].transpose(0,1,2,3).copy()
t_img=torch.from_numpy(img)
print("%s, %s"%(img.shape,t_img.shape))
dataset=TensorDataset(t_img)
data_loader=DataLoader(dataset,batch_size=gdict['batch_size'],shuffle=True,num_workers=0,drop_last=True)
# -
len(data_loader.dataset)
# +
# Debug re-definition: shadows the f_sample_data defined earlier in this file
# (no batch-size guard here).
def f_sample_data(ip_tensor,rank=0,num_ranks=1):
    # Shard the dataset: each rank gets a contiguous slice of size data_size//num_ranks.
    data_size=ip_tensor.shape[0]
    size=data_size//num_ranks
    print(size)
    print(rank*(size),(rank+1)*size)
    dataset=TensorDataset(ip_tensor[rank*(size):(rank+1)*size])
    # NOTE(review): relies on the module-level gdict for batch_size.
    data_loader=DataLoader(dataset,batch_size=gdict['batch_size'],shuffle=True,num_workers=0,drop_last=True)
    print(len(data_loader.dataset))
    return data_loader
# Smoke test: shard 4 of 5.
f_sample_data(t_img,3,5)
# -
# Inspect the training DataLoader built in the main section.
dir(dataloader)
dataloader.dataset.tensors[0].shape
for i in dataloader:
    print(i[0].shape)
|
code/1_basic_GAN/1_main_code/DDP_new_loss/working_DDP_without_additional_losses/oldway_sc2020script/train_DDP_new_loss_.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Face and Facial Keypoint detection
#
# After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.
#
# 1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
# 2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
# 3. Use your trained model to detect facial keypoints on the image.
#
# ---
# In the next python cell we load in required libraries for this section of the project.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import torch as tch
# %matplotlib inline
# #### Select an image
#
# Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
# +
import cv2
# load in color image for face detection
# NOTE(review): cv2.imread returns None if the path is missing; cvtColor below
# would then raise -- consider checking the path.
image = cv2.imread('images/B1079047_Oct-18-2019_02-49-38.jpeg')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
# ## Detect all faces in an image
#
# Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
#
# In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
#
# An example of face detection on a variety of images is shown below.
#
# <img src='images/haar_cascade_ex.png' width=80% height=80%/>
#
# +
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
# NOTE(review): positionally these are (image, scaleFactor=1.2, minNeighbors=2,
# flags=5); passing 5 as `flags` looks accidental -- confirm intended parameters.
faces = face_cascade.detectMultiScale(image, 1.2, 2,5)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
    # draw a rectangle around each detected face
    # you may also need to change the width of the rectangle drawn depending on image resolution
    cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
# -
# ## Loading in a trained model
#
# Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
#
# First, load your best model by its filename.
# +
# ## TODO: define the convolutional neural network architecture
# import torch
# from torch.autograd import Variable
# import torch.nn as nn
# import torch.nn.functional as F
# # can use the below import should you choose to initialize the weights of your Net
# import torch.nn.init as I
# from collections import OrderedDict
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# ## TODO: Define all the layers of this CNN, the only requirements are:
# ## 1. This network takes in a square (same width and height), grayscale image as input
# ## 2. It ends with a linear layer that represents the keypoints
# ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# # As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
# #Conv layer
# self.feature = nn.Sequential(OrderedDict([#('batch_norm1', nn.BatchNorm2d(1)),
# ('conv1_0', nn.Conv2d(1, 32, 5)),
# ('relu1_0', nn.ReLU()),
# #('avg1_0', nn.MaxPool2d((3,3), stride=3)),
# #('batch_norm2', nn.BatchNorm2d(32)),
# ('conv1_1', nn.Conv2d(32, 64, 5)),
# ('relu1_1', nn.ReLU()),
# ('batch_norm3', nn.BatchNorm2d(64)),
# ('avg1_1', nn.MaxPool2d((3,3), stride=3)),
# ('drop0', nn.Dropout2d(0.5)),
# ('conv1_2', nn.Conv2d(64, 150, 5)),
# ('relu1_2', nn.ReLU()),
# #('batch_norm4', nn.BatchNorm2d(150)),
# #('maxp1_2', nn.MaxPool2d((2,2), stride=2)),
# ('conv2_0', nn.Conv2d(150, 400, 3)),
# ('relu2_0', nn.ReLU()),
# ('batch_norm5', nn.BatchNorm2d(400)),
# ('avg2_0', nn.MaxPool2d((3,3), stride=3)),
# ('conv2_1', nn.Conv2d(400, 1000, 3)),
# ('relu2_1', nn.ReLU()),
# # ('avg2_1', nn.MaxPool2d((2,2), stride=1)),
# # ('batch_norm6', nn.BatchNorm2d(140)),
# ('drop1', nn.Dropout2d(0.25)),
# ('conv2_2', nn.Conv2d(1000, 300, 2)),
# ('relu2_2', nn.ReLU()),
# ('avg2_2', nn.MaxPool2d((3,3), stride=3))
# ]))
# self.fc = nn.Sequential(OrderedDict([('fc1', nn.Linear(10800, 136))]))
# ## Note that among the layers to add, consider including:
# # maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
# def forward(self, x):
# ## TODO: Define the feedforward behavior of this model
# ## x is the input image and, as an example, here you may choose to include a pool/conv step:
# # first feature pooling
# x = self.fc(self.feature(x).view((-1, 10800)))
# return x
# +
# ## TODO: load the best saved model parameters (by your path name)
# ## You'll need to un-comment the line below and add the correct name for *your* saved model
# ## TODO: define the convolutional neural network architecture
# import torch
# from torch.autograd import Variable
# import torch.nn as nn
# import torch.nn.functional as F
# # can use the below import should you choose to initialize the weights of your Net
# import torch.nn.init as I
# from collections import OrderedDict
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# ## TODO: Define all the layers of this CNN, the only requirements are:
# ## 1. This network takes in a square (same width and height), grayscale image as input
# ## 2. It ends with a linear layer that represents the keypoints
# ## it's suggested that you make this last layer output 136 values, 2 for each of the 68 keypoint (x, y) pairs
# # As an example, you've been given a convolutional layer, which you may (but don't have to) change:
# # 1 input image channel (grayscale), 32 output channels/feature maps, 5x5 square convolution kernel
# #Conv layer
# self.feature1 = nn.Sequential(OrderedDict([('batch_norm1', nn.BatchNorm2d(1)),
# ('conv1_0', nn.Conv2d(1, 32, 5)),
# ('relu1_0', nn.ReLU()),
# ('batch_norm2', nn.BatchNorm2d(32)),
# ('avg1_0', nn.MaxPool2d((3,3), stride=3)),
# ('conv1_1', nn.Conv2d(32, 64, 5)),
# ('relu1_1', nn.ReLU()),
# ('batch_norm3', nn.BatchNorm2d(64)),
# ('avg1_1', nn.MaxPool2d((2,2), stride=2)),
# ('conv1_2', nn.Conv2d(64, 150, 5)),
# ('relu1_2', nn.ReLU()),
# ('batch_norm4', nn.BatchNorm2d(150)),
# ('maxp1_2', nn.MaxPool2d((2,2), stride=2)),
# ('conv2_0', nn.Conv2d(150, 148, 3)),
# ('relu2_0', nn.ReLU()),
# ('batch_norm5', nn.BatchNorm2d(148)),
# ('avg2_0', nn.MaxPool2d((3,3), stride=3)),
# ('conv2_1', nn.Conv2d(148, 140, 3)),
# ('relu2_1', nn.ReLU()),
# #('avg2_1', nn.MaxPool2d((2,2), stride=1)),
# ('batch_norm6', nn.BatchNorm2d(140)),
# ('conv2_2', nn.Conv2d(140, 136, 2)),
# ]))
# ## Note that among the layers to add, consider including:
# # maxpooling layers, multiple conv layers, fully-connected layers, and other layers (such as dropout or batch normalization) to avoid overfitting
# def forward(self, x):
# ## TODO: Define the feedforward behavior of this model
# ## x is the input image and, as an example, here you may choose to include a pool/conv step:
# # first feature pooling
# x = self.feature1(x).view(x.shape[0],-1)
# return x
# NOTE(review): `Net` is imported but unused below -- presumably needed so
# torch.load can unpickle the stored architecture; confirm before removing.
from models import Net
import torch
# Checkpoint stores both the architecture ('arc') and its weights ('state_dict').
model = torch.load('./saved_models/keypoints_model_frt_tst10.pth')
net = model['arc']
net.load_state_dict(model['state_dict'])
## print out your net and prepare it for testing (uncomment the line below)
# Inference mode: disables dropout and freezes batch-norm statistics.
net.eval()
# -
# ## Keypoint detection
#
# Now, we'll loop over each detected face in an image (again!) only this time, you'll transform those faces in Tensors that your CNN can accept as input images.
#
# ### TODO: Transform each detected face into an input Tensor
#
# You'll need to perform the following steps for each detected face:
# 1. Convert the face from RGB to grayscale
# 2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
# 3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
# 4. Reshape the numpy image into a torch image.
#
# You may find it useful to consult to transformation code in `data_load.py` to help you perform these processing steps.
#
#
# ### TODO: Detect and display the predicted keypoints
#
# After each face has been appropriately converted into an input Tensor for your network to see as input, you'll wrap that Tensor in a Variable() and can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following with facial keypoints that closely match the facial features on each individual face:
#
# <img src='images/michelle_detected.png' width=30% height=30%/>
#
#
#
# +
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
#for i,(x,y,w,h) in enumerate(faces):
for i in range(1):
# Select the region of interest that is the face in the image
#roi = image_copy[y:y+h, x:x+w]
roi = image_copy
roi = cv2.resize(roi, (224,224))
## TODO: Convert the face region from RGB to grayscale
roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
roi_gray = roi_gray/255
## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
roi_gry_res = cv2.resize(roi_gray, (224, 224))
## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
roi_gry_res_tch = tch.from_numpy(roi_gry_res.reshape((224,224,1,1)).transpose((3,2,0,1)))
img = roi_gry_res_tch.type(torch.FloatTensor)
## TODO: Make facial keypoint predictions using your loaded, trained network
## perform a forward pass to get the predicted facial keypoints
output_pts = net(img)
# reshape to batch_size x 68 x 2 pts
fac_key = output_pts.view(output_pts.size()[0], 68, -1)[0].data.numpy()*50+100
#y = lambda x: ( (x-fac_key.min()) * ( (0 - 250) / (fac_key.min() - fac_key.max()) + 0) )
#fac_key = y(fac_key)
image2 = np.squeeze(np.copy(img[0].data.numpy().transpose(1,2,0)))
## TODO: Display each detected face and the corresponding keypoints
fig = plt.figure(figsize=(6,6))
ax = plt.subplot(1, 2, i+1)
plt.tight_layout()
ax.imshow(roi,cmap='gray')
ax.scatter(fac_key[:,0], fac_key[:,1], s=20, marker='.', c='m')
#plt.imshow(roi_gry_res, cmap='gray')
# -
len(faces)
|
3. Facial Keypoint Detection, Complete Pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 达观杯2021
# +
import pandas as pd
from sklearn.model_selection import train_test_split
# %cd ../../
# -
# ### 加载数据集,并切分train/dev
# +
# Load the train/dev/test splits (the 'text' column holds space-separated token ids).
df_train = pd.read_csv("./datasets/phase_1/splits/fold_0/train.txt")
df_train.columns = ["id", "text", "label"]
df_val = pd.read_csv("./datasets/phase_1/splits/fold_0/dev.txt")
df_val.columns = ["id", "text", "label"]
df_test = pd.read_csv("./datasets/phase_1/splits/fold_0/test.txt")
df_test.columns = ["id", "text", ]
# Build the vocabulary from the training texts only.
charset = set()
for text in df_train['text']:
    for char in text.split(" "):
        charset.add(char)
# Index 0 is reserved for OOV tokens; punctuation is pinned to low indices.
# NOTE(review): if these punctuation tokens also appear in charset they occur
# twice in id2char, and char2id maps them to the later index -- confirm intent.
id2char = ['OOV', ',', '。', '!', '?'] + list(charset)
char2id = {id2char[i]: i for i in range(len(id2char))}
# Label set (ids assigned in first-seen order).
id2label = list(df_train['label'].unique())
label2id = {id2label[i]: i for i in range(len(id2label))}
# -
# ### 定义模型
# +
# Define the model: Embedding -> BiLSTM -> Flatten -> softmax classifier.
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
MAX_LEN = 128
input_layer = Input(shape=(MAX_LEN,))
layer = Embedding(input_dim=len(id2char), output_dim=128)(input_layer)
layer = Bidirectional(LSTM(128, return_sequences=True))(layer)
layer = Flatten()(layer) # [*, 128, 256] --> [*, 128 * 256]
output_layer = Dense(len(id2label), activation='softmax')(layer)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
# Targets are one-hot encoded below, hence categorical (not sparse) crossentropy.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# -
# ### 准备输入数据
#
# 对训练集、验证集、测试集进行输入转换,构造模型输入。
# +
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
# Convert texts to token-id sequences and labels to one-hot vectors.
X_train, X_val, X_test = [], [], []
y_train = np.zeros((len(df_train), len(id2label)), dtype=np.int8)
y_val = np.zeros((len(df_val), len(id2label)), dtype=np.int8)
for i in range(len(df_train)):
    # Training tokens are all in the vocabulary by construction (no OOV fallback).
    X_train.append([char2id[char] for char in df_train.loc[i, 'text'].split(" ")])
    y_train[i][label2id[df_train.loc[i, 'label']]] = 1
for i in range(len(df_val)):
    # Unknown tokens map to id 0 ('OOV').
    X_val.append([char2id[char] if char in char2id else 0 for char in df_val.loc[i, 'text'].split(" ")])
    y_val[i][label2id[df_val.loc[i, 'label']]] = 1
for i in range(len(df_test)):
    X_test.append([char2id[char] if char in char2id else 0 for char in df_test.loc[i, 'text'].split(" ")])
# Pad/truncate every sequence to a fixed MAX_LEN at the tail.
X_train = pad_sequences(X_train, maxlen=MAX_LEN, padding='post', truncating='post')
X_val = pad_sequences(X_val, maxlen=MAX_LEN, padding='post', truncating='post')
X_test = pad_sequences(X_test, maxlen=MAX_LEN, padding='post', truncating='post')
# -
# ### Model training
# Single epoch with a small batch size (baseline run).
model.fit(x=X_train, y=y_train, validation_data=(X_val, y_val), epochs=1, batch_size=4)
# +
# Evaluate on the dev split: predicted class ids vs. ground-truth ids.
y_val_pred = model.predict(X_val).argmax(axis=-1)
print(y_val_pred[: 20])
# Rebuild the ground-truth label ids for the validation set.
y_val = []
for i in range(len(df_val)):
    y_val.append(label2id[df_val.loc[i, 'label']])
y_val = [int(w) for w in y_val]
print(y_val[: 20])
from sklearn.metrics import classification_report
results = {}
# BUGFIX: classification_report expects (y_true, y_pred); the arguments were
# swapped, which transposes precision and recall in the report.
classification_report_dict = classification_report(y_val, y_val_pred, output_dict=True)
# Flatten the nested per-class metrics into a single results dict.
for key0, val0 in classification_report_dict.items():
    if isinstance(val0, dict):
        for key1, val1 in val0.items():
            results[key0 + "__" + key1] = val1
    else:
        results[key0] = val0
import json
print(json.dumps(results, indent=2, ensure_ascii=False))
# -
# ### 输出预测结果
# +
# Predict on the test split and write the submission file.
y_pred = model.predict(X_test).argmax(axis=-1)
pred_labels = [id2label[i] for i in y_pred]
pd.DataFrame({"id": df_test['id'], "label": pred_labels}).to_csv("submission.csv", index=False)
# Submission score:
# 0.36730954652
# -
|
src/official_baseline/baseline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Synapse PySpark
# name: synapse_pyspark
# ---
# + microsoft={"language": "python"}
# %%pyspark
# Load the Apple Health export (CSV with a header row) from the data lake.
activity = spark.read.load('datalake_path/ActiveEnergyBurned.csv', format='csv'
## If header exists uncomment line below
, header=True
)
# BUGFIX: the preview referenced an undefined `df`; show the frame just loaded.
display(activity.limit(10))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# view column data types
# (Spark DataFrame.dtypes -> list of (column_name, type_string) tuples)
activity.dtypes
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# functions to extract date/time elements (note: no actual UTC->EST conversion is performed)
# Helpers to pull calendar fields out of a pandas Timestamp.
# Named lambdas replaced with defs (PEP 8 E731).
# NOTE(review): convert_tz applies NO timezone conversion — to_pydatetime()
# keeps the timestamp as-is; confirm whether a UTC->EST shift is needed.
def convert_tz(x):
    """Return *x* as a plain datetime object (no tz conversion applied)."""
    return x.to_pydatetime()

def get_year(x):
    return convert_tz(x).year

def get_month(x):
    # 'YYYY-MM'
    return '{}-{:02}'.format(convert_tz(x).year, convert_tz(x).month)

def get_date(x):
    # 'YYYY-MM-DD'
    return '{}-{:02}-{:02}'.format(convert_tz(x).year, convert_tz(x).month, convert_tz(x).day)

def get_day(x):
    return convert_tz(x).day

def get_hour(x):
    return convert_tz(x).hour

def get_minute(x):
    return convert_tz(x).minute

def get_day_of_week(x):
    # Monday == 0 (datetime.weekday convention)
    return convert_tz(x).weekday()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
from datetime import date, datetime, timedelta as td
import pytz
import numpy as np
import pandas as pd
# parse out date and time elements as EST time
# NOTE(review): `activity` was loaded above as a *Spark* DataFrame; the
# pandas-style column assignment and .map() calls below will fail unless it
# was converted (e.g. via toPandas()) in a cell not shown here — confirm.
activity['startDate'] = pd.to_datetime(activity['startDate'])
activity['year'] = activity['startDate'].map(get_year)
activity['month'] = activity['startDate'].map(get_month)
activity['date'] = activity['startDate'].map(get_date)
activity['day'] = activity['startDate'].map(get_day)
activity['hour'] = activity['startDate'].map(get_hour)
activity['dow'] = activity['startDate'].map(get_day_of_week)
|
SynapseCode/notebooks/apple_health.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This cell is added by sphinx-gallery
# It can be customized to whatever you like
# %matplotlib inline
#
#
# Plugins and Hybrid computation
# ==============================
#
# .. meta::
# :property="og:description": This tutorial introduces the notion of hybrid
# computation by combining several PennyLane device backends to train an algorithm
# containing both photonic and qubit devices.
# :property="og:image": https://pennylane.ai/qml/_images/photon_redirection.png
#
# .. related::
#
# tutorial_qubit_rotation Basic tutorial: qubit rotation
# tutorial_gaussian_transformation Gaussian transformation
#
# *Author: PennyLane dev team. Last updated: 1 Feb 2021.*
#
# This tutorial introduces the notion of hybrid computation by combining several PennyLane
# plugins. We first introduce PennyLane's `Strawberry Fields plugin <https://pennylane-sf.readthedocs.io>`_
# and use it to explore a non-Gaussian photonic circuit. We then combine this photonic circuit with a
# qubit circuit — along with some classical processing — to create and optimize a fully hybrid computation.
# Be sure to read through the introductory `qubit rotation <qubit_rotation>` and
# `Gaussian transformation <gaussian_transformation>` tutorials before attempting this tutorial.
#
# <div class="alert alert-info"><h4>Note</h4><p>To follow along with this tutorial on your own computer, you will require the
# `PennyLane-SF plugin <https://pennylane-sf.readthedocs.io>`_, in order to access the
# `Strawberry Fields <https://strawberryfields.readthedocs.io>`_ Fock backend using
# PennyLane. It can be installed via pip:
#
# .. code-block:: bash
#
# pip install pennylane-sf</p></div>
#
#
# A non-Gaussian circuit
# ----------------------
#
# We first consider a photonic circuit which is similar in spirit to the
# `qubit rotation <qubit_rotation>` circuit:
#
# .. figure:: ../demonstrations/plugins_hybrid/photon_redirection.png
# :align: center
# :width: 30%
# :target: javascript:void(0);
#
# Breaking this down, step-by-step:
#
# 1. **We start the computation with two qumode subsystems**. In PennyLane, we use the
# shorthand 'wires' to refer to quantum subsystems, whether they are qumodes, qubits, or
# any other kind of quantum register.
#
# 2. **Prepare the state** $\left|1,0\right\rangle$. That is, the first wire (wire 0) is prepared
# in a single-photon state, while the second
# wire (wire 1) is prepared in the vacuum state. The former state is non-Gaussian,
# necessitating the use of the ``'strawberryfields.fock'`` backend device.
#
# 3. **Both wires are then incident on a beamsplitter**, with free parameters $\theta$ and $\phi$.
# Here, we have the convention that the beamsplitter transmission amplitude is $t=\cos\theta$,
# and the reflection amplitude is
# $r=e^{i\phi}\sin\theta$. See :doc:`introduction/operations` for a full list of operation conventions.
#
# 4. **Finally, we measure the mean photon number** $\left\langle \hat{n}\right\rangle$ of the second wire, where
#
#    .. math:: \hat{n} = \hat{a}^\dagger \hat{a}
#
# is the number operator, acting on the Fock basis number states, such that $\hat{n}\left|n\right\rangle = n\left|n\right\rangle$.
#
# The aim of this tutorial is to optimize the beamsplitter parameters $(\theta, \phi)$ such
# that the expected photon number of the second wire is **maximized**. Since the beamsplitter
# is a passive optical element that preserves the total photon number, this corresponds to the output
# state $\left|0,1\right\rangle$ — i.e., when the incident photon from the first wire has been
# 'redirected' to the second wire.
#
#
# Exact calculation
# ~~~~~~~~~~~~~~~~~
#
# To compare with later numerical results, we can first consider what happens analytically.
# The initial state of the circuit is $\left|\psi_0\right\rangle=\left|1,0\right\rangle$, and the output state
# of the system is of the form $\left|\psi\right\rangle = a\left|1, 0\right\rangle + b\left|0,1\right\rangle$, where
# $|a|^2+|b|^2=1$. We may thus write the output state as a vector in this
# computational basis, $\left|\psi\right\rangle = \begin{bmatrix}a & b\end{bmatrix}^T$.
#
# The beamsplitter acts on this two-dimensional subspace as follows:
#
# \begin{align}\left|\psi\right\rangle = B(\theta, \phi)\left|1, 0\right\rangle = \begin{bmatrix}
# \cos\theta & -e^{-i\phi}\sin\theta\\
# e^{i\phi}\sin\theta & \cos\theta
# \end{bmatrix}\begin{bmatrix} 1\\ 0\end{bmatrix} = \begin{bmatrix}
# \cos\theta\\
# e^{i\phi} \sin\theta
# \end{bmatrix}\end{align}
#
# Furthermore, the mean photon number of the second wire is
#
# \begin{align}\left\langle{\hat{n}_1}\right\rangle = \langle{\psi}\mid{\hat{n}_1}\mid{\psi}\rangle = |e^{i\phi} \sin\theta|^2
# \langle{0,1}\mid{\hat{n}_1}\mid{0,1}\rangle = \sin^2 \theta.\end{align}
#
# Therefore, we can see that:
#
# 1. $0\leq \left\langle \hat{n}_1\right\rangle\leq 1$: the output of the quantum circuit is
# bound between 0 and 1;
#
# 2. $\frac{\partial}{\partial \phi} \left\langle \hat{n}_1\right\rangle=0$: the output of the
# quantum circuit is independent of the beamsplitter phase $\phi$;
#
# 3. The output of the quantum circuit above is maximised when $\theta=(2m+1)\pi/2$
# for $m\in\mathbb{Z}_0$.
#
# Loading the plugin device
# -------------------------
#
# While PennyLane provides a basic qubit simulator (``'default.qubit'``) and a basic CV
# Gaussian simulator (``'default.gaussian'``), the true power of PennyLane comes from its
# `plugin ecosystem <https://pennylane.ai/plugins.html>`_, allowing quantum computations
# to be run on a variety of quantum simulator and hardware devices.
#
# For this circuit, we will be using the ``'strawberryfields.fock'`` device to construct
# a QNode. This allows the underlying quantum computation to be performed using the
# `Strawberry Fields <https://strawberryfields.readthedocs.io>`_ Fock backend.
#
# As usual, we begin by importing PennyLane and the wrapped version of NumPy provided by PennyLane:
#
import pennylane as qml
from pennylane import numpy as np
# Next, we create a device to run the quantum node. This is easy in PennyLane; as soon as
# the PennyLane-SF plugin is installed, the ``'strawberryfields.fock'`` device can be loaded
# — no additional commands or library imports required.
#
#
dev_fock = qml.device("strawberryfields.fock", wires=2, cutoff_dim=2)
# Compared to the default devices provided with PennyLane, the ``'strawberryfields.fock'``
# device requires the additional keyword argument:
#
# * ``cutoff_dim``: the Fock space truncation used to perform the quantum simulation
#
# <div class="alert alert-info"><h4>Note</h4><p>Devices provided by external plugins may require additional arguments and keyword arguments
# — consult the plugin documentation for more details.</p></div>
#
#
# Constructing the QNode
# ----------------------
#
# Now that we have initialized the device, we can construct our quantum node. Like
# the other tutorials, we use the :mod:`~.pennylane.qnode` decorator
# to convert our quantum function (encoded by the circuit above) into a quantum node
# running on Strawberry Fields.
#
#
@qml.qnode(dev_fock, diff_method="parameter-shift")
def photon_redirection(params):
    """Prepare |1,0>, apply a beamsplitter, return <n> of the second wire."""
    theta, phi = params[0], params[1]
    qml.FockState(1, wires=0)
    qml.Beamsplitter(theta, phi, wires=[0, 1])
    return qml.expval(qml.NumberOperator(1))
# The ``'strawberryfields.fock'`` device supports all CV objects provided by PennyLane;
# see `CV operations <intro_ref_ops_cv>`.
#
#
# Optimization
# ------------
#
# Let's now use one of the built-in PennyLane optimizers in order to
# carry out photon redirection. Since we wish to maximize the mean photon number of
# the second wire, we can define our cost function to minimize the *negative* of the circuit output.
#
#
def cost(params):
    """Negative of the circuit output, so minimizing maximizes <n_1>."""
    expval = photon_redirection(params)
    return -expval
# To begin our optimization, let's choose the following small initial values of
# $\theta$ and $\phi$:
#
#
# Start near (but not exactly at) theta = phi = 0; at exactly zero the
# gradient vanishes and gradient descent could never move off the start.
init_params = np.array([0.01, 0.01])
print(cost(init_params))
# Here, we choose the values of $\theta$ and $\phi$ to be very close to zero;
# this results in $B(\theta,\phi)\approx I$, and the output of the quantum
# circuit will be very close to $\left|1, 0\right\rangle$ — i.e., the circuit leaves the photon in the first mode.
#
# Why don't we choose $\theta=0$ and $\phi=0$?
#
# At this point in the parameter space, $\left\langle \hat{n}_1\right\rangle = 0$, and
# $\frac{d}{d\theta}\left\langle{\hat{n}_1}\right\rangle|_{\theta=0}=2\sin\theta\cos\theta|_{\theta=0}=0$.
# Since the gradient is zero at those initial parameter values, the optimization
# algorithm would never descend from the maximum.
#
# This can also be verified directly using PennyLane:
#
#
# Gradient of the QNode w.r.t. its first argument; evaluated at the
# stationary point (0, 0) this should print zeros.
dphoton_redirection = qml.grad(photon_redirection, argnum=0)
print(dphoton_redirection([0.0, 0.0]))
# Now, let's use the :class:`~.pennylane.GradientDescentOptimizer`, and update the circuit
# parameters over 100 optimization steps.
#
#
# +
# Plain gradient descent with a fairly aggressive step size.
opt = qml.GradientDescentOptimizer(stepsize=0.4)
steps = 100
params = init_params
for step in range(1, steps + 1):
    # one gradient-descent update of the beamsplitter angles
    params = opt.step(cost, params)
    # report progress every 5 steps
    if step % 5 == 0:
        print("Cost after step {:5d}: {: .7f}".format(step, cost(params)))
print("Optimized rotation angles: {}".format(params))
# -
# Comparing this to the `exact calculation <photon_redirection_calc>` above,
# this is close to the optimum value of $\theta=\pi/2$, while the value of
# $\phi$ has not changed — consistent with the fact that $\left\langle \hat{n}_1\right\rangle$
# is independent of $\phi$.
#
#
# Hybrid computation
# ------------------
#
# To really highlight the capabilities of PennyLane, let's now combine the qubit-rotation QNode
# from the `qubit rotation tutorial <qubit_rotation>` with the CV photon-redirection
# QNode from above, as well as some classical processing, to produce a truly hybrid
# computational model.
#
# First, we define a computation consisting of three steps: two quantum nodes (the qubit rotation
# and photon redirection circuits, running on the ``'default.qubit'`` and
# ``'strawberryfields.fock'`` devices, respectively), along with a classical function, that simply
# returns the squared difference of its two inputs using NumPy:
#
#
# +
# create the devices
# Two independent backends: a single-qubit simulator and a two-mode
# photonic Fock simulator (truncated at dimension 10).
dev_qubit = qml.device("default.qubit", wires=1)
dev_fock = qml.device("strawberryfields.fock", wires=2, cutoff_dim=10)


@qml.qnode(dev_qubit)
def qubit_rotation(phi1, phi2):
    """Qubit rotation QNode"""
    qml.RX(phi1, wires=0)
    qml.RY(phi2, wires=0)
    return qml.expval(qml.PauliZ(0))


@qml.qnode(dev_fock, diff_method="parameter-shift")
def photon_redirection(params):
    """The photon redirection QNode"""
    theta, phi = params[0], params[1]
    qml.FockState(1, wires=0)
    qml.Beamsplitter(theta, phi, wires=[0, 1])
    return qml.expval(qml.NumberOperator(1))
def squared_difference(x, y):
    """Classical node: squared distance |x - y|**2 of its two inputs."""
    delta = x - y
    return np.abs(delta) ** 2
# -
# Now, we can define an objective function associated with the optimization, linking together
# our three subcomponents. Here, we wish to
# perform the following hybrid quantum-classical optimization:
#
# .. figure:: ../demonstrations/plugins_hybrid/hybrid_graph.png
# :align: center
# :width: 70%
# :target: javascript:void(0);
#
# 1. The qubit-rotation circuit will contain fixed rotation angles $\phi_1$ and $\phi_2$.
#
# 2. The photon-redirection circuit will contain two free parameters, the beamsplitter angles
# $\theta$ and $\phi$, which are to be optimized.
#
# 3. The outputs of both QNodes will then be fed into the classical node, returning the
# squared difference of the two quantum functions.
#
# 4. Finally, the optimizer will calculate the gradient of the entire computation with
# respect to the free parameters $\theta$ and $\phi$, and update their values.
#
# In essence, we are optimizing the photon-redirection circuit to return the **same expectation value**
# as the qubit-rotation circuit, even though they are two completely independent quantum systems.
#
# We can translate this computational graph to the following function, which combines the three
# nodes into a single hybrid computation. Below, we choose default values
# $\phi_1=0.5$, $\phi_2=0.1$:
#
#
def cost(params, phi1=0.5, phi2=0.1):
    """Squared difference between the qubit-rotation output (at fixed
    angles phi1, phi2) and the photon-redirection output at *params*."""
    target = qubit_rotation(phi1, phi2)
    attempt = photon_redirection(params)
    return squared_difference(target, attempt)
# Now, we use the built-in :class:`~.pennylane.GradientDescentOptimizer` to perform the optimization
# for 100 steps. As before, we choose initial beamsplitter parameters of
# $\theta=0.01$, $\phi=0.01$.
#
#
# +
# Same optimizer settings as the single-circuit optimization.
opt = qml.GradientDescentOptimizer(stepsize=0.4)
steps = 100
params = np.array([0.01, 0.01])
for step in range(1, steps + 1):
    # one gradient-descent update of the free beamsplitter angles
    params = opt.step(cost, params)
    if step % 5 == 0:
        print("Cost after step {:5d}: {: .7f}".format(step, cost(params)))
print("Optimized rotation angles: {}".format(params))
# -
# Substituting this into the photon redirection QNode shows that it now produces
# the same output as the qubit rotation QNode:
#
#
# Evaluate both QNodes at the optimized beamsplitter angles: the photonic
# circuit should now reproduce the qubit circuit's expectation value.
result = [1.20671364, 0.01]
print(photon_redirection(result))
print(qubit_rotation(0.5, 0.1))
# This is just a simple example of the kind of hybrid computation that can be carried
# out in PennyLane. Quantum nodes (bound to different devices) and classical
# functions can be combined in many different and interesting ways.
#
#
|
wip/98_quantum/99_tutorial_plugins_hybrid.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 7098, "status": "ok", "timestamp": 1614263201809, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="Gh_xfCmtHYiL" outputId="b79e324a-fe71-4cb1-d95e-902047f31520"
# ! wget https://github.com/caserec/Datasets-for-Recommneder-Systems/raw/master/Processed%20Datasets/AmazonMusic.tar.xz
# ! tar -xf AmazonMusic.tar.xz
# ! pip install caserecommender
# + executionInfo={"elapsed": 1384, "status": "ok", "timestamp": 1614263256049, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="h7lW8ypWlVDS"
import pandas as pd
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 374} executionInfo={"elapsed": 3116, "status": "ok", "timestamp": 1614263287033, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="QDD2M4iTln4Z" outputId="e9ac2c4c-f953-44f8-809e-ea374b7f653c"
# Load the 5-core Digital Music reviews (one JSON object per line).
dataset = pd.read_json('./AmazonMusic/Digital_Music_5.json', lines=True)
dataset.head()

# Rating distribution.
dataset.overall.value_counts().plot(kind='bar', color=['g', 'c', 'y', 'b', 'r']);

# Keep only the (user, item, rating) triples the recommenders need.
df_recsys = dataset[['reviewerID', 'asin', 'overall']]
df_recsys.head()

# Item metadata (titles and attributes).
dataset_metadata = pd.read_csv('AmazonMusic/amazon_music_metadata.csv')
dataset_metadata.head()

# Attach a human-readable title to each interaction.
df_recsys = df_recsys.merge(dataset_metadata[['asin', 'title']])
df_recsys.head()

# unique users and items
df_recsys.reviewerID.nunique(), df_recsys.asin.nunique()

# Re-index users and items with dense integer ids.
map_users = {user: u_id for u_id, user in enumerate(df_recsys.reviewerID.unique())}
map_items = {item: i_id for i_id, item in enumerate(df_recsys.asin.unique())}
df_recsys['asin'] = df_recsys['asin'].map(map_items)
df_recsys['reviewerID'] = df_recsys['reviewerID'].map(map_users)
df_recsys.head()
# + [markdown] id="UwKOJ38_pItf"
# map title
# + executionInfo={"elapsed": 5170, "status": "ok", "timestamp": 1614263633132, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="Ur4-pGsum19d"
# Build the item-id -> title lookup. Iterating the two columns with zip()
# is far faster than DataFrame.iterrows and produces the same
# last-occurrence-wins mapping.
asin_title = {}
for item_id, title in zip(df_recsys['asin'], df_recsys['title']):
    asin_title[item_id] = title
# NOTE(review): filename typo 'map_tilte' kept deliberately — later cells
# load this exact name.
np.save('map_tilte.npy', asin_title)
# + [markdown] id="AFgNr1Z4pKrW"
# metadata transformation
# + colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"elapsed": 1579, "status": "ok", "timestamp": 1614264727184, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="aOjNK2jnpEth" outputId="0a33033d-725d-4237-d33a-0fb92997edbe"
# Wide item-attribute table -> long (title, attribute, value) rows,
# keeping only non-zero attribute values.
new_metadata = dataset_metadata.iloc[:,1:]
new_metadata = new_metadata.melt(id_vars=["title"])
new_metadata = new_metadata[new_metadata.value != 0]
new_metadata.reset_index(inplace=True, drop=True)
new_metadata.tail()

# Map each title back to its integer item id via the saved lookup.
dict_title = np.load('map_tilte.npy', allow_pickle=True).tolist()
inverse_dict_title = {value: int(key) for key, value in dict_title.items()}
new_metadata['asin_id'] = new_metadata['title'].map(inverse_dict_title)
new_metadata.dropna(inplace=True)
new_metadata = new_metadata[['asin_id', 'variable', 'value']]
# new_metadata = new_metadata.drop_duplicates()
new_metadata['asin_id'] = new_metadata.asin_id.astype(int)
new_metadata.to_csv('items_metadata.dat', index=False, sep='\t', header=False)
new_metadata.tail()

# Hold out a third of the interactions as the test split.
from sklearn.model_selection import train_test_split
train, test = train_test_split(df_recsys, test_size=0.33, random_state=42)
train.to_csv('train.dat', index=False, header=False, sep='\t')
test.to_csv('test.dat', index=False, header=False, sep='\t')
# + [markdown] id="RtiIKSe3nvsy"
# Rating prediction
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5782, "status": "ok", "timestamp": 1614263832178, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}, "user_tz": -330} id="mGt3pgP3nFFl" outputId="9e5360e2-1208-44be-b9e5-ed3cfd182d18"
# --- Rating prediction baseline --------------------------------------
from caserec.recommenders.rating_prediction.most_popular import MostPopular
MostPopular('train.dat', 'test.dat', 'rp_mostPopular.dat').compute()

predictions = pd.read_csv('rp_mostPopular.dat', sep='\t', names=['reviewerID', 'asin', 'rate'])
predictions['title'] = predictions.asin.map(asin_title)
predictions.head()

# --- Item ranking -----------------------------------------------------
# (this import intentionally shadows the rating-prediction MostPopular)
from caserec.recommenders.item_recommendation.most_popular import MostPopular
MostPopular('train.dat', 'test.dat', 'rank_mostPopular.dat').compute(as_table=True, metrics=['NDCG'])

ranking = pd.read_csv('rank_mostPopular.dat', sep='\t', names=['reviewerID', 'asin', 'score'])
ranking['title'] = ranking.asin.map(asin_title)
ranking.head(10)

# --- Neighbourhood models ---------------------------------------------
# ItemKNN driven by the item attribute metadata built above.
from caserec.recommenders.rating_prediction.item_attribute_knn import ItemAttributeKNN
ItemAttributeKNN('train.dat', 'test.dat', metadata_file='items_metadata.dat', as_similar_first=True).compute()

from caserec.recommenders.rating_prediction.itemknn import ItemKNN
ItemKNN('train.dat', 'test.dat', 'rp_iknn.dat').compute()

predictions = pd.read_csv('rp_iknn.dat', sep='\t', names=['reviewerID', 'asin', 'rate'])
predictions['title'] = predictions.asin.map(asin_title)
predictions.head()

from caserec.recommenders.rating_prediction.userknn import UserKNN
UserKNN('train.dat', 'test.dat', 'rp_uknn.dat').compute()

predictions = pd.read_csv('rp_uknn.dat', sep='\t', names=['reviewerID', 'asin', 'rate'])
predictions['title'] = predictions.asin.map(asin_title)
predictions.head()

# --- Matrix factorization ---------------------------------------------
from caserec.recommenders.rating_prediction.matrixfactorization import MatrixFactorization
MatrixFactorization('train.dat', 'test.dat', 'rp_mf.dat').compute()

predictions = pd.read_csv('rp_mf.dat', sep='\t', names=['reviewerID', 'asin', 'rate'])
predictions['title'] = predictions.asin.map(asin_title)
predictions.head()
# + id="J1SKSc3ts0G9"
|
_source/raw/amazonmusic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (myenv-gpu)
# language: python
# name: myenv-gpu
# ---
import matplotlib.pyplot as plt
import cv2
from osgeo import gdal
import numpy as np
from keras.models import load_model
from keras import losses
import datetime
import math
import sys
# +
# read TIF dataset
# read TIF dataset
def readTif(fileName, xoff = 0, yoff = 0, data_width = 0, data_height = 0):
    """Open a GeoTIFF and return (width, height, bands, data, geotrans, proj).

    If data_width/data_height are 0 (the default), the whole raster is
    read; otherwise the window of that size starting at (xoff, yoff).
    Raises FileNotFoundError if GDAL cannot open the file.
    """
    dataset = gdal.Open(fileName)
    # BUG FIX: the original used `== None` and then fell through, crashing
    # with an AttributeError on dataset.RasterXSize; fail fast instead.
    if dataset is None:
        raise FileNotFoundError(fileName + " file cannot be opened")
    # The number of columns of the grid matrix
    width = dataset.RasterXSize
    # The number of rows of the grid matrix
    height = dataset.RasterYSize
    # Number of channels
    bands = dataset.RasterCount
    # Default to reading the full raster
    if(data_width == 0 and data_height == 0):
        data_width = width
        data_height = height
    data = dataset.ReadAsArray(xoff, yoff, data_width, data_height)
    # Get affine matrix information
    geotrans = dataset.GetGeoTransform()
    # Get projection information
    proj = dataset.GetProjection()
    return width, height, bands, data, geotrans, proj
# save .tif file
def writeTiff(im_data, im_geotrans, im_proj, path):
if 'int8' in im_data.dtype.name:
datatype = gdal.GDT_Byte
elif 'int16' in im_data.dtype.name:
datatype = gdal.GDT_UInt16
else:
datatype = gdal.GDT_Float32
if len(im_data.shape) == 3:
im_bands, im_height, im_width = im_data.shape
elif len(im_data.shape) == 2:
im_data = np.array([im_data])
im_bands, im_height, im_width = im_data.shape
driver = gdal.GetDriverByName("GTiff")
dataset = driver.Create(path, int(im_width), int(im_height), int(im_bands), datatype)
if(dataset!= None):
dataset.SetGeoTransform(im_geotrans)
dataset.SetProjection(im_proj)
for i in range(im_bands):
dataset.GetRasterBand(i+1).WriteArray(im_data[i])
del dataset
# Tif cropping (tif pixel data, cropping side length)
def TifCroppingArray(img, SideLength):
# Cut list
TifArrayReturn = []
# Number of image blocks on the column
ColumnNum = int((img.shape[0] - SideLength * 2) / (256 - SideLength * 2))
# Number of image blocks on the line
RowNum = int((img.shape[1] - SideLength * 2) / (256 - SideLength * 2))
for i in range(ColumnNum):
TifArray = []
for j in range(RowNum):
cropped = img[i * (256 - SideLength * 2) : i * (256 - SideLength * 2) + 256,
j * (256 - SideLength * 2) : j * (256 - SideLength * 2) + 256]
TifArray.append(cropped)
TifArrayReturn.append(TifArray)
# Taking into account that there will be leftovers in the rows and columns, crop one row and one column forward
# Crop forward the last column
for i in range(ColumnNum):
cropped = img[i * (256 - SideLength * 2) : i * (256 - SideLength * 2) + 256,
(img.shape[1] - 256) : img.shape[1]]
TifArrayReturn[i].append(cropped)
# Crop the last line forward
TifArray = []
for j in range(RowNum):
cropped = img[(img.shape[0] - 256) : img.shape[0],
j * (256-SideLength*2) : j * (256 - SideLength * 2) + 256]
TifArray.append(cropped)
# Crop the lower right corner forward
cropped = img[(img.shape[0] - 256) : img.shape[0],
(img.shape[1] - 256) : img.shape[1]]
TifArray.append(cropped)
TifArrayReturn.append(TifArray)
# The remaining number on the column
ColumnOver = (img.shape[0] - SideLength * 2) % (256 - SideLength * 2) + SideLength
# Number remaining on the line
RowOver = (img.shape[1] - SideLength * 2) % (256 - SideLength * 2) + SideLength
return TifArrayReturn, RowOver, ColumnOver
# Label visualization, that is, assign the n value to the nth category
def labelVisualize(img):
img_out = np.zeros((img.shape[0],img.shape[1]))
for i in range(img.shape[0]):
for j in range(img.shape[1]):
# Assign a value of n to the nth category
# The result is a list [0.1,0.3,0.2,0.1,0.01]
# Select the max number as the prediction result
img_out[i][j] = np.argmax(img[i][j])
return img_out
# Normalize the test image and make it dimensionally consistent with the training image
def normalize(img):
#min of each channel
minlist = [414, 457, 408, 325, 321, 352, 273, 262, 246, 258, 151, 124, 114, 119, 108, 135, 107, 99, 117, 93, 121, 127, 125, 138, 109, 107, 97, 100, 101, 68, 62, 57]
#max - min (channel)
diflist = [599, 566, 615, 698, 702, 671, 750, 761, 777, 765, 872, 899, 909, 904, 915, 888, 876, 848, 906, 703, 900, 896, 872, 762, 656, 682, 672, 739, 635, 510, 445, 198]
img = img.astype(np.float32)
for i in range(32):
img[i][np.where(img[i]==0)]=minlist[i]
img[i] = (img[i]-minlist[i])/diflist[i]
img[i] = img[i]*255
return img
def testGenerator(TifArray):
    """Yield one normalized tile at a time as a batch of size 1 (NHWC)."""
    for tile_row in TifArray:
        for tile in tile_row:
            # scale pixel values into [0, 1]
            scaled = tile / 255.0
            batch = np.zeros((1,) + scaled.shape)
            batch[0] = scaled
            yield batch
# Get the result matrix
def Result(shape, TifArray, npyfile, num_class, RepetitiveLength, RowOver, ColumnOver):
    """Stitch overlapping per-tile predictions back into one (H, W) uint8 class map.

    Tiles are 256x256 and overlap by RepetitiveLength pixels on each side;
    only the non-overlapping interior of each tile is written, except at the
    image borders where the full edge strip is kept. RowOver / ColumnOver are
    the remaining pixel counts of the last (partial) tile column / row.
    NOTE(review): num_class is accepted but never used in this function.
    """
    result = np.zeros(shape, np.uint8)
    # j == row
    j = 0
    for i,item in enumerate(npyfile):
        # Collapse the per-class scores of this tile to a class-index image.
        img = labelVisualize(item)
        # print(img)
        img = img.astype(np.uint8)
        # Special consideration is given to the leftmost column, and the left edge should be spliced in
        if(i % len(TifArray[0]) == 0):
            # Special consideration should be given to the first line, and the upper edge should be taken into consideration
            if(j == 0):
                result[0 : 256 - RepetitiveLength, 0 : 256-RepetitiveLength] = img[0 : 256 - RepetitiveLength, 0 : 256 - RepetitiveLength]
            # Special consideration should be given to the last line, and the edge below should be taken into consideration.
            elif(j == len(TifArray) - 1):
                # NOTE(review): this branch subtracts RepetitiveLength from the row start,
                # unlike the rightmost-column branch below which uses shape[0] - ColumnOver.
                # Confirm the asymmetry is intentional.
                result[shape[0] - ColumnOver - RepetitiveLength: shape[0], 0 : 256 - RepetitiveLength] = img[256 - ColumnOver - RepetitiveLength : 256, 0 : 256 - RepetitiveLength]
            else:
                result[j * (256 - 2 * RepetitiveLength) + RepetitiveLength : (j + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength,
                       0:256-RepetitiveLength] = img[RepetitiveLength : 256 - RepetitiveLength, 0 : 256 - RepetitiveLength]
        # Special consideration is given to the rightmost column, and the right edge should be spliced in
        elif(i % len(TifArray[0]) == len(TifArray[0]) - 1):
            # Special consideration should be given to the first line, and the upper edge should be taken into consideration
            if(j == 0):
                result[0 : 256 - RepetitiveLength, shape[1] - RowOver: shape[1]] = img[0 : 256 - RepetitiveLength, 256 - RowOver: 256]
            # Special consideration should be given to the last line, and the edge below should be taken into consideration.
            elif(j == len(TifArray) - 1):
                result[shape[0] - ColumnOver : shape[0], shape[1] - RowOver : shape[1]] = img[256 - ColumnOver : 256, 256 - RowOver : 256]
            else:
                result[j * (256 - 2 * RepetitiveLength) + RepetitiveLength : (j + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength,
                       shape[1] - RowOver : shape[1]] = img[RepetitiveLength : 256 - RepetitiveLength, 256 - RowOver : 256]
            # After walking the far right side of each line, the number of lines is +1
            j = j + 1
        # Not the leftmost or rightmost case
        else:
            # Special consideration should be given to the first line, and the upper edge should be taken into consideration
            # NOTE(review): the next two conditions are `if` + `if/else`, not
            # `if/elif/else`; for j == 0 (and j != last) the else-branch also
            # runs. The overlapping writes carry identical pixel values, so the
            # output is unaffected, but confirm whether `elif` was intended.
            if(j == 0):
                result[0 : 256 - RepetitiveLength,
                       (i - j * len(TifArray[0])) * (256 - 2 * RepetitiveLength) + RepetitiveLength : (i - j * len(TifArray[0]) + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength
                       ] = img[0 : 256 - RepetitiveLength, RepetitiveLength : 256 - RepetitiveLength]
            # Special consideration should be given to the last line, and the bottom edge should be taken into consideration
            if(j == len(TifArray) - 1):
                result[shape[0] - ColumnOver : shape[0],
                       (i - j * len(TifArray[0])) * (256 - 2 * RepetitiveLength) + RepetitiveLength : (i - j * len(TifArray[0]) + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength
                       ] = img[256 - ColumnOver : 256, RepetitiveLength : 256 - RepetitiveLength]
            else:
                result[j * (256 - 2 * RepetitiveLength) + RepetitiveLength : (j + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength,
                       (i - j * len(TifArray[0])) * (256 - 2 * RepetitiveLength) + RepetitiveLength : (i - j * len(TifArray[0]) + 1) * (256 - 2 * RepetitiveLength) + RepetitiveLength,
                       ] = img[RepetitiveLength : 256 - RepetitiveLength, RepetitiveLength : 256 - RepetitiveLength]
    return result
# -
# #### PATH parameter
# +
img_id = 89
# f-strings instead of string concatenation; resulting paths are unchanged.
TifPath = f"./comp/train/images/00{img_id}.tif"
LabelPath = f"./comp/train/labels/00{img_id}.tif"
ResultPath = f"evaluation/Predict/{img_id}.tif"
ModelPath = r"./Model/unet_model_7.hdf5"
# +
area_perc = 0.5
# Overlap (pixels) between adjacent 256x256 tiles, derived so the
# non-overlapping interior of each tile covers area_perc of its area.
RepetitiveLength = int((1 - math.sqrt(area_perc)) * 256 / 2)
testtime = []  # human-readable timing log lines
starttime = datetime.datetime.now()
# readTif / TifCroppingArray / load_model are defined earlier in this notebook.
im_width, im_height, im_bands, im_data, im_geotrans, im_proj = readTif(TifPath)
im_data = normalize(im_data)#normalize
# normalize() iterates the first axis as 32 channels, so data arrives as
# (bands, H, W); the two swaps reorder it to (H, W, bands) for the model.
im_data = im_data.swapaxes(1, 0)#swap axes
im_data = im_data.swapaxes(1, 2)
#Cut Tif 512-> 4*256
TifArray, RowOver, ColumnOver = TifCroppingArray(im_data, RepetitiveLength)
endtime = datetime.datetime.now()
text = "Cut Tif cost: " + str((endtime - starttime).seconds) + "s"
print(text)
testtime.append(text)
model = load_model(ModelPath)#Load model
#Generator
testGene = testGenerator(TifArray)
#Predict
results = model.predict_generator(testGene,len(TifArray) * len(TifArray[0]),verbose = 1)
endtime = datetime.datetime.now()
text = "Predict cost: " + str((endtime - starttime).seconds) + "s"
print(text)
testtime.append(text)
#Save result
# Stitch the per-tile predictions back into a full-size class map.
result_shape = (im_data.shape[0], im_data.shape[1])
result_data = Result(result_shape, TifArray, results, 2, RepetitiveLength, RowOver, ColumnOver)
'''comment part can generate a .tif format result and .txt record'''
# write_result = result_data
# write_result[write_result==0]=255
# writeTiff(write_result, im_geotrans, im_proj, ResultPath)
endtime = datetime.datetime.now()
# text = "Time cost: " + str((endtime - starttime).seconds) + "s"
# print(text)
# testtime.append(text)
# time = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d-%H%M%S')
# with open('timelog_%s.txt'%time, 'w') as f:
# for i in range(len(testtime)):
# f.write(testtime[i])
# f.write("\r\n")
# -
# #### Plot prediction and label
# +
# Show the stitched prediction next to the ground-truth label.
img = np.array(plt.imread(LabelPath))
# Map 255 to 0 (presumably the background/nodata code in the label file -
# verify against the label format).
img[img==255] = 0
fig = plt.figure(figsize=(12,20))
ax = fig.subplots(1,2)
ax[0].set_title('Predict')
ax[0].imshow(result_data)
ax[1].set_title('Label')
ax[1].imshow(img)
|
predict.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp simulations
# -
# # module name here
#
# > API details.
#hide
from nbdev.showdoc import *
# +
#export
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
# -
#export
# Use the GPU when available; tensors created below are moved to this device.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# # Parameters
# ### S: Number of sequences
# ### N: Length of one sequence
#export
class TrueParameters:
    """Ground-truth parameters of the generative model used in this notebook.

    S sequences of N events each; B discrete states; H_dim latent feature
    dims; Y_dim observation dims. All parameters are randomly initialised.
    """
    def __init__(self, S=50, N=10, device=device, B=3, H_dim=2, Y_dim=2, noise_X=1.0, noise_H=1.0):
        # Hyperparameters
        self.S,self.N = S,N
        # x: scale of the Gaussian noise added to the latent trajectory samples
        self.noise_X = torch.tensor(noise_X, dtype=torch.float32).to(device)
        # z: transition logits P (identity added to favour self-transitions)
        # and coupling Wx from x into the state logits
        self.B = B
        self.P = (torch.rand((self.B,self.B)) + torch.eye(self.B,self.B)).to(device)
        # NOTE(review): logpi is not moved to `device`, unlike every other
        # tensor here - confirm whether that is intentional.
        self.logpi = torch.rand((1,self.B))
        self.Wx = torch.rand(self.B).to(device)
        # h: per-state Gaussian emission parameters for the latent features
        self.H_dim = H_dim
        self.noise_H = torch.tensor(noise_H, dtype=torch.float32).to(device)
        self.mu_h = torch.randn(self.B, self.H_dim).to(device)
        # NOTE(review): .view(self.B, self.H_dim) is a no-op here (the tensor
        # already has that shape); the randint factor scales all sigmas by a
        # single random integer in [1, 9].
        self.sigma_h = torch.rand(self.B, self.H_dim).view(self.B, self.H_dim) * torch.randint(1,10, (1,), dtype=torch.float32)
        self.sigma_h = self.sigma_h.to(device)
        # i: coefficients mapping [1, x, z] to Gamma shape/rate (see link_gamma)
        self.beta_a = torch.tensor([-np.log(4), 0.,0.]).to(device)
        self.beta_b = torch.tensor([0., 0.,0.]).to(device)
        # y: linear maps from h to the mean and scale of the observations
        self.Y_dim = Y_dim
        self.W_mu_y = torch.randn(self.H_dim, self.Y_dim).to(device)
        self.W_sigma_y = torch.rand(self.H_dim, self.Y_dim).to(device)
# # Helper functions
#export
def transform_x(t):
    """Latent trajectory: a slow sine of time t.

    Defined with `def` instead of a lambda assignment (PEP 8 E731) so the
    function gets a proper name in tracebacks and a docstring.
    """
    return torch.sin(t / 200) * .7 - .1
#export
def link_gamma(xn, zn, Params):
    """Map latent state x_n and discrete state z_n to Gamma(shape, rate) parameters."""
    features = torch.tensor([1., xn, zn]).to(xn.device)
    # a = exp(-beta_a . [1, x, z]);  b = exp(-beta_b . [1, x, z])
    shape = torch.exp(-torch.dot(Params.beta_a, features))
    rate = torch.exp(-torch.dot(Params.beta_b, features))
    # print(shape, rate)
    return shape, rate
#export
def sample_using_logits(logits):
    """Draw a single sample from a Categorical distribution given raw logits."""
    categorical = dist.Categorical(logits=logits)
    return categorical.sample()
#export
def get_time_from_intervals(I):
    """Convert inter-event intervals to absolute event times.

    Pads one zero at the start of the last dimension and cumulative-sums, so
    the first event occurs at t=0 and the output shape equals the input shape.
    """
    I = F.pad(I, (1,0)) #pads the last dimension with width=1 on the left and width=0 on the right. Value of padding=0
    I = I.cumsum(dim=-1)
    # The padded cumsum has one extra trailing element on the last dim; drop it.
    # (A plain slice replaces the original stack-of-selects loop.)
    return I[..., :-1]
# # Simulation
# ## X, Z, I
# +
#export
def simulate_XZI_seq(Params, device=device, noise_X=1.):
    """Simulate one sequence of latent states X, discrete states Z, intervals I.

    At each step: x_n follows the sine trajectory plus Gaussian noise,
    z_n ~ Categorical(logits = Wx*x_n + P[z_{n-1}]), and the waiting time
    i_n ~ Gamma(link_gamma(x_n, z_n)).
    NOTE(review): the `noise_X` parameter is never used - the body reads
    Params.noise_X instead. Confirm which is intended.
    """
    t = torch.tensor([0.]).to(device)
    X_s = [transform_x(t)]
    Z_s = [sample_using_logits(Params.Wx*X_s[0])]
    I_s = [dist.Gamma(*link_gamma(X_s[0], Z_s[0], Params)).sample()]
    for n in range(1, Params.N):
        t += I_s[-1]
        X_s.append(transform_x(t) + torch.randn((1,), device=device)*Params.noise_X) #uses current t which is a function of all I_{t'}, t' < t
        Z_s.append(sample_using_logits(Params.Wx*X_s[-1] + Params.P[Z_s[-1]])) #X_{t} and Z_{t-1}
        I_s.append(dist.Gamma(*link_gamma(X_s[-1], Z_s[-1], Params)).sample()) #X_{t} and Z_{t}
    # Pack the per-step samples into 1-D tensors of length N.
    X_s, Z_s, I_s = torch.tensor(X_s).to(device), torch.tensor(Z_s).to(device), torch.tensor(I_s).to(device)
    return X_s, Z_s, I_s
def simulate_XZI(Params):
    """Simulate Params.S independent (X, Z, I) sequences, stacked along dim 0."""
    sequences = [simulate_XZI_seq(Params, noise_X=Params.noise_X)
                 for _ in range(Params.S)]
    X, Z, I = zip(*sequences)
    return torch.stack(X, dim=0), torch.stack(Z, dim=0), torch.stack(I, dim=0)
# -
# ## H
#export
def simulate_H(Z,Params):
    """Sample a latent feature vector h_n ~ N(mu_{z_n}, sigma_{z_n} * noise_H) per event."""
    def _draw(state):
        # One Normal draw conditioned on the event's discrete state.
        return dist.Normal(Params.mu_h[state], Params.sigma_h[state] * Params.noise_H).sample()
    per_seq = []
    for seq_idx in range(Params.S):
        states = Z[seq_idx]
        per_seq.append(torch.stack([_draw(states[step]) for step in range(Params.N)], dim=0))
    return torch.stack(per_seq, dim=0) #shape: (S,T,H_dim)
# ## Y
#export
def simulate_Y(H, Params):
    """Sample observations y ~ N(H @ W_mu_y, H @ W_sigma_y)."""
    loc = torch.matmul(H, Params.W_mu_y)
    scale = torch.matmul(H, Params.W_sigma_y)
    return dist.Normal(loc, scale).sample()
# # Generate FULL DATASET
#export
def simulate_data(Params):
    """Run the full generative pipeline and return (I, Y, T, X, Z, H)."""
    X, Z, I = simulate_XZI(Params)
    # Absolute event times from the sampled inter-event intervals.
    T = get_time_from_intervals(I)
    H = simulate_H(Z, Params)
    Y = simulate_Y(H, Params)
    return I,Y,T,X,Z,H
# # TEST
# Smoke test: construct TrueParameters with defaults (result discarded).
TrueParameters()
def test():
    """Simulate a small noiseless dataset and return all pieces for inspection."""
    Params = TrueParameters(S=5, N=400, H_dim=1, noise_X=0, noise_H=0)
    X, Z, I = simulate_XZI(Params)
    T = get_time_from_intervals(I)
    H = simulate_H(Z, Params)
    Y = simulate_Y(H, Params)
    return Params,X,Z,I,T,H,Y
Params,X,Z,I,T,H,Y = test()
# Echo the tensor shapes for a quick sanity check.
X.shape,Z.shape,I.shape,T.shape,H.shape,Y.shape
# ## Some Visualizations
#export
def visualize(Params, X,T,I,H=None):
    """Plot a 3x2 diagnostic grid for one simulated dataset.

    Panels: latent trajectory x(t) with the observed X of sequence 0, the
    base transition logits P, event times for up to 5 sequences, a histogram
    of interval times, and (when H is given) the first one or two latent
    feature dims. Finally shows sigma_h as a separate heatmap.
    """
    fig, ax = plt.subplots(3,2, figsize=(8,8))
    s = 0  # sequence index used by the single-sequence panels
    ax[0,0].plot(torch.linspace(1e-6,T[s,-1],100), transform_x(torch.linspace(1e-6,T[s,-1],100)))
    ax[0,0].scatter(T[s].cpu(), X[s].cpu())
    ax[0,0].set_title(f"Latent state $x(t)$ ({s+1}th Seq)")
    im = ax[0,1].imshow(Params.P.cpu())
    fig.colorbar(im, ax=ax[0,1])
    ax[0,1].set_title("Base transition logits")
    for _s in range(min(5, Params.S)):
        ax[1,0].scatter(T[_s].cpu(), torch.zeros_like(T[_s].cpu())+_s)
    ax[1,0].set_title("Event Times (5 indpt sequences)")
    ax[1,1].hist(I[s].cpu().numpy(), bins=20)
    ax[1,1].set_title(f"Histogram of Interval Times ({s+1}th sequence)")
    if H is not None:
        ax[2,0].scatter(T[s].cpu(), H[s,:,0].cpu())
        # Plot the second latent dim only when it exists, instead of the
        # original bare `try/except: pass`, which silently swallowed every
        # exception (including unrelated bugs, KeyboardInterrupt, etc.).
        if H.size(2) > 1:
            ax[2,1].scatter(T[s].cpu(), H[s,:,1].cpu())
    print(Params.sigma_h)
    c = plt.imshow(Params.sigma_h.cpu().detach().numpy())
    plt.colorbar(c);
visualize(Params,X,T,I,H)
# Export all `#export`-tagged cells to the library module (nbdev).
from nbdev.export import *
notebook2script()
|
01_simulations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K Means Clustering
# **Project to use K Means Clustering to group University by Private/Public**
#
# ### Data:
#
# 01. Private: Whether the university is private (used only to check the clustering results)
# 02. Apps: Number of subscriptions received
# 03. Accept: Number of subscriptions accepted
# 04. Enroll: Number of students enrolled
# 05. Top10perc: Percentage of new students who came from the best 10% group on high school
# 06. Top25perc: Percentage of new students who came from the best 25% group on high school
# 07. F.Undergrad: number of full-time undergraduate students
# 08. P.Undergrad: number of part-time undergraduate students
# 09. Outstate: Classes out of state
# 10. Room.Board: Room cost
# 11. Books: Estimated Book Cost
# 12. Personal: Estimated spending per person
# 13. PhD: Percentage of PHD's at University
# 14. Terminal: Percentage of College with graduation
# 15. S.F.Ratio: Student/faculty ratio
# 16. perc.alumni: Percentage of alumni who donates
# 17. Expend: University expenses per student
# 18. Grad.Rate: Graduation rate
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# %matplotlib inline
file = "//home//vinicius//Data_Science//Notebooks//Data_Files//College_Data"
df1 = pd.read_csv(file, index_col=0) ##University Names = Index
df1.head()
df1.Private.value_counts() ## final result
df1.info()
df1.describe()
# Room cost vs graduation rate, coloured by the Private label.
sns.lmplot(x='Room.Board', y='Grad.Rate', data=df1, hue='Private', fit_reg=False, height=6)
## private colleges room cost is >= public cost
sns.lmplot(x='F.Undergrad', y='Outstate' )
|
.ipynb_checkpoints/02 - KMeans Clustering (College Data)-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Traveling Salesman Problem (TSP) solved with Genetic Algorithms.
#
# The Traveling Salesman Problem is a classic optimization problem that has as objective to calculate the most efficient
# way to visit N cities with minimum travelled distance.
#
# We will use a basic genetic algorithm to solve this problem by finding an optimum solution.
#
# Also, we will use the DEAP library to solve this problem, more on this library on the following
# [link.](https://deap.readthedocs.io)
#
# ## The Algorithm.
# First we need some imports:
# + pycharm={"name": "#%%\n"}
import os
import pickle
import array
import csv
import codecs
import random
from urllib.request import urlopen
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] pycharm={"name": "#%% md\n"}
# Then we need to define a function to read the problem data, we will load data from the following site:
# [http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/](http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/)
#
# The following function fetch the data from the remote service and parses into a 2-dimensional array. We will use python
# serialization to cache the data, so we don't overload the source site with requests.
# + pycharm={"name": "#%%\n"}
URL_PREFIX = 'http://elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/'
def read_tsp_data(tsp_name):
    """
    This function reads the tsp problem sample into an array of coordinates.

    Results are cached as pickles under tsp-data/ so the source site is not
    hit on every run.
    :param tsp_name: the tsp problem name.
    :return: a (locations, distances) pair - city coordinates and the
             symmetric pairwise distance matrix.
    """
    try:
        # Serve from the local cache when it exists. Context managers close
        # the cache files promptly (the original leaked the open() handles).
        with open(os.path.join('tsp-data', tsp_name + '-loc.pickle'), 'rb') as loc_file:
            locations = pickle.load(loc_file)
        with open(os.path.join('tsp-data', tsp_name + '-dist.pickle'), 'rb') as dist_file:
            distances = pickle.load(dist_file)
        return locations, distances
    except (OSError, IOError):
        pass
    locations = []
    # open url with tsp data
    tsp_url = URL_PREFIX + tsp_name + '.tsp'
    with urlopen(tsp_url) as f:
        # read the data as a csv file delimited with whitespaces (utf-8 character encoding)
        reader = csv.reader(codecs.iterdecode(f, 'utf-8'), delimiter=' ', skipinitialspace=True)
        # ignore all the lines upto the start of the city coordinates
        for row in reader:
            if row[0] in ('DISPLAY_DATA_SECTION', 'NODE_COORD_SECTION'):
                break
        for row in reader:
            # if we reach the end of the data exit
            if row[0] == 'EOF':
                break
            # delete the index, we need the coordinates only
            del row[0]
            locations.append(np.asarray(row, dtype=np.float32))
    # calculate distances using vector norm
    number_cities = len(locations)
    distances = [[0] * number_cities for _ in locations]
    for i in range(number_cities):
        for j in range(i + 1, number_cities):
            # vector norm
            distance = np.linalg.norm(locations[j] - locations[i])
            distances[i][j] = distances[j][i] = distance
    if not os.path.exists('tsp-data'):
        os.makedirs('tsp-data')
    # Write the cache with context managers so the files are flushed/closed.
    with open(os.path.join('tsp-data', tsp_name + '-loc.pickle'), 'wb') as loc_file:
        pickle.dump(locations, loc_file)
    with open(os.path.join('tsp-data', tsp_name + '-dist.pickle'), 'wb') as dist_file:
        pickle.dump(distances, dist_file)
    return locations, distances
# + [markdown] pycharm={"name": "#%% md\n"}
# We will use 29 cities of Bavaria dataset.
# + pycharm={"name": "#%%\n"}
# Load the 29-city 'bayg29' benchmark; cached under tsp-data/ after the first run.
CITIES, DISTANCES = read_tsp_data('bayg29')
NUMBER_CITIES = len(CITIES)
# -
# We need to define the algorithm hyper-parameters like:
# - `NUM_GENERATIONS`: the number of generations that we will iterate applying
# genetic algorithm operators to search for new optimal solutions.
# - `POPULATION_SIZE`: the population size or how many potential solutions we will
# use in each generation of the algorithm.
# - `P_CROSSOVER`: the probability for an individual to be selected for crossover (mating)
# - `P_MUTATION`: the probability for an individual to be selected for a random
# mutation.
# + pycharm={"name": "#%%\n"}
NUM_GENERATIONS = 200  # generations to evolve
POPULATION_SIZE = 100  # individuals per generation
P_CROSSOVER = 0.9  # probability an individual is selected for mating
P_MUTATION = 0.1  # probability an individual is selected for mutation
# -
# Next, we define the algorithm types, first the fitness type that will inherit
# from the class `Fitness`, we need a negative weight because we are minimizing
# the distance.
# Also, we create an individual type, this will inherit from the python `list`
# class, because we are going to represent the cities by using an ordered array
# of city indexes to represent the solution, for example, if we have `NUMBER_CITIES` cities,
# we will have the following as a potential solution:
#
# *NOTE: `individual` refers a particular instance or solution to the problem.*
# + pycharm={"name": "#%%\n"}
# Example encoding: a candidate solution is a random permutation of city indices.
individual = list(range(NUMBER_CITIES))
individual = random.sample(individual, len(individual))
print(individual)
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we can define a function to calculate the distances for a particular
# instance of the problem.
# + pycharm={"name": "#%%\n"}
def tsp_distance(individual: list) -> float:
    """
    Returns the traveling distance for a particular ordering of cities.
    :param individual: an ordered list of city indices to visit.
    :return: the total travelled distance of the closed tour.
    """
    # Close the tour: distance between the last and first city.
    distance = DISTANCES[individual[0]][individual[-1]]
    # Sum the consecutive legs. Using len(individual) instead of the global
    # NUMBER_CITIES generalises the function to partial tours while being
    # identical for full-length individuals.
    for i in range(len(individual) - 1):
        distance += DISTANCES[individual[i]][individual[i + 1]]
    return distance
# + [markdown] pycharm={"name": "#%% md\n"}
# Next, we define the `FitnessMin` type that will represent the minimization objective, and an `Individual` type that will
# represent an instance or potential solution to this problem.
# + pycharm={"name": "#%%\n"}
# Single-objective minimisation: one negative weight.
creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
# Individuals are C-int arrays ('i') of city indices with an attached fitness.
creator.create('Individual', array.array, typecode='i', fitness=creator.FitnessMin)
# + [markdown] pycharm={"name": "#%% md\n"}
# Also we need to define common operators that will be used to generate the initial population, in this case we need an
# operator `randomOrder` that will shuffle an ordered array of cities to generate a new random individual.
# The `individualCreator` operator, is called by the `populationCreator` operator to generate each individual and fill the
# initial random population.
# + pycharm={"name": "#%%\n"}
toolbox = base.Toolbox()
# Create operator to shuffle the cities
toolbox.register('randomOrder', random.sample, range(NUMBER_CITIES), NUMBER_CITIES)
# Create initial random individual operator
toolbox.register('individualCreator', tools.initIterate, creator.Individual, toolbox.randomOrder)
# Create random population operator
toolbox.register('populationCreator', tools.initRepeat, list, toolbox.individualCreator)
# + [markdown] pycharm={"name": "#%% md\n"}
# Next we define a function that will calculate the fitness of an individual.
# + pycharm={"name": "#%%\n"}
def tspFitness(individual) -> tuple:
    """DEAP fitness wrapper: the tour length as a 1-tuple (DEAP requires tuples)."""
    return tsp_distance(individual),
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we define the core operators that will be used on the genetic algorithm:
# - `evaluate`: calculates the fitness of an individual.
# - `select`: chooses the individuals that will be mated to produce new offspring. In this case we use tournament
# selection of three individuals. To know how the tournament selection works, please check the following
# [link](https://en.wikipedia.org/wiki/Tournament_selection).
# - `mate`: performs the crossover (mating) on the previously selected individuals.
# - `mutate`: selects an individual for mutation. The mutation probability is calculated so at least one index is shuffled.
# + pycharm={"name": "#%%\n"}
toolbox.register('evaluate', tspFitness)
# Tournament selection: best of 3 randomly drawn individuals.
toolbox.register('select', tools.selTournament, tournsize=3)
# Ordered crossover keeps each child a valid permutation (a valid tour).
toolbox.register('mate', tools.cxOrdered)
# indpb chosen so that, on average, one index per individual is shuffled.
toolbox.register('mutate', tools.mutShuffleIndexes, indpb=1.0 / NUMBER_CITIES)
# + [markdown] pycharm={"name": "#%% md\n"}
# Now it's time to generate the initial population.
# + pycharm={"name": "#%%\n"}
population = toolbox.populationCreator(n=POPULATION_SIZE)
# + [markdown] pycharm={"name": "#%% md\n"}
# A `HallOfFame` object is used to store the best individuals of each generation, we will use it to implement elitism by
# taking the best individuals from each generation and promoting them to the next generation without applying the genetic
# operators.
# + pycharm={"name": "#%%\n"}
HALL_OF_FAME_SIZE = 10
hof = tools.HallOfFame(HALL_OF_FAME_SIZE)
# + [markdown] pycharm={"name": "#%% md\n"}
# We use a `Statistics` object to track several statistics of the individuals.
# + pycharm={"name": "#%%\n"}
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('min', np.min)
stats.register('avg', np.mean)
# + [markdown] pycharm={"name": "#%% md\n"}
# `Logbook` object is used to track each generation population statistics.
# + pycharm={"name": "#%%\n"}
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + stats.fields
# + [markdown] pycharm={"name": "#%% md\n"}
# Evaluate the individuals with an invalid fitness (all at the beginning)
# + pycharm={"name": "#%%\n"}
invalid_individuals = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_individuals)
for ind, fit in zip(invalid_individuals, fitnesses):
    ind.fitness.values = fit
# + [markdown] pycharm={"name": "#%% md\n"}
# Update the hall of fame object with the initial population.
# + pycharm={"name": "#%%\n"}
hof.update(population)
hof_size = len(hof.items)
# + [markdown] pycharm={"name": "#%% md\n"}
# Calculates statistics for the first generation.
# + pycharm={"name": "#%%\n"}
record = stats.compile(population)
logbook.record(gen=0, nevals=len(invalid_individuals), **record)
print(logbook.stream)
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we are ready to start the genetic flow, iterating and applying the operators to *evolve* and find an optimum
# solution.
# + pycharm={"name": "#%%\n"}
for gen in range(1, NUM_GENERATIONS + 1):
    # Select the next generation individuals
    # (hof_size slots are reserved for the elite re-inserted below)
    offspring = toolbox.select(population, len(population) - hof_size)
    # Vary the pool of individuals
    offspring = algorithms.varAnd(offspring, toolbox, P_CROSSOVER, P_MUTATION)
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    # add the best back to population (elitism):
    offspring.extend(hof.items)
    # Update the hall of fame with the generated individuals
    hof.update(offspring)
    # Replace the current population by the offspring
    population[:] = offspring
    # Append the current generation statistics to the logbook
    record = stats.compile(population) if stats else {}
    logbook.record(gen=gen, nevals=len(invalid_ind), **record)
    print(logbook.stream)
# + [markdown] pycharm={"name": "#%% md\n"}
# Now we can get the best individual from the hall of fame and plot the optimum path.
# We can see that around the 90th generation the average and minimum path lengths are very similar, which means that the
# algorithm has converged to an "optimal" solution. We need to take into account that the "optimal" can be a local optimum, not the
# global optimum.
# + pycharm={"name": "#%%\n"}
# Best tour found across all generations (hall-of-fame entry 0).
best = hof.items[0]
print('Best Individual = ', best)
print('Best Fitness = ', best.fitness.values[0])
plt.figure(1)
# plot genetic flow statistics:
minFitnessValues, meanFitnessValues = logbook.select("min", "avg")
plt.figure(2)
sns.set_style("whitegrid")
plt.plot(minFitnessValues, color='red')
plt.plot(meanFitnessValues, color='green')
plt.xlabel('Generation')
plt.ylabel('Min / Average Fitness')
plt.title('Min and Average fitness over Generations')
# show both plots:
plt.show()
# now plot the best travelling path.
plt.scatter(*zip(*CITIES), marker='.', color='red')
locs = [CITIES[i] for i in best]
locs.append(locs[0])  # close the tour back to the starting city
plt.plot(*zip(*locs), linestyle='-', color='blue')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Conclusions
# We can draw the following conclusions from this problem.
# - The genetic algorithm finds a local optima in about the 90th iteration, we could stop the genetic flow at that moment
# when the mean fitness is close to the minimum fitness with some arbitrary epsilon, because that means that
# the whole population has converged around a local optima, and the mutation operator will not be able to generate more
# diverse individuals that could lead to exploration in other more promising regions of the problem space. If we stop
# before, we could save processing resources that are wasted doing unnecessary iterations.
# - This algorithm is not parallelized, we could speed up the execution of the genetic flow by using
# the python multiprocessing module, threading here will not be very useful because this is a CPU-bound problem, not
# IO-bound, so the Python Global Interpreter Lock (GIL) will generate thread contention, while if we choose
# multiprocessing we can bypass the GIL and apply the genetic operators concurrently.
# - By using a genetic algorithm we can generate an optimal solution, without investing time learning or implementing a
# custom algorithm, that maybe will find the global optima, but it will be tied to this specific problem. With genetic
# algorithms we can find a "better" solution by just defining a suitable representation of the TSP problem as an
# *individual*.
#
# ## Further Experiments
# Just for fun we could do the following experiments.
# - What happens if we create a genetic algorithms that optimizes the hyper-parameters? We "wrap" this algorithm into a
# genetic algorithm that instead optimizes the tournament selection parameter, the crossover parameter, the mutation
# parameters, and the number of iterations, so it automatically restarts the genetic flow to search for additional optima
# in other parts of the problem space. We can create arrays of different operators implementations, and then apply a
# "mutation" by selecting a new operator implementation.
# - What happens if we change some operators for some individuals? At generation 30th, we don't mate using two point crossover,
# we switch to a new crossover implementation. This could lead to a random-search.
# - We could at the end of the genetic flow, increase the mutation parameter and restart the algorithm, so we increase
# the chance of finding new local optima in other parts of the problem space.
|
ch4/tsp-genetic-algorithms.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
#matrix math
import numpy as np
#graphing
import matplotlib.pyplot as plt
#graphing animation
import matplotlib.animation as animation
import pandas as pd
# %matplotlib inline
# + deletable=true editable=true
def load_dataset(name):
    """Load a whitespace-delimited numeric dataset into a NumPy array."""
    dataset = np.loadtxt(name)
    return dataset
# + deletable=true editable=true
#dataset = pd.read_csv('durudataset.txt',header=None)
# Load the demo dataset and echo it (and its type) for inspection.
dataset = load_dataset('durudataset.txt')
# + deletable=true editable=true
dataset
# + deletable=true editable=true
type(dataset)
# + deletable=true editable=true
def plot_data(dataset,threshold = 1):
    """Scatter-plot every 2-D point in `dataset` with a grid and axis labels.

    NOTE(review): `threshold` is only referenced by the commented-out
    colouring logic below and currently has no effect.
    """
    for point in dataset:
        x,y=point
        plt.scatter(x,y)
        '''if x > threshold:
            plt.scatter(x,y,color='r',marker='o')
        else:
            plt.scatter(x,y,color='g',marker='o')'''
    plt.grid()
    plt.title("Dummy")
    plt.ylabel("Vertical")
    plt.xlabel("Horizontal")
# + deletable=true editable=true
# Visualise the raw (unclustered) points.
plot_data(dataset)
# + deletable=true editable=true
def euclidian(a, b):
    """Euclidean (L2) distance between vectors/arrays a and b."""
    difference = a - b
    return np.linalg.norm(difference)
# + deletable=true editable=true
# Quick sanity check of the distance function on two 7-d vectors.
a=[0.49, 0.29, 0.48, 0.5, 0.56, 0.24, 0.35]
b=[0.07, 0.40, 0.48, 0.5, 0.54, 0.35, 0.44]
euclidian(np.array(a),np.array(b))
# + deletable=true editable=true
def plot(dataset, history_centroids, belongs_to):
    """Plot clustered points (red/green) and the centroid trajectory (blue start).

    NOTE(review): the cluster loop iterates dataset.shape[1] (number of
    features) as if it were the number of clusters - correct only while
    both equal 2, as in this notebook.
    """
    colors = ['r', 'g']
    fig, ax = plt.subplots()
    for index in range(dataset.shape[1]):
        instances_close = [i for i in range(len(belongs_to)) if belongs_to[i] == index]
        for instance_index in instances_close:
            ax.plot(dataset[instance_index][0], dataset[instance_index][1], (colors[index] + 'o'))
    history_points = []
    # First snapshot is drawn as new points; later snapshots move them in place.
    for index, centroids in enumerate(history_centroids):
        for inner, item in enumerate(centroids):
            if index == 0:
                history_points.append(ax.plot(item[0], item[1], 'bo')[0])
            else:
                history_points[inner].set_data(item[0], item[1])
                print("centroids {} {}".format(index, item))
    plt.show()
# + deletable=true editable=true
# %matplotlib notebook
def plot_step_by_step(dataset, history_centroids, belongs_to):
    """Like plot(), but pauses so the centroid movement can be watched live.

    NOTE(review): unlike plot(), this iterates dataset.shape[0] (number of
    instances) in the cluster loop; it only avoids an IndexError on `colors`
    because belongs_to never exceeds 1 for k=2 - confirm intended.
    """
    colors = ['r', 'g']
    fig, ax = plt.subplots()
    for index in range(dataset.shape[0]):
        instances_close = [i for i in range(len(belongs_to)) if belongs_to[i] == index]
        for instance_index in instances_close:
            ax.plot(dataset[instance_index][0], dataset[instance_index][1], (colors[index] + 'o'))
    history_points = []
    for index, centroids in enumerate(history_centroids):
        for inner, item in enumerate(centroids):
            if index == 0:
                history_points.append(ax.plot(item[0], item[1], 'bo')[0])
            else:
                history_points[inner].set_data(item[0], item[1])
                print("centroids {} {}".format(index, item))
    plt.pause(2)
# + deletable=true editable=true
def kmeans(k,dataset, epsilon=0, distance='euclidian'):
    """Lloyd's algorithm for k-means clustering.

    :param k: number of clusters.
    :param dataset: (num_instances, num_features) array of points.
    :param epsilon: convergence threshold on centroid movement.
    :param distance: distance metric name (only 'euclidian' is supported).
    :return: (centroids, history_centroids, belongs_to) where belongs_to
             maps each instance index to its cluster index.
    """
    print("Working...")
    history_centroids = []
    if distance == 'euclidian':
        dist_method = euclidian
    num_instances, num_features = dataset.shape
    # Pick k random data points as the initial centroids. np.random.randint's
    # upper bound is exclusive, so the original (0, num_instances - 1) could
    # never select the last data point; num_instances fixes that off-by-one.
    prototypes = dataset[np.random.randint(0, num_instances, size=k)]
    history_centroids.append(prototypes)
    prototypes_old = np.zeros(prototypes.shape)
    belongs_to = np.zeros((num_instances, 1))
    norm = dist_method(prototypes, prototypes_old)
    iteration = 0
    while norm > epsilon:
        iteration += 1
        norm = dist_method(prototypes, prototypes_old)
        prototypes_old = prototypes
        # Assignment step: attach every instance to its nearest centroid.
        for index_instance, instance in enumerate(dataset):
            dist_vec = np.zeros((k, 1))
            for index_prototype, prototype in enumerate(prototypes):
                dist_vec[index_prototype] = dist_method(prototype,instance)
            belongs_to[index_instance, 0] = np.argmin(dist_vec)
        tmp_prototypes = np.zeros((k, num_features))
        # Update step: move each centroid to the mean of its members.
        for index in range(len(prototypes)):
            instances_close = [i for i in range(len(belongs_to)) if belongs_to[i] == index]
            prototype = np.mean(dataset[instances_close], axis=0)
            tmp_prototypes[index, :] = prototype
        prototypes = tmp_prototypes
        history_centroids.append(tmp_prototypes)
    return prototypes, history_centroids, belongs_to
# + deletable=true editable=true
def predict(centroids,vector_test):
    """Return (cluster_index, closest_centroid) for a test vector.

    The cluster index is the argmin of squared Euclidean distance to the
    centroids. The original computed every distance twice in a Python loop,
    kept an unused dist_vec buffer and a magic 9999 sentinel; here the
    distances are computed once, vectorised. (On exact ties the original
    `<=` loop kept the last tied centroid while argmin keeps the first.)
    """
    squared_distances = np.sum((centroids - vector_test) ** 2, axis=1)
    class_index = np.argmin(squared_distances)
    closest_centroid = centroids[class_index]
    return class_index, closest_centroid
# + deletable=true editable=true
def main():
    """Load the demo dataset, run 2-means, print and plot the result.

    NOTE(review): everything after the `return` below is unreachable; the
    trailing triple-quoted string is dead scratch code kept for reference.
    """
    dataset = load_dataset('durudataset.txt')
    centroids, history_centroids, belongs_to = kmeans(2,dataset)
    print("=======================================================")
    print("Centroids {}".format(centroids))
    plot(dataset, history_centroids, belongs_to)
    return centroids, history_centroids, belongs_to
    '''
    vector_test = np.array([0.312,0.21])
    #plot(dataset, history_centroids, belongs_to)
    tests_class_A = np.array([((np.random.rand(),np.random.rand())) for _ in range(5)])
    tests_class_B = np.array([((np.random.rand() + 1,np.random.rand() + 1)) for _ in range(5)])
    num_instances,num_features = tests_class_A.shape
    A = tests_class_A[np.random.randint(0, num_instances - 1, size=1)]
    B = tests_class_B[np.random.randint(0, num_instances - 1, size=1)]
    print(A,B)
    print("=======================================================")
    class_centroid,closest_centroid = predict(centroids,vector_test)
    print("\nPoint({}):\nC({}) - Class({})".format(vector_test,closest_centroid,class_centroid))
    print("=======================================================")
    class_centroid,closest_centroid = predict(centroids,A)
    print("\nPoint({}):\nC({}) - Class({})".format(A,closest_centroid,class_centroid))
    print("=======================================================")
    class_centroid,closest_centroid = predict(centroids,B)
    print("\nPoint({}):\nC({}) - Class({})".format(B,closest_centroid,class_centroid))
    '''
# + deletable=true editable=true
# %matplotlib notebook
# Run the full clustering pipeline; results are kept at module level so later
# cells can inspect them.
centroids, history_centroids, belongs_to=main()
# -
|
k-means.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Table of Contents
#
# * [Initial Configurations](#IC)
# * [Import Libraries](#IL)
# * [Authenticate the AML Workspace](#AML)
# * [Get Data (Bronze)](#GD)
# * [Setup Directory Structure](#SD)
# * [Check the uploaded files](#UF)
# * [Read the movies data](#MD)
# * [Features](#F)
# * [Data Wrangling](#DW)
# * [Step1. Remove unwanted features](#S1)
# * [Step2. Remove features which has too many missing values or is insignificant](#S2)
# * [Step3. Remove rows based on value of certain features](#S3)
# * [Step4. Fix column types](#S4)
# * [Step5. Handle null values](#S5)
# * [Step6. Remove duplicates](#S6)
# * [Step7. Fix casing](#S7)
# * [Step8. Text data clean (NLP): Clean up the overview attribute and add another column 'overview_cleaned'](#S8)
# * [Step9. Identify outliers and bad data](#S9)
# * [Put Data (Silver)](#PS)
# * [Write the transformed dataframe to the silver zone](#WS)
# * [Read silver zone movies file](#RS)
# -
# ### Initial Configurations <a class="anchor" id="IC"></a>
# #### Import Libraries <a class="anchor" id="IL"></a>
# + gather={"logged": 1619300237654} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Import required Libraries
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.image import imread
import cv2
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import azureml.core
import azureml.automl
#from azureml.core.experiment import Experiment
from azureml.core import Workspace, Dataset, Datastore
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Authenticate the AML Workspace <a class="anchor" id="AML"></a>
# + gather={"logged": 1619300246250} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Authenticate the AML Workspace (reads the workspace config from the project
#directory).
workspace = Workspace.from_config()
#Collect key workspace properties for a quick sanity-check display.
output = {}
output['Subscription ID'] = workspace.subscription_id
output['Workspace Name'] = workspace.name
output['Resource Group'] = workspace.resource_group
output['Location'] = workspace.location
#BUG FIX: 'display.max_colwidth' no longer accepts -1 (deprecated in pandas
#1.0, removed in 2.0); None is the documented value for "no truncation".
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Get Data (Bronze) <a class="anchor" id="GD"></a>
# - Raw data extraction for the file, API based and web datasets. Let us call this __Bronze Layer__.
# - Data transformation using python from Raw to Processed stage. We will call this __Silver Layer__.
# - Finally store the processed data using standard taxonomy in a SQL based serving layer. We will call this __Gold Layer__.
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Setup Directory Structure <a class="anchor" id="SD"></a>
# + gather={"logged": 1619300246424} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Root data directory lives under the current working directory.
data_folder = os.path.join(os.getcwd(), 'data')
#'/mnt/batch/tasks/shared/LS_root/mounts/clusters/compute-cpu-ds12-v2/code/Users/rabiswas/Movies_Data_Consortium/data'
#Create the data directory
os.makedirs(data_folder, exist_ok=True)
#Create the bronze, silver and gold folders (exist_ok makes the cell safely
#re-runnable). These module-level paths are reused by later cells.
bronze_data_folder = data_folder +"/bronze"
os.makedirs(bronze_data_folder, exist_ok=True)
silver_data_folder = data_folder +"/silver"
os.makedirs(silver_data_folder, exist_ok=True)
gold_data_folder = data_folder +"/gold"
os.makedirs(gold_data_folder, exist_ok=True)
#Create sub folder for the manually uploaded flat files
#'/mnt/batch/tasks/shared/LS_root/mounts/clusters/compute-cpu-ds12-v2/code/Users/rabiswas/Movies_Data_Consortium/data/bronze/flat_file'
file_data_bronze = bronze_data_folder +"/flat_file"
os.makedirs(file_data_bronze, exist_ok=True)
#Manually upload the folders from Kaggle
# + gather={"logged": 1619300246674} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
file_data_bronze
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Check the uploaded files <a class="anchor" id="UF"></a>
# + gather={"logged": 1619300246887} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Recursively list the folder structure and files under the bronze flat-file area.
for parent, dir_names, file_names in os.walk(file_data_bronze, topdown=True):
    # Print the sub-directories of this level first, then its files,
    # mirroring the grouping os.walk already provides.
    for entry in dir_names:
        print(os.path.join(parent, entry))
    for entry in file_names:
        print(os.path.join(parent, entry))
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Read the movies data <a class="anchor" id="MD"></a>
# + gather={"logged": 1619300247289} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Read the movies_metadata csv
# The main Movies Metadata file. Contains information on 45,000 movies featured in
# the Full MovieLens dataset. Features include posters, backdrops, budget, revenue, release dates,
# languages, production countries and companies. Some of the columns of this file are nested JSON objects.
# CONSISTENCY FIX: build the path from the bronze folder created above rather
# than hard-coding the compute instance's absolute mount path, so the notebook
# also works on a different compute instance or user directory.
movies_metadata_file = os.path.join(file_data_bronze, 'movies_metadata.csv')
df_movies_metadata_bronze = pd.read_csv(movies_metadata_file)
df_movies_metadata_bronze.head()
# + gather={"logged": 1619300247496} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Number of rows and columns
df_movies_metadata_bronze.shape
# + gather={"logged": 1619300247705} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#It is easier to view the data if we transpose
df_movies_metadata_bronze.head(3).transpose()
# + gather={"logged": 1619300247922} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#List of columns
df_movies_metadata_bronze.columns
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Features <a class="anchor" id="F"></a>
# - __adult:__ Indicates if the movie is X-Rated or Adult.
# - __belongs_to_collection:__ A stringified dictionary that gives information on the movie series the particular film belongs to.
# - __budget:__ The budget of the movie in dollars.
# - __genres:__ A stringified list of dictionaries that list out all the genres associated with the movie.
# - __homepage:__ The Official Homepage of the movie.
# - __id:__ The ID of the movie.
# - __imdb_id:__ The IMDB ID of the movie.
# - __original_language:__ The language in which the movie was originally shot in.
# - __original_title:__ The original title of the movie.
# - __overview:__ A brief blurb of the movie.
# - __popularity:__ The Popularity Score assigned by TMDB.
# - __poster_path:__ The URL of the poster image.
# - __production_companies:__ A stringified list of production companies involved with the making of the movie.
# - __production_countries:__ A stringified list of countries where the movie was shot/produced in.
# - __release_date:__ Theatrical Release Date of the movie.
# - __revenue:__ The total revenue of the movie in dollars.
# - __runtime:__ The runtime of the movie in minutes.
# - __spoken_languages:__ A stringified list of spoken languages in the film.
# - __status:__ The status of the movie (Released, To Be Released, Announced, etc.)
# - __tagline:__ The tagline of the movie.
# - __title:__ The Official Title of the movie.
# - __video:__ Indicates if there is a video present of the movie with TMDB.
# - __vote_average:__ The average rating of the movie.
# - __vote_count:__ The number of votes by users, as counted by TMDB.
# + gather={"logged": 1619300248113} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Column dtypes and non-null counts; used to plan the cleanup steps below.
df_movies_metadata_bronze.info()
# + [markdown] nteract={"transient": {"deleting": false}}
# As we can see there are a total of 45,466 movies with 24 features and most of the features have very few NaN values.
# Next we will start cleaning up the data.
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Data Wrangling <a class="anchor" id="DW"></a>
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step1. Remove unwanted features <a class="anchor" id="S1"></a>
#
# - __imdb_id__ Since we only need the tmdb id to call the api
# - __original_title__ Since we have the offical title of the movie in the column title
# - __belongs_to_collection__ We do not have use for this in the context of this analysis
# - __poster_path__ We do not have use for this in the context of this analysis
# - __video__ We do not have use for this in the context of this analysis
# - __homepage__ We do not have use for this in the context of this analysis
# - __tagline__ We do not have use for this in the context of this analysis
# + gather={"logged": 1619300248269} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Removed the unwanted features (listed and justified in the markdown above)
df_movies_metadata_bronze.drop(['imdb_id', 'original_title' ,'belongs_to_collection', 'poster_path' ,'video', 'homepage' ,'tagline'], axis=1, inplace=True)
# + gather={"logged": 1619300248447} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#List of columns
df_movies_metadata_bronze.columns
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step2. Remove features which has too many missing values or values of one type for it to contribute anything meaningful <a class="anchor" id="S2"></a>
# + gather={"logged": 1619300248677} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#lets find the unique values in a column to get a starting point
# We can safely ignore the columns which have a lot of unique values
df_movies_metadata_bronze.nunique(axis=0)
# + [markdown] nteract={"transient": {"deleting": false}}
# So we got the following columns to inspect
# - adult
# - status
# - vote_average
# - original_language
# + gather={"logged": 1619300249056} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['adult'].value_counts()
# + gather={"logged": 1619300249269} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['status'].value_counts()
# + gather={"logged": 1619300249476} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['vote_average'].value_counts()
# + gather={"logged": 1619300249691} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['original_language'].value_counts()
# + [markdown] nteract={"transient": {"deleting": false}}
# - Since we have just 9 adult movies in the dataset, we can safely drop them from the analysis
# + gather={"logged": 1619300249935} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Removed the insignificant features
# 'adult' is almost constant (only 9 adult movies per the counts above), so it
# carries no useful signal.
df_movies_metadata_bronze.drop(['adult'], axis=1, inplace=True)
# + gather={"logged": 1619300250121} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.columns
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step3. Remove rows based on value of certain features <a class="anchor" id="S3"></a>
# + gather={"logged": 1619300250306} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['status'].value_counts()
# + [markdown] nteract={"transient": {"deleting": false}}
# For our analysis we can safely remove the movies in the following status
# - Rumored 230
# - Planned 15
# - Canceled 2
# + gather={"logged": 1619300250483} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Get the omit values in a list and then filter
omit_values_list = ['Rumored','Planned','Canceled']
df_movies_metadata_bronze = df_movies_metadata_bronze[~df_movies_metadata_bronze.status.isin(omit_values_list)]
# + gather={"logged": 1619300250665} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze['status'].value_counts()
# + gather={"logged": 1619300250842} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#df_movies_metadata_bronze['popularity'].value_counts()
##?? Can we remove the movies which are not popular?
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step4. Fix column types <a class="anchor" id="S4"></a>
# + gather={"logged": 1619300251014} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.dtypes
# + gather={"logged": 1619300251194} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#https://pandas.pydata.org/docs/reference/api/pandas.to_numeric.html?highlight=to_numeric#pandas.to_numeric
#If errors='coerce', then invalid parsing will be set as NaN.
# Show the rows whose 'id' is not parseable as a number.
print (df_movies_metadata_bronze[pd.to_numeric(df_movies_metadata_bronze['id'], errors='coerce').isnull()])
# + [markdown] nteract={"transient": {"deleting": false}}
# We have just 3 rows where the id column is not numeric. So we will drop them and convert the rest to numeric.
# + gather={"logged": 1619300251366} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#downcast{'integer', 'signed', 'unsigned', 'float'}, default None
#If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules:
#'integer' or 'signed': smallest signed int dtype (min.: np.int8)
#'unsigned': smallest unsigned int dtype (min.: np.uint8)
#'float': smallest float dtype (min.: np.float32)
df_movies_metadata_bronze["id"] =pd.to_numeric(df_movies_metadata_bronze['id'], errors='coerce',downcast="integer")
df_movies_metadata_bronze.dropna(subset=["id"],inplace=True)
# + gather={"logged": 1619300251525} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Verify: no non-numeric ids remain after the conversion and dropna above.
print (df_movies_metadata_bronze[pd.to_numeric(df_movies_metadata_bronze['id'], errors='coerce').isnull()])
# + gather={"logged": 1619300251693} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We have just 3 rows where the popularity column is not numeric. So we will drop them and convert the rest to numeric.
print (df_movies_metadata_bronze[pd.to_numeric(df_movies_metadata_bronze['popularity'], errors='coerce').isnull()])
# + gather={"logged": 1619300251854} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze["popularity"] =pd.to_numeric(df_movies_metadata_bronze['popularity'], errors='coerce',downcast="integer")
df_movies_metadata_bronze.dropna(subset=["popularity"],inplace=True)
# + gather={"logged": 1619300252012} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
print (df_movies_metadata_bronze[pd.to_numeric(df_movies_metadata_bronze['popularity'], errors='coerce').isnull()])
# + gather={"logged": 1619300252178} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Budget column should be numeric and there are no rows in opposition
print (df_movies_metadata_bronze[pd.to_numeric(df_movies_metadata_bronze['budget'], errors='coerce').isnull()])
# + gather={"logged": 1619300252347} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze["budget"] =pd.to_numeric(df_movies_metadata_bronze['budget'], errors='coerce',downcast="integer")
df_movies_metadata_bronze.dropna(subset=["budget"],inplace=True)
# + gather={"logged": 1619300327392} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Convert original_language from object dtype to pandas' string dtype.
df_movies_metadata_bronze['original_language'] = pd.Series(df_movies_metadata_bronze['original_language'], dtype="string")
# + gather={"logged": 1619300364871} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.dtypes
# + [markdown] nteract={"transient": {"deleting": false}}
# While we tried to do our best manually, the latest release of Pandas has a function called convert_dtypes which converts columns to the best possible dtypes using dtypes supporting pd.NA.
# Ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.convert_dtypes.html
# + gather={"logged": 1619300559054} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#pip install --upgrade pandas
pd. __version__
# + gather={"logged": 1619300524437} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze = df_movies_metadata_bronze.convert_dtypes()
# + gather={"logged": 1619300527367} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.dtypes
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step5. Handle null values <a class="anchor" id="S5"></a>
# + gather={"logged": 1619300894405} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Remaining missing values per column; used to decide the handling strategy.
df_movies_metadata_bronze.isnull().sum()
# + [markdown] nteract={"transient": {"deleting": false}}
# original_language, release_date and status (released or not) are important. So we will remove those rows from our dataset.
# Obviously we will quickly inspect them row by row, but since they are so low in number we will eventually remove them unless something is striking.
# + gather={"logged": 1619301806449} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Inspect the rows with a missing original_language before dropping them.
df_movies_metadata_bronze[df_movies_metadata_bronze.original_language.isnull()]
# + gather={"logged": 1619302157705} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We will not be removing any popular movie having original_language as null
#BUG FIX: '&' binds tighter than '>', so the original expression evaluated as
#(isnull() & popularity) > 1 instead of the intended conjunction; the
#comparison must be parenthesized.
df_movies_metadata_bronze[df_movies_metadata_bronze.original_language.isnull() & (df_movies_metadata_bronze.popularity > 1)]
# + gather={"logged": 1619302152427} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We will not be removing any popular movie having status as null
#BUG FIX: comparison parenthesized (see precedence note above in this cell group).
df_movies_metadata_bronze[df_movies_metadata_bronze.status.isnull() & (df_movies_metadata_bronze.popularity > 1)]
# + gather={"logged": 1619302145947} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We will not be removing any popular movie having release_date as null
#BUG FIX: comparison parenthesized.
df_movies_metadata_bronze[df_movies_metadata_bronze.release_date.isnull() & (df_movies_metadata_bronze.popularity > 1)]
# + gather={"logged": 1619366524090} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Drop the rows where these key attributes are missing.
df_movies_metadata_bronze.dropna(subset=["original_language","status","release_date","runtime"],inplace=True)
# + gather={"logged": 1619365432441} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Lets fill the NAs with an empty string '' for overview attribute
df_movies_metadata_bronze[['overview']] = df_movies_metadata_bronze[['overview']].fillna(value='')
# + gather={"logged": 1619366487285} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We will not be removing any popular movie having runtime as null
#BUG FIX: comparison parenthesized.
df_movies_metadata_bronze[df_movies_metadata_bronze.runtime.isnull() & (df_movies_metadata_bronze.popularity > 1)]
# + gather={"logged": 1619366527124} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Checking and there are no null values in the dataset
df_movies_metadata_bronze.isnull().sum()
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step6. Remove duplicates <a class="anchor" id="S6"></a>
# + gather={"logged": 1619304843904} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Duplicate rows (full-row duplicates)
df_movies_metadata_bronze[df_movies_metadata_bronze.duplicated()].head()
# + gather={"logged": 1619304823905} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Spot-check one id that appears more than once.
df_movies_metadata_bronze[df_movies_metadata_bronze.id==105045]
# + [markdown] nteract={"transient": {"deleting": false}}
# As a matter of fact we found a few duplicate rows.
# + gather={"logged": 1619304903894} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + gather={"logged": 1619304979356} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.drop_duplicates(inplace=True)
# + gather={"logged": 1619304984052} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + gather={"logged": 1619305146277} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#To become certain I would like to check the duplicates again, but on the id field since it acts as a primary key
df_movies_metadata_bronze[df_movies_metadata_bronze.duplicated(subset=['id'])].head()
# + gather={"logged": 1619305152900} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze[df_movies_metadata_bronze.duplicated(subset=['id'])].count()
# + gather={"logged": 1619305177686} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Spot-check one duplicated id.
df_movies_metadata_bronze[df_movies_metadata_bronze.id==14788]
# + [markdown] nteract={"transient": {"deleting": false}}
# Indeed we have duplicates on movie id column. We will remove them as well.
# + gather={"logged": 1619305301291} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + gather={"logged": 1619305303683} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Remove 13 movie id duplicates from the dataset (keeps the first occurrence)
df_movies_metadata_bronze.drop_duplicates(subset=['id'],inplace=True)
# + gather={"logged": 1619305311613} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + [markdown] nteract={"transient": {"deleting": false}}
# Interestingly different movies can have same title for example the remakes.. So we will have to keep them.
# + gather={"logged": 1619305484261} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Count repeated titles (legitimate, e.g. remakes — kept in the dataset).
df_movies_metadata_bronze[df_movies_metadata_bronze.duplicated(subset=['title'])].count()
# + gather={"logged": 1619305711540} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#BUG FIX: parenthesize the comparison — '&' binds tighter than '>', so the
#original computed (duplicated & popularity) > .05 rather than the intended
#"duplicate title AND popularity above 0.05".
df_movies_metadata_bronze[df_movies_metadata_bronze.duplicated(subset=['title']) & (df_movies_metadata_bronze.popularity > .05)].head()
# + gather={"logged": 1619305769578} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Inspect one title that appears more than once.
df_movies_metadata_bronze[df_movies_metadata_bronze.title == 'Cape Fear']
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step7. Fix casing <a class="anchor" id="S7"></a>
# + gather={"logged": 1619364458587} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Initcap each word in the title
#test: preview the transformation before applying it
df_movies_metadata_bronze.title.str.title().head(3)
# + gather={"logged": 1619364559538} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Initcap each word in the title
#run: apply the title-casing to the column
df_movies_metadata_bronze['title'] = df_movies_metadata_bronze.title.str.title()
# + gather={"logged": 1619364590766} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#It is easier to view the data if we transpose
df_movies_metadata_bronze.head(3).transpose()
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step8. Text data clean (NLP): Clean up the overview attribute and add another column 'overview_cleaned' <a class="anchor" id="S8"></a>
# + gather={"logged": 1619364806290} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Build punctuation dictionary
import unicodedata
import sys
# Map every Unicode code point whose category starts with 'P' (punctuation)
# to None, so a single str.translate() call strips all of them at once.
punctuation = {
    code_point: None
    for code_point in range(sys.maxunicode)
    if unicodedata.category(chr(code_point)).startswith('P')
}
# The backtick / grave accent (U+0060) is not in a 'P' category, so add it
# explicitly.
punctuation[96] = None
# + gather={"logged": 1619365112897} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Let us clean up and make the data ready
# As we are going to use words as features so we can use some text formatting techniques which will help us in feature extraction
# including removing punctuation marks/digits ,and also stop-words. In addition, the implementation of lemmatization words using NLTK
# Tokenization is the last step to break reviews up into words and other meaningful tokens.
import re
import string
#pip install nltk
import nltk
#nltk.download('stopwords')
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
ENGLISH_STOP_WORDS = stopwords.words('english')
def function_clean_stop(text):
    """Normalize a free-text overview for feature extraction.

    Lowercases, strips URLs, numbers, "'s" suffixes and all punctuation
    (via the module-level ``punctuation`` translate table), then removes
    English stop words and lemmatizes what remains.

    Parameters
    ----------
    text : str
        Raw overview text.

    Returns
    -------
    str
        Space-joined cleaned and lemmatized tokens.
    """
    #convert into lowercase
    text = text.lower()
    #removing the URL Http
    text = re.sub(r"http\S+", "", text)
    # Mentions and hashtags: the leading '@'/'#' is already removed by the
    # punctuation table below, and we want to keep the word itself.
    # Removal of numbers (optionally joined by +, - or :)
    text = re.sub('[0-9]*[+-:]*[0-9]+', '', text)
    text = re.sub("'s", "", text)
    #remove all punctuation from the text.
    text = str(text.translate(punctuation))
    listofwords = text.strip().split() # strip() removes leading/trailing whitespace
    # PERF FIX: build the lemmatizer once instead of constructing a new
    # WordNetLemmatizer inside the loop for every single word.
    lemmatizer = WordNetLemmatizer()
    tokenized_words = []
    for word in listofwords:
        if word not in ENGLISH_STOP_WORDS:
            lemm_word = lemmatizer.lemmatize(word)
            if len(lemm_word) > 0:
                tokenized_words.append(lemm_word)
    # Tokens are already strings, so join them directly.
    return ' '.join(tokenized_words)
# + gather={"logged": 1619365476284} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# applying the cleaning function to text column; passing the function object
# directly is equivalent to wrapping it in a lambda
df_movies_metadata_bronze['overview_cleaned'] = df_movies_metadata_bronze['overview'].apply(function_clean_stop)
# + gather={"logged": 1619365518686} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Spot-check the raw overview against its cleaned counterpart.
df_movies_metadata_bronze[['overview','overview_cleaned']].head()
# + gather={"logged": 1619365564896} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.dtypes
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Step9. Identify outliers and bad data <a class="anchor" id="S9"></a>
# + gather={"logged": 1619366251299} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Get rid of the outliers in vote_average column
# Boxplot to eyeball the vote_average distribution for outliers.
plt.boxplot(df_movies_metadata_bronze.vote_average, notch=True)
# + gather={"logged": 1619366711011} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#There are no outliers on the vote_average column
# Sanity filter: ratings outside the valid 0-10 range (expected to be empty).
df_movies_metadata_bronze[(df_movies_metadata_bronze['vote_average'] < 0) | (df_movies_metadata_bronze['vote_average'] > 10)]
# + gather={"logged": 1619366570531} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Get rid of the outliers in runtime column
plt.boxplot(df_movies_metadata_bronze.runtime, notch=True)
# + [markdown] nteract={"transient": {"deleting": false}}
# Looking at the data I will be getting rid of the movies with runtime > 900 minutes(>15 hours) and lesser than 0
# + gather={"logged": 1619366719400} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#Finding the outliers on the runtime column (< 0 or > 900 minutes)
df_movies_metadata_bronze[(df_movies_metadata_bronze['runtime'] < 0) | (df_movies_metadata_bronze['runtime'] > 900)]
# + gather={"logged": 1619366824033} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
#We will not be removing any popular movie having runtime as outlier
#BUG FIX: parenthesize the comparison — '&' binds tighter than '>', so the
#original evaluated (outlier_mask & popularity) > 1 instead of the intended
#"runtime outlier AND popularity above 1".
df_movies_metadata_bronze[((df_movies_metadata_bronze['runtime'] < 0) | (df_movies_metadata_bronze['runtime'] > 900)) & (df_movies_metadata_bronze.popularity > 1)]
# + gather={"logged": 1619367258705} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# get names of indexes for which
# df_movies_metadata_bronze['runtime'] < 0 or df_movies_metadata_bronze['runtime'] > 900
index_names = df_movies_metadata_bronze[(df_movies_metadata_bronze['runtime'] < 0) | (df_movies_metadata_bronze['runtime'] > 900)].index
# + gather={"logged": 1619367294542} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + gather={"logged": 1619367298295} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# drop these row indexes
# from dataFrame
df_movies_metadata_bronze.drop(index_names, inplace = True)
# + gather={"logged": 1619367304579} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
df_movies_metadata_bronze.shape
# + [markdown] nteract={"transient": {"deleting": false}}
# ### Put Data (Silver) <a class="anchor" id="PS"></a>
# - Raw data extraction for the file, API based and web datasets. Let us call this __Bronze Layer__.
# - Data transformation using python from Raw to Processed stage. We will call this __Silver Layer__.
# - Finally store the processed data using standard taxonomy in a SQL based serving layer. We will call this __Gold Layer__.
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Write the transformed dataframe to the silver zone <a class="anchor" id="WS"></a>
# + gather={"logged": 1619367748435} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Build the silver-zone target path and persist the cleaned dataframe
# without the index column.
silver_file_name = silver_data_folder + "movies_metadata.csv"
df_movies_metadata_bronze.to_csv(silver_file_name, index=False)
# + [markdown] nteract={"transient": {"deleting": false}}
# #### Read silver zone movies file <a class="anchor" id="RS"></a>
# + gather={"logged": 1619367871758} jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
# Read the file back to verify the round trip; transpose the head so the
# many columns are easier to eyeball.
df_movies_metadata_silver = pd.read_csv(silver_file_name)
df_movies_metadata_silver.head(3).transpose()
|
notebook/Transform_Flat_File.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sergioarnold87/Marketing-Inteligencia_Artificial/blob/main/Esqueleto_Departamento_de_Ventas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="M27qF7CTrBqc"
# # TAREA #1: ENTENDER EL ENUNCIADO DEL PROBLEMA Y EL CASO PRÁCTICO
# + [markdown] id="Z2ZMlH-gtOxf"
#
# <table>
# <tr><td>
# <img src="https://drive.google.com/uc?id=1l7bHyrjzq839zVZE06cfdDksLabCN2hg"
# alt="Fashion MNIST sprite" width="1000">
# </td></tr>
# <tr><td align="center">
# <b>Figura 1. Prediccion de Ventas Futuras usando Series Temporales
# </td></tr>
# </table>
#
# + [markdown] id="3O2y6H5Sk9dS"
# 
# + [markdown] id="653W5Abck9mf"
# 
# + [markdown] id="TkKF3voY7_r5"
# 
# + [markdown] id="J2xNWhFz7_20"
# 
# + [markdown] id="-5jwMD3w8Aa2"
# 
# + [markdown] id="zKmFmyaGunc7"
# #2: IMPORTAR LAS LIBRERÍAS Y LOS DATASETS
# + id="S0Cx3743urFY"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import datetime
# + id="5P_ciLXawMZg"
# Necesitarás montar tu disco usando los siguientes comandos:
# Para obtener más información sobre el montaje, consulta esto: https://stackoverflow.com/questions/46986398/import-data-into-google-colaboratory
# + [markdown] id="IRU42d3vaYzm"
# TAREA #2.1: IMPORTAR EL DATASET DE VENTAS
# + id="tjIiJdM4u1IE"
# Debes incluir el enlace completo al archivo csv que contiene tu conjunto de datos
# + id="E7puy9-xxS4A"
sales_train_df.head(5)
# Casi un millón de observaciones
# 1115 tiendas únicas
# Notemos que las ventas es la variable objetivo (la que intentamos predecir)
# Id: ID de transacción (combinación de la tienda y la fecha)
# Store: identificador único de la tienda
# Sales: ventas diarias, esta es la variable objetivo
# Customers: número de clientes de un día dado
# Open: Booleano para indicar si la tienda estaba abierta o cerrada (0 = cerrada, 1 = abierta)
# Promo: describe si la tienda tenía algún tipo de promoción ese día o no
# StateHoliday: indica si el día era festivo o no (a = vacaciones públicas, b = vacaciones de Pascua holiday, c = Navidades, 0 = No era festivo)
# SchoolHoliday: indica si (Store, Date) se ve afectado por el cierre de las escuelas públicas
# Fuente original de los datos: https://www.kaggle.com/c/rossmann-store-sales/data
# + id="RUt2ON_UxyYk"
# + id="hMq3-KWOx0e1"
# 9 columnas en total
# 8 características, cada una con 1017209 puntos de datos
# 1 variable objetivo (ventas)
# + id="s0E9xPLdx2Ok"
# Cantidad de ventas promedio por día = 5773 Euros, ventas mínimas por día = 0, ventas máximas por día = 41551
# Número medio de clientes = 633, número mínimo de clientes = 0, número máximo de clientes = 7388
# + [markdown] id="eJ8D_qIRalvY"
# TAREA #2.2: IMPORTAR LA INFORMACIÓN SOBRE LAS TIENDAS
#
#
# + id="TzfwLhLUazX1"
# StoreType: categoría que indica el tipo de tienda (a, b, c, d)
# Assortment: a = básico, b = extra, c = extendido
# CompetitionDistance (en metros): distancia a la tienda de la competencia más cercana
# CompetitionOpenSince [Mes/Año]: fecha en que abrió la competencia
# Promo2: Promo2 es una promoción continuada y consecutiva en algunas tiendas (0 = la tienda no participa, 1 = la tienda participa)
# Promo2Since [Año/Semana]: fecha en la que la tienda empieza a participar en la Promo2
# PromoInterval: describe los intervalos consecutivos donde la Promo2 empieza, indicando los meses en los que empieza la misma. P.e. "Feb,May,Aug,Nov" significa que cada nueva ronda de promoción empieza en Febrero, Mayo, Agosto, Noviembre de cualquier año de esa tienda
# + id="92SaSNJvkxqb"
# + id="A45D-v_0qJF0"
# Hagamos lo mismo con los datos store_info_df
# Hay que tener en cuenta que el data frame anterior incluye las transacciones registradas por día (en millones)
# Este data frame solo incluye información sobre las 1115 tiendas exclusivas que forman parte de este estudio
# + id="ey2G1N1FqJRR"
# De media, la distancia de la competencia es de 5404 metros (5,4 kms)
# + id="icDs6kqoqvWL"
# + [markdown] id="LlszUhNNyrl_"
# # TAREA #3: EXPLORAR EL DATASET
# + [markdown] id="DDuw6cEkZehx"
# TAREA #3.1: EXPLORAR EL DATASET DE VENTAS
# + id="wThGn_F4k60c"
# Veamos si nos faltan datos, ¡esperemos que no sea así!
# + id="t3BY8FxelGIP"
# Promedio de 600 clientes por día, el máximo es 4500 (¡tenga en cuenta que no podemos ver el valor atípico en 7388!)
# Los datos se distribuyen por igual en varios días de la semana (~ 150000 observaciones x 7 días = ~ 1,1 millones de observaciones)
# Las tiendas están abiertas ~ 80% del tiempo
# Los datos se distribuyen por igual entre todas las tiendas (sin sesgo)
# La promoción # 1 se ejecutó aproximadamente el 40% del tiempo
# Ventas promedio alrededor de 5000-6000 Euros
# Las vacaciones escolares duran alrededor del 18% del tiempo
# + id="kP8opr7YzJ_d"
# + id="-5qvu70o5oAI"
# ¡Veamos cuántas tiendas están abiertas y cerradas!
# + id="uBDltyUy5o1L"
# Contemos el número de tiendas que están abiertas y cerradas
# + id="BfDQRuNj4_b8"
# nos quedamos solo con las tiendas abiertas y eliminamos las tiendas cerradas
# + id="GsBQng4uok30"
# + id="GvRoygK4pf7M"
# Eliminemos la columna open ya que ahora no tiene sentido
# + id="jI5r7ZM5pwM6"
# + id="B5SXd3txxv7V"
# Ventas promedio = 6955 Euros, número promedio de clientes = 762 (ha subido)
# + [markdown] id="ZahU74KlZtlW"
# TAREA #3.2: EXPLORAR LOS DATOS DE LA INFORMACIÓN DE LAS TIENDAS
# + id="1BJlRJj0rCOg"
# ¡Veamos si falta algún dato en el data frame de información de la tienda!
# + id="auxnWnW4dib5"
# Echemos un vistazo a los valores faltantes en la 'CompetitionDistance'
# Solo faltan 3 filas
# + id="D6DUDWUFe3aw"
# Echemos un vistazo a los valores faltantes en el 'CompetitionOpenSinceMonth'
# Faltan muchas filas = 354 (casi un tercio de las 1115 tiendas)
# + id="kf56pKPM2hfm"
# + id="yr1YMhZjgJ0k"
# Parece que si 'promo2' es cero, 'promo2SinceWeek', 'Promo2SinceYear' y la información de 'PromoInterval' se establece en cero
# Hay 354 filas donde 'CompetitionOpenSinceYear' y 'CompetitionOpenSinceMonth' falta
# Establezcamos estos valores en ceros
# + id="s-haJvqUhTYe"
# + id="wJm_hfpNkFJd"
# Hay 3 filas con valores de 'CompetitionDistance' que faltan, llenémoslas con valores promedio de la columna 'CompetitionDistance'
# + id="Mu2dJhtsmDEG"
# + id="jAlKp0rTrCdW"
# la mitad de las tiendas participan en la promoción 2
# la mitad de las tiendas tienen su competencia a una distancia de 0-3000 m (3 kms de distancia)
# + [markdown] id="i6sJonGln2iC"
# TAREA #3.3: EXPLOREMOS EL DATASET COMBINADO
# + id="Gkuk3GM2ucpc"
# Combinemos ambos data frames en función de 'store'
# + id="fyhB3BGku-Ny"
# + id="c6UiAqTkKD85"
# los clientes y la promoción se correlacionan positivamente con las ventas
# Promo2 no parece ser efectivo en absoluto
# + id="zBy4m89a08Sj"
# Los clientes / Promo2 y las ventas están fuertemente correlacionados
# + id="hKVqck-sqn-k"
# Separemos el año y pongámoslo en una columna separada
# + id="YH1shh1grvs4"
# + id="nybqRzqAr67_"
# Hagamos lo mismo para el día y el mes
# + id="IdRdqBWhsEkj"
# + id="obSN0pN3sP_L"
# Echemos un vistazo a las ventas promedio y la cantidad de clientes por mes.
# 'groupby' funciona muy bien al agrupar todos los datos que comparten la misma columna del mes, luego obtener la media de la columna de ventas
# Parece que las ventas y el número de clientes alcanzan su punto máximo en el período de Navidad
# + id="3RoxwwX9zydn"
# Echemos un vistazo a las ventas y a los clientes por día del mes.
# El número mínimo de clientes suele rondar el día 24 del mes.
# La mayoría de los clientes y las ventas son alrededor del 30 y el 1 del mes
# + id="RdNPmxos18wF"
# Hagamos lo mismo para el día de la semana (notemos que 7 = domingo)
# + id="5OHUts90uLkp"
# + id="kxG6ejjM4QIu"
# + id="vG1MKNlHUeQW"
# + [markdown] id="53qDZFRn3-S1"
# # TAREA # 4: ENTENDER LA INTUICIÓN DETRÁS DE FACEBOOK PROPHET
# + [markdown] id="aabh8NCIlzGJ"
# 
# + [markdown] id="OEaG3L1Rl4oi"
# 
# + [markdown] id="rW-8HbAdmAwA"
# 
# + [markdown] id="rD7llllklpEG"
# # TAREA # 5: ENTRENAR AL MODELO PARTE A
# + id="pCw6Tq60DCRI"
# import prophet
# + id="WFii_1-b_xGf"
# + id="k2Zv3i-mBbC_"
# + [markdown] id="XgNgJ_VvIG1w"
# # TAREA # 6: ENTRENAR AL MODELO PARTE B
# + [markdown] id="KIPJxfxZJI6S"
# - StateHoliday: indica si el día era festivo o no (a = vacaciones públicas, b = vacaciones de Pascua holiday, c = Navidades, 0 = No era festivo)
# - SchoolHoliday: indica si (Store, Date) se ve afectado por el cierre de las escuelas públicas
#
#
#
#
#
#
# + id="E2NgKTupBa9S"
# + id="K2JIwssFIkEL"
# Obtener todas las fechas relacionadas con las vacaciones escolares
# + id="D8GT7wzdKQUE"
# + id="QEkc-IKLKQ6g"
# Obtener todas las fechas correspondientes a los festivos estatales
# + id="YPYEYvU0MC7y"
# + id="WJVGG2xJMf3S"
# + id="ap59cCGLMrRm"
# + id="wGXuPQJLM33f"
# + id="uJ1nRbgaKQ9U"
# Concatenamos las vacaciones escolares y los festivos estatales
# + id="oCZbAdZ9NYIt"
# + id="20StJuEQNfDx"
# Hagamos predicciones usando días festivos para una tienda específica
# + [markdown] id="0qi5JcBAQN0K"
# # BUEN TRABAJO!!!
|
Esqueleto_Departamento_de_Ventas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# # Python - Looping Over Dataframe
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Python/Python_Looping_Over_Dataframe.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a>
# + [markdown] papermill={} tags=[]
# **Tags:** #pandas #python #loops #dataframes #forloop #loop
# + [markdown] papermill={} tags=[]
# This notebook will help you in looping over your tables and getting concise information
# + [markdown] papermill={} tags=[]
# **Author:** [<NAME>](https://www.linkedin.com/in/oludolapo-oketunji/)
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Import Library
# + papermill={} tags=[]
import pandas as pd
import numpy as np
# + [markdown] papermill={} tags=[]
# ## Model
# Looping over dataframes can be a lifesaver when there are lots of columns and we want to view our data at a go. This is an advantage of a for loop in a dataframe.
# + [markdown] papermill={} tags=[]
# ### Create Sample Dataframe
# + papermill={} tags=[]
# Sample roster of ten students; the course for each student is drawn at
# random from three options.
dict1 = {
    "student_id": list(range(1, 11)),
    "student_name": ["Peter", "Dolly", "Maggie", "David", "Isabelle", "Harry", "Akin", "Abbey", "Victoria", "Sam"],
    "student_course": np.random.choice(["Biology", "Physics", "Chemistry"], size=10)
}
# + papermill={} tags=[]
# Materialise the dictionary as a DataFrame, one row per student.
data = pd.DataFrame(dict1)
# + papermill={} tags=[]
data
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] papermill={} tags=[]
# ### Looping over the data to get the column name
# + papermill={} tags=[]
# Iterating a DataFrame directly yields its column names.
for column in data:
    print(column)
# + [markdown] papermill={} tags=[]
# ### Looping over the data to view the columns and their values sequentially
# + papermill={} tags=[]
# BUG FIX: `DataFrame.iteritems` was deprecated in pandas 1.5 and removed in
# pandas 2.0; `items()` is the drop-in replacement.
for k, v in data.items():
    print(k)
    print(v)
# + [markdown] papermill={} tags=[]
# ### Looping over dataframes to get the information about a row with respect to its columns
# + papermill={} tags=[]
# iterrows() yields (index, row-as-Series) pairs.
for k, v in data.iterrows():
    print(k)
    print(v)
# + [markdown] papermill={} tags=[]
# ### Looping over dataframes to view the data per row as a tuple with the column values
# + papermill={} tags=[]
# itertuples() yields one namedtuple per row (faster than iterrows).
for row in data.itertuples():
    print(row)
|
Python/Python_Looping_Over_Dataframe.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Load the iris dataset as pandas objects and hold out 20% for testing.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
X, y = load_iris(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
X[:3]
# -
y[:3]
# +
# Set up a HyperGBM pseudo-labeling experiment driven by an evolutionary
# searcher over the general search space, maximising accuracy.
from hypergbm.search_space import search_space_general
from hypergbm import HyperGBM, CompeteExperiment
from hypernets.searchers.evolution_searcher import EvolutionSearcher
from sklearn.metrics import get_scorer
# Evolution searcher: population 200, sample size 100, maximising the reward.
rs = EvolutionSearcher(search_space_general, 200, 100, optimize_direction='max')
hk = HyperGBM(rs, task='multiclass', reward_metric='accuracy', callbacks=[])
# pseudo_labeling_proba_threshold=0.9: only test-set predictions with at
# least 0.9 predicted probability are recycled as extra training labels.
experiment = CompeteExperiment(hk, X_train, y_train, X_test=X_test, callbacks=[], scorer=get_scorer('accuracy'),
                               pseudo_labeling=True,
                               pseudo_labeling_proba_threshold=0.9)
pipeline = experiment.run(max_trials=10)
pipeline
# +
import numpy as np
# NOTE(review): predictions are cast to float64 -- presumably the pipeline
# returns labels as strings/objects; confirm against the pipeline output.
# Also note sklearn's signature is accuracy_score(y_true, y_pred); the
# arguments below are swapped, which is harmless for accuracy but confusing.
y_pred = pipeline.predict(X_test).astype(np.float64)
from sklearn.metrics import accuracy_score
accuracy_score(y_pred, y_test)
|
hypergbm/examples/misc/pseudo_labeling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''runpandas_dev'': conda)'
# name: python383jvsc74a57bd0449a6a5da217c2d6cffa80a6a7a4724fa6726b57ec2f120da10e91a7f29f4eb0
# ---
# # Release 0.4.0 with new running metrics and examples package!
# > New release of runpandas comes with new features and improved docs!
#
# - toc: false
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [general, jupyter, releases]
# - image: images/cardio-climbing.png
# > This current state of the project is `early beta`, which means that features can be added, removed or changed in backwards incompatible ways.
# It has been a while since our last release; this is because we are working hard on new features for our new release of [RunPandas 0.4](https://pypi.org/project/runpandas/). Let's highlight them:
# - The Activity now provides extra running statistics such as Vertical Altitude Speed (VAM), mean speed, mean pace, gradient, and mean heart rate.
# - Now we provide in our `runpandas.MeasureSeries` the capability of conversions such as distance conversion (km - miles), latitudes and longitudes (degrees - radians) and pace conversion (min/km and min/mile).
# - There is an auxiliar package for loading activity examples for testing and demo purposes: `runpandas.datasets` . The goal is to enrich with several real examples in FIT, GPX and TCX format files.
# - Finally, there is a CI workflow for uploading automatically a package to Pypi after release.
# ## What is Runpandas?
#
# Runpandas is a python package based on ``pandas`` data analysis library, that makes it easier to perform data analysis from your running sessions stored at tracking files from cellphones and GPS smartwatches or social sports applications such as Strava, MapMyRUn, NikeRunClub, etc. It is designed to enable reading, transforming and running metrics analytics from several tracking files and apps.
#
# ## Main Features
#
# ### Support to some new running metrics such as mean pace and mean speed.
# First let's explain the differences between mean pace and mean speed. Although both values express similar information, they are the reverse of each other. The Pace is how much time you need to cover a particular distance, while speed is an indicator of the number of meters you are able to cover within one second. These values can be presented different, depending on the measure units used to express these metrics. Pace is given in unit of time per unit of distance, whereas speed is distance over time.
#
# The formulas are:
#
# ``Speed (m/s) = distance (m) / time (s)``
#
# ``Pace (s/m) = time (sec) / distance (m)``
#
# We provide in runpandas new acessors (`runpandas.acessors`) for computing those metrics:
#Disable Warnings for a better visualization
import warnings
warnings.filterwarnings('ignore')
# # !pip install runpandas
import runpandas as rpd
activity = rpd.read_file('./data/sample.tcx')
#compute the distance using haversine formula between two consecutive latitude, longitudes observations.
activity['distpos'] = activity.compute.distance()
#compute the speed normalized per interval.
activity['speed'] = activity.compute.speed(from_distances=True)
activity['speed'].head()
print('Mean speed m/s:', activity.mean_speed())
print('Mean pace s/m:', activity.mean_pace())
# Generally this is shown in different units like speed (km/h) and pace (min/km):
# +
#convert m/s to km/h by multiplying the factor of 3.6
print('Mean speed km/h:', activity.mean_speed() * 3.6)
#We define a auxiliar function to convert the pace from sec/m to min/km:
def convert_pace_secmeters2minkms(seconds):
    """Convert a pace given in seconds per meter into a min/km Timedelta."""
    from pandas import Timedelta
    # Scale up to seconds per kilometre, then split into whole minutes and
    # whole leftover seconds (sub-second precision is deliberately dropped).
    secs_per_km = seconds * 1000
    whole_minutes = int(secs_per_km / 60)
    leftover_secs = int(secs_per_km - whole_minutes * 60)
    return Timedelta(seconds=whole_minutes * 60 + leftover_secs)
pace_min_km = convert_pace_secmeters2minkms(activity.mean_pace().total_seconds())
print('Mean pace min/km:', pace_min_km)
# -
# ### Support to gradient and vertical speed.
# Gradient is a measure of the route steepness-the magnitude of its incline or slope as compared to the horizontal. Most often presented as a percentage, the gradient of a climb will normally fall somewhere between 3-15 percent. For practical use, it is usually used for estimating the difficulty of the climb during the route.
#
#Gradient computed through the distance points
activity['grad'] = activity.compute.gradient()
activity['grad']
# VAM (Vertical Altitude Speed) similar to speed except it tracks how fast you go up vertically rather than horizontally between two points. While speed is measured in miles or kilometers per hour, VAM is measured in vertical meters per hour (vmh). It tells you how many meters you would climb if you went up a moderate grade for an hour.
#Vertical Altitude Speed (VAM) in m/s
activity['vam'] = activity.compute.vertical_speed()
activity['vam']
# ### Support to other metrics such as mean heart_pace
#Mean heart rate through the activity
'bpm', int(activity.mean_heart_rate())
# ### Some conversion functions available for measure metrics
#convert the speed m/s to km/h
activity['speed'].kph
#gradient converted from degrees to percent
activity['grad'].pct
#Total Altitude descent and ascent
print('Ascent', sum(activity['alt'].ascent))
print('Descent', sum(activity['alt'].descent))
#distance from meters to kms
activity['dist'].km
# ### An example activities package including several real word activities from different formats.
# The runpandas package also comes with extra batteries, such as our ``runpandas.datasets package``, which includes a range of example data for testing purposes. There is a dedicated repository with all the data available. An index of the data is kept here.
#
#
example_fit = rpd.activity_examples(path='Garmin_Fenix_6S_Pro-Running.fit')
print(example_fit.summary)
print('Included metrics:', example_fit.included_data)
rpd.read_file(example_fit.path).head()
# In case of you just only want to see all the activities in a specific file type , you can filter the ``runpandas.activities_examples``, which returns a filter iterable that you can iterate over:
fit_examples = rpd.activity_examples(file_type=rpd.FileTypeEnum.FIT)
for example in fit_examples:
#Download and play with the filtered examples
print(example.path)
# ## What is coming next ?
# Working hard in advanced running metrics such as power , heart rate zones and the feature of printing the summary of the activity with the main statistics.
# ## Thanks
# We are constantly developing Runpandas improving its existing features and adding new ones. We will be glad to hear from you about what you like or don’t like, what features you may wish to see in upcoming releases. Please feel free to contact us.
|
_notebooks/2021-04-24-release-v04.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# name: python38264bitf0ceb5a79d6c453ba42cf1b0e66b42d2
# ---
# +
import os
import re
import pandas
import time
import threading
import configparser
import numpy
from urllib import parse
import smtplib
from email.message import EmailMessage
from email.encoders import encode_base64
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from mimetypes import guess_type, read_mime_types
# +
BASE_DIR = os.getcwd()
# NOTE(review): HOMEPATH is Windows-specific and os.environ.get() returns
# None when absent, which would make os.path.join raise -- confirm this
# script only ever runs on Windows.
DOWNLOADS_PATH = os.path.join(os.environ.get('HOMEPATH'), 'Downloads')
FILE_PATH = os.path.join(DOWNLOADS_PATH, 'emails.csv')
CONFIG_PATH = os.path.join(BASE_DIR, 'credentials.ini')
if not os.path.exists(FILE_PATH):
    # BUG FIX: a *missing* file is FileNotFoundError (FileExistsError means
    # the opposite); also include the offending path in the message.
    raise FileNotFoundError(f'Could not find the following file: {FILE_PATH}')
# -
data = pandas.read_csv(FILE_PATH)
# +
df = pandas.DataFrame(data=data, columns=['Name', 'Surname', 'Email'])
df.head()
# -
# # Optionally: Transform column names
df = df.rename(columns={'Name': 'name', 'Surname': 'surname', 'Email': 'email'})
df.head()
# +
# Normalize each email so that we do not run into difficulties
# when attempting to send to them
def normalize(email):
    """Return the lower-cased, stripped address, or None when *email*
    contains no '@' (this also filters out NaN/None placeholders that
    pandas produced for empty CSV cells).

    Replaces the original regex with a pointless lookahead
    (r'^.*(?=\@).*$') by a direct membership test with the same result.
    """
    cleaned = str(email).lower().strip()
    return cleaned if '@' in cleaned else None
df['email'] = df['email'].apply(normalize)
df.head()
# -
# Detect and drop empty email fields
# `Series.notna()` expresses "keep non-missing" directly and avoids the
# `== False` comparison flagged by linters (E712); the resulting mask is
# identical.
df = df[df['email'].notna()]
df.shape
# Convert into a numpy array that
# can be used by the email class
emails_array = numpy.array(df['email'])
emails_array[:3]
class OfferEmail:
    """Send a promotional plain-text/HTML offer email over SMTP.

    Host and credentials come from ``credentials.ini`` (the section named by
    ``setting`` plus defaults from ``[default]``). Each instance opens one
    authenticated connection and can send exactly one message, because
    ``send`` closes the connection when done.
    """

    # Shared parser; populated from CONFIG_PATH at instantiation time.
    config = configparser.ConfigParser()

    def __init__(self, setting='google'):
        """Open and authenticate an SMTP connection for the given settings section."""
        self.config.read(CONFIG_PATH)
        host = self.config[setting]['host']
        port = self.config['default']['port']
        user = self.config[setting]['user']
        password = self.config[setting]['password']
        try:
            connection = smtplib.SMTP(host, port)
        except smtplib.SMTPConnectError:
            raise
        else:
            print('1.', 'Connecting to "%s"' % host)
            # Identify ourselves, upgrade to TLS, then re-identify as the
            # SMTP protocol requires after STARTTLS.
            connection.ehlo()
            connection.starttls()
            connection.ehlo()
        try:
            connection.login(user, password)
        except smtplib.SMTPAuthenticationError:
            raise
        else:
            print('2.', 'Login successful @ %s' % user)
        self.connection = connection

    def construct_inner_links(self):
        """Return the (website, product) links used inside the email body.

        BUG FIX: the original body referenced an undefined ``product_link``
        name and raised NameError whenever it was called.
        """
        website = self.config['default']['website']
        product_link = parse.urljoin(website, 'shop/products/femme/tops/23/crop-top-simple-bretelles')
        return website, product_link

    def send(self, sender, receiver, subject, **kwargs):
        """Build the multipart offer message and send it, then close the connection.

        sender   -- envelope/From address
        receiver -- visible To address
        subject  -- subject line
        kwargs   -- optional ``bcc``: comma-separated extra recipient addresses
        """
        message = MIMEMultipart('alternative')
        message['From'] = sender
        message['To'] = receiver
        message['Subject'] = subject
        website = self.config['default']['website']
        utm_params = {
            'utm_source': 'link',
            'utm_medium': 'email'
        }
        params = parse.urlencode(utm_params)
        website_link = f'{website}?{params}'
        product_link = parse.urljoin(website, 'shop/products/femme/tops/23/crop-top-simple-bretelles')
        product_link = f'{product_link}?{params}&utm_campaign=Special+Offer'
        # BUG FIX: this was a plain string, so the {product_link} and
        # {website_link} placeholders were sent literally; it must be an
        # f-string like the HTML part below.
        plain_text = f"""
Bonjour,
Découvrez notre promotion spéciale pour l'été sur votre toute nouvelle plateforme e-commerce de mode Nawoka.
En suivant ce lien, vous pourrez bénéficier de -30% sur ce {product_link} crop top unique</a> en série limitée 💓💓
Pour toutes questions ou informations complémentaires, n'hésitez pas à nous contacter en répondant directement à ce mail.
Bon shopping sur notre site ! 😊
---
NAWOKA.FR
{website_link}
"""
        # BUG FIX: added the missing closing quote in the font-weight style
        # attribute below (was: style='font-weight: bold;>).
        html = f"""
<html>
    <body>
        <p>
            Bonjour,
        </p>
        <p>
            Découvrez notre promotion spéciale pour l'été sur votre toute nouvelle plateforme e-commerce de mode Nawoka.
            En suivant ce lien, vous pourrez bénéficier de <strong style='color: blue;'>-30% sur ce
            <a href='{product_link}'>crop top unique</a> en série limitée</strong> 💓💓
        </p>
        <p>
            Pour toutes questions ou informations complémentaires, n'hésitez pas à nous contacter en répondant directement à ce mail.
        </p>
        <p>
            Bon shopping sur notre site ! 😊
        </p>
        <p>---</p>
        <p style='font-weight: bold;'>
            NAWOKA.FR
        </p>
        <p>
            <a href='{website_link}'>nawoka.fr</a>
        </p>
    </body>
</html>
"""
        text = MIMEText(plain_text, 'plain')
        html = MIMEText(html, 'html')
        message.attach(text)
        message.attach(html)
        # BUG FIX: BCC addresses must go into the SMTP envelope (to_addrs);
        # the original put them only in a "BCC" header, which both leaks the
        # list to every recipient and never actually delivers to them.
        recipients = [receiver]
        if kwargs.get('bcc'):
            recipients.extend(addr for addr in kwargs['bcc'].split(',') if addr)
        print('...', 'Sending emails')
        self.connection.sendmail(sender, recipients, message.as_string())
        print('3.', 'Closing connection')
        self.connection.close()
# +
def send_emails_wrapper():
    """Send the offer email to every address in ``emails_array``.

    The first address becomes the visible To recipient; all remaining
    addresses are passed as BCC.
    """
    emailer = OfferEmail(setting='google')
    def construct_emails(debug=False):
        # Returns (main_recipient, bcc_string) in normal mode, or a single
        # debug address when debug=True (note the asymmetric return type).
        if not debug:
            emails = list(emails_array)
            main_email = emails.pop(0)
            # BUG FIX: join the *remaining* addresses, not the full array --
            # the original re-included the main recipient in the BCC list,
            # so they received the email twice.
            return main_email, ','.join(emails)
        return '<EMAIL>'
    emails = construct_emails()
    subject = "-30% sur ce crop top tendance pour l'été 💓💓"
    emailer.send('<EMAIL>', emails[0], subject, bcc=emails[1])

send_emails_wrapper()
# +
def write_emails_to_new_file():
    """Persist the cleaned dataframe ``df`` (from the cells above) to
    ``emails.csv`` in the project directory.

    NOTE(review): ``to_csv`` is called without ``index=False``, so the
    dataframe index is written as an extra column -- confirm intended.
    """
    df.to_csv(os.path.join(BASE_DIR, 'emails.csv'))

write_emails_to_new_file()
# -
|
ecommerce_emails/data_fixer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: rl_course
# language: python
# name: rl_course
# ---
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:60% !important; }</style>"))
# # Temporal Difference Reinforcement Learning
#
# So far, we only considered planning, meaning that we assumed that we know the underlying model of the environment and that the agent has access to it.
# Now, we considere the case in which do not have access to the full MDP. That is, we do __model-free prediction__ now.
#
# To illustrate this, we implement the black jack example from the RL Lecture 4 by <NAME> for Monte Carlo Reinforcement Learning [see example](https://youtu.be/PnHCvfgC_ZA?t=1003)
#
# We learn directly from episodes of experience. We do that by determining how much reward we get for a given policy. Policy can be just a random walk.
#
# TD method properties:
# * is model-free
# * learns from incomplete episodes by bootstrapping
# * updates a guess towards a guess
#
# Simplest TD-learning algorithm TD(0):
# * Update value $V(S_t)$ towards __estimated__ return $R_{t+1} + \gamma V(S_t)$
#
# $$
# V(S_t) \gets V(S_t) + \alpha ( R_{t+1} + \gamma V(S_{t+1}) - V(S_t))
# $$
# where
# * $R_{t+1} + \gamma V(S_{t+1})$ is called __TD target__ (Estimated return)
# * $\delta_t = R_{t+1} + \gamma V(S_{t+1}) - V(S_t)$ is called __TD error__
#
# With TD one can react to different rewards within an episode. It updates the value function based on the prediction for the next time step.
#
# TD converges to solution of maximum likelihood Markov model. It exploits the Markov property
#
# _Bootstrapping:_ Update of the original guess by a guess later on in the episode (DP & TD use it)
# _Sampling:_ An update samples an expectation. (MC & TD use it)
# ## TD($\lambda$) Algorithm
#
# Let the _TD target_ look n steps into the future. Thus the n-step return is defined as
# $$
# G^{(n)}_T = R_{t+1} + \gamma R_{t+2} + ... + \gamma^{n-1} R_{t+n+1} + \gamma^n V(S_{t+n})
# $$
#
# and the value update (n-step TD-learning):
# $$
# V(S_t) \gets V(S_t) + \alpha (G^{(n)}_t - V(S_t))
# $$
#
# In TD($\lambda$) we want to combine all n-step lookaheads. For that we use the $\lambda$-return. That is, the geometrically weighted average of all n.
# The weight is;
#
# $$
# G^{\lambda}_t = (1-\lambda) \sum_{n=1} \lambda^{n-1} \cdot G^{(n)}_t
# $$
#
# and the value update :
# $$
# V(S_t) \gets V(S_t) + \alpha (G^{\lambda}_t - V(S_t))
# $$
#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import plotting
plotting.set_layout(drawing_size=15)
# ## The Environment
#
# For this example we use the python package [gym](https://gym.openai.com/docs/) which provides a ready-to-use implementation of a BlackJack environment.
#
# The states are stored in this tuple format: (Agent's score , Dealer's visible score, and whether or not the agent has a usable ace)
#
# Here, we can look at the number of different states:
import gym
env = gym.make('Blackjack-v0')
env.observation_space
# And the number of actions we can take:
env.action_space
# To start a game call `env.reset()` which will return the obersavtion space
env.reset()
# We can take two different actions: `hit` = 1 or `stay` = 0.
#
# The result of this function call shows the _obersavtion space_, the reward (winning=+1, loosing =-1) and if the game is over,
env.step(1)
# ## Define the Agent
#
#
# +
class agents():
    """TD(lambda) agent for the gym Blackjack environment.

    Keeps a tabular value function V(s) and a visit counter N(s) over
    (player score, dealer card, usable ace) states and updates V with the
    lambda-return after each episode.
    """
    def __init__(self, state_space, action_space, gamma=1, lamb=0.25, alpha = 0.005):
        """Store hyper-parameters and allocate the value/visit-count tables.

        state_space  -- gym Tuple space: (player score, dealer card, usable ace)
        action_space -- gym Discrete space with the available actions
        gamma        -- discount factor
        lamb         -- lambda of the TD(lambda) return
        alpha        -- learning rate
        """
        # Store the discount factor
        self.gamma = gamma
        # Store the lambda value
        self.lamb = lamb
        # Store the learning rate
        self.alpha = alpha
        n_player_states = state_space[0].n
        n_dealer_states = state_space[1].n
        # BUG FIX: the usable-ace flag is the *third* component of the
        # observation space (2 values); the original read state_space[0].n
        # (32 values) and allocated a 16x larger table than needed.
        n_usable_ace = state_space[2].n
        # Store the value function for each state, starting at zero
        self.v = np.zeros((n_player_states, n_dealer_states, n_usable_ace))
        # incremental visit counter per state
        self.N = np.zeros((n_player_states, n_dealer_states, n_usable_ace))

    def random_move(self):
        """Return a random action: 0 (stay) or 1 (hit)."""
        return np.random.randint(0, 2)

    def incre_counter(self, state):
        """Increment the visit counter for a given (player, dealer, ace) state."""
        # convert the true/false usable-ace flag to 0/1
        s2 = 1 if state[2] else 0
        self.N[state[0], state[1], s2] += 1

    def policy_evaluation(self, all_states, all_rewards):
        """Update V(s) for every visited state using the TD(lambda) return.

        NOTE(review): the n-step returns below are accumulated from the
        episode *start* for every state rather than from each state's own
        time step -- confirm this simplification is intended; standard
        TD(lambda) uses the rewards from time t onward.
        """
        for i_s, s in enumerate(all_states):
            # convert the true/false usable-ace flag to 0/1
            s2 = 1 if s[2] else 0
            # Current value estimate for that state
            V_s = self.v[s[0], s[1], s2]
            # n-step returns G^(n) for n = 1 .. len(all_rewards)
            Gs = []
            for i_r, r in enumerate(all_rewards):
                # BUG FIX: discount with self.gamma instead of reaching for
                # the global ``agent`` (a NameError for any other instance).
                G = np.sum([self.gamma**k * rew for k, rew in enumerate(all_rewards[:i_r + 1])])
                Gs.append(G)
            # lambda-return: G^lambda = (1-lambda) * sum_n lambda^(n-1) G^(n)
            # BUG FIX: enumerate from n=1 so the first weight is lambda^0;
            # the original started at n=0 and weighted G^(1) by lambda^-1.
            G_lamb = (1 - self.lamb) * np.sum([self.lamb**(n - 1) * G for n, G in enumerate(Gs, 1)])
            # Move the value estimate towards the lambda-return
            self.v[s[0], s[1], s2] = V_s + self.alpha * (G_lamb - V_s)
# +
# how many episodes should be played
n_episodes = 50000

# Initialize the agent with the environment's state/action spaces.
agent = agents(env.observation_space, env.action_space)

# Incremental MC updates: play one full episode, then update V(s).
for i in range(n_episodes):
    all_states = []
    all_rewards = []
    # Start a new game and record the initial state.
    s = env.reset()
    all_states.append(s)
    agent.incre_counter(s)
    # Play until the environment signals that the game is over.
    game_ended = False
    while not game_ended:
        # Fixed policy: stick on a sum of 20 or 21, otherwise hit.
        # (BUG FIX: the original code first drew agent.random_move() here,
        # but the result was immediately overwritten by this rule, so the
        # dead call has been removed.)
        move = 0 if s[0] >= 20 else 1
        s, r, game_ended, _ = env.step(move)
        # Record the visited state and the reward received.
        all_states.append(s)
        all_rewards.append(r)
        agent.incre_counter(s)
    # Policy evaluation using TD(lambda) on the completed episode.
    agent.policy_evaluation(all_states, all_rewards)
### END OF EPISODE ###
# -
# ## Plotting
# +
# Visualize the learned value function V(s) for player sums 13-21 against
# the dealer's visible card, with and without a usable ace.
fig = plt.figure(figsize=(10,5))
axes = fig.subplots(1,2,squeeze=False)
ax = axes[0,0]
# Slice: player sums 13-21, dealer cards 1-10, no usable ace (index 0).
c = ax.pcolormesh(agent.v[13:22,1:,0],vmin=-1,vmax=1)
# NOTE(review): setting tick labels without fixing tick locations can
# misalign labels if matplotlib chooses a different tick count — confirm
# the rendered axes.
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_xlabel('Dealer Showing')
ax.set_ylabel('Player Sum')
ax.set_title('No Usable Aces')
# plt.colorbar(c)
ax = axes[0,1]
# Same slice but with a usable ace (index 1).
c = ax.pcolormesh(agent.v[13:22,1:,1],vmin=-1,vmax=1)
ax.set_yticklabels(range(13,22))
ax.set_xticklabels(range(1,11,2))
ax.set_title('Usable Aces')
ax.set_xlabel('Dealer Showing')
# Shared color scale for both heatmaps.
plt.colorbar(c)
plt.show()
# -
|
Lecture 4 TD Learning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List Comprehensions
#
# # 列表解析
# > If you read enough Python code, you'll eventually come across the terse and efficient construction known as a *list comprehension*.
# This is one feature of Python I expect you will fall in love with if you've not used it before; it looks something like this:
#
# 如果你已经读了很多的Python代码,你一定已经碰到了一种很简洁高效的写法,叫做*列表解析*。这是Python中我认为你会爱上的一样特性;就像下面这行代码一样:
[i for i in range(20) if i % 3 > 0]
# > The result of this is a list of numbers which excludes multiples of 3.
# While this example may seem a bit confusing at first, as familiarity with Python grows, reading and writing list comprehensions will become second nature.
#
# 这行代码的结果会生成一个列表,里面包含了20以内所有不能整除3的数。虽然初次见到这种写法会有些不适应,但是当你越来越熟悉Python语言之后,阅读和书写列表解析代码会变成一种本能。
# ## Basic List Comprehensions
#
# ## 基本列表解析
#
# > List comprehensions are simply a way to compress a list-building for-loop into a single short, readable line.
# For example, here is a loop that constructs a list of the first 12 square integers:
#
# 列表解析实际上是提供了一种简单的方法将使用for循环构建列表转变为一行简短可读的代码。例如,下面的循环将产生头12个非负数的平方数列表:
# Build the first 12 squares with an explicit loop (the list-comprehension
# equivalent is shown in the next cell).
L = []
for n in range(12):
    L.append(n ** 2)
L
# The list comprehension equivalent of this is the following:
#
# 等效的列表解析写法如下:
[n ** 2 for n in range(12)]
# > As with many Python statements, you can almost read-off the meaning of this statement in plain English: "construct a list consisting of the square of ``n`` for each ``n`` up to 12".
#
# > This basic syntax, then, is ``[``*``expr``* ``for`` *``var``* ``in`` *``iterable``*``]``, where *``expr``* is any valid expression, *``var``* is a variable name, and *``iterable``* is any iterable Python object.
#
# 就像很多的Python语句一样,你可以基本上将上面的列表解析语句按照普通英语解读出来:"构造一个列表包含每个`n`的平方,直到12为止"。
#
# 列表解析的基本语法是`[`*`表达式`* `for` *`变量`* `in` *`迭代器`*`]`,其中`表达式`为任何正确的表达式,`变量`为变量名称,`迭代器`为任何Python中可迭代的对象。
# ## Multiple Iteration
#
# ## 多重迭代
#
# > Sometimes you want to build a list not just from one value, but from two. To do this, simply add another ``for`` expression in the comprehension:
#
# 有时你需要构建一个列表,其元素不止来源于一个值,而是来源于多个值。只要在语句中加入一个`for`循环即可:
[(i, j) for i in range(2) for j in range(3)]
# > Notice that the second ``for`` expression acts as the interior index, varying the fastest in the resulting list.
# This type of construction can be extended to three, four, or more iterators within the comprehension, though at some point code readibility will suffer!
#
# 注意第二个`for`循环是内层的迭代,因此在列表中`j`的变化是最快的。这种多重迭代可以支持3个、4个甚至多个的迭代,但是需要注意的是,很多重的迭代往往会降低代码的可读性。
# ## Conditionals on the Iterator
#
# ## 迭代中的条件
#
# > You can further control the iteration by adding a conditional to the end of the expression.
# In the first example of the section, we iterated over all numbers from 1 to 20, but left-out multiples of 3.
# Look at this again, and notice the construction:
#
# 你可以在`for`之后加入一个条件来进一步控制迭代。在本章的第一个例子中,我们迭代了20以内的非负数,但是剔除了能被3整除的数。我们再来看一遍,注意一下它的结构:
[val for val in range(20) if val % 3 > 0]
# > The expression ``(val % 3 > 0)`` evaluates to ``True`` unless ``val`` is divisible by 3.
# Again, the English language meaning can be immediately read off: "Construct a list of values for each value up to 20, but only if the value is not divisible by 3".
# Once you are comfortable with it, this is much easier to write – and to understand at a glance – than the equivalent loop syntax:
#
# 表达式`(val % 3 > 0)`会在`val`不能被3整除时为真`True`。我们又一次看到,这行代码基本可以直接用普通英语解读:"构建一个列表包含20以内的非负数,仅包含不能被3整除的数"。一旦你开始习惯这种写法,你会发现这样写出来的代码比使用循环的代码更容易读:
# Equivalent explicit loop: keep values not divisible by 3.
L = []
for val in range(20):
    if val % 3:
        L.append(val)
L
# ## Conditionals on the Value
#
# ## 条件赋值
#
# > If you've programmed in C, you might be familiar with the single-line conditional enabled by the ``?`` operator:
# ``` C
# int absval = (val < 0) ? -val : val
# ```
#
# > Python has something very similar to this, which is most often used within list comprehensions, ``lambda`` functions, and other places where a simple expression is desired:
#
# 如果你使用过C语言编程,你可以会对`?`的三元表达式很熟悉:
#
# ```C
# int absval = (val < 0) ? -val : val;
# ```
#
# Python也有类似的语法,特别是在列表解析、`lambda`函数中经常用到:
val = -10
val if val >= 0 else -val
# > We see that this simply duplicates the functionality of the built-in ``abs()`` function, but the construction lets you do some really interesting things within list comprehensions.
# This is getting pretty complicated now, but you could do something like this:
#
# 这段代码就是内建函数`abs`的实现,但是在列表解析中使用条件赋值会变得非常有趣。我们的列表解析现在开始有点复杂了,看下面的例子:
[val if val % 2 else -val
for val in range(20) if val % 3]
# > Note the line break within the list comprehension before the ``for`` expression: this is valid in Python, and is often a nice way to break-up long list comprehensions for greater readibility.
# Look this over: what we're doing is constructing a list, leaving out multiples of 3, and negating all mutliples of 2.
#
# 我们在`for`前面加了一个换行:这在Python中是合法的,并且也推荐在长的列表解析语句中加入换行增加代码的可读性。上面的例子是构建一个列表,包含20以内的非负数,仅包括不能整除3的数,并且当该数能整除2时,转为负数。
# > Once you understand the dynamics of list comprehensions, it's straightforward to move on to other types of comprehensions. The syntax is largely the same; the only difference is the type of bracket you use.
#
# 一旦你理解了列表解析的语法,你也就很容易理解其他类型的解析语法了。语法都是一致的,区别仅在于使用的括号不一样。
#
# > For example, with curly braces you can create a ``set`` with a *set comprehension*:
#
# 例如,使用花括号构建一个集合`set`,也就是*集合解析*:
{n**2 for n in range(12)}
# > Recall that a ``set`` is a collection that contains no duplicates.
# The set comprehension respects this rule, and eliminates any duplicate entries:
#
# 回想一下,`set`是一个不含重复元素的集合。集合解析同样遵循这个规则,构建的`set`当中不包含重复项:
{a % 3 for a in range(1000)}
# > With a slight tweak, you can add a colon (``:``) to create a *dict comprehension*:
#
# 做一个小的调整,你可以使用冒号(`:`)来构建*字典解析*:
{n:n**2 for n in range(6)}
# > Finally, if you use parentheses rather than square brackets, you get what's called a *generator expression*:
#
# 最后,如果你使用了小括号而非中括号的话,你就创建了一个生成器,也就是*生成器表达式*:
(n**2 for n in range(12))
# > A generator expression is essentially a list comprehension in which elements are generated as-needed rather than all at-once, and the simplicity here belies the power of this language feature: we'll explore this more next.
#
# 生成器表达式和列表解析是一样的语法,唯一的区别在于,生成器不会立刻产生所有元素,仅在使用的时候才会生成元素值。我们后面还会详细介绍这里。
#
|
11-List-Comprehensions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
# Connect to the local SQLite database file.
engine = create_engine("sqlite:///titanic.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
# NOTE(review): `reflect=True` is deprecated in SQLAlchemy 1.4+; newer code
# uses Base.prepare(autoload_with=engine) — confirm the installed version.
Base.prepare(engine, reflect=True)
# Save a reference to the ORM class mapped from the `passenger` table.
Passenger = Base.classes.passenger
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """List all available api routes."""
    # Assemble the route listing from a tuple of endpoints.
    routes = ("/api/v1.0/names", "/api/v1.0/passengers")
    return "Available Routes:<br/>" + "<br/>".join(routes)
@app.route("/api/v1.0/names")
def names():
    """Return a JSON list of all passenger names."""
    # BUG FIX: the docstring above was previously placed after the first
    # statement, making it a no-op string expression rather than a docstring.
    # Create our session (link) from Python to the DB.
    session = Session(engine)
    # Query all passenger names; each row comes back as a 1-tuple.
    results = session.query(Passenger.name).all()
    session.close()
    # Flatten the list of 1-tuples into a plain list of names.
    all_names = list(np.ravel(results))
    return jsonify(all_names)
@app.route("/api/v1.0/passengers")
def passengers():
    """Return a JSON list of passenger records (name, age, sex)."""
    # BUG FIX: the docstring above was previously placed after the first
    # statement, making it a no-op string expression rather than a docstring.
    # Create our session (link) from Python to the DB.
    session = Session(engine)
    # Query the fields we expose for every passenger.
    results = session.query(Passenger.name, Passenger.age, Passenger.sex).all()
    session.close()
    # Build one dict per passenger row.
    all_passengers = [
        {"name": name, "age": age, "sex": sex}
        for name, age, sex in results
    ]
    return jsonify(all_passengers)
if __name__ == '__main__':
    # Start the Flask development server (debug mode; not for production use).
    app.run(debug=True)
|
.ipynb_checkpoints/app.py-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MindSpore-1.0.1
# language: python
# name: mindspore-1.0.1
# ---
# # <center/>转换数据集为MindRecord
# ## 概述
#
# 用户可以将非标准的数据集和常用的数据集转换为MindSpore数据格式,即MindRecord,从而方便地加载到MindSpore中进行训练。同时,MindSpore在部分场景做了性能优化,使用MindRecord数据格式可以获得更好的性能体验。
#
# MindSpore数据格式具备的特征如下:
# - 实现多变的用户数据统一存储、访问,训练数据读取更加简便。
# - 数据聚合存储,高效读取,且方便管理、移动。
# - 高效的数据编解码操作,对用户透明、无感知。
# - 可以灵活控制分区的大小,实现分布式训练。
#
# MindSpore数据格式的目标是归一化用户的数据集,并进一步通过`MindDataset`实现数据的读取,并用于训练过程。
#
# 
#
# > 本文档适用于CPU、GPU和Ascend环境。
# ## 基本概念
# 一个MindRecord文件由数据文件和索引文件组成,且数据文件及索引文件暂不支持重命名操作:
#
# - 数据文件
#
# 包含文件头、标量数据页、块数据页,用于存储用户归一化后的训练数据,且单个MindRecord文件建议小于20G,用户可将大数据集进行分片存储为多个MindRecord文件。
#
#
# - 索引文件
#
# 包含基于标量数据(如图像Label、图像文件名等)生成的索引信息,用于方便的检索、统计数据集信息。
#
# 
#
# 数据文件主要由以下几个关键部分组成:
#
# - 文件头
#
# 文件头主要用来存储文件头大小、标量数据页大小、块数据页大小、Schema信息、索引字段、统计信息、文件分区信息、标量数据与块数据对应关系等,是MindRecord文件的元信息。
#
#
# - 标量数据页
#
# 标量数据页主要用来存储整型、字符串、浮点型数据,如图像的Label、图像的文件名、图像的长宽等信息,即适合用标量来存储的信息会保存在这里。
#
#
# - 块数据页
#
# 块数据页主要用来存储二进制串、Numpy数组等数据,如二进制图像文件本身、文本转换成的字典等。
#
# ## 整体流程
#
# 1. 准备环节。
# 2. 将数据集转换为MindRecord。
# 3. 读取MindRecord数据集。
# ## 准备环节
# ### 创建目录
# 下载需要处理的图片数据`tansform.jpg`作为待处理的原始数据。
# 创建文件夹目录`./datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/`用于存放本次体验中所有的转换数据集。
# 创建文件夹目录`./datasets/convert_dataset_to_mindrecord/images/`用于存放下载下来的图片数据。
# !wget -N https://gitee.com/mindspore/docs/raw/master/tutorials/notebook/convert_dataset_to_mindrecord/datasets/convert_dataset_to_mindrecord/images/transform.jpg
# !mkdir -p ./datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/
# !mkdir -p ./datasets/convert_dataset_to_mindrecord/images/
# !mv -f ./transform.jpg ./datasets/convert_dataset_to_mindrecord/images/
# !tree ./datasets/convert_dataset_to_mindrecord/images/
# ## 将数据集转换为MindRecord
# 将数据集转换为MindRecord主要分为以下5个步骤:
#
# 1. 导入`FileWriter`类,用于将用户定义的原始数据写入,参数用法如下:
#
# - `file_name` - MindSpore数据格式文件的文件名,本例使用变量`data_record_path`传入该参数。
# - `shard_num` - MindSpore数据格式文件的数量,默认为1,取值范围在[1,1000],本例使用4。
#
# 2. 定义数据集Schema,Schema用于定义数据集包含哪些字段以及字段的类型,然后添加Schema,相关规范如下:
#
# - 字段名:字母、数字、下划线。
# - 字段属性`type`:int32、int64、float32、float64、string、bytes。
# - 字段属性`shape`:如果是一维数组,用[-1]表示,如果是二维数组,用[m,n]表示,如果是三维数组,用[x,y,z]表示。
#
# > - 如果字段有属性`shape`,则对应数据类型必须为int32、int64、float32、float64。
# > - 如果字段有属性`shape`,则用户传入`write_raw_data`接口的数据必须为`numpy.ndarray`类型。
#
# 本例中定义了`file_name`字段,用于标注准备写入数据的文件名字,定义了`label`字段,用于给数据打标签,定义了`data`字段,用于保存数据。
# 3. 准备需要写入的数据,按照用户定义的Schema形式,准备需要写入的样本列表。
# 4. 添加索引字段,添加索引字段可以加速数据读取,该步骤为可选操作。
# 5. 写入数据,最后生成MindSpore数据格式文件。接口说明如下:
#
# - `write_raw_data`:将数据写入到内存之中。
# - `commit`:将最终内存中的数据写入到磁盘。
# +
from mindspore.mindrecord import FileWriter
import os

# Clean up MindRecord files left over from a previous run (Linux only).
data_path = './datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/'
os.system('rm -f {}test.*'.format(data_path))
# Create the FileWriter; shard_num=4 splits the dataset into 4 MindRecord files.
data_record_path = './datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/test.mindrecord'
writer = FileWriter(file_name=data_record_path,shard_num=4)
# Define the schema: file name (string), label (int32) and raw image bytes.
data_schema = {"file_name":{"type":"string"},"label":{"type":"int32"},"data":{"type":"bytes"}}
writer.add_schema(data_schema,"test_schema")
# Prepare the data contents: read the sample image as raw bytes.
file_name = "./datasets/convert_dataset_to_mindrecord/images/transform.jpg"
with open(file_name, "rb") as f:
    bytes_data = f.read()
data = [{"file_name":"transform.jpg", "label":1, "data":bytes_data}]
# Add index fields on the scalar columns to speed up retrieval.
indexes = ["file_name","label"]
writer.add_index(indexes)
# Write the records to memory, then commit them to disk.
writer.write_raw_data(data)
writer.commit()
# -
# -
# 该示例会生成8个文件,成为MindRecord数据集。`test.mindrecord0`和`test.mindrecord0.db`称为1个MindRecord文件,其中`test.mindrecord0`为数据文件,`test.mindrecord0.db`为索引文件,生成的文件如下所示:
# !tree ./datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/
# 6. 如果需要在现有数据格式文件中增加新数据,可以调用`open_for_append`接口打开已存在的数据文件,继续调用`write_raw_data`接口写入新数据,最后调用`commit`接口生成本地数据文件。
# Append additional records to an existing MindRecord file: reopen it,
# write the new data, then commit again.
writer = FileWriter.open_for_append('./datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/test.mindrecord0')
writer.write_raw_data(data)
writer.commit()
# ## 读取MindRecord数据集
# 下面将简单演示如何通过`MindDataset`读取MindRecord数据集。
# 1. 导入读取类`MindDataset`。
import mindspore.dataset as ds
# 2. 首先使用`MindDataset`读取MindRecord数据集,然后对数据创建了字典迭代器,并通过迭代器读取了一条数据记录。
file_name = './datasets/convert_dataset_to_mindrecord/datas_to_mindrecord/test.mindrecord0'
# create MindDataset for reading data
define_data_set = ds.MindDataset(dataset_file=file_name)
# create a dictionary iterator and read a data record through the iterator
print(next(define_data_set.create_dict_iterator(output_numpy=True)))
|
tutorials/notebook/convert_dataset_to_mindrecord/mindspore_convert_dataset_to_mindrecord.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cdod] *
# language: python
# name: conda-env-cdod-py
# ---
# [source](../api/alibi_detect.od.mahalanobis.rst)
# # Mahalanobis Outlier Detector
# ## Overview
#
# The Mahalanobis online outlier detector aims to predict anomalies in tabular data. The algorithm calculates an outlier score, which is a measure of distance from the center of the features distribution ([Mahalanobis distance](https://en.wikipedia.org/wiki/Mahalanobis_distance)). If this outlier score is higher than a user-defined threshold, the observation is flagged as an outlier. The algorithm is online, which means that it starts without knowledge about the distribution of the features and learns as requests arrive. Consequently you should expect the output to be bad at the start and to improve over time. The algorithm is suitable for low to medium dimensional tabular data.
#
# The algorithm is also able to include categorical variables. The `fit` step first computes pairwise distances between the categories of each categorical variable. The pairwise distances are based on either the model predictions (*MVDM method*) or the context provided by the other variables in the dataset (*ABDM method*). For MVDM, we use the difference between the conditional model prediction probabilities of each category. This method is based on the Modified Value Difference Metric (MVDM) by [Cost et al (1993)](https://link.springer.com/article/10.1023/A:1022664626993). ABDM stands for Association-Based Distance Metric, a categorical distance measure introduced by [Le et al (2005)](http://www.jaist.ac.jp/~bao/papers/N26.pdf). ABDM infers context from the presence of other variables in the data and computes a dissimilarity measure based on the [Kullback-Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence). Both methods can also be combined as ABDM-MVDM. We can then apply multidimensional scaling to project the pairwise distances into Euclidean space.
# ## Usage
#
# ### Initialize
#
# Parameters:
#
# * `threshold`: Mahalanobis distance threshold above which the instance is flagged as an outlier.
#
# * `n_components`: number of principal components used.
#
# * `std_clip`: feature-wise standard deviation used to clip the observations before updating the mean and covariance matrix.
#
# * `start_clip`: number of observations before clipping is applied.
#
# * `max_n`: algorithm behaves as if it has seen at most `max_n` points.
#
# * `cat_vars`: dictionary with as keys the categorical columns and as values the number of categories per categorical variable. Only needed if categorical variables are present.
#
# * `ohe`: boolean whether the categorical variables are one-hot encoded (OHE) or not. If not OHE, they are assumed to have ordinal encodings.
#
# * `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*.
#
# Initialized outlier detector example:
#
# ```python
# from alibi_detect.od import Mahalanobis
#
# od = Mahalanobis(
# threshold=10.,
# n_components=2,
# std_clip=3,
# start_clip=100
# )
# ```
# ### Fit
#
# We only need to fit the outlier detector if there are categorical variables present in the data. The following parameters can be specified:
#
# * `X`: training batch as a numpy array.
#
# * `y`: model class predictions or ground truth labels for `X`. Used for *'mvdm'* and *'abdm-mvdm'* pairwise distance metrics. Not needed for *'abdm'*.
#
# * `d_type`: pairwise distance metric used for categorical variables. Currently, *'abdm'*, *'mvdm'* and *'abdm-mvdm'* are supported. *'abdm'* infers context from the other variables while *'mvdm'* uses the model predictions. *'abdm-mvdm'* is a weighted combination of the two metrics.
#
# * `w`: weight on *'abdm'* (between 0. and 1.) distance if `d_type` equals *'abdm-mvdm'*.
#
# * `disc_perc`: list with percentiles used in binning of numerical features used for the *'abdm'* and *'abdm-mvdm'* pairwise distance measures.
#
# * `standardize_cat_vars`: standardize numerical values of categorical variables if True.
#
# * `feature_range`: tuple with min and max ranges to allow for numerical values of categorical variables. Min and max ranges can be floats or numpy arrays with dimension *(1, number of features)* for feature-wise ranges.
#
# * `smooth`: smoothing exponent between 0 and 1 for the distances. Lower values will smooth the difference in distance metric between different features.
#
# * `center`: whether to center the scaled distance measures. If False, the min distance for each feature except for the feature with the highest raw max distance will be the lower bound of the feature range, but the upper bound will be below the max feature range.
#
# ```python
# od.fit(
# X_train,
# d_type='abdm',
# disc_perc=[25, 50, 75]
# )
# ```
#
# It is often hard to find a good threshold value. If we have a batch of normal and outlier data and we know approximately the percentage of normal data in the batch, we can infer a suitable threshold:
#
# ```python
# od.infer_threshold(
# X,
# threshold_perc=95
# )
# ```
#
# Beware though that the outlier detector is stateful and every call to the `score` function will update the mean and covariance matrix, even when inferring the threshold.
# ### Detect
#
# We detect outliers by simply calling `predict` on a batch of instances `X` to compute the instance level Mahalanobis distances. We can also return the instance level outlier score by setting `return_instance_score` to True.
#
# The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
#
# * `is_outlier`: boolean whether instances are above the threshold and therefore outlier instances. The array is of shape *(batch size,)*.
#
# * `instance_score`: contains instance level scores if `return_instance_score` equals True.
#
#
# ```python
# preds = od.predict(
# X,
# return_instance_score=True
# )
# ```
# ## Examples
#
# ### Tabular
#
# [Outlier detection on KDD Cup 99](../examples/od_mahalanobis_kddcup.nblink)
|
doc/source/methods/mahalanobis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SOM Training on RGB Colors Dataset
#
# +
import torch
import random
import matplotlib
import numpy as np
import pandas as pd
from fastsom import *
from fastai.tabular.all import *
# -
# ### Creating the DataLoaders
def colors():
    """Build the RGB color dataset for SOM training.

    Returns:
        x: (222, 3) float array of RGB triplets in [0, 1] — the black and
           white anchors plus 20 samples from each of 11 color families.
        y: None (the dataset is unlabeled).
        labels: the three feature names, ['Red', 'Green', 'Blue'].
    """
    # PERFORMANCE FIX: the original appended row-by-row with np.append,
    # which copies the whole array on every call (quadratic). Accumulate
    # rows in a Python list and convert once at the end. The order of
    # random.random() calls is unchanged, so seeded runs produce the same
    # data as before.
    rows = [[0, 0, 0], [1, 1, 1]]  # pure black and pure white anchors
    for _ in range(20):
        # One sample per color "family", randomizing one or more channels.
        rows.append([0, 0, random.random()])
        rows.append([0, random.random(), 0])
        rows.append([random.random(), 0, 0])
        rows.append([1, 1, random.random()])
        rows.append([1, random.random(), 1])
        rows.append([random.random(), 1, 1])
        rows.append([0, random.random(), random.random()])
        rows.append([random.random(), random.random(), 0])
        rows.append([1, random.random(), random.random()])
        rows.append([random.random(), random.random(), 1])
        rows.append([random.random(), random.random(), random.random()])
    x = np.array(rows, dtype=float)
    y = None
    labels = ['Red', 'Green', 'Blue']
    return x, y, labels
# Build the color dataset: x is the feature matrix, y is None (unlabeled).
x, y, labels = colors()
# Let's create a `DataFrame` which we'll convert into a `TabularDataLoaders`:
df = pd.DataFrame(x, columns=labels)
df.head()
# All three columns are continuous features; Normalize standardizes them.
dls = TabularDataLoaders.from_df(df, cat_names=[], cont_names=labels, procs=[Normalize])
# ### Creating the Learner
# size=(10, 10) is the SOM map grid; the 3D codebook is visualized live.
learn = SomLearner(dls, size=(10, 10), visualize=[SOM_TRAINING_VIZ.CODEBOOK_3D])
# ### Training the model
# %matplotlib notebook
# Train for 40 epochs (presumably; confirm against the fastsom API).
learn.fit(40)
# %matplotlib inline
learn.recorder.plot_loss()
# ### Interpretation
#
# The `SomInterpretation` class provides visualization utilities, to better understand the output of the training process.
interp = SomInterpretation.from_learner(learn)
# `show_weights` displays a three-dimensional PCA of the SOM codebook as an RGB image:
# %matplotlib inline
interp.show_weights()
# `show_hitmap` displays a heatmap of hit counts for each map position:
# %matplotlib inline
interp.show_hitmap()
# `show_feature_heatmaps` shows the per-feature value distribution over the SOM codebooks:
# %matplotlib inline
interp.show_feature_heatmaps()
|
nbs/colors.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
import numpy as np
import skimage
import skimage.io
import scipy.io as sio
import skimage.transform
import sys
import tensorflow as tf
import numpy as np
import sys
import os
import scipy.io as sio
import re
import time
from tqdm import tqdm
np.random.seed(0)
# +
outputChannels = 2
classType = 'unified_CR'
# 0 leaf --> background?
indices = [0]
savePrefix = "direction_" + classType + "_unified_CR_pretrain"
train = False
# -
from ioUtils import *
#ssUnet
# +
# Validation/test batch feeder over the watershednet data
# (Batch_Feeder comes from ioUtils, imported above).
valFeeder = Batch_Feeder(dataset_path="../../watershednet/data/for_training/42/",
                unet_output_path = '../../pytorch-nested-unet/outputs/42',
                indices=indices,
                subset='test',
                batchSize=5,
                padWidth=None,
                padHeight=None,
                flip=False,
                keepEmpty=False,
                # NOTE(review): `train=True` here contradicts the module-level
                # `train = False` flag set earlier — confirm this is intended.
                train=True,
                img_shape = (384,384))
valFeeder.set_paths()
# -
# +
# train_model(model=model, outputChannels=outputChannels,
# learningRate=learningRate,
# trainFeeder=trainFeeder, valFeeder=valFeeder,
# modelSavePath="../models/direction", savePrefix=savePrefix,
# initialIteration=initialIteration)
# +
# # !ls ../models/direction
# -
modelWeightPaths = ["../models/direction/direction_unified_CR_unified_CR_pretrain_150.mat"]
from train_direction import initialize_model
# +
#modelWeightPaths = ["./cityscapes/models/direction/direction3_unified_ss_wide_pretrain_VGGFIX_020.mat"]
model = initialize_model(outputChannels=outputChannels, wd=0, modelWeightPaths=modelWeightPaths)
# feeder = Batch_Feeder(dataset="cityscapes", indices=indices, train=train, batchSize=batchSize, padWidth=None, padHeight=None)
# feeder.set_paths(idList=read_ids("./cityscapes/splits/vallist.txt"),
# imageDir="./cityscapes/inputImages/val",
# ssDir="./cityscapes/unified/ssMaskFineGT/val")
# -
import math
batchSize = 8
# +
with tf.Session() as sess:
tfBatchImages = tf.placeholder("float", shape=[None, 384, 384, 3])
tfBatchGT = tf.placeholder("float", shape=[None, 384, 384, 2])
tfBatchSS = tf.placeholder("float", shape=[None, 384, 384])
with tf.name_scope("model_builder"):
print ("attempting to build model")
model.build(tfBatchImages, tfBatchSS)
print ("built the model")
sys.stdout.flush()
init = tf.initialize_all_variables()
sess.run(init)
for i in tqdm(range(int(math.floor(valFeeder.total_samples() / batchSize)))):
imageBatch, gtBatch, ssBatch, ssUnet = valFeeder.next_batch()
outputBatch = sess.run(model.output, feed_dict={tfBatchImages: imageBatch,
tfBatchGT: gtBatch,
tfBatchSS: ssBatch})
# for j in range(len(idBatch)):
# outputFilePath = os.path.join(outputSavePath, idBatch[j]+'.mat')
# outputFileDir = os.path.dirname(outputFilePath)
# if not os.path.exists(outputFileDir):
# os.makedirs(outputFileDir)
# sio.savemat(outputFilePath, {"dir_map": outputBatch[j]}, do_compression=True)
# print ("processed image %d out of %d"%(j+batchSize*i, feeder.total_samples()))
# -
outputBatch.shape
import matplotlib.pyplot as plt
img = outputBatch[1]
gt = gtBatch[1]
plt.figure(figsize=(15,15))
plt.subplot(121)
plt.imshow(img[:,:,0])
plt.subplot(122)
plt.imshow(gt[:,:,0])
plt.imshow(ssBatch[0])
plt.imshow(ssUnet[0])
plt.figure(figsize=(15,15))
plt.subplot(121)
plt.imshow(img[:,:,0],cmap='gray')
plt.subplot(122)
plt.imshow(gt[:,:,1])
img[:,:,0]
plt.figure(figsize=(15,15))
plt.subplot(121)
plt.imshow(ssBatch[0])
plt.subplot(122)
plt.imshow(ssUnet[0])
def ssProcess(ssImage):
    """Convert a semantic-segmentation image into (ssBinary, ssMask).

    ssBinary is 1.0 wherever a class of interest is present; ssMask holds a
    scaled per-class code, with background forced to class 1 first (temp fix).
    """
    ssImageInt = ssImage
    if ssImageInt.dtype == np.float32:
        # Float inputs are rescaled from [0, 1] to uint8 codes before matching.
        ssImageInt = (ssImageInt * 255).astype(np.uint8)

    # order: Person, Rider, Motorcycle, Bicycle, Car, Truck, Bus, Train
    class_codes = (
        ('person', 1), ('rider', 2), ('motorcycle', 3), ('bicycle', 4),
        ('car', 6), ('truck', 7), ('bus', 8), ('train', 9),
    )
    ssMask = np.zeros(shape=ssImage.shape, dtype=np.float32)
    for cls_name, code in class_codes:
        ssMask += (ssImageInt == CLASS_TO_SS[cls_name]).astype(np.float32) * code

    # Binary foreground mask: any pixel matched by one of the classes above.
    ssBinary = (ssMask != 0).astype(np.float32)

    ssMask[ssMask == 0] = 1  # temp fix
    ssMask = (ssMask - 5) * 32
    return ssBinary, ssMask
plt.imshow(ssUnet[0])
# +
# ssBinary, ssMask = ssProcess(ssUnet[0])
# plt.figure(figsize=(15,15))
# plt.subplot(121)
# plt.imshow(ssBinary)
# plt.subplot(122)
# plt.imshow(ssMask)
# -
def forward_model(model, feeder, outputSavePath):
    """Run the direction model over every batch from `feeder` and save each
    output map as a .mat file under `outputSavePath`.

    Relies on the module-level `batchSize`.
    """
    with tf.Session() as sess:
        tfBatchImages = tf.placeholder("float", shape=[None, 384, 384, 3])
        tfBatchGT = tf.placeholder("float", shape=[None, 384, 384, 2])
        tfBatchSS = tf.placeholder("float", shape=[None, 384, 384])
        # BUG FIX: tfBatchSSMask was referenced in feed_dict below but never
        # defined, which raised NameError on the first batch. Define the
        # placeholder here (shape assumed to match ssMaskBatch — TODO confirm).
        tfBatchSSMask = tf.placeholder("float", shape=[None, 384, 384])
        with tf.name_scope("model_builder"):
            print("attempting to build model")
            model.build(tfBatchImages, tfBatchSS)
            print("built the model")
        sys.stdout.flush()
        init = tf.initialize_all_variables()
        sess.run(init)
        # Iterate over full batches only; a trailing partial batch is skipped.
        for i in range(int(math.floor(feeder.total_samples() / batchSize))):
            imageBatch, ssBatch, ssMaskBatch, idBatch = feeder.next_batch()
            outputBatch = sess.run(model.output,
                                   feed_dict={tfBatchImages: imageBatch,
                                              tfBatchSS: ssBatch,
                                              tfBatchSSMask: ssMaskBatch})
            for j in range(len(idBatch)):
                outputFilePath = os.path.join(outputSavePath, idBatch[j] + '.mat')
                outputFileDir = os.path.dirname(outputFilePath)
                if not os.path.exists(outputFileDir):
                    os.makedirs(outputFileDir)
                sio.savemat(outputFilePath, {"dir_map": outputBatch[j]},
                            do_compression=True)
                print("processed image %d out of %d" % (j + batchSize * i,
                                                        feeder.total_samples()))
# BUG FIX: `feeder` was never defined in this notebook; the only feeder
# instantiated above is `valFeeder`, so pass it explicitly.
forward_model(model, feeder=valFeeder,
              outputSavePath="./training/output/direction_ss")
|
DN/Untitled1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tensorflow_p36]
# language: python
# name: conda-env-tensorflow_p36-py
# ---
# +
import tensorflow as tf
import horovod.tensorflow as hvd
layers = tf.contrib.layers
learn = tf.contrib.learn
tf.logging.set_verbosity(tf.logging.INFO)
# -
# Horovod: initialize Horovod.
hvd.init()
hvd.rank()
# Download and load MNIST dataset.
mnist = learn.datasets.mnist.read_data_sets('MNIST-data-%d' % hvd.rank())
mnist1 = learn.datasets.mnist.read_data_sets('MNIST-data-%d' % 1)
print(mnist)
print(mnist1)
def conv_model(feature, target, mode):
    """2-layer convolution model.

    Builds conv(32) -> pool -> conv(64) -> pool -> fc(1024, dropout) -> fc(10)
    and returns (predicted class indices, softmax cross-entropy loss).
    Dropout is only active when mode is TRAIN.
    """
    # One-hot encode the integer labels into shape (batch_size, 10),
    # with on-value 1 for each vector of length 10.
    onehot_labels = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)

    # Reshape the flat 784-vector into a 4-D image tensor: the 2nd and 3rd
    # dimensions are width/height, the last is the single color channel.
    images = tf.reshape(feature, [-1, 28, 28, 1])

    # First conv layer: 32 features per 5x5 patch, then 2x2 max-pooling.
    with tf.variable_scope('conv_layer1'):
        conv1 = layers.conv2d(
            images, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
        pool1 = tf.nn.max_pool(
            conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Second conv layer: 64 features per 5x5 patch, then 2x2 max-pooling.
    with tf.variable_scope('conv_layer2'):
        conv2 = layers.conv2d(
            pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
        pool2 = tf.nn.max_pool(
            conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Flatten the 7x7x64 feature maps into a batch of vectors.
    flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    # Densely connected 1024-unit layer with dropout during training only.
    fc1 = layers.dropout(
        layers.fully_connected(
            flat, 1024, activation_fn=tf.nn.relu),
        keep_prob=0.5,
        is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    # Class logits (one per digit) and the training loss.
    logits = layers.fully_connected(fc1, 10, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(onehot_labels, logits)
    return tf.argmax(logits, 1), loss
# +
# Build model...
# Placeholders take flattened 28x28 images (784 floats) and scalar labels.
with tf.name_scope('input'):
    image = tf.placeholder(tf.float32, [None, 784], name='image')
    label = tf.placeholder(tf.float32, [None], name='label')
predict, loss = conv_model(image, label, tf.contrib.learn.ModeKeys.TRAIN)
# Horovod: adjust learning rate based on number of GPUs.
opt = tf.train.RMSPropOptimizer(0.001 * hvd.size())
# Horovod: add Horovod Distributed Optimizer.
opt = hvd.DistributedOptimizer(opt)
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = opt.minimize(loss, global_step=global_step)
# -
print(hvd.size())
# +
hooks = [
    # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
    # from rank 0 to all other processes. This is necessary to ensure consistent
    # initialization of all workers when training is started with random weights
    # or restored from a checkpoint.
    hvd.BroadcastGlobalVariablesHook(0),
    # Horovod: adjust number of steps based on number of GPUs.
    tf.train.StopAtStepHook(last_step=20000 // hvd.size()),
    tf.train.LoggingTensorHook(tensors={'step': global_step, 'loss': loss},
                               every_n_iter=10),
]
# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
# -
print(hvd.local_rank())
# +
# Horovod: save checkpoints only on worker 0 to prevent other workers from
# corrupting them.
checkpoint_dir = './checkpoints' if hvd.rank() == 0 else None
# The MonitoredTrainingSession takes care of session initialization,
# restoring from a checkpoint, saving to a checkpoint, and closing when done
# or an error occurs.
with tf.train.MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                       hooks=hooks,
                                       config=config) as mon_sess:
    while not mon_sess.should_stop():
        # Run a training step synchronously.
        image_, label_ = mnist.train.next_batch(100)
        mon_sess.run(train_op, feed_dict={image: image_, label: label_})
|
notebooks/horovod-local.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2. 第二章 - 如何训练一个神经网络(简单版)
#
# 目录:
#
# 第一部分 神经网络
# * 2.0 神经网络简介
#
# 第二部分 线性回归
# * 2.1 加载数据
# * 2.2 定义模型
# * 2.3 定义损失函数
# * 2.4 选择优化器
# * 2.5 训练模型并验证
# * 2.6 附:可视化验证
# ## 2.0 神经网络简介
# 以监督学习为例,我们训练一个神经网络,目标是:给定输入数据及其正确的标签,将输入数据传入我们的神经网络中,通过神经网络的复杂计算,得到对应的输出值,使这个输出值尽可能地与正确标签相符。
# 为了实现上述目标,我们以这样一种简单的方式理解神经网络的训练流程:
# * 首先,我们拿到含有正确标签的数据(获取数据集);
# * 然后,我们定义一系列含有待定参数的计算(定义神经网络,其参数就是我们的学习目标);
# * 接着,我们将数据按照第二步定义好的规则,计算得到对应的输出值(前向传播);
# * 随后,我们根据一定规则,比较输出值与正确标签的差异(计算损失函数);
# * 最后,我们根据输出值与正确标签的差异大小,反观我们当前计算中所使用的各个参数,对参数进行优化更新(反向传播以及参数优化);
# 以此反复,优化我们的计算,最终寻找到该计算(神经网络)的最佳参数。
# 对应地,在 Jittor (计图) 中,我们可以按如下顺序训练一个神经网络:
# 1. 加载数据
# 2. 定义模型
# 3. 定义损失函数
# 4. 选择优化器
# 5. 训练模型(并验证)
#
# 现在,我们按照上述流程,实现一个简单的神经网络吧!(线性回归问题)
# +
# 加载计图
import jittor as jt
# 开启 GPU 加速
# jt.flags.use_cuda = 1
# -
# ## 任务:线性回归问题
#
# 任务描述如下:
# * 已知 x 和 y 具有一定的线性关系。
# * 给定 x 的值,用模型预测 y 的值。
#
# 解决步骤如下:
# * 首先,我们会随机生成一些具有线性关系的数据点 $(x, y)$,当作我们的数据集;
# * 然后,定义我们的计算模型为 $y=a + b \cdot x $;
# * 接着,我们用计图的内置函数,选择我们的损失函数和参数优化器;
# * 最后,我们将完成训练模型的主代码块以及验证部分的代码。
# ## 2.1 加载数据
# 首先,我们要准备好实验的数据集。
#
# 在这个线性回归问题中,我们会随机生成 100 个数据点 $(x, y)$。其中,x 和 y 潜在的线性关系为 $y=a + b \cdot x $ (我们会为 $y$ 设置一定的噪音 )。这里,$a$ 和 $b$ 为我们模型将要学习的参数。**我们先手动设置 $a = 1$,$b = 2$ 来生成数据集。然后,我们用这样一个数据集训练我们的模型,看看模型是否有能力学习到这两个参数值。**
# +
import numpy as np
# Fix the RNG seed so the results are reproducible.
np.random.seed(2021)
# Create 100 data points: x is the input, y is the ground-truth label (the value to predict from x).
x = np.random.rand(100).reshape(100,1)
y = 1 + 2 * x + 0.1 * np.random.randn(100,1) # linear relation with a = 1, b = 2, plus Gaussian noise on y
# Split the points into a training set and a validation set (shuffle indices first, then slice by index).
index = np.arange(100) # generate 100 index values
np.random.shuffle(index) # put the indices into random order
train_index = index[:80] # indices of the training data
val_index = index[-20:] # indices of the validation data
# Slice out the training and validation sets by index
x_train, y_train = x[train_index], y[train_index]
x_val, y_val = x[val_index], y[val_index]
# -
# With the dataset ready, inspect the distribution of the points with Matplotlib
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows = 1, ncols = 2)
axs[0].scatter(x_train, y_train) # show the training-set points
axs[1].scatter(x_val, y_val) # show the validation-set points
# Finally, load the data into Jittor by converting the
# NumPy arrays into Jittor's Var type.
x_train_var = jt.array(x_train)
y_train_var = jt.array(y_train)
x_val_var = jt.array(x_val)
y_val_var = jt.array(y_val)
# ## 2.2 定义模型
# 模型的定义:我们定义模型需要继承 Jittor 的 Module 类,并实现 \_\_init__ 函数和 execute 函数。
# * \_\_init__ 函数: 用于定义模型由哪些参数或操作组成;
# * execute 函数: 定义了模型执行的顺序和模型的返回值。
# +
"""
模型 1
"""
from jittor import Module
class FirstModel(Module):
    """Linear model y = a + b * x with two scalar learnable parameters."""

    def __init__(self):
        super().__init__()
        # Parameters start at uniform random values; training should drive
        # them toward the data-generating values (a = 1, b = 2).
        self.a = jt.rand(1)
        self.b = jt.rand(1)

    def execute(self, x):
        """Forward pass: predict y from x using the current a and b."""
        return self.a + self.b * x
# -
# 接下来,用我们定义好的模型类,创建一个模型实例。
model = FirstModel()
# 现在,我们来瞧一眼这个实例模型,它初始化时随机的参数是多少:
print(model.state_dict())
# ## 2.3 定义损失函数
# 我们从 Jittor 的函数库里选择 MSE (均方误差)作为衡量 “模型输出值” 与 “正确标签” 差异大小的标准,即损失函数。
#
# (提示: Jittor 内置的损失函数和优化器都在 nn 类中。您只需导入 nn 类,即可轻松地使用这些函数。)
# +
# Import the nn module (Jittor's built-in losses and optimizers live here).
from jittor import nn
# Choose MSE (mean squared error) as the loss function.
loss_function = nn.MSELoss()
# -
# ## 2.4 选择优化器
# 我们选择 Jittor 内置的 SGD (Stochastic Gradient Descent,随机梯度下降) 作为模型参数的优化器,并设置学习率 $learning\_rate = 0.1$。
#
# 注意:在创建优化器实例的时候,我们需要将模型参数传入,代表我们的优化器将对这些参数进行优化更新。
#
# (提示:模型的参数可通过 model.parameters() 获取。)
# +
# Set the learning rate.
learning_rate = 0.1
# Create the optimizer, handing it the model parameters it will update.
optimizer = nn.SGD(model.parameters(), learning_rate)
# -
# ## 2.5 训练模型并验证
# 首先,我们完成模型训练的代码块:
def train(model, x_train, y_train, loss_function, optimizer):
    """Run one training pass: forward, loss, and parameter update.

    Returns the training loss so the caller can record it.
    """
    model.train()  # switch the model into training mode
    prediction = model(x_train)  # forward pass on the training inputs
    train_loss = loss_function(y_train, prediction)  # compare truth vs prediction
    optimizer.step(train_loss)  # Jittor style: backward + update in one call
    return train_loss
# 随后,我们完成模型验证的代码块:
def val(model, x_val, y_val, loss_function):
    """Evaluate the model on the validation set without updating parameters.

    Returns the validation loss so the caller can record it.
    """
    model.eval()  # evaluation mode: no parameter updates
    prediction = model(x_val)  # forward pass on the validation inputs
    return loss_function(y_val, prediction)
# 接下来,我们完成模型训练并验证的主代码块:
# 在执行前后,我们会分别打印出模型的参数,看我们的训练是否将模型参数训练成我们预期的 $a = 1$,$b = 2$。
# +
# Print the model parameters before training.
print("Before training: \n", model.state_dict())
# Number of iterations (in this case one epoch equals one iteration,
# since every step uses the full dataset).
epochs = 500
# Empty lists to record the loss on the training and validation sets.
train_loss_list = list()
val_loss_list = list()
# Main training loop.
for epoch in range(epochs):
    # Train on the training set; this updates the model parameters.
    train_loss = train(model, x_train_var, y_train_var, loss_function, optimizer)
    train_loss_list.append(train_loss)
    # Evaluate on the validation set; parameters are not updated.
    val_loss = val(model, x_val_var, y_val_var, loss_function)
    val_loss_list.append(val_loss)
# Print the model parameters after training.
print("After training: \n", model.state_dict())
# -
# 从上述打印结果,可以看到,经过我们的训练,模型参数由最初的随机值朝着正确的方向发生了改变。
# 在训练结束后,我们的模型参数已明显的接近于 $a = 1$,$b = 2$。
# 这说明,我们的模型训练是成功的!
# ## 2.6 附:可视化验证
# 最后,让我们利用可视化工具,验证一下实验结果吧。
# **Loss 在训练集和验证集上不同的下降趋势:**
# * Loss 值越大,代表通过模型计算出的预测值 y_pred 和真实值 y 的差距越大;
# * Loss 值越小,说明 y_pred 和 y 越来越接近,代表模型预测得越来越准确。
# Plot the loss trend over the first 50 epochs.
plt.plot(train_loss_list[:50],'r', label="Training Loss")
plt.plot(val_loss_list[:50],'g', label="Validation Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# **Compare the raw validation points against the model's predicted line:**
# +
# Use the trained model to predict y_pred from the validation inputs.
y_pred = model(x_val_var)
# Plot with Matplotlib. Note: Jittor Var values must be converted to NumPy arrays first.
plt.scatter(x_val_var.numpy(), y_val_var.numpy(), label="Validation Data") # original data points
plt.plot(x_val_var.numpy(), y_pred.numpy(), 'r', label="Model Prediction") # model's predicted line
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.show()
# -
# -
# # 📣
# 恭喜您!已成功完成了线性回归的任务!🎉🎉🎉
#
# 您可能觉得这个模型还太过简单、过于理论,无法应用到实际的神经网络训练中。
# 那么,请您继续最终章的挑战。
# 在终章中,我们会以上述模型为雏形,建立一个健全的神经网络,解决一个实际的分类问题。
|
python/jittor/notebook/60分钟快速入门Jittor/计图入门教程 2 --- 如何训练一个简单线性回归.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 1
# Import packages for dataframe manipulation and viewing all code output in-notebook.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Read Fremont Bike CSV file into Pandas DF
bike_data = pd.read_csv('Fremont_Bridge_Bicycle_Counter.csv')
# Inspect data
bike_data.head()
n = bike_data.shape[0]
# # Part 2
# Break up the datetime string into date, time, and AM/PM, then break up date and time further
# Split the raw Date string into [date, time, meridiem] pieces
# (assumes the format looks like "M/D/YYYY HH:MM:SS AM" -- TODO confirm).
date_split1 = bike_data['Date'].str.split(" ", expand = True)
date_split2 = date_split1[0].str.split("/", expand = True)
time_split = date_split1[1].str.split(":", expand = True)
# Pull year out of the date string, pull hour of day out of the time string, and keep AM/PM
bike_data['Year'] = date_split2[2]
bike_data['Hour'] = time_split[0]
bike_data['AM/PM'] = date_split1[2]
# Cast hour to an integer
bike_data['Hour'] = bike_data['Hour'].astype(int)
# Map hour of day to a 24-hour cycle - 12:00:00 AM is hour 0 and we count up until hour 23 at 11:00:00 PM.
# NOTE(review): this row-by-row .loc loop is slow for large frames; a
# vectorized equivalent would be (Hour % 12) + 12 * (AM/PM == 'PM'), or
# simply pd.to_datetime(...).dt.hour. Left as-is to preserve behavior exactly.
for i in range(n):
    if (bike_data.loc[i, 'Hour'] == 12):
        bike_data.loc[i, 'Hour'] = 0
    if (bike_data.loc[i, 'AM/PM'] == 'PM'):
        bike_data.loc[i, 'Hour'] += 12
# Sanity check. Note that total bicycle count is already present.
bike_data.head()
# # Part 3
# Create dataframe for bike data where the year is 2016
# (Year was extracted from a string split above, so compare against '2016' as a string).
bike_data_2016 = bike_data.copy()
bike_data_2016 = bike_data_2016[bike_data_2016['Year'] == '2016']
# Sanity check again
bike_data.head()
bike_data_2016.head()
# # Part 4
# Group by hour and apply sum() to get hourly counts for bicycle traffic
# NOTE(review): sum() is applied to every column, including the string-typed
# ones (Date, AM/PM, Year); newer pandas versions may warn or raise here --
# consider selecting the numeric columns first.
bike_counts_by_hour = bike_data.groupby('Hour').sum()
# Sanity check
bike_counts_by_hour
# Now plot the bicycle counts against the hour of day
bike_counts_by_hour['Fremont Bridge Total'].plot()
plt.ylabel('Total count of bicycles')
plt.title('Hourly bicycle count across Fremont Bridge since 2012')
plt.show()
# # Part 5
# Calculate average bike traffic per hour in the group by calculation now
# This should basically be the same as the counts divided by a constant
bike_counts_avgs = bike_data.groupby('Hour').mean()
# +
# Calculate and output the hour of day with the highest average bicycle count.
max_avg = bike_counts_avgs['Fremont Bridge Total'].max()
# Replaced the manual linear scan with idxmax, which returns the index (hour)
# of the first occurrence of the maximum -- identical to the original loop's
# break-on-first-match, without re-implementing argmax by hand.
max_hour = bike_counts_avgs['Fremont Bridge Total'].idxmax()
max_hour
# -
# Due to how I represented hour of day in the "Hour" column by Military time, hour 17 is 5:00 PM. That lines up nicely with evening rush hour traffic when folks are all commuting home from work.
|
analysis/Homework 1 analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="JPFeXztkPQfa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="1b193ad5-b7d9-443a-ff63-fb1d1009eef0" executionInfo={"status": "ok", "timestamp": 1583238669043, "user_tz": -60, "elapsed": 4585, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
# !pip install --upgrade tables
# + id="Of2BTcLZPeDh" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + id="oL0-16KyPvcX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d06456dd-efaf-4352-89be-fa187c5f284c" executionInfo={"status": "ok", "timestamp": 1583238669288, "user_tz": -60, "elapsed": 4815, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
# cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_2"
# + id="X2geqSGKQRyw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7285ba13-0a04-4f99-f1fb-34485d3b06fc" executionInfo={"status": "ok", "timestamp": 1583238672111, "user_tz": -60, "elapsed": 7628, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df = pd.read_hdf('data/car.h5')
df.shape
# + id="1x_2fzdzQUaO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="cc10cef6-bb1d-49cf-af3d-ef5248a71eee" executionInfo={"status": "ok", "timestamp": 1583238672111, "user_tz": -60, "elapsed": 7617, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df.columns.values
# + id="JZMo50bYQefm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="f52c248a-6150-4f42-9057-44df673cad21" executionInfo={"status": "ok", "timestamp": 1583238672112, "user_tz": -60, "elapsed": 7608, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df['price_value'].hist(bins=100)
# + id="K6MACGkNQvGT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="70d55a3a-0c39-4021-9646-bbd8e90120c6" executionInfo={"status": "ok", "timestamp": 1583238672112, "user_tz": -60, "elapsed": 7598, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df['price_value'].max()
# + id="iTVa5AEbQ1fd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="ffd335b4-b8c6-433a-f444-864e5c3b3b2d" executionInfo={"status": "ok", "timestamp": 1583238672113, "user_tz": -60, "elapsed": 7588, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df['price_value'].describe()
# + id="vXqHhLexQ5-R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="97ffe956-d497-4850-bef4-459d1d430989" executionInfo={"status": "ok", "timestamp": 1583238672113, "user_tz": -60, "elapsed": 7578, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df['param_marka-pojazdu'].unique()
# + id="zxThVNCnRQXe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="79a18065-e8cf-4118-a199-c5854a78a25d" executionInfo={"status": "ok", "timestamp": 1583238673642, "user_tz": -60, "elapsed": 9096, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
df.groupby('param_marka-pojazdu')['price_value'].agg(np.mean).plot(kind='bar')
# + id="nHrQpaCjR0ym" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 418} outputId="8b9c4703-faed-4b84-8e94-f432f9fa2e9d" executionInfo={"status": "ok", "timestamp": 1583238674156, "user_tz": -60, "elapsed": 9597, "user": {"displayName": "<NAME>0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
(
df
.groupby('param_marka-pojazdu')['price_value']
.agg(np.mean)
.sort_values(ascending=False)
.head(50)
).plot(kind='bar', figsize = (15, 5))
# + id="CtGfIumQSVa6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 422} outputId="1acea97c-15c7-4c36-ae27-44420fdf2f8c" executionInfo={"status": "ok", "timestamp": 1583238674597, "user_tz": -60, "elapsed": 10026, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
(
df
.groupby('param_marka-pojazdu')['price_value']
.agg(np.median)
.sort_values(ascending=False)
.head(50)
).plot(kind='bar', figsize = (15, 5))
# + id="I-lC35mtSqoJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 422} outputId="0c58a868-a06e-4e4c-8119-808ad779fec9" executionInfo={"status": "ok", "timestamp": 1583238675629, "user_tz": -60, "elapsed": 11047, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
(
df
.groupby('param_marka-pojazdu')['price_value']
.agg([np.mean, np.median, np.size])
.sort_values(by='mean', ascending=False)
.head(50)
).plot(kind='bar', figsize = (15, 5))
# + id="Bn6dSKFJS6zM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 485} outputId="f2550e83-c689-406f-caf2-71b78949b221" executionInfo={"status": "ok", "timestamp": 1583238677152, "user_tz": -60, "elapsed": 12557, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
(
df
.groupby('param_marka-pojazdu')['price_value']
.agg([np.mean, np.median, np.size])
.sort_values(by='mean', ascending=False)
.head(50)
).plot(kind='bar', figsize = (15, 5), subplots = True)
# + id="xPOF11VJTJxo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 485} outputId="b889b275-d72f-4fc2-e200-318251460323" executionInfo={"status": "ok", "timestamp": 1583238678870, "user_tz": -60, "elapsed": 14263, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
(
df
.groupby('param_marka-pojazdu')['price_value']
.agg([np.mean, np.median, np.size])
.sort_values(by='size', ascending=False)
.head(50)
).plot(kind='bar', figsize = (15, 5), subplots = True)
# + id="q3cYIjM2TUjz" colab_type="code" colab={}
def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=None, feat_sort='mean', top=50, subplots=True):
    """Group the global `df` by `feat_groupby`, aggregate `feat_agg`, and bar-plot the top rows.

    agg_funcs defaults to [np.mean, np.median, np.size]; feat_sort picks the
    aggregate column to sort by (descending); top limits the number of rows;
    subplots draws one axis per aggregate. Returns the axes from DataFrame.plot.
    """
    if agg_funcs is None:
        # Fixed: the original used a mutable default argument
        # (agg_funcs=[np.mean, ...]), which is shared across calls;
        # build the default list per call instead.
        agg_funcs = [np.mean, np.median, np.size]
    return(
        df
        .groupby(feat_groupby)[feat_agg]
        .agg(agg_funcs)
        .sort_values(by=feat_sort, ascending=False)
        .head(top)
    ).plot(kind='bar', figsize=(15, 5), subplots=subplots)
# + id="BvFQbNwZVHDa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 485} outputId="1ace7fad-e73b-4dfe-b4ad-776a1838d1f1" executionInfo={"status": "ok", "timestamp": 1583238800012, "user_tz": -60, "elapsed": 2453, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
group_and_barplot('param_marka-pojazdu')
# + id="RiDfgd8UYdlR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 417} outputId="600148d4-a359-42dc-cbef-faea817f8bde" executionInfo={"status": "ok", "timestamp": 1583238804610, "user_tz": -60, "elapsed": 2553, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
group_and_barplot('param_marka-pojazdu');
# + id="VncAVwSLYupD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 436} outputId="75739634-dda0-4f40-bf8a-d706588b33fa" executionInfo={"status": "ok", "timestamp": 1583238991013, "user_tz": -60, "elapsed": 1881, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
group_and_barplot('param_kraj-pochodzenia', feat_sort='size');
# + id="KEbcttVJZD4v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="a510d4ad-84c1-4951-b7f1-a67c6c510473" executionInfo={"status": "ok", "timestamp": 1583239086611, "user_tz": -60, "elapsed": 1728, "user": {"displayName": "<NAME>\u0142a", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggnav9sCJsBvoHjyZAE5-hdytji0L2Acp21HmMJMQ=s64", "userId": "03238446980367325604"}}
group_and_barplot('param_kolor', feat_sort='mean');
# + id="NxAM1l9TZyEr" colab_type="code" colab={}
|
day2_visualisation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
"""
def findMinDivisibleUpToX(x):
    """Return the smallest positive integer evenly divisible by every integer in 1..x.

    This is lcm(1, 2, ..., x), computed as a running LCM with gcd. The
    original brute-force scan tried every integer until one divided evenly,
    which is astronomically slow for x = 20 (~232 million candidates); this
    version takes O(x) arithmetic operations and returns identical results
    (including x <= 1, where the answer is 1).
    """
    import math
    result = 1
    for d in range(2, x + 1):
        # lcm(result, d) = result * d / gcd(result, d)
        result = result * d // math.gcd(result, d)
    return result
# -
findMinDivisibleUpToX(20)
|
Project_Euler-Problem_5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 0e0d1c7ef0f381ce9c31735005e25185fd13b9c57d8e85878ff9ff982cb55e39
# name: Python 3.8.5 64-bit
# ---
# +
# import libraries
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from network import Network
from utils import device, get_all_preds, get_num_correct
# %matplotlib inline
# -
# load the model with least test loss
model = Network().to(device)
model.load_state_dict(torch.load('models/model-run(lr=0.01, batch_size=512).ckpt', map_location=device))
model # the network's architecture
# +
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# extract and transform the data
train_set = torchvision.datasets.FashionMNIST(
root='./data/',
train=True,
download=True,
transform=transform
)
test_set = torchvision.datasets.FashionMNIST(
root='./data/',
train=False,
download=True,
transform=transform
)
# -
# Set the model to eval mode (no training from here on) and turn off gradient tracking as well.
model.eval()
with torch.no_grad():
    # Large batch sizes are fine here because no gradients are stored.
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=6000)
    train_preds = get_all_preds(model, train_loader)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1000)
    test_preds = get_all_preds(model, test_loader)
print(train_preds.shape) # shape of the predicted train scores
print(test_preds.shape) # shape of the predicted test scores
# +
# Count correct predictions and report overall accuracy on both splits.
train_correct = get_num_correct(train_preds, train_set.targets)
test_correct = get_num_correct(test_preds, test_set.targets)
print('Train Correct: {:5}\tTrain Accuracy: {:5.2f}%'.format(train_correct, 100*train_correct/len(train_set)))
print('Test Correct: {:6}\tTest Accuracy: {:6.2f}%'.format(test_correct, 100*test_correct/len(test_set)))
# -
# stack the correct label and the predicted label side by side
train_stacked = torch.stack(
(train_set.targets, train_preds.argmax(dim=1)),
dim=1
) # shape [60000, 2]
test_stacked = torch.stack(
(test_set.targets, test_preds.argmax(dim=1)),
dim=1
) # shape [10000, 2]
# +
# obtain one batch of test set
images, labels = next(iter(test_loader))
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
# plot format: "predicted-label (true-label)"
fig = plt.figure(figsize=(25, 20))
for i in np.arange(100):
ax = fig.add_subplot(10, 10, i+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[i]), cmap='gray')
ax.set_title(f"{test_set.classes[test_stacked[i, 1]]} ({test_set.classes[test_stacked[i, 0]]})",
color=("green" if test_stacked[i, 1]==test_stacked[i, 0] else "red"))
plt.show()
fig.savefig('visualizations/test_results.png', bbox_inches='tight')
plt.close()
# +
# calculate the confusion matrix for train_set using train_stacked we calculated above
# (rows = true class, columns = predicted class; int16 is safe because each
# train cell holds at most 6000 counts)
train_confmat = torch.zeros(10, 10, dtype=torch.int16)
for row in train_stacked:
    cl, pl = row.tolist()  # cl = correct label, pl = predicted label
    train_confmat[cl, pl] += 1
train_confmat
# +
# calculate the confusion matrix for test_set using test_stacked we calculated above
test_confmat = torch.zeros(10, 10, dtype=torch.int16)
for row in test_stacked:
    cl, pl = row.tolist()  # cl = correct label, pl = predicted label
    test_confmat[cl, pl] += 1
test_confmat
# -
# print the per-class train accuracy
for i in range(10):
print('Train accuracy of {:12s}:\t{:.2f}% ({}/{})'.format(
train_set.classes[i],
train_confmat[i, i]/60,
train_confmat[i, i],
6000))
# print the per-class test accuracy
# (diagonal of the confusion matrix over the 1000 test samples per class)
for i in range(10):
    print('Test accuracy of {:12s}:\t{:.2f}% ({}/{})'.format(
        test_set.classes[i],  # fixed copy-paste: was train_set.classes (same labels, but read the test split for consistency)
        test_confmat[i, i]/10,
        test_confmat[i, i],
        1000))
# +
# plot confusion matrices
cm_train = pd.DataFrame(train_confmat.numpy(), index = train_set.classes, columns = train_set.classes)
cm_test = pd.DataFrame(test_confmat.numpy(), index = test_set.classes, columns = test_set.classes)
cm_fig = plt.figure(figsize=(100, 60))
ax = cm_fig.add_subplot(10, 12, 1)
ax = sns.heatmap(cm_train, annot=True, cmap="tab20c", fmt="d", annot_kws={"size": 10})
ax.set_title("Confusion Matrix (train_set)")
ax = cm_fig.add_subplot(10, 12, 2)
ax = sns.heatmap(cm_test, annot=True, cmap="tab20c", fmt="d", annot_kws={"size": 10})
ax.set_title("Confusion Matrix (test_set)")
plt.show()
cm_fig.savefig('visualizations/confusion_matrix.png', bbox_inches='tight')
plt.close()
|
results.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import h5py
import json
import sys
sys.path.append("C:/Users/qq651/OneDrive/Codes/")
sys.path.append("C:/Users/qq651/OneDrive/Codes/A2project")
import illustris_python as il
import matplotlib.pyplot as plt
from plotTools.plot import *
import illustrisAPI as iapi
# +
rs = np.array([0, 0.2, 0.5, 0.7, 1.0, 1.5, 2.0, 2.5, 3.0])
il1_snap = [135, 120, 108, 95, 85, 75, 68, 64, 60]
tng_snap = [99, 84, 67, 59, 50, 40, 33, 29, 25]
il1_barID = np.load('f:/Linux/localRUN/barredID_il1.npy')
il1_diskID = np.load('f:/Linux/localRUN/diskID_il1.npy')
tng_barID = np.load('f:/Linux/localRUN/barredID_4WP_TNG.npy')
tng_diskID = np.load('f:/Linux/localRUN/diskID_4WP.npy')
# -
# Disk galaxies without a bar. Build a set once for O(1) membership tests
# instead of scanning the tng_barID array for every disk ID; the original
# order of tng_diskID is preserved.
_tng_bar_set = set(tng_barID.tolist())
tng_unbar = [i for i in tng_diskID if i not in _tng_bar_set]
# +
# CGS base-unit factors used in the conversion below.
M=1000 #g
L=100 #cm
T=1 #s
V=100 #cms-1
tng = np.load('f:/Linux/localRUN/tngSimuData.npy', allow_pickle=True).item()
h = tng['h']  # Hubble parameter (dimensionless little h), read from the TNG metadata
def e_const(snap, simuData):
    # Per-snapshot conversion constant from simulation energy units to CGS:
    # (1e10 * 1.989e30 g / h) * (a * 3.086e19 cm / h)^2 / (0.978 Gyr in s / h)^2.
    # NOTE(review): this version reads the module-level `h` (TNG's); a second
    # e_const defined below shadows this one and reads h from simuData instead.
    a = simuData['Redshifts'][snap, 2]
    # t = simuData['Redshifts'][snap, 3] * 1e3 * 1e9 * 31556926 * T / h
    return (1e10*1.989e30*M/h)*(a*3.086e+19*L/h)**2/((0.978*1e9*31556926*T/h)**2)
cons = {}
t = {}
for snap in tng_snap:
    cons[snap] = e_const(snap, tng)
    t[snap] = tng['Redshifts'][snap, 3] * 1e3 * 1e9 * 31556926 * T / h  # presumably snapshot age converted to seconds -- confirm units
# +
il1 = np.load('f:/Linux/localRUN/il1SimuData.npy', allow_pickle=True).item()
def e_const(snap, simuData):
    # Same conversion constant as above, but for Illustris-1. Note this
    # redefinition reads h locally from simuData instead of the global h.
    h = simuData['h']
    a = simuData['Redshifts'][snap, 2]
    # t = simuData['Redshifts'][snap, 3] * 1e3 * 1e9 * 31556926 * T / h
    return (1e10*1.989e30*M/h)*(a*3.086e+19*L/h)**2/((0.978*1e9*31556926*T/h)**2)
il1_cons = {}
il1_t = {}
for snap in il1_snap:
    il1_cons[snap] = e_const(snap, il1)
    il1_t[snap] = il1['Redshifts'][snap, 3] * 1e3 * 1e9 * 31556926 * T / h  # NOTE(review): uses the TNG global `h`, not il1['h'] -- confirm intended
# -
il1_cons, il1_t, cons, t
# +
def logMasSun(data):
    """Convert masses in code units (1e10 Msun) to log10 solar masses.

    Accepts any array-like. Zeros (which map to -inf under log10) are
    replaced with 0 so downstream medians/plots are not poisoned.
    """
    arr = np.asarray(data)  # idiomatic: replaces type(data) != type(np.array(0))
    with np.errstate(divide='ignore'):  # silence the log10(0) RuntimeWarning
        logged = np.log10(arr * 10 ** 10)
    logged[np.isinf(logged)] = 0
    return logged
def logmass(data):
    """log10 of values already in physical units; zeros map to 0 instead of -inf.

    Accepts any array-like and returns a NumPy array.
    """
    arr = np.asarray(data)  # idiomatic: replaces type(data) != type(np.array(0))
    with np.errstate(divide='ignore'):  # silence the log10(0) RuntimeWarning
        logged = np.log10(arr)
    logged[np.isinf(logged)] = 0
    return logged
# -
# #Next block is BH kinematic energy plot
# -------------------------------------------
# +
#get data
# Per-snapshot lists of BH energy-injection rates, split into barred and
# unbarred disk galaxies (outer index i follows tng_snap order).
unbar_rawdata = []
bar_rawdata = []
for i in tng_snap:
    unbar_rawdata.append([])
    bar_rawdata.append([])
for subID in tng_diskID:
    isdata = 1  # flag: cleared when any snapshot along the branch lacks a cutout
    prog = LoadMergHist('TNG', subID)[0]  # progenitor haloID per snapshot
    tmp = []
    last = 0
    t_last = 0
    # Walk snapshots from high redshift to low so the cumulative energies can be differenced.
    for snap in tng_snap[::-1]:
        try:
            haloID = prog[snap]
            f = h5py.File('f:/Linux/TNG_cutoff/bhs/snap_%d/cutout_%d.hdf5'%(snap, haloID), 'r')
            # Energy injected since the previous snapshot = cumulative total minus previous total.
            engy = np.array(f['PartType5']['BH_CumEgyInjection_RM']).sum() * cons[snap] - last
            last = np.array(f['PartType5']['BH_CumEgyInjection_RM']).sum() * cons[snap]
            delta_t = t[snap] - t_last
            t_last = t[snap]
        # NOTE(review): bare except silently drops galaxies with missing files
        # or keys; the h5py file handle is also never closed explicitly.
        except:
            isdata = 0
            break
        tmp.append(engy / delta_t)  # average injection rate over the interval
    if isdata:
        # tmp was filled in reversed snapshot order; re-reverse while appending.
        if subID in tng_barID:
            for i in range(len(tmp)):
                bar_rawdata[i].append(tmp[len(tmp)-1-i])
        else:
            for i in range(len(tmp)):
                unbar_rawdata[i].append(tmp[len(tmp)-1-i])
#plot
bar_rawdata = logmass(bar_rawdata)
unbar_rawdata = logmass(unbar_rawdata)
ydata, err = Y_rawdata(bar_rawdata, len(tng_snap))
ydata2, err2 = Y_rawdata(unbar_rawdata, len(tng_snap))
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.set_xlabel('Z')
ax.set_ylabel(r'$log_{10}(\dot{E}_{BH,kinetic}[erg/s])$', fontsize=13)
# ax.set_yscale("log")
ax.set_xlim(-0.1, 2.1)
ax.set_ylim(36, 44)
ax.set_xticks(rs)
# ax.set_title("Galaxies(subhalo) dark matter mass")
#lines
ax.errorbar(rs, ydata, yerr=err, elinewidth=2, capthick=2, capsize=3, color='r', fmt='o',ms=5, ls='-', label='TNG-100 barred')
ax.errorbar(rs, ydata2, yerr=err2, elinewidth=2, capthick=2, capsize=3, color='orange', fmt='o',ms=5, ls='-', label='TNG-100 no bar')
ax.legend(loc = 0)
ax.set_xlim(-0.1, 2)
plt.savefig('f:/Linux/local_result/E_kinetic.pdf')
# +
sMass = il.func.loadSubhalos('TNG', 99, 'SubhaloMassType')[:, 4]
sMass = logmass(sMass)  # BUG FIX: was `logmas`, a NameError -- presumably a typo for the local `logmass` helper defined above
def selec(left, right, data):
    """Return a boolean mask selecting values strictly inside (left, right)."""
    #return a mask
    return (data > left) & (data < right)
# Stellar-mass bins of disk galaxies (log10 solar-mass ranges).
b1 = tng_diskID[selec(10.4, 10.6, sMass[tng_diskID])]
b2 = tng_diskID[selec(10.6, 10.8, sMass[tng_diskID])]
b3 = tng_diskID[selec(10.8, 10.9, sMass[tng_diskID])]
# +
def Y_rawdata(data, snapnum):
    """Median curve plus asymmetric error bars over the first ``snapnum`` rows.

    For each row of ``data``, ErrorBarMedian yields (lower, median, upper).
    Returns (medians, Err) where Err stacks (median - lower, upper - median),
    shaped (2, snapnum) and ready for matplotlib's ``yerr``.
    """
    lower, median, upper = [], [], []
    for row in range(snapnum):
        lo, med, hi = ErrorBarMedian(data[row, :])
        lower.append(lo)
        median.append(med)
        upper.append(hi)
    lower = np.asarray(lower)
    median = np.asarray(median)
    upper = np.asarray(upper)
    Err = np.vstack((median - lower, upper - median))
    return median, Err
def getData(simu, snapList, fields, haloType='sub'):
    """Load the requested fields for every snapshot in ``snapList``.

    haloType 'fof' selects the FoF-halo loader; anything else uses the
    subhalo loader. Returns a dict mapping snapshot -> loaded data.
    """
    loader = il.func.loadhalos if haloType == 'fof' else il.func.loadSubhalos
    return {snap: loader(simu, snap, fields) for snap in snapList}
# -
# #next block is BH particles feedback energy(thermal) plot
# -------------------------------------------
# +
#get data
unbar_rawdata = []
bar_rawdata = []
unbar_il1 = []
bar_il1 = []
for i in tng_snap:
unbar_rawdata.append([])
bar_rawdata.append([])
bar_il1.append([])
unbar_il1.append([])
for subID in tng_diskID:
isdata = 1
prog = LoadMergHist('TNG', subID)[0]
tmp = []
last = 0
t_last = 0
for snap in tng_snap[::-1]:
try:
haloID = prog[snap]
f = h5py.File('f:/Linux/TNG_cutoff/bhs/snap_%d/cutout_%d.hdf5'%(snap, haloID), 'r')
engy = np.array(f['PartType5']['BH_CumEgyInjection_QM']).sum() * cons[snap] - last
last = np.array(f['PartType5']['BH_CumEgyInjection_QM']).sum() * cons[snap]
delta_t = t[snap] - t_last
t_last = t[snap]
except:
isdata = 0
break
tmp.append(engy / delta_t)
if isdata:
if subID in tng_barID:
for i in range(len(tmp)):
bar_rawdata[i].append(tmp[len(tmp)-1-i])
else:
for i in range(len(tmp)):
unbar_rawdata[i].append(tmp[len(tmp)-1-i])
for subID in il1_diskID:
isdata = 1
prog = LoadMergHist('il1', subID)[0]
tmp = []
last = 0
t_last = 0
for snap in il1_snap[::-1]:
try:
haloID = prog[snap]
f = h5py.File('f:/Linux/il1_bh_cutoff/snap_%d/cutout_%d.hdf5'%(snap, haloID), 'r')
engy = np.array(f['PartType5']['BH_CumEgyInjection_QM']).sum() * il1_cons[snap] - last
last = np.array(f['PartType5']['BH_CumEgyInjection_QM']).sum() * il1_cons[snap]
delta_t = il1_t[snap] - t_last
t_last = il1_t[snap]
except:
isdata = 0
break
tmp.append(engy / delta_t)
if isdata:
if subID in il1_barID:
for i in range(len(tmp)):
bar_il1[i].append(tmp[len(tmp)-1-i])
else:
for i in range(len(tmp)):
unbar_il1[i].append(tmp[len(tmp)-1-i])
# -
def logmass(data):
    """Element-wise log10 with non-finite results replaced by 0.

    Zeros in the input produce -inf and negatives produce nan under log10;
    both are zeroed so the values stay plottable. Accepts any array-like and
    always returns a numpy array.
    """
    data = np.asarray(data)  # idiomatic replacement for type(...) != type(np.array(0))
    # Suppress the divide/invalid RuntimeWarnings raised by log10(0) and
    # log10(<0); those entries are overwritten immediately below anyway.
    with np.errstate(divide='ignore', invalid='ignore'):
        data = np.log10(data)
    # ~isfinite covers both inf and nan in one mask (the original used two).
    data[~np.isfinite(data)] = 0
    return data
# +
#plot data
bar_rawdata = logmass(bar_rawdata)
unbar_rawdata = logmass(unbar_rawdata)
bar_il1 = logmass(bar_il1)
unbar_il1 = logmass(unbar_il1)
ydata, err = Y_rawdata(bar_rawdata, len(tng_snap))
ydata2, err2 = Y_rawdata(unbar_rawdata, len(tng_snap))
ydata_il1, err_il1 = Y_rawdata(bar_il1, len(il1_snap))
ydata2_il1, err2_il1 = Y_rawdata(unbar_il1, len(il1_snap))
# +
#plot
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.set_xlabel('Z', fontsize=13)
ax.set_ylabel(r'$log_{10}(\dot{E}_{BH,thermal}[erg/s])$', fontsize=13)
# ax.set_yscale("log")
ax.set_xlim(-0.1, 3.1)
ax.set_ylim(39.6, 44)
ax.set_xticks(rs)
# ax.set_title("subhalo BH particle feedback thermal energy")
#lines
ax.errorbar(rs-0.02, ydata, yerr=err, elinewidth=2, capthick=2, capsize=3, color='r', fmt='o',ms=5, ls='-', label='TNG-100 barred')
ax.errorbar(rs-0.02, ydata2, yerr=err2, elinewidth=2, capthick=2, capsize=3, color='orange', fmt='o',ms=5, ls='-', label='TNG-100 no bar')
ax.errorbar(rs+0.02, ydata_il1, yerr=err_il1, elinewidth=2, capthick=2, capsize=3, color='blue', fmt='o',ms=5, ls='-', label='Illustris-1 barred')
ax.errorbar(rs+0.02, ydata2_il1, yerr=err2_il1, elinewidth=2, capthick=2, capsize=3, color='c', fmt='o',ms=5, ls='-', label='Illustris-1 no bar')
ax.legend(loc = 0)
# ax.set_xlim(-0.1, 2)
# plt.savefig('f:/Linux/local_result/BH/E_thermal.pdf')
# -
|
JpytrNb/pdfPlot/.ipynb_checkpoints/BH_Edot-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: torch
# language: python
# name: torch
# ---
# # Libraries
# 1. [filterpy](https://filterpy.readthedocs.io/en/latest/kalman/UnscentedKalmanFilter.html)
# +
import numpy as np
import pandas as pd
import random
from glob import glob
import os
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from pathlib import Path
import plotly.express as px
import seaborn as sns
import geopy
import pymap3d as pm
from filterpy.kalman import UnscentedKalmanFilter, MerweScaledSigmaPoints
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import torchsummary
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import warnings
warnings.filterwarnings(action='ignore')
# -
# # Hyper Parameters
SEED = 1990
random.seed(SEED)
np.random.seed(SEED)
q = np.array([1,1,1,1,1])
r = np.array([1,1,1,1])
# # Useful functions
def calc_haversine(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance in metres between two points given
    as latitude/longitude in degrees. Vectorizes over numpy inputs.
    """
    lat1, lon1, lat2, lon2 = (np.radians(v) for v in (lat1, lon1, lat2, lon2))
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    a = np.sin(half_dlat) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon) ** 2
    central_angle = 2 * np.arcsin(np.sqrt(a))
    # 6,367,000 m: mean Earth radius used by the competition metric.
    return 6_367_000 * central_angle
def check_score(input_df: pd.DataFrame) -> pd.DataFrame:
    """Score predicted positions against ground truth.

    Adds a per-row 'meter' haversine error column and returns
    (scored df, mean error, competition score), where the score is the mean
    of each phone's 50th and 95th error percentiles.
    """
    output_df = input_df.copy()
    output_df['meter'] = input_df.apply(
        lambda r: calc_haversine(
            r.latDeg, r.lngDeg, r.t_latDeg, r.t_lngDeg
        ),
        axis=1
    )
    meter_score = output_df['meter'].mean()
    scores = []
    for phone in output_df['phone'].unique():
        phone_errors = output_df.loc[output_df['phone'] == phone, 'meter']
        # 50th and 95th percentile per phone, both contribute to the score.
        scores.extend(np.percentile(phone_errors, [50, 95]))
    score = sum(scores) / len(scores)
    return output_df, meter_score, score
# +
ell_wgs84 = pm.Ellipsoid()
def calc_geo2enu(df:pd.DataFrame)->pd.DataFrame:
    """Add local ENU coordinates (columns x/y/z) to a copy of ``df``,
    using the first row's geodetic position as the ENU origin.
    """
    output = df.copy()
    llh = df[['latDeg', 'lngDeg', 'heightAboveWgs84EllipsoidM']].to_numpy()
    lat0, lon0, h0 = llh[0]
    east, north, up = pm.geodetic2enu(llh[:, 0], llh[:, 1], llh[:, 2],
                                      lat0, lon0, h0, ell=ell_wgs84)
    output['x'], output['y'], output['z'] = east, north, up
    return output
def calc_enu2geo(df:pd.DataFrame)->pd.DataFrame:
    """Convert the ENU columns x/y/z back to geodetic coordinates, using the
    first row's lat/lng/height as the ENU origin (inverse of calc_geo2enu).
    """
    output = df.copy()
    enu = df[['x', 'y', 'z']].to_numpy()
    llh = df[['latDeg', 'lngDeg', 'heightAboveWgs84EllipsoidM']].to_numpy()
    lat, lon, height = pm.enu2geodetic(enu[:, 0], enu[:, 1], enu[:, 2],
                                       llh[0, 0], llh[0, 1], llh[0, 2],
                                       ell=ell_wgs84, deg=True)
    output['latDeg'] = lat
    output['lngDeg'] = lon
    output['heightAboveWgs84EllipsoidM'] = height
    return output
# -
# # Data
data_dir = Path("../input/google-smartphone-decimeter-challenge")
df_train = pd.read_pickle(str(data_dir / "gsdc_extract_train.pkl.gzip"))
df_test = pd.read_pickle(str(data_dir / "gsdc_extract_test.pkl.gzip"))
phones = df_train['phone'].unique()
phone = phones[random.randint(0, len(phones))]
df_sample = df_train[df_train['phone'] == phone].copy().reset_index().drop(columns = ['index'])
print(df_sample.shape)
df_sample.head()
for col in df_train.columns:
print(col)
# ## Model Define
# $$
# \begin{matrix}
# x_t =& x_{t-1} + \frac{v_{t-1}}{w_{t-1}}\left({\sin}\left({\omega}_{t-1}dt + {\theta}_{t-1}\right) - {\sin}\left({\theta}\right)\right)\\
# y_t =& y_{t-1} + \frac{v_{t-1}}{w_{t-1}}\left({\cos}\left({\theta}_{t-1}\right) - {\cos}\left({\omega}_{t-1}dt + {\theta}_{t-1}\right)\right)\\
# v_t =& v_{t-1}\\
# {\theta}_t =& {\theta}_{t-1} + {\omega}_{t-1}dt\\
# {\omega}_t =& {\omega}_{t-1}
# \end{matrix}
# $$
def fx(x, dt):
    """CTRV (constant turn rate and velocity) state transition over step dt.

    State layout: [x, y, v, theta, omega]. Uses the circular-arc solution
    when |omega| is non-negligible, otherwise the straight-line limit to
    avoid dividing by ~0.
    """
    px, py, v, theta, omega = x
    out = np.zeros_like(x)
    if abs(omega) > 1e-3:
        out[0] = px + v / omega * (np.sin(omega * dt + theta) - np.sin(theta))
        out[1] = py + v / omega * (np.cos(theta) - np.cos(omega * dt + theta))
    else:
        out[0] = px + v * dt * np.cos(theta)
        out[1] = py + v * dt * np.sin(theta)
    out[2] = v
    out[3] = theta + omega * dt
    out[4] = omega
    return out
def batch_filter(df, q_, r_):
    """UKF-smooth one phone's trace with the CTRV model and RTS smoothing.

    The 5-dim state is [x, y, v, theta, omega]. Measurements always include
    ENU x/y; yaw (state index 3) and z-gyro (state index 4) are appended only
    when those columns carry usable data. ``q_`` scales the 5 process-noise
    variances, ``r_`` the 4 possible measurement-noise variances (a subset is
    selected to match the active measurements). Returns the dataframe with
    smoothed positions converted back to geodetic coordinates.
    """
    df1 = calc_geo2enu(df)
    df1['yawRad'] = np.deg2rad(df1['yawDeg'])
    features = ['x', 'y']
    index = [0, 1]    # state components observed by each measurement feature
    rindex = [0, 1]   # rows of r_ that apply to each measurement feature
    # Observe yaw only when the column is neither all-NaN nor all-zero.
    if df1['yawDeg'].isna().mean() == 1 or df1['yawDeg'].mean() == 0:
        pass
    else:
        features += ['yawRad']
        index += [3]
        rindex += [2]
    # Same gate for the z-axis gyro (turn-rate) measurement.
    if df1['UncalGyroZRadPerSec'].isna().mean() == 1 or df1['UncalGyroZRadPerSec'].mean() == 0:
        pass
    else:
        features += ['UncalGyroZRadPerSec']
        index += [4]
        rindex += [3]
    q = q_
    r = r_[rindex]
    meas = df1[features]
    meas = meas.fillna(0)
    h = lambda x: x[index]  # measurement fn: pick the observed state slots
    points = MerweScaledSigmaPoints(5, alpha=.1, beta=2., kappa=-1)
    kf = UnscentedKalmanFilter(dim_x=5, dim_z=len(features), dt=1, fx=fx, hx=h, points=points)
    kf.Q = np.diag(q)
    kf.R = np.diag(r)
    mu, cov = kf.batch_filter(meas.values)
    (xs, Ps, Ks) = kf.rts_smoother(mu, cov)
    df2 = df1.copy()
    df2['x'] = xs[:, 0]
    df2['y'] = xs[:, 1]
    # BUG FIX: the state layout is [x, y, v, theta, omega], so the smoothed
    # yaw lives in column 3 and the turn rate in column 4. The original wrote
    # back columns 2 (speed) and 3 (yaw), corrupting both output columns.
    df2['yawDeg'] = np.rad2deg(xs[:, 3])
    df2['UncalGyroZRadPerSec'] = xs[:, 4]
    df3 = calc_enu2geo(df2)
    return df3
def evaluate(df, q_, r_, get_score = True):
    """Smooth every phone trace in ``df`` with batch_filter.

    Returns (smoothed df, mean_before, score_before, mean_after, score_after);
    the four metrics stay 0 when ``get_score`` is False.
    """
    output = df.copy()
    mean_before, score_before, mean_after, score_after = 0, 0, 0, 0
    # The smoothed coordinates are merged back in, so drop the raw ones.
    output.drop(columns=['latDeg', 'lngDeg'], inplace=True)
    smoothed_parts = []
    for phone in tqdm(df['phone'].unique()):
        single_phone = df[df['phone'] == phone].copy()
        filtered = batch_filter(single_phone, q_, r_)
        smoothed_parts.append(filtered[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']])
    output = output.merge(pd.concat(smoothed_parts), on=['phone', 'millisSinceGpsEpoch'])
    if get_score:
        _, mean_before, score_before = check_score(df)
        _, mean_after, score_after = check_score(output)
    return output, mean_before, score_before, mean_after, score_after
# +
q_ = np.array([0.01, 0.01, 0.05, 0.5*np.pi/180, 2.5*np.pi/180])**2
r_ = np.array([0.01, 0.01, 0.5*np.pi/180, 2.5*np.pi/180])**2
phones = df_train['phone'].unique()
sample_phone = np.random.choice(phones, 10, replace = False)
sample_index = df_train['phone'].apply(lambda x: x in sample_phone)
q_gain_ = 1
r_gain_ = 1
result_list = []
best_list = []
iter_count = 0
min_score = np.inf
rate = 0.1
count = 0
# BUG FIX: the loop previously read q_gain/r_gain before ever assigning them,
# raising NameError on the first iteration. Seed them as 1-element arrays so
# the `.item()` calls below also work on the first pass.
q_gain = np.ones(1)
r_gain = np.ones(1)
# Random hill-climb over the noise-scaling gains: keep the best (q, r) pair
# found so far and perturb around it, widening the search radius with every
# consecutive miss.
while True:
    # Gains must stay positive; the random perturbation can flip the sign.
    q_gain = abs(q_gain)
    r_gain = abs(r_gain)
    iter_count += 1
    q = q_gain * q_
    r = r_gain * r_
    _, mean_before, score_before, mean_after, score_after = evaluate(df_train[sample_index], q, r)
    result = [q_gain, r_gain, q, r, mean_after, mean_before, score_after, score_before]
    print(f"{q_gain.item():.4f}, {r_gain.item():.4f} : mean change - {mean_after - mean_before:.6f}, score change - {score_after - score_before:.6f}")
    print(f"after[{mean_after}, {score_after}], before[{mean_before}, {score_before}]")
    print(q_gain * q_)
    print(r_gain * r_)
    result_list.append(result)
    if min_score > score_after:
        print(f"Best Score! {score_after} - [{q_gain.item():.4f}, {r_gain.item():.4f}]")
        best_list.append(result)
        count = 0
        q_gain_ = q_gain
        r_gain_ = r_gain
        min_score = score_after
    else:
        # Miss: restart from the best gains, let the radius (rate * count) grow.
        count += 1
        q_gain = q_gain_
        r_gain = r_gain_
    # BUG FIX: this used in-place `+=`, which mutated the arrays that
    # q_gain_/r_gain_ alias, silently drifting the stored best gains.
    # Out-of-place addition rebinds q_gain/r_gain and leaves the best intact.
    q_gain = q_gain + np.random.randn(1) * (rate * count)
    r_gain = r_gain + np.random.randn(1) * (rate * count)
# -
q_gain.item()
# +
q_gain_list = np.arange(0.1, 2, 0.1)
r_gain_list = np.arange(0.1, 2, 0.1)
q_ = np.array([0.01, 0.01, 0.1, 1*np.pi/180, 10*np.pi/180])**2
r_ = np.array([0.1, 0.1, 5 * np.pi/180, 20 * np.pi/180])**2
phones = df_train['phone'].unique()
result_list = []
iter_count = 0
for q_gain in q_gain_list:
for r_gain in r_gain_list:
iter_count += 1
q = q_gain * q_
r = r_gain * r_
_, mean_before, score_before, mean_after, score_after = evaluate(df_train, q, r)
result = [q, r, mean_after, mean_before, score_after, score_before]
print(f"{q_gain}, {r_gain} : mean chagne - {mean_after - mean_before:.6f}, score change - {score_after - score_before:.6f}")
print(f"after[{mean_after}, {score_after}], before[{mean_before}, {score_before}]")
result_list.append(result)
# -
df = pd.DataFrame(result_list, columns = ['q', 'r', 'mean_after', 'mean_before', 'score_after', 'score_before'])
df['qgain'] = df['q'].apply(lambda x: x.mean())
df['rgain'] = df['r'].apply(lambda x: x.mean())
df['change_score'] = df['score_after'] - df['score_before']
pt = df.pivot_table('change_score', 'qgain', 'rgain')
sns.heatmap(pt)
q = df.loc[df['change_score'].min() == df['change_score'], 'q'].values[0]
r = df.loc[df['change_score'].min() == df['change_score'], 'r'].values[0]
print(q, r)
_, mean_before, score_before, mean_after, score_after = evaluate(df_train, q, r)
print(mean_before, score_before, mean_after, score_after)
print(f"mean change : {mean_after - mean_before:.4f}")
print(f"score change: {score_after - score_before:.4f}")
submission = pd.read_csv("../input/google-smartphone-decimeter-challenge/sample_submission.csv")
submission = submission[['phone', 'millisSinceGpsEpoch']]
result, _, _, _, _ = evaluate(df_test, q, r, get_score=False)
result = result[['phone', 'millisSinceGpsEpoch', 'latDeg', 'lngDeg']]
submission = submission.merge(result, on = ['phone', 'millisSinceGpsEpoch'])
submission.to_csv(f"./models/{'ComplexKalmanFilter1'}/result-{4}result.csv", index = False)
|
Research/GoogleSmartPhone/code/.ipynb_checkpoints/GSDC4_ComplexKalmanFilter-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dependencies
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import json
from tweet_utility_scripts import *
from transformers import TFDistilBertModel, DistilBertConfig
from tokenizers import BertWordPieceTokenizer
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.layers import Dense, Input, Dropout, GlobalAveragePooling1D, GlobalMaxPooling1D, Concatenate
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
# -
# # Load data
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
database_base_path = '/kaggle/input/tweet-dataset-split-distilbert-uncased-128/'
hold_out = pd.read_csv(database_base_path + 'hold-out.csv')
train = hold_out[hold_out['set'] == 'train']
validation = hold_out[hold_out['set'] == 'validation']
display(hold_out.head())
# Unzip files
# !tar -xvf /kaggle/input/tweet-dataset-split-distilbert-uncased-128/hold_out.tar.gz
base_data_path = 'hold_out/'
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train.npy')
x_valid = np.load(base_data_path + 'x_valid.npy')
y_valid = np.load(base_data_path + 'y_valid.npy')
y_train = y_train.astype('float')
y_valid = y_valid.astype('float')
# Delete data dir
shutil.rmtree(base_data_path)
# -
# # Model parameters
# +
tokenizer_path = database_base_path + 'vocab.txt'
base_path = '/kaggle/input/qa-transformers/distilbert/'
model_path = 'model.h5'
config = {
"MAX_LEN": 128,
"BATCH_SIZE": 64,
"EPOCHS": 20,
"LEARNING_RATE": 1e-5,
"ES_PATIENCE": 3,
"question_size": 3,
"smooth_factor": .1,
"base_model_path": base_path + 'distilbert-base-uncased-distilled-squad-tf_model.h5',
"config_path": base_path + 'distilbert-base-uncased-distilled-squad-config.json'
}
with open('config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
# + _kg_hide-input=true _kg_hide-output=true
def smooth_labels(y, smooth_factor=config['smooth_factor']):
    """Label-smooth a 1-D target vector in place: scale the mass by
    (1 - smooth_factor) and spread smooth_factor uniformly over the entries.
    Returns the same (mutated) array.
    """
    y[:] = y * (1.0 - smooth_factor) + smooth_factor / y.shape[0]
    return y
np.apply_along_axis(smooth_labels, -1, y_train)
np.apply_along_axis(smooth_labels, -1, y_valid)
# -
# # Model
# +
module_config = DistilBertConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build and compile the span-extraction model: a DistilBERT backbone,
    average-pooled over the sequence, with two softmax heads predicting the
    answer span's start and end token positions over MAX_LEN positions.
    """
    # NOTE(review): `tf` is not among this notebook's visible imports —
    # confirm `import tensorflow as tf` exists elsewhere in the file.
    input_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    # NOTE(review): DistilBERT typically has no token_type_ids input — verify
    # the pinned transformers version accepts this key without error.
    token_type_ids = Input(shape=(MAX_LEN,), dtype=tf.int32, name='token_type_ids')
    base_model = TFDistilBertModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    sequence_output = base_model({'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids})
    # Element 0 of the output is the last hidden state (batch, MAX_LEN, hidden).
    last_state = sequence_output[0]
    x = GlobalAveragePooling1D()(last_state)
    # Independent position distributions for the span's start and end.
    y_start = Dense(MAX_LEN, activation='softmax', name='y_start')(x)
    y_end = Dense(MAX_LEN, activation='softmax', name='y_end')(x)
    model = Model(inputs=[input_ids, attention_mask, token_type_ids], outputs=[y_start, y_end])
    model.compile(optimizers.Adam(lr=config['LEARNING_RATE']),
                  loss=losses.CategoricalCrossentropy(),
                  metrics=[metrics.CategoricalAccuracy()])
    return model
model = model_fn(config['MAX_LEN'])
model.summary()
# -
# # Train
# + _kg_hide-input=false _kg_hide-output=true
tb_callback = TensorBoard(log_dir='./')
es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'],
restore_best_weights=True, verbose=1)
history = model.fit(list(x_train), list(y_train),
validation_data=(list(x_valid), list(y_valid)),
callbacks=[es, tb_callback],
epochs=config['EPOCHS'],
batch_size=config['BATCH_SIZE'],
verbose=2).history
model.save_weights(model_path)
# + _kg_hide-input=true _kg_hide-output=true
# Compress logs dir
# !tar -cvzf train.tar.gz train
# !tar -cvzf validation.tar.gz validation
# Delete logs dir
if os.path.exists('/kaggle/working/train/'):
shutil.rmtree('/kaggle/working/train/')
if os.path.exists('/kaggle/working/validation/'):
shutil.rmtree('/kaggle/working/validation/')
# -
# # Model loss graph
# + _kg_hide-input=true
sns.set(style="whitegrid")
plot_metrics(history, metric_list=['loss', 'y_start_loss', 'y_end_loss',
'y_start_categorical_accuracy', 'y_end_categorical_accuracy'])
# -
# # Tokenizer
# + _kg_hide-output=true
tokenizer = BertWordPieceTokenizer(tokenizer_path , lowercase=True)
tokenizer.save('./')
# -
# # Model evaluation
# + _kg_hide-input=true
train_preds = model.predict(list(x_train))
valid_preds = model.predict(list(x_valid))
train['start'] = train_preds[0].argmax(axis=-1)
train['end'] = train_preds[1].argmax(axis=-1)
train["end"].clip(0, train["text_len"], inplace=True)
train["start"].clip(0, train["end"], inplace=True)
train['prediction'] = train.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
train["prediction"].fillna('', inplace=True)
validation['start'] = valid_preds[0].argmax(axis=-1)
validation['end'] = valid_preds[1].argmax(axis=-1)
validation["end"].clip(0, validation["text_len"], inplace=True)
validation["start"].clip(0, validation["end"], inplace=True)
validation['prediction'] = validation.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
validation["prediction"].fillna('', inplace=True)
display(evaluate_model(train, validation))
# -
# # Visualize predictions
# + _kg_hide-input=true
print('Train set')
display(train.head(10))
print('Validation set')
display(validation.head(10))
|
Model backlog/Train/23-tweet-train-distilbert-base-lbl-smoothing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from neuron.spiking import HH
def simple_plot(model):
    """Plot a spiking-neuron model's membrane potential and injected current
    against time, titled with the model's name.
    """
    plt.figure(figsize=(15, 5))
    plt.plot(model.T, model.V, label='Membrane potential')
    plt.plot(model.T, model.I, label='Current')
    plt.title(model.name)
    plt.xlabel('Time')
    plt.legend()
    plt.grid(ls=':')
    plt.show()
# +
# 1 - Constant current.
model = HH(i0=0.5)
model.run(400)
simple_plot(model)
# +
# 2 - Variable current.
model = HH()
for _ in range(400):
model.i = np.sin(model.t)*0.01
model.run_step()
simple_plot(model)
|
examples/model_HH.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import requests
# Interactive loop: ask for a city name, query the etouch mini weather API,
# and print the first forecast entry's date, high, low and condition.
while True:
    city = input("请输入城市,按回车键:\n")  # prompt: "enter a city, press Enter"
    # An empty input ends the session.
    if not city:
        break
    try:
        req = requests.get("http://wthrcdn.etouch.cn/weather_mini?city=%s" % city)
    except:
        # Network/request failure: report ("query failed") and stop.
        print("查询失败")
        break
    dic_city = req.json()
    # print(dic_city)
    city_data = dic_city.get("data")
    if city_data:
        # forecast[0] is the first (current) day's entry.
        city_forecast = city_data["forecast"][0]
        print(city_forecast.get('date'))
        print(city_forecast.get('high'))
        print(city_forecast.get('low'))
        print(city_forecast.get('type'))
    else:
        # No "data" key => the city was not recognized ("not obtained").
        print("未获得")
|
get_weather.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Control Tesla Cars
#
# Test setting temp based on local weather.
#
# driver_temp=
#
# passenger_temp=
#
# This is a delivery system: replace buses in cities with electric cars. These cars run the same routes as the buses.
#
#
# Going to change the radio
#
# More in-depth Tesla car stats. Returns the Raspberry Pi media system onboard that is used to stream media to the car. Plugs in to the car's stereo. Allows passengers in the car to receive calls. Broadcasts wifi. Each car has its own router that it uses to communicate with the outside world. Camera to report how everything is going. If things are wrong, realtime images of what is happening. The ability for remote login and control. HAM Radio broadcast. Each car is broadcasting a media center that offers storage space and media. You are able to connect to any of these Tesla cars and access the services they offer. If for some reason they are not to total standards then you can retry.
#
# Nux the
#
# Switch between low and high heat.
#
# passanger temp
#
# config file with times to auto start the car. driverless commands. sync to meetup and drive to where the meetups are happening
#
#
# {
# "response": {
# "inside_temp": 17.0, // degC inside car
# "outside_temp": 9.5, // degC outside car or null
# "driver_temp_setting": 22.6, // degC of driver temperature setpoint
# "passenger_temp_setting": 22.6, // degC of passenger temperature setpoint
# "is_auto_conditioning_on": false, // apparently even if on
# "is_front_defroster_on": null, // null or boolean as integer?
# "is_rear_defroster_on": false,
# "fan_status": 0 // fan speed 0-6 or null
# }
# }
import json
#import nose
from cryptography.fernet import Fernet
import getpass
import pandas
myusr = getpass.getuser()
tespas = getpass.getpass('PASSWORD ')
mydict = ({ "response": { "df": False, "dr": False, "pf": False, "pr": False, "ft": False, "rt": False, "car_verson": "1.19.42", "locked": True, "sun_roof_installed": False, "sun_roof_state": "unknown", "sun_roof_percent_open": 0, "dark_rims": False, "wheel_type": "Base19", "has_spoiler": False, "roof_color": "Colored", "perf_config": "Base" } })
tespas
pandas.DataFrame(mydict)
# +
key = Fernet.generate_key()
f = Fernet(key)
token = f.encrypt(b'straz')
print(token)
'...'
f.decrypt(token)
# -
key
tesveh = 'https://owner-api.teslamotors.com/api/1/vehicles/1/command/remote_start_drive?password='
tesveh
# +
#from urllib2 import Request, urlopen
#headers = {
# 'Authorization': 'Bearer {access_token}'
#}
#request = Request('https://owner-api.teslamotors.com/api/1/vehicles/1/command/remote_start_drive?password=<PASSWORD>', headers=headers)
#response_body = urlopen(request).read()
#print response_body
# -
doorquen = (mydict['response']['df'], mydict['response']['dr'], mydict['response']['pf'], mydict['response']['pr'])
doorquen
key
keyencr = f.encrypt(key)
keyencr
f.decrypt(token)
TESLACONFIG = ('TESLA CONTROL BRUM BRUM')
print(TESLACONFIG)
# +
passwrdz = getpass.getpass("PASSWORD ENTER ")
askmileorkilo = input('AMERICAN SETTINGS Y/n ')
setlowtemp = input('DRIVER LOW TEMP SET AS: ')
sethightemp = input('DRIVER HIGH TEMP SET AS: ')
setpasslow = input('PASS LOW TEMP SET AS: ')
setpashight = input('PASS HIGH TEMP SET AS: ')
openDriversDoor = input('OPEN DRIVERS DOOR Y/n ')
unlockcar = input('UNLOCK CAR Y/n ')
valetmode = input('VALET MODE Y/n ')
valetpass = getpass.getpass('4 PIN NUMBER: ')
# -
passwrdz
valint = int(valetpass)
SETCURRENTZ = input('Current from -1 to 1: ')
flsecur = float(SETCURRENTZ)
flsecur
int(SETCURRENTZ)
import cryptography
# +
#fercypo = cryptography.fernet()
# -
# {
# "portal_url": "https://owner-api.teslamotors.com/api/1/vehicles/",
# "stream_url": "https://streaming.vn.teslamotors.com/stream/",
# "username": "<EMAIL>",
# "password": "<PASSWORD>",
# "output_file": "stream_output.txt"
# }
f.generate_key()
# BUG FIX: bytes(valint) builds `valint` zero-filled bytes, so every PIN
# encrypted to the same all-zero payload and int.from_bytes() below always
# returned 0. Encode the PIN's *value* so it round-trips through Fernet.
token = f.encrypt(valint.to_bytes(4, byteorder='big'))
print(token)
f.decrypt(token)
# Recover the integer PIN from the decrypted big-endian bytes.
int.from_bytes(f.decrypt(token), byteorder='big')
byedecrpt = (f.decrypt(token))
int.from_bytes((byedecrpt), byteorder='big')
if 'n' in valetmode:
valeton = False
elif 'Y' in valetmode:
valeton = True
if 'n' in askmileorkilo:
miletru = False
elif 'Y' in askmileorkilo:
miletru = True
askmileorkilo
miletru
if miletru == True:
guidisuni = "mi/hr"
elif miletru == False:
guidisuni = "km/hr"
guidisuni
if 'n' in unlockcar:
opddor = False
elif 'Y' in unlockcar:
opddor = True
opddor
# +
if 'n' in openDriversDoor:
opddor = False
elif 'Y' in openDriversDoor:
opddor = True
# -
opddor
import configparser
dfstat = (mydict['response']['df'])
pfstat = (mydict['response']['pf'])
prstat = (mydict['response']['pr'])
drstat = (mydict['response']['dr'])
drstat
prstat
dfstat
pfstat
import arrow
timnow = arrow.now()
print(timnow.datetime)
timnow.for_json()
timnow.isoweekday()
timnow.humanize()
timnow.isoweekday()
timnow.isocalendar()
timnow.ceil
timnow.clone()
timnow.date()
timnow.format()
timnow.floor
timnow.float_timestamp
timz = timnow.for_json()
timz
print(timz)
timz.upper()
timz.split('-' and ':' and '.')
print(timnow)
timutc = timnow.utcnow()
timnow.weekday()
timnow.dst()
timnow.ctime()
arrow.util.total_seconds
arrow.api.factory
artz = arrow.factory.tzinfo(timutc)
calcsee= arrow.locales.calendar.HTMLCalendar()
calcsee.formatmonth(2016, 5)
arnow = arrow.now()
dayweekz = arnow.weekday()
dayweekz
calcsee.getfirstweekday()
calcsee.getfirstweekday()
arnow.strftime('%M')
arnow.strftime('%m')
arnow.strftime('%Y %m')
yrints = int(arnow.strftime('%Y'))
yrints
mondayfor = int(arnow.strftime('%m'))
mondayfor
daydatye = int(arnow.strftime('%d'))
daydatye
calcsee.formatday(2016, 1)
panread = pandas.read_html(calcsee.formatmonth(yrints, mondayfor))
panread[0]
calcsee.formatmonth(yrints, mondayfor)
import os
import random
calcday = calcsee.cssclasses
calcday
calcfulday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
len(calcfulday)
random.choice(calcfulday)
dayweekz
calcfulday[dayweekz]
print(calcday[dayweekz])
random.choice(calcday)
calcsee.firstweekday
calcsee.formatday
calcsee.formatmonth
# BUG FIX: a dangling "calcsee." (attribute access with no attribute) is a
# SyntaxError; reference the calendar object itself instead.
calcsee
passwrdz
f.encrypt
# BUG FIX: Fernet.encrypt() requires bytes — passing the getpass str raised
# TypeError. Encode the password before encrypting.
token = f.encrypt(passwrdz.encode('utf-8'))
import hashlib
print(hashlib.algorithms_available)
print(hashlib.algorithms_guaranteed)
#mystring = input('Enter String to hash: ')
# Assumes the default UTF-8
hash_object = hashlib.sha512(passwrdz.encode())
print(hash_object.hexdigest())
print(hash_object.name)
print(hash_object.block_size)
import subprocess

# Read the Raspberry Pi SoC temperature; output looks like b"temp=48.3'C\n".
tempchec = subprocess.check_output(["/opt/vc/bin/vcgencmd", "measure_temp"])
# BUG FIX: the original used curly "smart quotes" (a SyntaxError) and split
# bytes with a str separator (a TypeError under Python 3); use b'='.
print(float(tempchec.split(b'=')[1][:-3]))
tempchec
# Removed `subprocess.check_output(o)`: `o` was never defined (NameError).
# The duplicate query below had the same smart-quote problems; fixed here.
s = subprocess.check_output(["/opt/vc/bin/vcgencmd", "measure_temp"])
print(float(s.split(b'=')[1][:-3]))
import socket
socket.gethostbyname(socket.gethostname())
socket.gethostname()
# +
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 0)) # connecting to a UDP address doesn't send packets
local_ip_address = s.getsockname()[0]
# -
local_ip_address
s.getsockname()
socket.gethostbyaddr(socket.gethostname())
socket.getaddrinfo
subup = subprocess.check_output(['uptime'])
loaspli = subup.split('load average: ')
'''
import os
import subprocess
BAT_PATH = "/proc/acpi/battery/BAT%d"
def get_full_charge(batt_path):
"""Get the max capacity of the battery
:param batt_path: The dir path to the battery (acpi) processes
:type batt_path: string
:returns: The max capacity of the battery
:rtype: int
"""
p1 = subprocess.Popen(["grep",
"last full capacity",
batt_path + "/info"],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk",
"{print $4}"],
stdin=p1.stdout,
stdout=subprocess.PIPE)
p1.stdout.close()
return int(p2.communicate()[0])
def get_current_charge(batt_path):
"""Get the current capacity of the battery
:param batt_path: The dir path to the battery (acpi) processes
:type batt_path: string
:returns: The current capacity of the battery
:rtype: int
"""
p1 = subprocess.Popen(["grep",
"remaining capacity",
batt_path + "/state"],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(["awk",
"{print $3}"],
stdin=p1.stdout,
stdout=subprocess.PIPE)
p1.stdout.close()
return int(p2.communicate()[0])
def guess_battery_path():
"""Gets the path of the battery (BAT0, BAT1...)
:returns: The path to the battery acpi process information
:rtype: string
"""
i = 0
while True:
if os.path.exists(BAT_PATH % i):
return BAT_PATH % i
i += 1
def is_plugged(batt_path):
"""Returns a flag saying if the battery is plugged in or not
:param batt_path: The dir path to the battery (acpi) processes
:type batt_path: string
:returns: A flag, true is plugged, false unplugged
:rtype: bool
"""
p = subprocess.Popen(["grep",
"charging state",
batt_path + "/state"],
stdout=subprocess.PIPE)
return "discharging" not in p.communicate()[0]
def get_battery_percent(batt_path):
"""Calculates the percent of the battery based on the different data of
the battery processes
:param batt_path: The dir path to the battery (acpi) processes
:type batt_path: string
:returns: The percent translation of the battery total and current capacity
:rtype: int
"""
return get_current_charge(batt_path) * 100 / get_full_charge(batt_path)
def main():
path = guess_battery_path()
print("Current battery percent: %d" % get_battery_percent(path))
print("Plugged in" if is_plugged(path) else "Not plugged in")
if __name__ == "__main__":
main()
'''
# Parse `uptime` output into (uptime string, 5-minute load average).
# BUG FIX: this pasted snippet used curly quotes and a module-level `return`
# (both SyntaxErrors) and split bytes with str separators; decode once and
# just evaluate the tuple at the end instead of returning.
s = subprocess.check_output(["uptime"]).decode()
load_split = s.split('load average: ')
load_five = float(load_split[1].split(',')[1])
up = load_split[0]
up_pos = up.rfind(',', 0, len(up) - 4)
up = up[:up_pos].split('up ')[1]
(up, load_five)
'''
import subprocess
import os
def get_ram():
“Returns a tuple (total ram, available ram) in megabytes. See www.linuxatemyram.com”
try:
s = subprocess.check_output([“free”,”-m”])
lines = s.split(‘\n’)
return ( int(lines[1].split()[1]), int(lines[2].split()[3]) )
except:
return 0def get_process_count():
“Returns the number of processes”
try:
s = subprocess.check_output([“ps”,”-e”])
return len(s.split(‘\n’))
except:
return 0def get_up_stats():
“Returns a tuple (uptime, 5 min load average)”
try:
s = subprocess.check_output([“uptime”])
load_split = s.split(‘load average: ‘)
load_five = float(load_split[1].split(‘,’)[1])
up = load_split[0]
up_pos = up.rfind(‘,’,0,len(up)-4)
up = up[:up_pos].split(‘up ‘)[1]
return ( up , load_five )
except:
return ( ” , 0 )def get_connections():
“Returns the number of network connections”
try:
s = subprocess.check_output([“netstat”,”-tun”])
return len([x for x in s.split() if x == ‘ESTABLISHED’])
except:
return 0def get_temperature():
“Returns the temperature in degrees C”
try:
s = subprocess.check_output([“/opt/vc/bin/vcgencmd”,”measure_temp”])
return float(s.split(‘=’)[1][:-3])
except:
return 0def get_ipaddress():
“Returns the current IP address”
arg=’ip route list’
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index(‘src’)+1]
return ipaddr
def get_cpu_speed():
“Returns the current CPU speed”
f = os.popen(‘/opt/vc/bin/vcgencmd get_config arm_freq’)
cpu = f.read()
return cpu
print ‘Free RAM: ‘+str(get_ram()[1])+’ (‘+str(get_ram()[0])+’)’
print ‘Nr. of processes: ‘+str(get_process_count())
print ‘Up time: ‘+get_up_stats()[0]
print ‘Nr. of connections: ‘+str(get_connections())
print ‘Temperature in C: ‘ +str(get_temperature())
print ‘IP-address: ‘+get_ipaddress()
print ‘CPU speed: ‘+str(get_cpu_speed())
'''
# +
#import ConfigParser
# Build the Tesla state/config file with RawConfigParser.
# NOTE(review): `configparser`, `myusr`, `hash_object`, `sethightemp`,
# `setlowtemp`, the door status flags (dfstat/pfstat/prstat/drstat),
# `opddor`, `flsecur`, `valeton`, `timnow`, `calcfulday` and `dayweekz`
# are all defined in earlier cells — confirm they are in scope here.
config = configparser.RawConfigParser()
# When adding sections or items, add them in the reverse order of
# how you want them to be displayed in the actual file.
# In addition, please note that using RawConfigParser's and the raw
# mode of ConfigParser's respective set functions, you can assign
# non-string values to keys internally, but will receive an error
# when attempting to write to a file or when you get it in non-raw
# mode. SafeConfigParser does not allow such assignments to take place.
# Credentials: the password is stored as a hex digest, not plaintext.
config.add_section('userinfo')
config.set('userinfo', 'username', myusr)
config.set('userinfo', 'password', (hash_object.hexdigest()))
# Cabin temperature limits, driver and passenger sides.
config.add_section('tempsetting')
config.set('tempsetting', 'drivertemphigh', sethightemp)
config.set('tempsetting', 'drivertemplow', setlowtemp)
config.set('tempsetting', 'passtemphigh', sethightemp)
config.set('tempsetting', 'passtemplow', setlowtemp)
# Per-door open/closed flags.
config.add_section('dooropenstatus')
#config.set('doorstatus', 'drifronop', )
config.set('dooropenstatus', 'driversfront', dfstat)
config.set('dooropenstatus', 'passfront', pfstat)
config.set('dooropenstatus', 'passrear', prstat)
config.set('dooropenstatus', 'driversrear', drstat)
# Per-door lock flags; all four doors share the same `opddor` value.
# NOTE(review): key 'driversfont' looks like a typo for 'driversfront' —
# kept as-is since readers of the cfg file may depend on it.
config.add_section('doorlockstatus')
config.set('doorlockstatus', 'driversfont', opddor)
config.set('doorlockstatus', 'passfront', opddor)
config.set('doorlockstatus', 'passrear', opddor)
config.set('doorlockstatus', 'driverrear', opddor)
# Charging snapshot (units unclear from here — TODO confirm volts/minutes).
config.add_section('chargestatus')
config.set('chargestatus', 'charging', 'complete')
config.set('chargestatus', "battcurrent", flsecur)
config.set('chargestatus', 'chargvolt', 2.5)
config.set('chargestatus', 'timecharge', 60)
#config.set('charges')
#Set Valet Mode
config.add_section('valetmode')
config.set('valetmode', 'valeton', valeton)
config.set('valetmode', 'timenow', (timnow))
config.set('valetmode', 'traveltime', 40)
config.set('valetmode', 'dayname', calcfulday[dayweekz])
config.set('valetmode', 'timehuman', timnow.humanize())
#config.set('templow', 'an_int', setlowtemp)
#config.set('drivefront', 'an_int', '3')
#config.set('passfront', 'baz', 'fun')
#config.set('passback', 'bar', 'Python')
#config.set('driveback', 'foo', '%(bar)s is %(baz)s!')
# Writing our configuration file to 'tesla.cfg'
with open('tesla.cfg', 'w') as configfile:
    config.write(configfile)
# -
# Read the configuration back from disk to verify the write.
# Bug fix: configparser's read() expects *filenames*.  Passing an open file
# object makes read() iterate the file's lines as filenames and silently
# read nothing; read_file() is the file-object API.
crea = config.read  # kept from original (bound method, currently unused)
with open('tesla.cfg', 'r') as configfile:
    config.read_file(configfile)
    print(config.sections())
# +
# Upper/lower GUI temperature limits in degrees C (mirrors TEMP_HI/TEMP_LO
# in the JavaScript sample quoted further down).
temphi = 32
templo = 17
# Build display-style "dict" strings, e.g. "{'temphi' : 32}".
tempstr = f"{{'temphi' : {temphi}}}"
# -
templow = f"{{'templow' : {templo}}}"
templow
#
# {
# "response": {
# "gui_distance_units": "mi/hr",
# "gui_temperature_units": "F",
# "gui_charge_rate_units": "mi/hr",
# "gui_24_hour_time": false,
# "gui_range_display": "Rated"
# }
# }tempstr
#
#
# { "response": { "charging_state": "Complete", // "Charging", ?? "charge_to_max_range": false, // current std/max-range setting "max_range_charge_counter": 0, "fast_charger_present": false, // connected to Supercharger? "battery_range": 239.02, // rated miles "est_battery_range": 155.79, // range estimated from recent driving "ideal_battery_range": 275.09, // ideal miles "battery_level": 91, // integer charge percentage "battery_current": -0.6, // current flowing into battery "charge_starting_range": null, "charge_starting_soc": null, "charger_voltage": 0, // only has value while charging "charger_pilot_current": 40, // max current allowed by charger & adapter "charger_actual_current": 0, // current actually being drawn "charger_power": 0, // kW (rounded down) of charger "time_to_full_charge": null, // valid only while charging "charge_rate": -1.0, // float mi/hr charging or -1 if not charging "charge_port_door_open": true } }
# https://owner-api.teslamotors.com/api/1/vehicles/1/command/set_temps?driver_temp=23.7&passenger_temp=18.1
#
# Returns the current temperature and climate control state.
#
# var TEMP_HI = 32;
# var TEMP_LO = 17;
# function set_temperature( params, cb ) {
# var dtemp = params.dtemp;
# var ptemp = params.ptemp;
# var vid = params.id;
# var error = false;
#
# //var temp_str = "";
# if ( dtemp !== undefined && dtemp <= TEMP_HI && dtemp >= TEMP_LO) {
# //temp_str = 'driver_temp=' + dtemp; // change from string to JSON form data
# } else {
# error = true;
# }
# // if no passenger temp is passed, the driver temp is also used as the passenger temp
# if ( ptemp !== undefined && ptemp <= TEMP_HI && ptemp >= TEMP_LO) {
# //temp_str = temp_str +'&passenger_temp=' + ptemp; // change from string to JSON form data
# } else if ( ptemp === undefined ) {
# ptemp = dtemp;
# } else {
# error = true;
# }
# if (!error) {
# request( {
# method: 'POST',
# url: portal + '/vehicles/' + vid + '/command/set_temps',
# gzip: true,
# headers: http_header,
# form: {
# "driver_temp" : dtemp.toString(),
# "passenger_temp" : ptemp.toString(),
# }
# }, function (error, response, body) {
# if ((!!error) || (response.statusCode !== 200)) return report(error, response, body, cb);
# try {
# var data = JSON.parse(body);
# if (typeof cb == 'function') return cb( data.response );
# else return true;
# } catch (err) {
# return report2('set_temps', body, cb);
# }
# });
# } else {
# if (typeof cb == 'function') return cb( new Error('Invalid temperature setting (' + dtemp + 'C), Passenger (' + ptemp + 'C)'));
# else return false;
# }
#
# }
# exports.set_temperature = set_temperature;
# exports.TEMP_HI = TEMP_HI;
# exports.TEMP_LO = TEMP_LO;
#
# {
# "response": {
# "df": false, // driver's side front door open
# "dr": false, // driver's side rear door open
# "pf": false, // passenger's side front door open
# "pr": false, // passenger's side rear door open
# "ft": false, // front trunk is open
# "rt": false, // rear trunk is open
# "car_verson": "1.19.42", // car firmware version
# "locked": true, // car is locked
# "sun_roof_installed": false, // panoramic roof is installed
# "sun_roof_state": "unknown",
# "sun_roof_percent_open": 0, // null if not installed
# "dark_rims": false, // gray rims installed
# "wheel_type": "Base19", // wheel type installed
# "has_spoiler": false, // spoiler is installed
# "roof_color": "Colored", // "None" for panoramic roof
# "perf_config": "Base"
# }
# }
#
# {
# "response": {
# "charging_state": "Complete", // "Charging", ??
# "charge_to_max_range": false, // current std/max-range setting
# "max_range_charge_counter": 0,
# "fast_charger_present": false, // connected to Supercharger?
# "battery_range": 239.02, // rated miles
# "est_battery_range": 155.79, // range estimated from recent driving
# "ideal_battery_range": 275.09, // ideal miles
# "battery_level": 91, // integer charge percentage
# "battery_current": -0.6, // current flowing into battery
# "charge_starting_range": null,
# "charge_starting_soc": null,
# "charger_voltage": 0, // only has value while charging
# "charger_pilot_current": 40, // max current allowed by charger & adapter
# "charger_actual_current": 0, // current actually being drawn
# "charger_power": 0, // kW (rounded down) of charger
# "time_to_full_charge": null, // valid only while charging
# "charge_rate": -1.0, // float mi/hr charging or -1 if not charging
# "charge_port_door_open": true
# }
# }
#
# More car info
#
# What music is being played.
#
# Is my car unlocked? Unlock when device within certain distance.
#
# Checks that all the doors are shut
#
# https://owner-api.teslamotors.com/api/1/vehicles/vehicle_id/command/door_unlock
#
# https://owner-api.teslamotors.com/api/1/vehicles/vehicle_id/command/door_lock
# Scan the collected door flags and print a warning line per flag.
# NOTE(review): `doorquen` and `mydict` come from earlier cells — presumably
# doorquen holds the df/dr/pf/pr booleans parsed from the vehicle JSON;
# confirm before relying on the `== True` / `== False` comparisons.
for doorq in doorquen:
    if doorq == True:
        print('Warning Door Open')
for doorq in doorquen:
    #print(doorq)
    if doorq == False:
        #for inz in range():
        print('Warning Door Close')
# Bare expressions: in a notebook these display the individual door flags.
mydict['response']['dr']
mydict['response']['pf']
mydict['response']['pr']
# +
def testdoorlock():
    """Assert the car reports locked.

    Bug fix: ``mydict`` is parsed from JSON, so ``locked`` is a Python
    bool — the original comparison against the *string* ``'True'`` could
    never succeed.
    """
    assert mydict['response']['locked'] is True


def testdoorunlock():
    """Assert the car reports unlocked (same bool-vs-str fix as above)."""
    assert mydict['response']['locked'] is False
# -
# Notebook display of the lock state and each door-open flag
# (df = driver front, dr = driver rear, pf/pr = passenger front/rear,
# per the API response sample quoted in the markdown earlier in this file).
mydict['response']['locked']
mydict['response']['df']
mydict['response']['dr']
mydict['response']['pf']
mydict['response']['pr']
# Load the saved vehicle-state JSON and parse it.
# Fix: the original opened the file without closing it (leaked handle);
# a context manager guarantees the close.
with open('/home/wcm/git/vech.json', 'r') as vechj:
    rdvechj = vechj.read()
rdvechj
json.loads(rdvechj)
|
posts/testtesla.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Links zu Dokumentationen/Tutorials für IPython/Python/numpy/matplotlib/git sowie die Sourcodes findet ihr im [GitHub Repo](https://github.com/BerndSchwarzenbacher/numdiff).
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# ## Implizit Two Step Max Order
# Each output file has columns: t, V1(t), V2(t).
max_order1 = np.loadtxt('data/ex16_impl_max_order_1.out')
time = max_order1[:,0]
V1 = max_order1[:,1]
V2 = max_order1[:,2]  # NOTE(review): V2 is loaded but never plotted below
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
# Zoom: first 100 samples.
plt.plot(time[:100], V1[:100])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
max_ordere1 = np.loadtxt('data/ex16_impl_max_order_e1.out')
time = max_ordere1[:,0]
V1 = max_ordere1[:,1]
V2 = max_ordere1[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
# NOTE(review): variable is still named max_ordere1 but this loads the
# *_e2 file — presumably just a reused name; confirm intended.
max_ordere1 = np.loadtxt('data/ex16_impl_max_order_e2.out')
time = max_ordere1[:,0]
V1 = max_ordere1[:,1]
V2 = max_ordere1[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
max_ordere3 = np.loadtxt('data/ex16_impl_max_order_e3.out')
time = max_ordere3[:,0]
V1 = max_ordere3[:,1]
V2 = max_ordere3[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
# Zoom: first 100000 samples.
plt.plot(time[:100000], V1[:100000])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
# ## Implizit Euler
impl_eul1 = np.loadtxt('../ueb3/data/ex14_impl_eul_1.out')
time = impl_eul1[:,0]
V1 = impl_eul1[:,1]
V2 = impl_eul1[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time[:100], V1[:100])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_eul_e1 = np.loadtxt('../ueb3/data/ex14_impl_eul_e-1.out')
time = impl_eul_e1[:,0]
V1 = impl_eul_e1[:,1]
V2 = impl_eul_e1[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time[:1000], V1[:1000])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_eul_e2 = np.loadtxt('../ueb3/data/ex14_impl_eul_e-2.out')
time = impl_eul_e2[:,0]
V1 = impl_eul_e2[:,1]
V2 = impl_eul_e2[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time[:10000], V1[:10000])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_eul_e3 = np.loadtxt('../ueb3/data/ex14_impl_eul_e-3.out')
time = impl_eul_e3[:,0]
V1 = impl_eul_e3[:,1]
V2 = impl_eul_e3[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time[:100000], V1[:100000])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
# ## Implizit MP
impl_mp_1 = np.loadtxt('../ueb3/data/ex14_impl_mp_1.out')
time = impl_mp_1[:,0]
V1 = impl_mp_1[:,1]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_mp_e1 = np.loadtxt('../ueb3/data/ex14_impl_mp_e-1.out')
time = impl_mp_e1[:,0]
V1 = impl_mp_e1[:,1]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_mp_e2 = np.loadtxt('../ueb3/data/ex14_impl_mp_e-2.out')
time = impl_mp_e2[:,0]
V1 = impl_mp_e2[:,1]
V2 = impl_mp_e2[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
impl_mp_e3 = np.loadtxt('../ueb3/data/ex14_impl_mp_e-3.out')
time = impl_mp_e3[:,0]
V1 = impl_mp_e3[:,1]
V2 = impl_mp_e3[:,2]
plt.plot(time, V1)
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
plt.plot(time[:100000], V1[:100000])
plt.ylabel(r'$V_{1}(t)$')
plt.xlabel(r'$t$')
plt.grid()
|
ueb4/ex16.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys, os, time
import numpy as np
from tqdm import tqdm
import csv
# Input: Aristo tuple-KB TSV; output: columns 2..4 of each row, tab-joined.
path = '../../../Downloads/aristo-tuple-kb-v5-mar2017/aristo-tuple-kb-v5-mar2017/COMBINED-KB.tsv'
wpath = '../../../Downloads/aristo-tuple-kb-v5-mar2017/aristo-tuple-kb-v5-mar2017/aristo.txt'
# wpath = '../../../Downloads/aristo-tuple-kb-v5-mar2017/aristo.txt'
# wpath = '../../../Downloads/aristo-tuple-kb-v5-mar2017/aristo_sentences.txt'
# path = '../../../Downloads/aristo-tuple-kb-v5-mar2017/COMBINED-KB.tsv'
with open(path, 'r') as tsvfile:
    with open(wpath, 'w') as writefile:
        reader = csv.reader(tsvfile, delimiter='\t')
        # tqdm wraps the row iterator to show a progress bar.
        for i, row in tqdm(enumerate(reader)):
            # print('\t'.join(row[2:5])+'\n')
            # break
            if i > 0:  # skip the header row
                #writefile.write(row[5]+'\n')
                # columns 2-4 — presumably subject/relation/object; confirm
                writefile.write('\t'.join(row[2:5])+'\n')
|
reformat_aristo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import gluonnlp as nlp
import mxnet as mx
import pandas as pd
from gluonnlp.data import SentencepieceTokenizer
from kogpt2.mxnet_kogpt2 import get_mxnet_kogpt2_model
from kogpt2.utils import get_tokenizer
from mxnet import gluon, nd
from mxnet.gluon import nn
# -
U_TKN = '<usr>'
S_TKN = '<sys>'
BOS = '<s>'
EOS = '</s>'
MASK = '<unused0>'
SENT = '<unused1>'
# +
class KoGPT2Chat(nn.HybridBlock):
    """Thin HybridBlock wrapper exposing only the KoGPT2 output logits."""

    def __init__(self, kogpt2, prefix=None, params=None):
        super(KoGPT2Chat, self).__init__(prefix=prefix, params=params)
        # Underlying pretrained language model.
        self.kogpt2 = kogpt2

    def hybrid_forward(self, F, inputs):
        # Model returns (logits, states); only the (batch, seq_len, hiddens)
        # logits tensor is needed here.
        logits, _ = self.kogpt2(inputs)
        return logits
# Run on GPU when MXNet can see one, otherwise fall back to CPU.
if mx.context.num_gpus() > 0:
    ctx = mx.gpu()
else:
    ctx = mx.cpu()
# -
def chat(model_params, sent='0'):
    """Interactive console chat loop against a fine-tuned KoGPT2 model.

    model_params: path to the trained parameter file to load.
    sent: condition-token string inserted between question and answer
          (presumably a sentiment label — confirm against training code).
    Stops after typing 'quit' or after 50 turns.
    """
    tok_path = get_tokenizer()
    model, vocab = get_mxnet_kogpt2_model(ctx=ctx)
    tok = SentencepieceTokenizer(tok_path, num_best=0, alpha=0)
    kogptqa = KoGPT2Chat(model)
    kogptqa.load_parameters(model_params, ctx=ctx)
    sent_tokens = tok(sent)
    cnt=0
    while 1:
        cnt+=1
        if cnt>50:  # hard cap on the number of conversation turns
            break
        q = input('user > ').strip()
        if q == 'quit':
            break
        q_tok = tok(q)
        a = ''
        a_tok = []
        # Greedy decoding: re-encode the full <usr>q<eos><sent><eos><sys>a
        # sequence each step and append the argmax token until EOS.
        while 1:
            input_ids = mx.nd.array([vocab[U_TKN]] + vocab[q_tok] +
                                    vocab[EOS, SENT] + vocab[sent_tokens] +
                                    vocab[EOS, S_TKN] +
                                    vocab[a_tok]).expand_dims(axis=0)
            pred = kogptqa(input_ids.as_in_context(ctx))
            gen = vocab.to_tokens(
                mx.nd.argmax(
                    pred,
                    axis=-1).squeeze().astype('int').asnumpy().tolist())[-1]
            if gen == EOS:
                break
            # SentencePiece marks word starts with '▁'; map them to spaces.
            a += gen.replace('▁', ' ')
            a_tok = tok(a)
        print("Simsimi > {}".format(a.strip()))
chat("kogpt2_chat.params")
|
.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load precomputed factor-exposure frames (index: dates, columns: stocks)
# and the NIFTY-200 constituent list.
# Fix: raw strings — the bare backslash Windows paths contained invalid
# escape sequences ('\M', '\Q', ...), a DeprecationWarning on Python 3
# and a future SyntaxError.  Byte values of the paths are unchanged.
Market_exp_df = pd.read_pickle(r'database\MarketExposure26Oct20.pickle')
Momentum_exp_df = pd.read_pickle(r'database\MomentumExposure26Oct20.pickle')
Quality_exp_df = pd.read_pickle(r'database\QualityExposure26Oct20.pickle')
Value_exp_df = pd.read_pickle(r'database\ValueExposure26Oct20.pickle')
Liquidity_exp_df = pd.read_pickle(r'database\LiquidityExposure26Oct20.pickle')
Size_exp_df = pd.read_pickle(r'database\SizeExposure26Oct20.pickle')
nifty_200 = pd.read_csv(r'database\ind_nifty200list.csv')
# +
# Build equal-weight top-25 portfolios for each factor.
#
# For every rebalance date, rank all stocks by factor exposure, keep the
# top `stock_count` names at weight 1/stock_count each, and set every other
# NIFTY-200 symbol's weight to 0.
stock_count = 25
weight = 1 / stock_count

# (label, exposure frame) pairs; one position frame is built per factor.
_factors = [
    ('Market', Market_exp_df),
    ('Momentum', Momentum_exp_df),
    ('Quality', Quality_exp_df),
    ('Value', Value_exp_df),
    ('Liquidity', Liquidity_exp_df),
    ('Size', Size_exp_df),
]
_positions = {
    label: pd.DataFrame(columns=nifty_200.Symbol.values, index=exp_df.index)
    for label, exp_df in _factors
}

# NOTE(review): as in the original, rebalance dates are taken from the
# Market exposure index; the other frames are assumed to share it — confirm.
for current_date in Market_exp_df.index:
    for label, exp_df in _factors:
        # Top-N symbols by exposure on this date (NaN exposures excluded).
        top_stocks = set(
            exp_df.loc[current_date, :]
            .dropna()
            .sort_values(ascending=False)
            .head(stock_count)
            .index
        )
        position_df = _positions[label]
        for stock in nifty_200.Symbol:
            # Bug fix vs. original: the Liquidity `else` branch wrote its
            # zeros into Liquidity_exp_df (the *input* exposures) instead
            # of the position frame, corrupting the data and leaving NaNs
            # in the Liquidity portfolio.  The no-op try/except wrappers
            # around a plain assignment are also removed.
            position_df.loc[current_date, stock] = weight if stock in top_stocks else 0

# Expose the per-factor frames under their original names for the
# downstream to_pickle cells.
Market_position_df = _positions['Market']
Momentum_position_df = _positions['Momentum']
Quality_position_df = _positions['Quality']
Value_position_df = _positions['Value']
Liquidity_position_df = _positions['Liquidity']
Size_position_df = _positions['Size']
# -
# Persist the equal-weight factor portfolios.
# Fix: raw strings keep the backslash Windows paths from being parsed as
# (invalid) escape sequences on Python 3; path bytes are unchanged.
Market_position_df.to_pickle(r'database\MarketPortfolioWeights26Oct20.pickle')
Momentum_position_df.to_pickle(r'database\MomentumPortfolioWeights26Oct20.pickle')
Quality_position_df.to_pickle(r'database\QualityPortfolioWeights26Oct20.pickle')
Value_position_df.to_pickle(r'database\ValuePortfolioWeights26Oct20.pickle')
Liquidity_position_df.to_pickle(r'database\LiquidityPortfolioWeights26Oct20.pickle')
Size_position_df.to_pickle(r'database\SizePortfolioWeights26Oct20.pickle')
|
CreateFactorPortfolios.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # _Common statistical tests are linear models_: Python port
#
# Original post by <NAME> ([blog](https://lindeloev.net), [profile](http://personprofil.aau.dk/117060)). Python port by George Ho ([blog](https://eigenfoo.xyz)).
#
# > This is a Python port of <NAME> post [_Common statistical tests are linear models (or: how to teach stats_)](https://lindeloev.github.io/tests-as-linear/), which originally had accompanying code in R.
# >
# > View this notebook [on GitHub](https://github.com/eigenfoo/tests-as-linear). Launch this notebook [on Binder](https://gke.mybinder.org/v2/gh/eigenfoo/tests-as-linear/master?filepath=tests-as-linear.ipynb).
from datetime import datetime
from IPython.display import display, Markdown
from tests_as_linear.utils import generate_toc
# Render a "last updated" stamp and the generated table of contents.
display(Markdown("Last updated: {}".format(datetime.now().strftime("%B %d, %Y"))))
display(Markdown(generate_toc()))
# This document is summarised in the table below. It shows the linear models underlying common parametric and "non-parametric" tests. Formulating all the tests in the same language highlights the many similarities between them. Get it [as an image](cheatsheets/linear_tests_cheat_sheet.png) or [as a PDF](cheatsheets/linear_tests_cheat_sheet.pdf).
#
# ---
#
# [](cheatsheets/linear_tests_cheat_sheet.pdf)
#
# ---
# # 1 The simplicity underlying common tests
#
# Most of the common statistical models (t-test, correlation, ANOVA, chi-square, etc.) are special cases of linear models, or a very close approximation. This beautiful simplicity means that there is less to learn. In particular, it all comes down to $y = a \cdot x + b$ which most students know from high school. Unfortunately, introductory statistics courses are usually taught as if each test is an independent tool, needlessly making life more complicated for students and teachers alike.
#
# This needless complexity multiplies when students try to rote learn the parametric assumptions underlying each test separately rather than deducing them from the linear model.
#
# For this reason, I think that teaching linear models first and foremost and *then* name-dropping the special cases along the way makes for an excellent teaching strategy, emphasizing *understanding* over rote learning. Since linear models are the same across frequentist, Bayesian, and permutation-based inferences, I'd argue that it's better to start with modeling than p-values, type-1 errors, Bayes factors, or other inferences.
#
# Concerning the teaching of *"non-parametric"* tests in intro-courses, I think that we can justify [lying-to-children](https://en.wikipedia.org/wiki/Lie-to-children) and teach "non-parametric"" tests as if they are merely ranked versions of the corresponding parametric tests. It is much better for students to think "ranks!" than to believe that you can magically throw away assumptions. Indeed, the Bayesian equivalents of "non-parametric"" tests implemented in [JASP](https://jasp-stats.org) [literally just do (latent) ranking](https://arxiv.org/abs/1712.06941) and that's it. For the frequentist "non-parametric" tests considered here, this approach is highly accurate for N > 15.
#
# Use the table of contents (above) to jump to your favourite section. There are links to lots of similar (though more scattered) stuff under [sources](#links) and [teaching materials](#course). I hope that you will join in suggesting improvements or submitting improvements yourself in [the Github repo for this page](https://github.com/eigenfoo/tests-as-linear) or [the repo for the original post (in R)](https://github.com/lindeloev/tests-as-linear). Let's make it awesome!
# # 2 Python environment
# !cat requirements.txt
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import patsy
import scipy
import statsmodels.api as sm
import statsmodels.formula.api as smf
# See GitHub repo for supporting Python code.
from tests_as_linear import plots, utils
np.random.seed(1618)
# -
# # 3 Pearson and Spearman correlation
#
# ### 3.0.1 Theory: As linear models
#
# **Model:** the recipe for $y$ is a slope ($\beta_1$) times $x$ plus an intercept ($\beta_0$, aka a straight line).
#
# $y = \beta_0 + \beta_1 x \qquad \mathcal{H}_0: \beta_1 = 0$
#
# ... which is a math-y way of writing the good old $y = ax + b$ (here ordered as $y = b + ax$). Using `patsy` lets us be a bit lazier and write `y ~ 1 + x` which reads like `y = 1 * number + x * othernumber`, and the task of linear models is simply to find the numbers that best predict `y`.
#
# Either way you write it, it's an intercept ($\beta_0$) and a slope ($\beta_1$) yielding a straight line:
# Visualize the linear model y = b0 + b1*x as a straight line.
plots.linear_regression_plot()
plt.show()
# This is often simply called a *regression* model which can be extended to *multiple regression* where there are several $\beta$s and on the right-hand side multiplied with the predictors. Everything below, from [one-sample t-test](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank) to [two-way ANOVA](#6.2-Two-way-ANOVA) are just special cases of this system. Nothing more, nothing less.
#
# As the name implies, the *Spearman rank correlation* is a *Pearson correlation* on rank-transformed $x$ and $y$:
#
# $\text{rank}(y) = \beta_0 + \beta_1 \cdot \text{rank}(x) \qquad \mathcal{H}_0: \beta_1 = 0$
#
# I'll introduce [ranks](#3.0.2-Theory:-rank-transformation) in a minute. For now, notice that the correlation coefficient of the linear model is identical to a "real" Pearson correlation, but p-values are an approximation which is [appropriate for samples greater than N = 10 and almost perfect when N > 20](https://lindeloev.github.io/tests-as-linear/simulations/simulate_spearman.html).
#
# Such a nice and non-mysterious equivalence that many students are left unaware of! Visualizing them side by side including data labels, we see this rank-transformation in action:
# Side-by-side Pearson (raw data) vs Spearman (rank-transformed) scatter.
plots.pearson_spearman_plot()
plt.show()
# ### 3.0.2 Theory: rank-transformation
#
# `scipy.stats.rankdata` simply takes an array of numbers and "replaces" them with the integers of their rank (1st smallest, 2nd smallest, 3rd smallest, etc.). `pd.DataFrame.rank` performs a similar function, but with support for `pandas.DataFrames`. So the result of the rank-transformation `scipy.stats.rankdata([3.6, 3.4, -5.0, 8.2])` is `[3, 2, 1, 4]`. See that in the figure above?
#
# A _signed_ rank is the same, just where we rank according to absolute size first and then add in the sign second. So the signed rank here would be `[2, 1, -3, 4]`. Or in code:
def signed_rank(df):
    """Signed-rank transform: rank by absolute value, then restore each sign.

    E.g. [3.6, 3.4, -5.0, 8.2] -> [2.0, 1.0, -3.0, 4.0]. Works column-wise on
    a DataFrame and element-wise on a Series.
    """
    magnitude_ranks = df.abs().rank()
    return magnitude_ranks * np.sign(df)
# I hope I don't offend anyone when I say that ranks are easy; yet it's all you need to do to convert most parametric tests into their "non-parametric" counterparts! One interesting implication is that *many "non-parametric tests" are about as parametric as their parametric counterparts with means, standard deviations, homogeneity of variance, etc. - just on rank-transformed data*. That's why I put "non-parametric" in quotation marks.
# ### 3.0.3 Python code: Pearson correlation
#
# It couldn't be much simpler to run these models with `statsmodels` ([`smf.ols`](https://www.statsmodels.org/stable/example_formulas.html#ols-regression-using-formulas)) or `scipy` ([`scipy.stats.pearsonr`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html)). They yield identical slopes, `p` and `t` values, but there's a catch: `smf.ols` gives you the *slope* and even though that is usually much more interpretable and informative than the _correlation coefficient_ $r$, you may still want $r$. Luckily, the slope becomes $r$ if `x` and `y` have a standard deviation of exactly 1. You can do this by scaling the data: `data /= data.std()`.
#
# Notice how `scipy.stats.pearsonr` and `smf.ols (scaled)` have the same slopes, $p$ and $t$ values. Also note that statistical functions from `scipy.stats` do not provide confidence intervals, while performing the linear regression with `smf.ols` does.
# +
# Simulate linearly related data: 50 evenly spaced x's (np.linspace default
# num=50) with y = 1.5*x plus Gaussian noise.
correlated = pd.DataFrame()
correlated["x"] = np.linspace(0, 1)
correlated["y"] = 1.5 * correlated.x + 2 * np.random.randn(len(correlated.x))
scaled = correlated / correlated.std()  # unit-SD columns: the slope of y ~ x then equals Pearson's r
r, p = scipy.stats.pearsonr(correlated["x"], correlated["y"])
res1 = smf.ols(formula="y ~ 1 + x", data=correlated).fit()  # slope on raw data
res2 = smf.ols(formula="y ~ 1 + x", data=scaled).fit()  # slope == r on scaled data
# -
utils.tabulate_results([r, p, None, None, None],
                       [res1, res2],
                       ["scipy.stats.pearsonr", "smf.ols", "smf.ols (scaled)"])
# ### 3.0.4 Python code: Spearman correlation
#
# Note that we can interpret the slope which is the number of ranks $y$ change for each rank on $x$. I think that this is a pretty interesting number. However, the intercept is less interpretable since it lies at $\text{rank}(x) = 0$ which is impossible since x starts at 1.
# +
# Rank-transform each column (1 = smallest). NOTE: the previous code used
# `np.argsort`, which yields the *sorting permutation* (the indices that would
# sort each column), not the ranks. Spearman's correlation is defined on ranks,
# which `DataFrame.rank()` computes directly (ranks == argsort of argsort).
ranked = correlated.rank()
r, p = scipy.stats.spearmanr(ranked["x"], ranked["y"])  # spearmanr ranks internally; ranks are rank-invariant
res = smf.ols(formula="y ~ 1 + x", data=ranked).fit()  # OLS on ranks == Spearman equivalence
# -
utils.tabulate_results([r, p, None, None, None],
                       res,
                       ["scipy.stats.spearmanr", "smf.ols (ranked)"])
# # 4 One mean
#
# ## 4.1 One sample t-test and Wilcoxon signed-rank
#
# ### 4.1.1 Theory: As linear models
#
# **t-test** model: A single number predicts $y$.
#
# $y = \beta_0 \qquad \mathcal{H}_0: \beta_0 = 0$
#
# In other words, it's our good old $y = \beta_0 + \beta_1*x$ where the last term is gone since there is no $x$ (essentially $x=0$, see left figure below).
#
# The same is true, to a very close approximation, for the **Wilcoxon signed-rank test**, just with the [signed ranks](#3.0.2-Theory:-rank-transformation) of $y$ instead of $y$ itself (see right panel below).
#
# $\text{signed_rank}(y) = \beta_0$
#
# [This approximation is good enough when the sample size is larger than 14 and almost perfect if the sample size is larger than 50](https://lindeloev.github.io/tests-as-linear/simulations/simulate_wilcoxon.html).
# Figure: one-sample t-test (raw y, left) vs. Wilcoxon signed-rank (signed-rank y, right).
plots.ttest_wilcoxon_plot()
plt.show()
# ### 4.1.2 Example data
# +
# Simulated example data used throughout sections 4-5 (N = 50 per column).
data = pd.DataFrame()
data["x"] = np.random.normal(loc=0.0, scale=1.0, size=50)  # Used in correlation where this is on x-axis
data["y"] = np.random.normal(loc=0.5, scale=1.0, size=50)  # Mean 0.5, so H0 (mean = 0) is slightly off
data["y2"] = np.random.normal(loc=0.8, scale=1.0, size=50)  # Used in two means
data["y_sub_y2"] = data["y"] - data["y2"]  # Pairwise differences, for the paired tests
data.head()
# -
# ### 4.1.3 Python code: One-sample t-test
#
# Try running the Python code below and see that the linear model (`smf.ols`) produces the same $t$, $p$, and $r$ as `scipy.stats.ttest_1samp`. The confidence interval is not computed by `scipy` but would theoretically also be identical:
# One-sample t-test of H0: mean(y) == 0, vs. the equivalent intercept-only model.
t, p = scipy.stats.ttest_1samp(data.y, 0)
res = smf.ols(formula="y ~ 1", data=data).fit()  # Equivalent linear model: intercept-only
utils.tabulate_results([None, p, t, None, None],
                       res,
                       ["scipy.stats.ttest_1samp", "smf.ols (y ~ 1)"],
                       coeff="Intercept")
# ### 4.1.4 Python code: Wilcoxon signed-rank test
# +
# Wilcoxon signed-rank ~= intercept-only model on signed-rank-transformed y.
signed_rank_data = signed_rank(data)  # column-wise signed ranks; reused in 4.2.3
_, p = scipy.stats.wilcoxon(data.y)
res = smf.ols("y ~ 1", data=signed_rank_data).fit()
# -
utils.tabulate_results([None, p, None, None, None],
                       res,
                       ["scipy.stats.wilcoxon", "smf.ols (y ~ 1, signed rank)"],
                       coeff="Intercept")
# ## 4.2 Paired samples t-test and Wilcoxon matched pairs
#
# ### 4.2.1 Theory: As linear models
#
# **t-test** model: a single number (intercept) predicts the pairwise differences.
#
# $y_2-y_1 = \beta_0 \qquad \mathcal{H}_0: \beta_0 = 0$
#
# This means that there is just one $y = y_2 - y_1$ to predict and it becomes a [one-sample t-test](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank) on the pairwise differences. The visualization is therefore also the same as for the one-sample t-test. At the risk of overcomplicating a simple subtraction, you can think of these pairwise differences as slopes (see left panel of the figure), which we can represent as y-offsets (see right panel of the figure):
# Figure: pairwise differences drawn as slopes (left) and as y-offsets (right).
plots.pairs_wilcoxon_plot()
plt.show()
# Similarly, the **Wilcoxon matched pairs** only differ from **Wilcoxon signed-rank** in that it's testing the signed ranks of the pairwise $y_2-y_1$ differences.
#
# $\text{signed_rank}(y_2-y_1) = \beta_0 \qquad \mathcal{H}_0: \beta_0 = 0$
# ### 4.2.2 Python code: Paired sample t-test
# Paired t-test == one-sample t-test (intercept-only model) on the y - y2 differences.
t, p = scipy.stats.ttest_rel(data.y, data.y2)
res = smf.ols(formula="y_sub_y2 ~ 1", data=data).fit()
utils.tabulate_results([None, p, t, None, None],
                       res,
                       ["scipy.stats.ttest_rel", "smf.ols (y_sub_y2 ~ 1)"],
                       coeff="Intercept")
# ### 4.2.3 Python code: Wilcoxon matched pairs
#
# Again, we do the signed-ranks trick. This is still an approximation, but a close one:
# Wilcoxon matched pairs ~= intercept-only model on the signed-ranked differences
# (signed_rank_data already holds the signed-ranked y_sub_y2 column).
_, p = scipy.stats.wilcoxon(data.y, data.y2)
res = smf.ols(formula="y_sub_y2 ~ 1", data=signed_rank_data).fit()
utils.tabulate_results([None, p, None, None, None],
                       res,
                       ["scipy.stats.wilcoxon", "smf.ols (y_sub_y2 ~ 1, signed rank)"],
                       coeff="Intercept")
# For large sample sizes (N >> 100), this approaches the **sign test** to a reasonable degree, but this approximation is too inaccurate to flesh out here.
# # 5 Two means
#
# ## 5.1 Independent t-test and Mann-Whitney U
#
# ### 5.1.1 Theory: As linear models
#
# **Independent t-test model:** two means predict $y$.
#
# $y_i = \beta_0 + \beta_1 x_i \qquad \mathcal{H}_0: \beta_1 = 0$
#
# where $x_i$ is an indicator (0 or 1) saying whether data point $i$ was sampled from one or the other group. [Indicator variables (also called "dummy coding")](https://en.wikipedia.org/wiki/Dummy_variable_(statistics)) underly a lot of linear models and we'll take an aside to see how it works in a minute.
#
# **Mann-Whitney U** (also known as **Wilcoxon rank-sum test** for two independent groups; no *signed* rank this time) is the same model to a very close approximation, just on the ranks of $x$ and $y$ instead of the actual values:
#
# $\text{rank}(y_i) = \beta_0 + \beta_1 x_i \qquad \mathcal{H}_0: \beta_1 = 0$
#
# To me, equivalences like this make "non-parametric" statistics much easier to understand. The approximation is appropriate [when the sample size is larger than 11 in each group and virtually perfect when N > 30 in each group](https://lindeloev.github.io/tests-as-linear/simulations/simulate_mannwhitney.html).
# ### 5.1.2 Theory: Dummy coding
#
# Dummy coding can be understood visually. The indicator is on the x-axis so data points from the first group are located at $x = 0$ and data points from the second group is located at $x = 1$. Then $\beta_0$ is the intercept (blue line) and $\beta_1$ is the slope between the two means (red line). Why? Because when $\Delta x = 1$ the slope equals the difference because:
#
# $\text{slope} = \Delta y / \Delta x = \Delta y / 1 = \Delta y = \text{difference}$
#
# Magic! Even categorical differences can be modelled using linear models! It's a true Swiss army knife.
# Figure: dummy coding — group means at x=0 and x=1, so the slope is their difference.
plots.dummy_coding_plot()
plt.show()
# ### 5.1.3 Theory: Dummy coding (continued)
#
# If you feel like you get dummy coding now, just skip ahead to the next section. Here is a more elaborate explanation of dummy coding:
#
# If a data point was sampled from the first group, i.e., when $x_i = 0$, the model simply becomes $y = \beta_0 + \beta_1 \cdot 0 = \beta_0$. In other words, the model predicts that that data point is $\beta_0$. It turns out that the $\beta$ which best predicts a set of data points is the *mean* of those data points, so $\beta_0$ is the mean of group 1.
#
# On the other hand, data points sampled from the second group would have $x_i = 1$ so the model becomes $y_i = \beta_0 + \beta_1\cdot 1 = \beta_0 + \beta_1$. In other words, we add $\beta_1$ to "shift" from the mean of the first group to the mean of the second group. Thus $\beta_1$ becomes the *mean difference* between the groups.
#
# As an example, say group 1 is 25 years old ($\beta_0 = 25$) and group 2 is 28 years old ($\beta_1 = 3$), then the model for a person in group 1 is $y = 25 + 3 \cdot 0 = 25$ and the model for a person in group 2 is $y = 25 + 3 \cdot 1 = 28$.
#
# Hooray, it works! For first-timers it takes a few moments to understand dummy coding, but you only need to know addition and multiplication to get there!
# ### 5.1.4 Python code: independent t-test
#
# As a reminder, when we write `y ~ 1 + x` with `patsy`, it is shorthand for $y = \beta_0 \cdot 1 + \beta_1 \cdot x$ and Python goes on computing the $\beta$s for you. Thus `y ~ 1 + x` is the `patsy` (and/or R) way of writing $y = a \cdot x + b$.
#
# Notice the identical `p` estimates.
# Stack y and y2 into long format with a 0/1 dummy-coded `group` indicator.
groups = pd.DataFrame(data=np.concatenate([data.y, data.y2]),
                      columns=["y"])
groups["group"] = np.concatenate([np.zeros_like(data.y), np.ones_like(data.y2)])
groups = groups.sample(frac=1).reset_index(drop=True)  # Shuffle rows to show order doesn't matter
groups.head()
# Independent t-test vs. the dummy-coded linear model; the `group` slope is the mean difference.
_, p = scipy.stats.ttest_ind(data.y, data.y2)
res = smf.ols("y ~ 1 + group", groups).fit()
utils.tabulate_results([None, p, None, None, None],
                       res,
                       ["scipy.stats.ttest_ind", "smf.ols (y ~ 1 + group)"],
                       coeff="group")
# ### 5.1.5 Python code: Mann-Whitney U
# +
# Mann-Whitney U is approximated by a linear model on the *plain* ranks of y —
# the theory in 5.1.1 explicitly says "no signed rank this time"; signed ranks
# are only for the one-sample/paired Wilcoxon tests. Only rank `y`, not `group`.
groups.y = groups.y.rank()
# `alternative="two-sided"` pins the two-sided test explicitly (older scipy
# defaulted to one-sided), matching the two-sided p of the linear model.
_, p = scipy.stats.mannwhitneyu(data.y, data.y2, alternative="two-sided")
res = smf.ols("y ~ 1 + group", groups).fit()
# -
utils.tabulate_results([None, p, None, None, None],
                       res,
                       ["scipy.stats.mannwhitneyu", "smf.ols (y ~ 1 + group, rank)"],
                       coeff="group")
# ## 5.2 Welch’s t-test
#
# This is identical to the (Student's) [independent t-test](#5.1.4-Python-code:-independent-t-test) above except that Student's assumes identical variances and **Welch's t-test** does not. So the linear model is the same and the trick is in the variances, which I won't go further into here.
# +
# Welch's t-test: same linear model as the independent t-test, but without
# assuming equal group variances (equal_var=False).
t, p = scipy.stats.ttest_ind(data.y, data.y2, equal_var=False)
# TODO: linear model with per-group variances
# See https://stats.stackexchange.com/q/142685 and https://github.com/eigenfoo/tests-as-linear/issues/12
# -
# # 6 Three or more means
#
# ANOVAs are linear models with (only) categorical predictors so they simply extend everything we did above, relying heavily on dummy coding. Do make sure to read [the section on dummy coding](#5.1.2-Theory:-Dummy-coding) if you haven't already.
# ## 6.1 One-way ANOVA and Kruskal-Wallis
#
# ### 6.1.1 Theory: As linear models
#
# Model: One mean for each group predicts $y$.
#
# $y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_3 +... \qquad \mathcal{H}_0: y = \beta_0$
#
# where $x_i$ are indicators ($x=0$ or $x=1$) where at most one $x_i=1$ while all others are $x_i=0$.
#
# Notice how this is just "more of the same" of what we already did in other models above. When there are only two groups, this model is $y = \beta_0 + \beta_1*x$, i.e. the [independent t-test](#5.1-Independent-t-test-and-Mann-Whitney-U). If there is only one group, it is $y = \beta_0$, i.e. the [one-sample t-test](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank). This is easy to see in the visualization below - just cover up a few groups and see that it matches the other visualizations above.
# Figure: one-way ANOVA — one modelled mean (intercept + dummy offset) per group.
plots.one_way_anova_plot()
plt.show()
# A one-way ANOVA has a log-linear counterpart called [goodness-of-fit](#7.1-Goodness-of-fit) test which we'll return to. By the way, since we now regress on more than one $x$, the one-way ANOVA is a **multiple regression** model.
#
# The **Kruskal-Wallis** test is simply a **one-way ANOVA** on the rank-transformed $y$ (`value`):
#
# $\text{rank}(y) = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_3 +...$
#
# This approximation is [good enough for 12 or more data points](https://lindeloev.github.io/tests-as-linear/simulations/simulate_kruskall.html). Again, if you do this for just one or two groups, we're already acquainted with those equations, i.e. the [Wilcoxon signed-rank test](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank) or the [Mann-Whitney U test](#5.1-Independent-t-test-and-Mann-Whitney-U) respectively.
# ### 6.1.2 Example data
#
# We make a three-level factor with the levels `a`, `b`, and `c` so that the **one-way ANOVA** basically becomes a "three-sample t-test". Then we manually do the [dummy coding](#5.1.2-Theory:-Dummy-coding) of the groups.
# +
# Three groups of 20 draws with different means (a: 0.0, b: 3.0, c: -1.5).
num_points = 20
a = np.random.normal(0.0, 1, num_points)
b = np.random.normal(3.0, 1, num_points)
c = np.random.normal(-1.5, 1, num_points)
df = pd.DataFrame()
df["y"] = np.concatenate([a, b, c])
df["group"] = list("".join([num_points * char for char in "abc"]))  # 20 'a's, then 'b's, then 'c's
# Dummy-code the factor; drop_first makes group a the reference (the intercept).
df = df.join(pd.get_dummies(df.group, prefix="group", drop_first=True).astype(np.float64))
df.head()
# -
# With group a's intercept omni-present, see how exactly one other parameter is added to predict `value` for group b and c in a given row. Thus data points in group b never affect the estimates in group c.
# ### 6.1.3 Python code: one-way ANOVA
#
# OK, let's see the identity between `scipy`'s dedicated **ANOVA** function (`scipy.stats.f_oneway`) and the dummy-coded in-your-face linear model with `smf.ols`.
F, p = scipy.stats.f_oneway(a, b, c)
res = smf.ols("y ~ 1 + group_b + group_c", df).fit()  # dummy-coded linear model
# +
# Compare the dedicated ANOVA F-test with the linear model's overall F-test.
table = pd.DataFrame(index=["F statistic", "p value", "df"])
table["scipy.stats.f_oneway"] = [F, p, None]
table["ols (y ~ 1 + group_b + group_c)"] = [res.fvalue, res.f_pvalue, res.df_model]
table.T
# -
# Note that [`sm.stats.anova_lm`](https://www.statsmodels.org/stable/generated/statsmodels.stats.anova.anova_lm.html) gives you a pretty ANOVA table with relevant statistics, if you give it the fitted `res` object. By default it computes type-I sum of squares, which is widely discouraged. There is a BIG polarized debate about whether to use type-II (by passing `typ=2`) or type-III sum of squares (`typ=3`), but let's skip that for now.
# ### 6.1.4 Python code: Kruskal-Wallis
# Kruskal-Wallis is a one-way ANOVA on *plain* rank-transformed y (see 6.1.1:
# "rank(y) = ..."); signed ranks are only for the one-sample/paired Wilcoxon
# tests, so the previous `signed_rank` call here disagreed with the theory.
ranked_df = df.copy()
ranked_df["y"] = ranked_df["y"].rank()
_, p = scipy.stats.kruskal(a, b, c)
res = smf.ols("y ~ 1 + group_b + group_c", ranked_df).fit()
# +
# Compare the dedicated test's p-value with the linear model's overall F-test p.
table = pd.DataFrame(index=["p value", "df"])
table["scipy.stats.kruskal"] = [p, None]
table["ols (y ~ 1 + group_b + group_c, rank)"] = [res.f_pvalue, res.df_model]
table.T
# -
# ## 6.2 Two-way ANOVA
#
# ### 6.2.1 Theory: As linear models
#
# Model: one mean per group (main effects) plus these means multiplied across factors (interaction effects). The main effects are the [one-way ANOVAs](#6.1-One-way-ANOVA-and-Kruskal-Wallis) above, though in the context of a larger model. The interaction effect is harder to explain in the abstract even though it's just a few numbers multiplied with each other. I will leave that to the teachers to keep focus on equivalences here :-)
#
# Switching to matrix notation:
#
# $y = \beta_0 + \beta_1 X_1 + \beta_2 X_2 + \beta_3 X_1 X_2 \qquad \mathcal{H}_0: \beta_3 = 0$
#
# Here $\beta_i$ are vectors of betas of which only one is selected by the indicator vector $X_i$. The $\mathcal{H}_0$ shown here is the interaction effect. Note that the intercept $\beta_0$, to which all other $\beta$s are relative, is now the mean for the first level of all factors.
#
# Continuing with the dataset from the one-way ANOVA above, let's add a crossing factor `mood` so that we can test the `group:mood` interaction (a 3x2 ANOVA). We also do the [dummy coding](#5.1.2-Theory:-Dummy-coding) of this factor needed for the linear model.
# +
# Add a crossing two-level factor `mood` (alternating rows) and dummy-code it.
df["mood"] = (df.shape[0] // 2) * ["happy", "sad"]
df = df.join(pd.get_dummies(df.mood, prefix="mood").astype(np.float64))
df.head()
# -
# $\beta_0$ is now the happy guys from group a!
# Figure: the 3x2 two-way ANOVA; the intercept is group a / happy.
plots.two_way_anova_plot(df)
plt.show()
# ### 6.2.2 Python code: Two-way ANOVA
#
# <div class="alert alert-warning">
# <b>Note on Python port:</b>
# Unfortunately, <code>scipy.stats</code> does not have a dedicated function to perform two-way ANOVA, so we cannot demonstrate directly that it is fundamentally a linear model. Nevertheless, we will write the code to perform the linear regression.
# </div>
# scipy.stats does not support two-way ANOVA
# `group * mood` expands to both main effects plus the group:mood interaction.
res = smf.ols("y ~ 1 + group * mood", df).fit()
# ### 6.3 ANCOVA
#
# This is simply ANOVA with a continuous regressor added so that it now contains continuous and (dummy-coded) categorical predictors. For example, if we continue with the [one-way ANOVA](#6.1-One-way-ANOVA-and-Kruskal-Wallis) example, we can add `age` and it is now called a **one-way ANCOVA**:
#
# $y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + ... + \beta_3 \text{age}$
#
# ... where $x_i$ are our usual dummy-coded indicator variables. $\beta_0$ is now the mean for the first group at $\text{age}=0$. You can turn all ANOVAs into ANCOVAs this way, e.g. by adding $\beta_N \cdot \text{age}$ to our **two-way ANOVA** in the previous section. But let us go ahead with our one-way ANCOVA, starting by adding $\text{age}$ to our dataset:
# Update data with a continuous covariate for the ANCOVA example
df["age"] = df["y"] + np.random.randn(df.shape[0])  # Correlated with y by construction
# This is best visualized using colors for groups instead of x-position. The $\beta$s are still the average $y$-offset of the data points, only now we model each group using a slope instead of an intercept. In other words, the one-way ANOVA is sort of [one-sample t-tests](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank) model for each group ($y = \beta_0$) while the **one-way ANCOVA** is sort of [Pearson correlation](#3-Pearson-and-Spearman-correlation) model for each group ($y_i = \beta_0 + \beta_i + \beta_1 \text{age}$):
# Figure: ANCOVA — a common `age` slope with one intercept offset per group.
plots.ancova_plot(df)
plt.show()
# <div class="alert alert-warning">
# <b>Note on Python port:</b>
# Unfortunately, <code>scipy.stats</code> does not have a dedicated function to perform ANCOVA, so again, we cannot demonstrate directly that it is fundamentally a linear model. Nevertheless, we will write the code to perform the linear regression.
# </div>
res = smf.ols("y ~ 1 + group + age", df).fit()
# # 7 Proportions: Chi-square is a log-linear model
#
# Recall that when you take the logarithm, you can easily make statements about *proportions*, i.e., that for every increase in $x$, $y$ increases a certain percentage. This turns out to be one of the simplest (and therefore best!) ways to make count data and contingency tables intelligible. See [this nice introduction](https://www.uni-tuebingen.de/fileadmin/Uni_Tuebingen/SFB/SFB_833/A_Bereich/A1/Christoph_Scheepers_-_Statistikworkshop.pdf) to Chi-Square tests as linear models.
#
#
# ## 7.1 Goodness of fit
#
# ### 7.1.1 Theory: As log-linear model
# Model: a single intercept predicts $\log(y)$.
#
# I'll refer you to take a look at [the section on contingency tables](#contingency) which is basically a "two-way goodness of fit".
#
#
# ### 7.1.2 Example data
#
# For this, we need some wide count data:
# +
# Wide count data: one count per mood level, with dummy-coded mood indicators
# ('happy' is the dropped reference level).
df = pd.DataFrame(index=["happy", "sad", "meh"])
df["counts"] = [70, 60, 90]
df = df.join(pd.get_dummies(df.index, prefix='mood', drop_first=True).set_index(df.index))
df
# -
# ### 7.1.3 Python code: Goodness of fit
#
# Now let's see that the Goodness of fit is just a log-linear equivalent to a one-way ANOVA. We set `family=sm.families.Poisson()` which defaults to setting a logarithmic [link function](https://en.wikipedia.org/wiki/Generalized_linear_model#Link_function) (you can verify that `res.model.family.link` is a logarithm).
#
# Note that `smf.ols` does not support GLMs: we need to use `sm.GLM`. While `sm.GLM` does not have a `patsy`-formula interface, we can still use `patsy.dmatrices` to get the [`endog` and `exog` design matrices,](https://www.statsmodels.org/stable/endog_exog.html) and then feed that into `sm.GLM`.
#
# <div class="alert alert-warning">
# <b>Note on Python port:</b>
# Unfortunately, <code>statsmodels</code> <a href="https://stackoverflow.com/q/27328623">does not currently support performing a one-way ANOVA test on GLMs</a> (the <code>anova_lm</code> function only works for linear models), so while we can perform the GLM, there is no support for computing the F-statistic or its p-value. Nevertheless, we will write the code to perform the generalized linear regression.
# </div>
# +
chi2, p = scipy.stats.chisquare(df.counts)  # goodness of fit against equal expected counts
# The `-1` is to remove the intercept term from the model
# See https://patsy.readthedocs.io/en/latest/formulas.html#intercept-handling
endog, exog = patsy.dmatrices("counts ~ 1 + mood_meh + mood_sad - 1", df)
# The Poisson family defaults to a log link, making this a log-linear model.
res = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
# -
# ## 7.2 Contingency tables
#
# ### 7.2.1 Theory: As log-linear model
#
# The theory here will be a bit more convoluted, and I mainly write it up so that you can get the *feeling* that it really is just a log-linear [two-way ANOVA model](#6.2-Two-way-ANOVA). Let's get started...
#
# For a two-way contingency table, the model of the count variable $y$ is a modeled using the marginal proportions of a contingency table. Why this makes sense, is too involved to go into here, but [see the relevant slides by <NAME> here](https://www.uni-tuebingen.de/fileadmin/Uni_Tuebingen/SFB/SFB_833/A_Bereich/A1/Christoph_Scheepers_-_Statistikworkshop.pdf) for an excellent exposition. The model is composed of a lot of counts and the regression coefficients $A_i$ and $B_j$:
#
# $$n_{ij} = N \cdot A_i \frac{n_i}{N} \cdot B_j \frac{n_j}{N} \cdot \frac{n_{ij} N}{n_i n_j}$$
#
# What a mess!!! Here, $i$ is the row index, $j$ is the column index, $n_{\text{something}}$ is the sum of that row and/or column, $N = \sum_{i, j}{n_{ij}}$. Remember that $n$ is a count variable, so $N$ is just the total count.
#
# We can simplify the notation by defining the *proportions*: $\alpha_i = A_i \frac{n_i}{N}$, $\beta_i = B_j \frac{n_j}{N}$ and $\alpha\beta_{ij} = \frac{n_{ij} N}{n_i n_j}$. Let's write the model again:
#
# $$n_{ij} = N \cdot \alpha_i \cdot \beta_j \cdot \alpha\beta_{ij}$$
#
# Ah, much prettier. However, there is still lots of multiplication which makes it hard to get an intuition about how the actual numbers interact. We can make it much more intelligible when we remember that $\log(A \cdot B) = \log(A) + \log(B)$. Doing logarithms on both sides, we get:
#
# $$\log(n_{ij}) = \log(N) + \log(\alpha_i) + \log(\beta_j) + \log(\alpha\beta_{ij})$$
#
# Snuggly! Now we can get a better grasp on how the regression coefficients (which are proportions) independently contribute to $y$. This is why logarithms are so nice for proportions. Note that this is just [the two-way ANOVA model](#anova2) with some logarithms added, so we are back to our good old linear models - only the interpretation of the regression coefficients have changed! And we cannot use `smf.ols` anymore in `statsmodels`.
# ### 7.2.2 Example data
#
# Here we need some long data and we need it in table format for `chisq.test`:
# +
# Build a 3x2 (mood x sex) contingency table, melt it to long format, and
# dummy-code both factors (dropping the first level of each as reference).
df = pd.DataFrame(data=[[100, 70], [30, 32], [110, 120]],
                  columns=["male", "female"],
                  index=["happy", "sad", "meh"])
df.index.name = "mood"
df.columns.name = "sex"
df = df.reset_index().melt(id_vars=["mood"])  # var column is named "sex" via columns.name
df = df.join(pd.get_dummies(df.mood, prefix="mood", drop_first=True))
df = df.join(pd.get_dummies(df.sex, prefix="sex", drop_first=True))
df
# -
# ### 7.2.3 Python code: Chi-square test
#
# Now let's show the equivalence between a chi-square model and a log-linear model. This is very similar to our [two-way ANOVA](#6.2-Two-way-ANOVA) above:
#
# <div class="alert alert-warning">
# <b>Note on Python port:</b>
# Unfortunately, <code>statsmodels</code> <a href="https://stackoverflow.com/q/27328623">does not currently support performing a two-way ANOVA test on GLMs</a> (the <code>anova_lm</code> function only works for linear models), so while we can perform the GLM, there is no support for computing the F-statistic or its p-value. Nevertheless, we'll go through the motions of performing the generalized linear regression.
# </div>
# +
# `chi2_contingency` expects the full R x C table of observed counts, but the
# previous code passed the melted 1-D `value` column, which cannot yield a
# valid contingency test. Rebuild the 3x2 (mood x sex) table first.
observed = df.pivot(index="mood", columns="sex", values="value")
chi2, p, dof, _ = scipy.stats.chi2_contingency(observed)
endog, exog = patsy.dmatrices("value ~ 1 + mood_meh*sex_male + mood_sad*sex_male - 1", df)
res = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
# -
# # 8 Sources and further equivalences
#
# Here are links to other sources who have exposed bits and pieces of this puzzle, including many further equivalences not covered here:
#
# * [My original exposition of the idea](https://stats.stackexchange.com/questions/303269/common-statistical-tests-as-linear-models) at Cross Validated
# * [An earlier question by me](https://stats.stackexchange.com/questions/210529/are-parametric-tests-on-rank-transformed-data-equivalent-to-non-parametric-test?noredirect=1#comment399981_210529) about non-parametric tests and a helpful answer.
# * [This question and replies](https://stats.stackexchange.com/questions/59047/how-are-regression-the-t-test-and-the-anova-all-versions-of-the-general-linear) on t-tests and ANOVA at StackOverflow
# * [These slides by <NAME>](https://www.uni-tuebingen.de/fileadmin/Uni_Tuebingen/SFB/SFB_833/A_Bereich/A1/Christoph_Scheepers_-_Statistikworkshop.pdf) on Chi-Square as log-linear models.
# * [This notebook by <NAME>](https://rpubs.com/palday/glm-test) on Chi-square, binomial, multinomial, and poisson tests as log-linear and logistic models. These "equivalences" are less exact than what I presented above, and were therefore not included here. They are still great for a conceptual understanding of these tests, though!
# * [This article by <NAME>](https://rpsychologist.com/r-guide-longitudinal-lme-lmer) on RM-ANOVA and growth models using `lme4::lmer` mixed models.
# * [This post by <NAME>](https://seriousstats.wordpress.com/2012/02/14/friedman/) on the Friedman test. That post was actually the one that inititated my exploration of linear equivalences to "non-parametric"" tests which ultimately pushed me over the edge to write up the present article.
# # 9 Teaching materials and a course outline
#
# Most advanced stats books (and some intro-books) take the "everything is GLMM" approach as well. However, the "linear model" part often stays at the conceptual level, rather than being made explicit. I wanted to make linear models the *tool* in a concise way. Luckily, more beginner-friendly materials have emerged lately:
#
# * <NAME>'s open-source book "Statistical Thinking for the 21st century" (start at [chapter 5 on modeling](http://statsthinking21.org/fitting-models-to-data.html))
#
# * [<NAME>ouder's course notes](https://jeffrouder.blogspot.com/2019/03/teaching-undergrad-stats-without-p-f-or.html), introducing model comparison using just $R^2$ and BIC. It avoids all the jargon on p-values, F-values, etc. The full materials and slides [are available here](https://drive.google.com/drive/folders/1CiJK--bAuO0F-ug3B5I3FvmsCdpPGZ03).
#
# Here are my own thoughts on what I'd do. I've taught parts of this with great success already, but not the whole program since I'm not assigned to teach a full course yet.
#
# I would spend 50% of the time on linear modeling of data since this contains 70% of what students need to know (bullet 1 below). The rest of the course is fleshing out what happens when you have one group, two groups, etc.
#
# Note that whereas the understanding of sampling and hypothesis testing is usually the first focus of mainstream stats courses, it is saved for later here to build upon students' prior knowledge, rather than throwing a lot of conceptually novel material at them.
#
# 1. **Fundamentals of regression:**
# 1. Recall from high-school: $y = a \cdot x + b$, and getting a really good intuition about slopes and intercepts. Understanding that this can be written using all variable names, e.g., `money = profit * time + starting_money` or $y = \beta_1x + \beta_2*1$ or, suppressing the coefficients, as `y ~ x + 1`. If the audience is receptive, convey the idea of these models [as a solution to differential equations](https://magesblog.com/post/modelling-change), specifying how $y$ *changes* with $x$.
#
# 2. Extend to a few multiple regression as models. Make sure to include plenty of real-life examples and exercises at this point to make all of this really intuitive. Marvel at how briefly these models allow us to represent large datasets.
#
# 3. Introduce the idea of rank-transforming non-metric data and try it out.
#
# 4. Teach the three assumptions: independence of data points, normality of residuals, and homoscedasticity.
#
# 5. Confidence/credible intervals on the parameters. Stress that the Maximum-Likelihood estimate is extremely unlikely, so intervals are more important.
#
# 6. Briefly introduce $R^2$ for the simple regression models above. Mention in passing that this is called [the Pearson and Spearman correlation coefficients](#3-Pearson-and-Spearman-correlation).
#
# 2. **Special case #1: One or two means (t-tests, Wilcoxon, Mann-Whitney):**
#
# 1. **One mean:** When there is only one x-value, the regression model simplifies to $y = b$. If $y$ is non-metric, you can rank-transform it. Apply the assumptions (homoscedasticity doesn't apply since there is only one $x$). Mention in passing that these intercept-only models are called [one-sample t-test and Wilcoxon Signed Rank test respectively](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank).
#
# 2. **Two means:** If we put two variables 1 apart on the x-axis, the difference between the means is the slope. Great! It is accessible to our Swiss army knife called linear modeling. Apply the assumption checks to see that homoscedasticity reduces to equal variance between groups. This is called an [independent t-test](#5.1-Independent-t-test-and-Mann-Whitney-U). Do a few worked examples and exercises, maybe adding Welch's test, and do the rank-transformed version, called Mann-Whitney U.
#
# 3. *Paired samples:* Violates the independence assumption. After computing pairwise differences, this is equivalent to 2.1 (one intercept), though it is called the [paired t-test and Wilcoxon's matched pairs](#4.2-Paired-samples-t-test-and-Wilcoxon-matched-pairs).
#
# 3. **Special case #2: Three or more means (ANOVAs)**
#
# 1. *[Dummy coding](#5.1.2-Theory:-Dummy-coding) of categories:* How one regression coefficient for each level of a factor models an intercept for each level when multiplied by a binary indicator. This is just extending what we did in 2.1. to make this data accessible to linear modeling.
#
# 2. *Means of one variable:* [One-way ANOVA](#6.1-One-way-ANOVA-and-Kruskal-Wallis).
#
# 3. *Means of two variables:* [Two-way ANOVA](#6.2-Two-way-ANOVA).
#
# 4. **Special case #3: Three or more proportions (Chi-Square)**
#
# 1. *Logarithmic transformation:* Making multiplicative models linear using logarithms, thus modeling proportions. See [this excellent introduction](https://www.uni-tuebingen.de/fileadmin/Uni_Tuebingen/SFB/SFB_833/A_Bereich/A1/Christoph_Scheepers_-_Statistikworkshop.pdf) to the equivalence of log-linear models and Chi-Square tests as models of proportions. Also needs to introduce (log-)odds ratios. When the multiplicative model is made summative using logarithms, we just add the dummy-coding trick from 3.1, and see that the models are identical to the ANOVA models in 3.2 and 3.3, only the interpretation of the coefficients have changed.
#
# 2. *Proportions of one variable:* [Goodness of fit](#7.1-Goodness-of-fit).
#
# 3. *Proportions of two variables:* [Contingency tables](#7.2-Contingency-tables).
#
# 5. **Hypothesis testing:**
#
#     1. *Hypothesis testing as model comparisons:* Hypothesis testing is the act of choosing between a full model and one where a parameter is fixed to a particular value (often zero, i.e., effectively excluded from the model) instead of being estimated. For example, when fixing one of the two means to zero in the [t-test](#5.1-Independent-t-test-and-Mann-Whitney-U), we study how well a single mean (a [one-sample t-test](#4.1-One-sample-t-test-and-Wilcoxon-signed-rank)) explains all the data from both groups. If it does a good job, we prefer this model over the two-mean model because it is simpler. So hypothesis testing is just comparing linear models to make more qualitative statements than the truly quantitative statements which were covered in bullets 1-4 above. As tests of single parameters, hypothesis testing is therefore less informative. However, when testing multiple parameters at the same time (e.g., a factor in ANOVA), model comparison becomes invaluable.
#
# 2. *Likelihood ratios:* Likelihood ratios are the Swiss army knife which will do model comparison all the way from the one-sample t-test to GLMMs. BIC penalizes model complexity. Moreover, add priors and you've got Bayes Factors. One tool, and you're done. I've used LRTs in the ANOVAs above.
# # 10 Limitations
#
# I have made a few simplifications for clarity:
#
# 1. I have not covered assumptions in the examples. This will be another post! But all assumptions of all tests come down to the usual three: a) independence of data points, b) normally distributed residuals, and c) homoscedasticity.
#
# 2. I assume that all null hypotheses are the absence of an effect, but everything works the same for non-zero null hypotheses.
#
# 3. I have not discussed inference. I am only including p-values in the comparisons as a crude way to show the equivalences between the underlying models since people care about p-values. Parameter estimates will show the same equivalence. How to do *inference* is another matter. Personally, I'm a Bayesian, but going Bayesian here would render it less accessible to the wider audience. Also, doing [robust models](https://en.wikipedia.org/wiki/Robust_statistics) would be preferable, but fail to show the equivalences.
#
# 4. Several named tests are still missing from the list and may be added at a later time. This includes the Sign test (require large N to be reasonably approximated by a linear model), Friedman as RM-ANOVA on `rank(y)`, McNemar, and Binomial/Multinomial. See stuff on these in [the section on links to further equivalences](#8-Sources-and-further-equivalences). If you think that they should be included here, feel free to submit "solutions" to [the GitHub repo](https://github.com/eigenfoo/tests-as-linear/) of this doc!
# # 11 License
#
# <a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a>
#
# _Common statistical tests are linear models_: Python port by [<NAME> and <NAME>](https://eigenfoo.xyz/tests-as-linear/) is licensed under a [Creative Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/).
#
# Based on a work at https://lindeloev.github.io/tests-as-linear/.
#
# Permissions beyond the scope of this license may be available at https://github.com/eigenfoo/tests-as-linear.
|
tests-as-linear.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Welcome to gdsfactory tutorial
# ## Responsive plots
#
# You can get responsive plots thanks to JupyterLab [widgets](https://matplotlib.org/stable/users/interactive.html#jupyter-notebooks-lab)
#
# you will need to add
#
# ```
# # %matplotlib widget
# ```
# +
# %matplotlib widget
import gdsfactory as gf
# Build a text-geometry component; leaving `c` as the last expression in the
# cell makes Jupyter render it interactively (widget backend enabled above).
c = gf.components.text("Welcome to gdsfactory")
c
# + nbsphinx="hidden"
# This is a hidden cell which will be executed but will not show up in Sphinx docs.
# To hide a cell, in Jupyter go to Menu->View->Cell Toolbar->Edit Metadata
# and enter in {"nbsphinx": "hidden"}
# another option is to add
# NBVAL_SKIP
# -
# you can also Set figure size and font size
#
# ```python
# import matplotlib.pyplot as plt
# plt.rcParams['figure.figsize'] = [8, 4]
# plt.rcParams['font.size'] = 12
# ```
|
docs/notebooks/_template.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="./images/DLI_Header.png">
# ## Scaling Learning Rate with Batch Size
#
# It's important to recognize that when we scale our training to multiple GPUs, the batch size that we give each GPU is not the overall effective batch size. After each mini-batch is processed and a single GPU performs backpropagation, those weights are averaged among all the ranks. As a result, one update to the model takes into account every mini-batch, across all the GPUs. The effective batch size is therefore the size of the mini-batch multiplied by the number of GPUs. As a result, when more and more GPUs are used, the effective batch size can get very large.
#
# As you saw in Lab 1, larger batch sizes can lead to drops in accuracy, especially in a network's ability to generalize. The effect of a larger batch size also brings us closer to full batch gradient descent, and takes some of the variance or noise out of the process. Additionally, with a larger batch size fewer update steps are taken per epoch, which can slow down the training.
#
# The common approach to adding variance back into the process, as well as accounting for fewer steps per epoch, is to increase the learning rate as you increase batch size. As the algorithm takes fewer steps per epoch, adjusting the learning rate compensates for this with a larger step each time.
#
# Theory suggests that if we scale batch size by K, we should scale learning rate by the square root of K to maintain the variance. It's also common in large scale training to adjust the learning rate linearly, by a factor of K.
#
# ### Experimentation
#
# For this exercise, take about 15-20 minutes experimenting with different learning rates and batch sizes. Try holding the batch size steady while increasing the learning rate, and then try increasing the batch size while holding the learning rate steady. Finally, as you continue to increase the batch size, increase the learning rate as well. Try batch sizes up to 128 or even 256, and a learning rate up to .08 or higher. We recommend that you pick a certain validation accuracy for the training to reach using `--target-accuracy`, or to simply run the training for 2-3 minutes each time.
#
# Take a look at the chart regularly to see how training runs compare. Again, feel free to remove datasets from the `training_data` folder if the chart gets too crowded.
#
# The object of the exercise is not necessarily to find the "optimal" combination, but more to get a feel for the trends as you adjust the parameters. Note that because of the randomness in the process, both in terms of the starting weights, as well as the shuffling of the data, you are not guaranteed to get the same results when you run the same training twice.
#
# When you feel you have a good understanding of how the training is affected, move on to the next section.
# Number of GPUs (i.e. MPI ranks) used to launch the distributed training below.
num_gpus = 4
# !mpirun -np $num_gpus python fashion_mnist.py --base-lr 0.001 --batch-size 32 --target-accuracy 0.82
# +
# The plotting script needs to be defined again, now that you're in a different notebook
# %matplotlib widget
import os
import numpy as np
import matplotlib.pyplot as plt
# By default we skip the first row, which contains the headers
# By skipping 2 rows, you can disregard the first data-point (0,0) to get a closer look
def plot_trainings(skiprows=1):
    """Plot every training log in ./training_data on one accuracy-vs-time chart.

    Parameters
    ----------
    skiprows : int
        Number of rows to skip at the top of each CSV. 1 skips only the
        header row; 2 also drops the initial (0, 0) data point so the
        interesting part of the curve fills the axes.
    """
    plt.close()
    # Only process real .csv logs: this skips .ipynb_checkpoints as before,
    # and also any other stray/hidden file instead of crashing on it.
    # Sorting keeps the legend order stable across runs (os.listdir order
    # is arbitrary).
    for filename in sorted(os.listdir("training_data")):
        if not filename.endswith(".csv"):
            continue
        x, y = np.loadtxt("training_data/" + filename, delimiter=',',
                          unpack=True, skiprows=skiprows)
        plt.plot(x, y, label=filename.split('.csv')[0])
    plt.xlabel('Time (s)')
    plt.ylabel('Validation Accuracy')
    plt.title('Training Comparison')
    plt.legend()
    plt.show()
# -
# Plot all recorded runs; call plot_trainings(2) instead to also skip the
# (0,0) datapoint at the start of each curve.
plot_trainings()
# ## Adding Learning Rate Warmup
#
# In the course of your experimentation, you might have found that a high enough learning rate caused the network to never converge. You may have seen validation accuracy stay around .10, meaning the model was only as good as guessing at random. If you didn't see this effect yet, run the following training for a couple of epochs. (_Note: the training can "get lucky" and converge. Try running more than once and you should see divergence._)
# !mpirun -np $num_gpus python fashion_mnist.py --base-lr .06 --batch-size 32
# It is often the case that a high learning rate at the beginning of a training can cause divergence. In this scenario, the weights are updated with a high enough magnitude that they overshoot, and never end up finding the slope toward a minimum.
#
# In order to remedy this, we will implement a technique known as learning rate warmup. With this approach, the learning rate will start at a fraction of the target value, and slowly scale up over a series of epochs. This allows the network to move slowly at first, taking "careful steps" as it finds a slope toward the minimum. As the learning rate increases to the target value, the benefits of the larger learning rate will take effect.
#
# ### Implementation
#
# Horovod has a convenient callback for the Keras API that implements the logic: `horovod.tensorflow.keras.callbacks.LearningRateWarmupCallback`. By default, over the first 5 epochs it will gradually increase, starting from the _learning rate / the number of GPUs_. Execute the next cell to see information on the callback.
import horovod.tensorflow.keras as hvd
# ?hvd.callbacks.LearningRateWarmupCallback
# To implement the callback follow the next steps.
#
# Step 1: Register a new `warmup-epochs` argument using the following code.
#
# ```python
# parser.add_argument('--warmup-epochs', type=float, default=5,
# help='number of warmup epochs')
# ```
#
# Step 2: Using `args.warmup_epochs` as the `warmup_epochs` argument, implement the callback in the Horovod callbacks array. To get a printout when the warmup is finished, set the `verbose` argument to `verbose`.
#
# Step 3 (optional): Update the CSV filename to include the number of warmup epochs.
#
# If you have trouble, you can find the solution in `solutions/add_lr_warmup.py`
#
# Once you are finished, try again and see if it looks better.
# !mpirun -np $num_gpus python fashion_mnist.py --base-lr .06 --batch-size 32
# ## Large Scale Optimizations
#
# The Fashion MNIST exercise that we've been running in this course is a fairly small problem for a handful of GPUs. This allows us to run trainings quickly, on the order of minutes, to see interesting results. As problems get bigger, in terms of dataset size and model complexity, larger systems with many more GPUs can be crucial to reduce training times.
#
# However, as scaling continues, and the batch size and learning rate increase, a problem can arise. When the learning rate is large, the update to the weights may be larger than the weights themselves, causing the training to diverge.
#
# To approximate a large scale training with a high learning rate, run the next cell. (_Note that the algorithm can "get lucky" and converge, but if you run multiple times, you should see divergence._)
# !mpirun -np $num_gpus python fashion_mnist.py --base-lr .16 --batch-size 256
# ### The NovoGrad Optimizer
#
# A series of optimizers have been created to address this problem, and allow for scaling to very large batch sizes and learning rates. In this exercise we'll be using the [NovoGrad optimizer](https://arxiv.org/abs/1905.11286). NovoGrad has the standard form of an update to the weights,
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \mathbf{m}
# \end{equation*}
#
# but the $\mathbf{m}$ term appropriately normalizes the gradients to avoid the [vanishing gradient (or exploding gradient) problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem), using a gradient-averaging scheme similar to how SGD uses momentum to do that normalization. NovoGrad ensures that the learning rate is scaled appropriately on each layer, which empirically is [important in the large batch regime](https://arxiv.org/abs/1708.03888). If you are interested in continuing this exploration after this course, the [LAMB optimizer](https://arxiv.org/abs/1904.00962) is another extremely promising recent method worth exploring, which is very similar to NovoGrad in that it combines both [Adam](https://arxiv.org/abs/1412.6980), a popular variant of SGD, and layer-wise learning rates.
#
# If you want to learn more about the theory behind NovoGrad, you may optionally expand the cell below. Otherwise, feel free to continue on to the exercise.
# + [markdown] jupyter={"source_hidden": true}
#
# #### Layer-wise learning rate control
#
# NovoGrad combines several insights about SGD. First, it recognizes that gradient updates should be decoupled from the absolute magnitude of the gradient -- the direction is more important than the size. The magnitude of an update should be of order the magnitude of the weight multiplied by the learning rate, and since the learning rate is sufficiently small, this means that we make relatively small updates as we search for the optimum. Unfortunately, traditional SGD does not enforce this; the update to the weights $\mathbf{w}$ is in the form:
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \mathbf{g}
# \end{equation*}
#
# where $\lambda$ is the learning rate and $\mathbf{g}$ is the gradient of the loss function. The size of the gradient is determined by the loss function, which is not required to be commensurate with the scale of the weight. Furthermore, backpropagation tends to exacerbate this issue (i.e. the [vanishing gradient (or exploding gradient) problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem) that plagued deep CNNs until algorithmic improvements like [residual connections](https://arxiv.org/abs/1512.03385) were developed). Most SGD algorithms developed over the past few years attempt to solve this problem in one way or another.
#
# An intuitive way to deal with this is simply to divide the gradients on each layer by the norm of the gradients of that layer:
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \frac{\mathbf{g}}{|\mathbf{g}|}
# \end{equation*}
#
# where the norm $|\mathbf{g}|$ is typically the root-mean-square operation. This can generally be described as [stochastic normalized gradient descent](https://arxiv.org/abs/1507.02030).
#
# Another way to think about this is that, in a sense, the update has the wrong units (see Section 3.2 in the [ADADELTA paper](https://arxiv.org/abs/1212.5701) for a more rigorous discussion than this one). That is, if we imagine that the weights have a dimension (say, meters) and we take the partial derivative with respect to time, then the gradient has units of meters per second (that is, a speed or velocity), and applying an update to a position by adding a velocity does not make sense. We need to update the distance by an amount in meters. Dividing by the gradient by a norm makes the update dimensionless, and we can recover the correct scale by scaling the update by the norm of the weights. That is, we could have an update of the form
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, |\mathbf{w}| \frac{\mathbf{g}}{|\mathbf{g}|}
# \end{equation*}
#
# which has the desired scale and units, but still points in the direction of the gradient of the loss function.
#
# Both of these approaches largely prevent vanishing/exploding gradients from causing the optimization process to diverge, because the magnitude of the update is now uncoupled from the absolute scale of the gradient, which could be much larger or much smaller than the weights on that layer.
#
# The second approach was taken with the [LARS optimizer](https://arxiv.org/abs/1708.03888), which defines the update on a given layer as:
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda^{\mathrm{global}}\, \lambda^{\mathrm{local}}\, \mathbf{g}
# \end{equation*}
#
# where the "global" learning rate is the normal learning rate policy you're familiar with (some small number like 0.01 that may decay over time), and the "local" per-layer learning rate is defined as
#
# \begin{equation*}
# \large
# \lambda^{\mathrm{local}} = \eta\, \frac{|\mathbf{w}|}{|\mathbf{g}|}
# \end{equation*}
#
# Here $\eta$ is a "trust coefficient" which should be less than 1 and decides how much we want to update the weights on each layer during an update. Observe that this scheme is essentially equivalent to previous formulation. LARS and related methods have been influential in making possible large-batch SGD.
#
# Note that LARS is very closely related to [LARC (Layer-wise Adaptive Rate Control)](https://nvidia.github.io/OpenSeq2Seq/html/optimizers.html) and the two terms are sometimes used interchangeably. LARC is a slight variant on LARS that "clips" the local learning rate so that it is not higher than the global learning rate; that is, the update is in the form
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \mathbf{g}
# \end{equation*}
#
# with the learning rate set by
#
# \begin{equation*}
# \large
# \lambda = \mathrm{min}(\lambda^{\mathrm{global}}, \lambda^{\mathrm{local}})
# \end{equation*}
#
# As a side note, in this discussion we are neglecting [weight decay](https://papers.nips.cc/paper/563-a-simple-weight-decay-can-improve-generalization.pdf) for simplicity, but it is straightforward to add it to these optimizers.
#
# #### Gradient averaging and momentum
#
# A separate set of efforts uses the concept of gradient averaging: the gradient we apply should be an average of the gradient in this step and the gradient in the previous step. As an illustration, we have already discussed how momentum can be used to avoid being trapped in local minima and to more efficiently escape saddle points. SGD with momentum can in fact be seen as a form of gradient averaging -- the effective gradient is a linear combination of the gradient in this step and the gradient from the last step. Optimizers such as [RMSprop](https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) implement this idea; the update looks a lot like the LARS update, except that the norm of the gradient used in the denominator is a linear combination of the gradient from this step and the gradient from the last step.
#
# The most popular implementation of this concept is the [Adam](https://arxiv.org/abs/1412.6980) optimizer, which works as follows. Suppose we have an update in this form:
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \frac{\mathbf{m}}{\sqrt{v}}
# \end{equation*}
#
# where $\mathbf{m}$ is a gradient-like term and $v$ is a term that is like the norm of the square of the gradient (so that we recover the earlier form if $\mathbf{m} = \mathbf{g}$ and $v = |\mathbf{g}^2|$). We can implement gradient averaging in the following way:
#
# \begin{equation*}
# \large
# \mathbf{m} = \beta_{1}\, \mathbf{m}_{\mathrm{prev}} + (1 - \beta_{1})\, \mathbf{g}
# \end{equation*}
#
# where $\mathbf{m}_{\mathrm{prev}}$ was the "gradient" term on the previous step, and
#
# \begin{equation*}
# \large
# v = \beta_{2}\, v_{\mathrm{prev}} + (1 - \beta_{2})\, |\mathbf{g}^2|
# \end{equation*}
#
# where $v_{\mathrm{prev}}$ was the "gradient-squared" term on the previous step. This means that we are keeping a running average of the gradient, rather than simply applying the gradient from this step. Adam is thus quite robust as a training optimizer compared to simpler optimizers like traditional SGD; however, it also tends to [generalize worse](https://arxiv.org/abs/1712.07628) than SGD with momentum.
#
# #### Combining layer-wise rate control and gradient averaging
#
# NovoGrad combines both of these concepts. The form of the update is back to
#
# \begin{equation*}
# \large
# \Delta \mathbf{w} = -\lambda\, \mathbf{m}
# \end{equation*}
#
# but the $\mathbf{m}$ term appropriately normalizes the gradients:
#
# \begin{equation*}
# \large
# \mathbf{m} = \beta_{1}\, \mathbf{m}_{\mathrm{prev}} + (1 - \beta_{1})\, \frac{\mathbf{g}}{\sqrt{v}}
# \end{equation*}
#
# (and we calculate the update to $v$ first so that we can use it in the update to $\mathbf{m}$).
# -
# ### NovoGrad Implementation
#
# Your next step will be to replace the SGD optimizer in `fashion_mnist.py` with the NovoGrad optimizer.
#
# Step 1: Import the NovoGrad optimizer:
# ```python
# from tensorflow_addons.optimizers import NovoGrad
# ```
# This is helpfully provided for us by the [TensorFlow Addons](https://github.com/tensorflow/addons) repository.
#
# Step 2: Replace the SGD optimizer with NovoGrad:
# ```python
# opt = NovoGrad(lr=args.base_lr, grad_averaging=True)
# ```
# The `grad_averaging` parameter, which weights the momentum using a mix of the current and previous steps (like Adam), is empirically helpful for this problem.
#
# If you have any trouble, you can find the solution at `solutions/add_novograd.py`. Once you've implemented the optimizer, run the training again in the cell below.
# !mpirun -np $num_gpus python fashion_mnist.py --base-lr .16 --batch-size 256
# ### Results
#
# Hopefully the training converged for you more successfully than with standard stochastic gradient descent on the training dataset. Validation accuracy is probably poor in the early epochs, but after waiting for at least 10-20 epochs, you will likely see validation accuracy start to steadily improve and eventually converge to a desirable result. NovoGrad, as well as the other optimizers that are available are important tools to consider when you build your training pipeline.
# ## Conclusion and Next Steps
#
# Multiple GPU training is quickly becoming a critical tool for engineers and data scientists. Accelerated training has made previously impossible AI challenges, such as complex image classification, solvable. Similarly, we have made amazing strides in natural language processing with the [BERT](https://arxiv.org/abs/1810.04805) model, which is often trained on dozens to hundreds of GPUs. But multi GPU training has the power to transform deep learning work at all levels, not just for high-end production implementations. If your training takes hours or even days, experimentation can become tedious and even impractical. Using multiple GPUs to speed up training allows for more time to be spent on refining and improving your model. It enables you to be more flexible and responsive, both in the initial phases of development and as you update and improve your model over time.
#
# Moving forward, you may have access to a multi GPU system such as an appliance based on the [HGX architecture](https://www.nvidia.com/en-us/data-center/hgx/), or plan to use a cloud server, such as the one provided for this course. In either case, the [NGC Catalog](https://www.nvidia.com/en-us/gpu-cloud/#ngc-platforms) gives you free access to performance-optimized AI software that provide DL frameworks, pre-trained models, and industry-focused end-to-end AI workflows. We use NGC containers in this course. In addition to removing the challenge associated with installing these frameworks, NGC containers are highly optimized for both training and inference on NVIDIA GPUs. Here’s the guide to [getting started with NGC](https://ngc.nvidia.com/catalog/collections/nvidia:gettingstarted); you can find the most up-to-date documentation in the [NVIDIA docs](https://docs.nvidia.com/ngc/index.html), along with specific setup steps for [Azure](https://docs.nvidia.com/ngc/ngc-azure-setup-guide/index.html), [AWS](https://docs.nvidia.com/ngc/ngc-aws-setup-guide/index.html), and [GCP](https://docs.nvidia.com/ngc/index.html#ngc-with-google-cloud-platform-(gcp). NGC containers are also set up to work right out of the box on dense GPU platforms for AI and computing.
#
# Thank you for joining us in the course today. We hope you feel prepared and excited to apply multi GPU training to your next AI challenge.
# <img src="./images/DLI_Header.png">
|
Lab 3/02_Notebook_Exercising_Optimization_Strategies.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] chapterId="By3zOAX-ebM" id="chapter_name"
# # Chapter17 ハイパーパラメータとチューニング(1)
# + [markdown] id="table"
# - **17.1 ハイパーパラメータとチューニング**
# - **17.1.1 ハイパーパラメータとは**
# - **17.1.2 チューニングとは**
# <br><br>
# - **17.2 ロジスティック回帰のハイパーパラメータ**
# - **17.2.1 パラメータ C**
# - **17.2.2 パラメータ penalty**
# - **17.2.3 パラメータ multi_class**
# - **17.2.4 パラメータ random_state**
# <br><br>
# - **17.3 線形SVMのハイパーパラメータ**
# - **17.3.1 パラメータ C**
# - **17.3.2 パラメータ penalty**
# - **17.3.3 パラメータ multi_class**
# - **17.3.4 パラメータ random_state**
# <br><br>
# - **17.4 非線形SVMのハイパーパラメータ**
# - **17.4.1 パラメータ C**
# - **17.4.2 パラメータ kernel**
# - **17.4.3 パラメータ decision_function_shape**
# - **17.4.4 パラメータ random_state**
# <br><br>
# - **添削問題**
# + [markdown] id="section_name" sectionId="BJ6zdRXWeZf"
# ## ●17.1 ハイパーパラメータとチューニング
# + [markdown] courseId=5020 exerciseId="rykx9nUi8xf" id="quiz_session_name" important=true isDL=false timeoutSecs=5
# ### ○17.1.1 ハイパーパラメータとは
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="r1el538i8gf" id="quiz_session_name" important=true isDL=false timeoutSecs=5
# ### ○17.1.2 チューニングとは
# -
# Tuning example using a fictitious model called Classifier: hyperparameters
# are passed as keyword arguments at construction time. (Example only — do not
# run this code; Classifier is not a real class.)
model = Classifier(param1=1.0, param2=True, param3="linear")
# **リスト 17.1:チューニングの例(このコードは例なので実行しないでください)**
# + [markdown] id="section_name" sectionId="ByCfuRQbe-G"
# ## ●17.2 ロジスティック回帰のハイパーパラメータ
# + [markdown] courseId=5020 exerciseId="r1-x93LoIgf" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.2.1 パラメータ C
# -
# #### □問題
# + id="index"
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# Generate a synthetic classification dataset
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Set the range of C values (here: 1e-5, 1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1000, 10000)
C_list = [10 ** i for i in range(-5, 5)]
# Prepare empty lists for plotting
train_accuracy = []
test_accuracy = []
# Write your code below
# (exercise: fit a LogisticRegression for each C and append the train/test
# accuracies to the lists above — the loop body is intentionally left empty)
for C in C_list:
# End of the section to edit
# Prepare the plot
# semilogx() switches the x-axis to a log scale (powers of 10)
plt.semilogx(C_list, train_accuracy, label="accuracy of train_data")
plt.semilogx(C_list, test_accuracy, label="accuracy of test_data")
plt.title("accuracy by changing C")
plt.xlabel("C")
plt.ylabel("accuracy")
plt.legend()
plt.show()
# -
# **リスト 17.2:問題**
# #### □解答例
# + id="answer"
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# Generate a synthetic classification dataset
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Set the range of C values (here: 1e-5, 1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1000, 10000)
C_list = [10 ** i for i in range(-5, 5)]
# Prepare empty lists for plotting
train_accuracy = []
test_accuracy = []
# Write your code below
# Fit one model per regularization strength C and record its accuracy on
# both the training and the held-out test split.
for C in C_list:
model = LogisticRegression(C=C, random_state=42)
model.fit(train_X, train_y)
train_accuracy.append(model.score(train_X, train_y))
test_accuracy.append(model.score(test_X, test_y))
# End of the section to edit
# Prepare the plot
# semilogx() switches the x-axis to a log scale (powers of 10)
plt.semilogx(C_list, train_accuracy, label="accuracy of train_data")
plt.semilogx(C_list, test_accuracy, label="accuracy of test_data")
plt.title("accuracy by changing C")
plt.xlabel("C")
plt.ylabel("accuracy")
plt.legend()
plt.show()
# -
# **リスト 17.3:解答例**
# + [markdown] courseId=5020 exerciseId="Syfl5hUiIgM" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.2.2 パラメータ penalty
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="rJme52LiUlf" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.2.3 パラメータ multi_class
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="ryVl92Lo8eM" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.2.4 パラメータ random_state
# -
# この項にサンプルはありません
# + [markdown] id="section_name" sectionId="Bk1XO07WeWz"
# ## ●17.3 線形SVMのハイパーパラメータ
# + [markdown] courseId=5020 exerciseId="SJSgcnIsIez" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.3.1 パラメータ C
# -
# #### □問題
# + id="index"
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# Generate a synthetic classification dataset
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Set the range of C values (here: 1e-5, 1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1000, 10000)
C_list = [10 ** i for i in range(-5, 5)]
# Prepare empty lists for plotting
svm_train_accuracy = []
svm_test_accuracy = []
log_train_accuracy = []
log_test_accuracy = []
# Write your code below
# (exercise: for each C, fit a LinearSVC and a LogisticRegression and append
# their train/test accuracies — the loop body is intentionally left empty)
for C in C_list:
# End of the section to edit
# Prepare the plots
# semilogx() switches the x-axis to a log scale (powers of 10)
fig = plt.figure()
plt.subplots_adjust(wspace=0.4, hspace=0.4)
ax = fig.add_subplot(1, 1, 1)
ax.grid(True)
ax.set_title("SVM")
ax.set_xlabel("C")
ax.set_ylabel("accuracy")
ax.semilogx(C_list, svm_train_accuracy, label="accuracy of train_data")
ax.semilogx(C_list, svm_test_accuracy, label="accuracy of test_data")
ax.legend()
ax.plot()
plt.show()
fig2 =plt.figure()
ax2 = fig2.add_subplot(1, 1, 1)
ax2.grid(True)
ax2.set_title("LogisticRegression")
ax2.set_xlabel("C")
ax2.set_ylabel("accuracy")
ax2.semilogx(C_list, log_train_accuracy, label="accuracy of train_data")
ax2.semilogx(C_list, log_test_accuracy, label="accuracy of test_data")
ax2.legend()
ax2.plot()
plt.show()
# -
# **リスト 17.4:問題**
# #### □解答例
# + id="answer"
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# Generate a synthetic classification dataset
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Set the range of C values (here: 1e-5, 1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1000, 10000)
C_list = [10 ** i for i in range(-5, 5)]
# Prepare empty lists for plotting
svm_train_accuracy = []
svm_test_accuracy = []
log_train_accuracy = []
log_test_accuracy = []
# Write your code below
# For each C, fit a linear SVM and a logistic regression on the same data and
# record both models' accuracies so the two can be compared side by side.
for C in C_list:
model1 = LinearSVC(C=C, random_state=42)
model1.fit(train_X, train_y)
svm_train_accuracy.append(model1.score(train_X, train_y))
svm_test_accuracy.append(model1.score(test_X, test_y))
model2 = LogisticRegression(C=C, random_state=42)
model2.fit(train_X, train_y)
log_train_accuracy.append(model2.score(train_X, train_y))
log_test_accuracy.append(model2.score(test_X, test_y))
# End of the section to edit
# Prepare the plots
# semilogx() switches the x-axis to a log scale (powers of 10)
fig = plt.figure()
plt.subplots_adjust(wspace=0.4, hspace=0.4)
ax = fig.add_subplot(1, 1, 1)
ax.grid(True)
ax.set_title("SVM")
ax.set_xlabel("C")
ax.set_ylabel("accuracy")
ax.semilogx(C_list, svm_train_accuracy, label="accuracy of train_data")
ax.semilogx(C_list, svm_test_accuracy, label="accuracy of test_data")
ax.legend()
ax.plot()
plt.show()
fig2 =plt.figure()
ax2 = fig2.add_subplot(1, 1, 1)
ax2.grid(True)
ax2.set_title("LogisticRegression")
ax2.set_xlabel("C")
ax2.set_ylabel("accuracy")
ax2.semilogx(C_list, log_train_accuracy, label="accuracy of train_data")
ax2.semilogx(C_list, log_test_accuracy, label="accuracy of test_data")
ax2.legend()
ax2.plot()
plt.show()
# -
# **リスト 17.5:解答例**
# + [markdown] courseId=5020 exerciseId="HyLgq2IoIlf" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.3.2 パラメータ penalty
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="ByPg93LjLgf" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.3.3 パラメータ multi_class
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="rkdeqn8o8gz" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.3.4 パラメータ random_state
# -
# この項にサンプルはありません
# + [markdown] id="section_name" sectionId="SyeQ_AQWl-f"
# ## ●17.4 非線形SVMのハイパーパラメータ
# + [markdown] courseId=5020 exerciseId="BJYlc3LsLxf" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.4.1 パラメータ C
# -
# #### □問題
# + id="index"
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_gaussian_quantiles
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# データを生成します
X, y = make_gaussian_quantiles(n_samples=1250, n_features=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Cの値の範囲を設定します(ここでは1e-5,1e-4,1e-3,0.01,0.1,1,10,100,1000,10000)
C_list = [10 ** i for i in range(-5, 5)]
# グラフ描画用の空リストを用意します
train_accuracy = []
test_accuracy = []
# 以下にコードを記述してください
for C in C_list:
# コードの編集はここまでです
# グラフを準備します
# semilogx() は x のスケールを 10 の x 乗のスケールに変更します
plt.semilogx(C_list, train_accuracy, label="accuracy of train_data")
plt.semilogx(C_list, test_accuracy, label="accuracy of test_data")
plt.title("accuracy with changing C")
plt.xlabel("C")
plt.ylabel("accuracy")
plt.legend()
plt.show()
# -
# **リスト 17.6:問題**
# #### □解答例
# + id="answer"
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_gaussian_quantiles
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# データを生成します
X, y = make_gaussian_quantiles(n_samples=1250, n_features=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# Cの値の範囲を設定します(ここでは1e-5,1e-4,1e-3,0.01,0.1,1,10,100,1000,10000)
C_list = [10 ** i for i in range(-5, 5)]
# グラフ描画用の空リストを用意します
train_accuracy = []
test_accuracy = []
# 以下にコードを記述してください
for C in C_list:
model = SVC(C=C)
model.fit(train_X, train_y)
train_accuracy.append(model.score(train_X, train_y))
test_accuracy.append(model.score(test_X, test_y))
# コードの編集はここまでです
# グラフを準備します
# semilogx() は x のスケールを 10 の x 乗のスケールに変更します
plt.semilogx(C_list, train_accuracy, label="accuracy of train_data")
plt.semilogx(C_list, test_accuracy, label="accuracy of test_data")
plt.title("accuracy with changing C")
plt.xlabel("C")
plt.ylabel("accuracy")
plt.legend()
plt.show()
# -
# **リスト 17.7:解答例**
# + [markdown] courseId=5020 exerciseId="r1ql938j8lM" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.4.2 パラメータ kernel
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="ByslqnUiUgM" id="quiz_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.4.3 パラメータ decision_function_shape
# -
# この項にサンプルはありません
# + [markdown] courseId=5020 exerciseId="rJnxqnIjLlG" id="code_session_name" important=false isDL=false timeoutSecs=5
# ### ○17.4.4 パラメータ random_state
# +
import numpy as np
from sklearn.svm import SVC
# 乱数生成器を構築します
random_state = np.random.RandomState()
# 乱数生成器を random_state に指定した非線形 SVM モデルを構築します
model = SVC(random_state=random_state)
# -
# **リスト 17.8 生成器を指定する例**
# #### □問題
# + id="index"
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# データを生成します
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# 以下にコードを記述してください
# 乱数生成器を構築してください
# モデルを構築してください
# モデルに学習させてください
# テストデータに対する正解率を出力してください
# -
# **リスト 17.9:問題**
# #### □解答例
# + id="answer"
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
# データを生成します
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
# 以下にコードを記述してください
# 乱数生成器の構築してください
random_state = np.random.RandomState()
# モデルの構築してください
model = SVC(random_state=random_state)
# モデルに学習させてください
model.fit(train_X, train_y)
# テストデータに対する正解率を出力してください
print(model.score(test_X, test_y))
# -
# **リスト 17.10:解答例**
# + [markdown] config=["leap"] id="chapter_exam"
# ## ●添削問題
# -
# #### □問題
# + id="index"
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# データを生成します
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
kernel_list = ['linear','rbf','poly','sigmoid']
# 以下にコードを記述してください
# モデルを構築してください
for i in kernel_list:
model =
# モデルに学習させてください
# テストデータに対する正解率を出力してください
print(i)
print()
print()
# -
# **リスト 17.11:問題**
# #### □解答例
# + id="answer"
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# データを生成します
X, y = make_classification(
n_samples=1250, n_features=4, n_informative=2, n_redundant=2, random_state=42)
train_X, test_X, train_y, test_y = train_test_split(X, y, random_state=42)
kernel_list = ['linear','rbf','poly','sigmoid']
# 以下にコードを記述してください
# モデルを構築してください
for i in kernel_list:
model = SVC(kernel= i ,random_state=42)
# モデルに学習させてください
model.fit(train_X, train_y)
# テストデータに対する正解率を出力してください
print(i)
print(model.score(test_X, test_y))
print()
# -
# **リスト 17.12:解答例**
|
notebooks/ShinsouGakushu_sample/Chapter17_Sample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="scB-0SJdbWoA" outputId="225f4012-cbfc-474f-c90a-a6f6cc995499"
# !pip install tensorflow==1.15.5
# !pip install keras==2.2.4
# + colab={"base_uri": "https://localhost:8080/", "height": 54} id="i2qioM9dbFip" outputId="75705994-27ef-4f96-dd7e-fd2248fbda34"
import keras
keras.__version__
# + [markdown] id="T0cYt_VVbFir"
# # A first look at a neural network
#
# This notebook contains the code samples found in Chapter 2, Section 1 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
#
# ----
#
# We will now take a look at a first concrete example of a neural network, which makes use of the Python library Keras to learn to classify
# hand-written digits. Unless you already have experience with Keras or similar libraries, you will not understand everything about this
# first example right away. You probably haven't even installed Keras yet. Don't worry, that is perfectly fine. In the next chapter, we will
# review each element in our example and explain them in detail. So don't worry if some steps seem arbitrary or look like magic to you!
# We've got to start somewhere.
#
# The problem we are trying to solve here is to classify grayscale images of handwritten digits (28 pixels by 28 pixels), into their 10
# categories (0 to 9). The dataset we will use is the MNIST dataset, a classic dataset in the machine learning community, which has been
# around for almost as long as the field itself and has been very intensively studied. It's a set of 60,000 training images, plus 10,000 test
# images, assembled by the National Institute of Standards and Technology (the NIST in MNIST) in the 1980s. You can think of "solving" MNIST
# as the "Hello World" of deep learning -- it's what you do to verify that your algorithms are working as expected. As you become a machine
# learning practitioner, you will see MNIST come up over and over again, in scientific papers, blog posts, and so on.
# + [markdown] id="ExEO4q6ubFis"
# The MNIST dataset comes pre-loaded in Keras, in the form of a set of four Numpy arrays:
# + id="mBELJp-jbFit"
# KerasでのMNISTデータセットの読み込み
from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# + [markdown] id="bL1pPMXVbFit"
# `train_images` and `train_labels` form the "training set", the data that the model will learn from. The model will then be tested on the
# "test set", `test_images` and `test_labels`. Our images are encoded as Numpy arrays, and the labels are simply an array of digits, ranging
# from 0 to 9. There is a one-to-one correspondence between the images and the labels.
#
# Let's have a look at the training data:
# + colab={"base_uri": "https://localhost:8080/"} id="dU7vfVwCbFit" outputId="0a0457bb-1025-4dd4-936d-39e901aa258b"
# grayscale images of handwritten digits (28 pixels by 28 pixels), into their 10 categories (0 to 9).
print ('軸の数: ', train_images.ndim)
print ('形状: ', train_images.shape)
print ('データ型: ', train_images.dtype)
# + colab={"base_uri": "https://localhost:8080/"} id="SiwwrIFlbFiu" outputId="d8656b96-51c8-4427-8213-1becb705b0c0"
len(train_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="O0jdUQigbFiu" outputId="8dc846d4-8ab1-430a-e4d7-cfbd1e7c0e1d"
train_labels
# + [markdown] id="t1_faCPHbFiv"
# Let's have a look at the test data:
# + colab={"base_uri": "https://localhost:8080/"} id="Rce7szWvbFiv" outputId="7b13002a-157e-447e-8db6-99e718841f02"
test_images.shape
# + colab={"base_uri": "https://localhost:8080/"} id="O1t0s9wYbFiv" outputId="79635959-0688-4627-966d-709592fed547"
len(test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="PF3cyiwNbFiw" outputId="d4d721f7-13c2-4b65-f3bf-d74b0a6323d5"
test_labels
# + [markdown] id="Lyer7wx9bFiw"
# Our workflow will be as follow: first we will present our neural network with the training data, `train_images` and `train_labels`. The
# network will then learn to associate images and labels. Finally, we will ask the network to produce predictions for `test_images`, and we
# will verify if these predictions match the labels from `test_labels`.
#
# Let's build our network -- again, remember that you aren't supposed to understand everything about this example just yet.
# + colab={"base_uri": "https://localhost:8080/"} id="wb__qdYnbFiw" outputId="601afce6-341f-46a2-a2cb-1a8b7dc29760"
# ニューラルネットワークのアーキテクチャ
from keras import models
from keras import layers
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,)))
network.add(layers.Dense(10, activation='softmax'))
# + [markdown] id="zQjItklHbFix"
#
# The core building block of neural networks is the "layer", a data-processing module which you can conceive as a "filter" for data. Some
# data comes in, and comes out in a more useful form. Precisely, layers extract _representations_ out of the data fed into them -- hopefully
# representations that are more meaningful for the problem at hand. Most of deep learning really consists of chaining together simple layers
# which will implement a form of progressive "data distillation". A deep learning model is like a sieve for data processing, made of a
# succession of increasingly refined data filters -- the "layers".
#
# Here our network consists of a sequence of two `Dense` layers, which are densely-connected (also called "fully-connected") neural layers.
# The second (and last) layer is a 10-way "softmax" layer, which means it will return an array of 10 probability scores (summing to 1). Each
# score will be the probability that the current digit image belongs to one of our 10 digit classes.
#
# To make our network ready for training, we need to pick three more things, as part of "compilation" step:
#
# * A loss function: this is how the network will be able to measure how good a job it is doing on its training data, and thus how it will be
# able to steer itself in the right direction.
# * An optimizer: this is the mechanism through which the network will update itself based on the data it sees and its loss function.
# * Metrics to monitor during training and testing. Here we will only care about accuracy (the fraction of the images that were correctly
# classified).
#
# The exact purpose of the loss function and the optimizer will be made clear throughout the next two chapters.
# + colab={"base_uri": "https://localhost:8080/", "height": 159} id="qj0BILAQbFix" outputId="a7f8f8de-2e27-4ee0-80d2-b42a9b5951a7"
# コンパイルステップ:ニューラルネットワークを訓練する準備。次の3つの要素を選択する
# ・損失関数 (loss)
# ・オプティマイザ (optimizer)
# ・訓練とテストを監視するための指標 (metrics)
network.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
"""
categorical_crossentropyは、重みテンソルの学習に関するフィードバックとして使用される損失関数です。
訓練の際には、この関数の最小化を試みることも分かりました。
この損失関数の最小化がミニバッチ確率的勾配降下法を通じて実現されることも分かりました。
勾配降下法の適用を制御する実際のルールは、1つ目の引数として渡されているrmspropオプティマイザによって定義されます。
"""
# + [markdown] id="_EFqDJqobFiy"
#
# Before training, we will preprocess our data by reshaping it into the shape that the network expects, and scaling it so that all values are in
# the `[0, 1]` interval. Previously, our training images for instance were stored in an array of shape `(60000, 28, 28)` of type `uint8` with
# values in the `[0, 255]` interval. We transform it into a `float32` array of shape `(60000, 28 * 28)` with values between 0 and 1.
# + id="OuUm0pEWbFiy"
# 画像データの準備(データの前処理)
train_images = train_images.reshape((60000, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28))
test_images = test_images.astype('float32') / 255
# + [markdown] id="hAG0rusAbFiy"
# We also need to categorically encode the labels, a step which we explain in chapter 3:
# + id="oujhOpBDbFiz"
# ラベルをカテゴリ値でエンコード
from keras.utils import to_categorical
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="Ewz7VcehbFiz" outputId="8aea3f1a-977c-4d48-8a13-b5eb42bae30f"
print ('軸の数: ', train_images.ndim)
print ('形状: ', train_images.shape)
print ('データ型: ', train_images.dtype)
# + colab={"base_uri": "https://localhost:8080/"} id="vHPxPGRVbFiz" outputId="061c74dd-30db-4e88-e983-25426a2c7790"
print ('軸の数: ', train_labels.ndim)
print ('形状: ', train_labels.shape)
print ('データ型: ', train_labels.dtype)
# + [markdown] id="pbyfkl3bbFi0"
# We are now ready to train our network, which in Keras is done via a call to the `fit` method of the network:
# we "fit" the model to its training data.
# + colab={"base_uri": "https://localhost:8080/", "height": 550} id="tu5vN-H8bFi0" outputId="1d0eeda5-b3d2-4e4c-f330-fd06ad0dd4c3"
# ネットワークの訓練を行う準備完了。
# fitメソッドを呼び出し、モデルを訓練データに適合(fit)させる
network.fit(train_images, train_labels, epochs=5, batch_size=128)
"""
このネットワークは、128サンプルのミニバッチを使って、訓練データの学習を5回繰り返します。
訓練データ全体にわたるイテレーションはそれぞれエポックと呼ばれます。
"""
# + [markdown] id="qdQTTH7nbFi0"
# Two quantities are being displayed during training: the "loss" of the network over the training data, and the accuracy of the network over
# the training data.
#
# We quickly reach an accuracy of 0.989 (i.e. 98.9%) on the training data. Now let's check that our model performs well on the test set too:
# + colab={"base_uri": "https://localhost:8080/"} id="1hrkQ7k5bFi1" outputId="4d64c1e3-bf5c-47b0-a9d0-a864b238e24b"
# テストデータに対する評価
test_loss, test_acc = network.evaluate(test_images, test_labels)
# + colab={"base_uri": "https://localhost:8080/"} id="mzq9uC0RbFi1" outputId="4ff4b6c2-08e2-4777-9208-71046b4c2154"
print('test_acc:', test_acc)
# + [markdown] id="Ld-q4hr-bFi1"
#
# Our test set accuracy turns out to be 97.8% -- that's quite a bit lower than the training set accuracy.
# This gap between training accuracy and test accuracy is an example of "overfitting",
# the fact that machine learning models tend to perform worse on new data than on their training data.
# Overfitting will be a central topic in chapter 3.
#
# This concludes our very first example -- you just saw how we could build and train a neural network to classify handwritten digits, in
# less than 20 lines of Python code. In the next chapter, we will go in detail over every moving piece we just previewed, and clarify what is really
# going on behind the scenes. You will learn about "tensors", the data-storing objects going into the network, about tensor operations, which
# layers are made of, and about gradient descent, which allows our network to learn from its training examples.
# + id="R_ZsBS6QbFi2"
|
2_1_a_first_look_at_a_neural_network_colab.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# Author: Anonymous
# project: pulmonary disease classification
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
import matplotlib.pyplot as plt
import glob
from tqdm import tqdm
from skimage import io, transform
from keras.utils import to_categorical
import time
from sklearn.model_selection import train_test_split
seed = 333
np.random.seed(seed)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input/pulmonary-chest-xray-abnormalities"))
# Any results you write to the current directory are saved as output.
# -
kbase = os.listdir("../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles/CXR_png")
kbase1 = os.listdir("../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles/ClinicalReadings")
# Print the diagnosis line (the last line) of every clinical-reading text file.
for filet in kbase1:
    # with-statement guarantees each file handle is closed
    # (the original leaked one open handle per file)
    with open("../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles/ClinicalReadings/"+filet, 'r') as f:
        file_contents = f.read()
    file_con_list = file_contents.split("\n")
    file_con_list = file_con_list[-1]
    print(file_con_list)
imle = cv2.imread("../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles/CXR_png/CHNCXR_0507_1.png")
plt.imshow(imle)
plt.show()
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
#path to images
img_dir = "../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles"
data_dir2 = "../input/pulmonary-chest-xray-abnormalities/ChinaSet_AllFiles/ChinaSet_AllFiles/ClinicalReadings"
#list all available images type
print(os.listdir(img_dir))
print(os.listdir(data_dir2))
# +
def load_data(img_dir, data_dir2):
    """Load the China-set chest X-ray images together with their labels.

    Parameters
    ----------
    img_dir : str
        Root directory containing the "CXR_png" image folder.
    data_dir2 : str
        Directory containing the per-image clinical-reading .txt files.

    Returns
    -------
    (X, y, labels) : tuple of numpy arrays
        X      -- images resized to 100x100 (BGR, as read by cv2),
        y      -- integer class index per image,
        labels -- the distinct diagnosis strings, indexed by y.
    """
    X = []
    y = []
    labels = []
    for folder_name in os.listdir(img_dir):
        # Fixed: the original used `folder_name in ("CXR_png")`, which is a
        # SUBSTRING test against the string "CXR_png" (e.g. "CXR" would match),
        # not a tuple membership test.
        if folder_name == "CXR_png":
            for file_name in tqdm(os.listdir(f'{img_dir}/{folder_name}')):
                if file_name.endswith('png'):
                    print(file_name)
                    # Read the matching clinical report; its last line holds the
                    # diagnosis. Use the data_dir2 parameter (the original
                    # hard-coded the same path) and a with-block so the handle
                    # is closed (the original leaked one handle per file).
                    with open(f'{data_dir2}/{file_name[:-4]}.txt', 'r') as f:
                        file_contents = f.read()
                    diagnosis = file_contents.split("\n")[-1]
                    print(diagnosis)
                    if diagnosis not in labels:
                        labels.append(diagnosis)
                    im = cv2.imread(f'{img_dir}/{folder_name}/{file_name}')
                    if im is not None:
                        im = cv2.resize(im, (100, 100))
                        X.append(im)
                        y.append(labels.index(diagnosis))
    X = np.asarray(X)
    y = np.asarray(y)
    labels = np.asarray(labels)
    return X, y, labels
def load_data1(img_dir2):
    """Load PNG images from every <split>/humans sub-folder of img_dir2.

    All images receive the fixed class index 2.

    Returns
    -------
    (X, y, labels) : tuple of numpy arrays
        X      -- images resized to 100x100 (float, skimage convention),
        y      -- constant class index 2 per image,
        labels -- the split folder names that contained a "humans" folder.
    """
    X = []
    y = []
    labels = []
    idx = 2  # fixed class index assigned to every "humans" image
    for folder_name in os.listdir(img_dir2):
        # each top-level folder is a split (e.g. train / validation)
        for folder_name1 in os.listdir(os.path.join(img_dir2, folder_name)):
            # Fixed: the original `folder_name1 in ("humans")` was a substring
            # test against the string "humans", not a tuple membership test.
            if folder_name1 == "humans":
                # NOTE(review): the guard tests folder_name1 but appends
                # folder_name (the split name) — kept as in the original;
                # confirm the intent before changing.
                if folder_name1 not in labels:
                    labels.append(folder_name)
                for file_name in tqdm(os.listdir(f'{img_dir2}/{folder_name}/{folder_name1}')):
                    if file_name.endswith('png'):
                        im = io.imread(f'{img_dir2}/{folder_name}/{folder_name1}/{file_name}')
                        if im is not None:
                            im = transform.resize(im, (100, 100))
                            X.append(im)
                            y.append(idx)
    X = np.asarray(X)
    y = np.asarray(y)
    labels = np.asarray(labels)
    return X, y, labels
# -
X,y,labels = load_data(img_dir,data_dir2)
#X1,y1,labels1 = load_data1(img_dir2)
#X,y,labels = X2,y2,labels1+labels2
#fix y
y = y.reshape(-1,1)
'''for i,_ in enumerate(y):
if y[i] == 1:
y[i]=0
elif y[i] == 2:
pass
else:
y[i]=1
'''
print(y[0])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
train_img = X_train
train_labels = y_train
test_img = X_test
test_labels = y_test
train_img.shape, train_labels.shape, test_img.shape, test_labels.shape
#show random samples
rand_14 = np.random.randint(0, train_img.shape[0],14)
sample_img = train_img[rand_14]
sample_labels = train_labels[rand_14]
num_rows, num_cols = 2, 7
f, ax = plt.subplots(num_rows, num_cols, figsize=(12,5),gridspec_kw={'wspace':0.03, 'hspace':0.01})
for r in range(num_rows):
for c in range(num_cols):
image_index = r * 7 + c
ax[r,c].axis("off")
ax[r,c].imshow(sample_img[image_index])
ax[r,c].set_title('%s' % sample_labels[image_index])
plt.show()
plt.close()
print(labels)
#one-hot-encode the labels
num_classes = len(labels)
train_labels_cat = to_categorical(train_labels,num_classes)
test_labels_cat = to_categorical(test_labels,num_classes)
train_labels_cat.shape, test_labels_cat.shape
# re-shape the images data
train_data = train_img
test_data = test_img
train_data.shape, test_data.shape
# +
# shuffle the training dataset & set aside val_perc % of rows as validation data
for _ in range(5):
indexes = np.random.permutation(len(train_data))
# randomly sorted!
train_data = train_data[indexes]
train_labels_cat = train_labels_cat[indexes]
# now we will set-aside val_perc% of the train_data/labels as cross-validation sets
val_perc = 0.10
val_count = int(val_perc * len(train_data))
print(val_count)
# first pick validation set
val_data = train_data[:val_count,:]
val_labels_cat = train_labels_cat[:val_count,:]
# leave rest in training set
train_data2 = train_data[val_count:,:]
train_labels_cat2 = train_labels_cat[val_count:,:]
train_data2.shape, train_labels_cat2.shape, val_data.shape, val_labels_cat.shape, test_data.shape, test_labels_cat.shape
# -
# a utility function that plots the losses and accuracies for training & validation sets across our epochs
def show_plots(history):
    """Plot training/validation loss and accuracy curves across the epochs.

    `history` is the dict from Keras' History object; it must contain the
    keys 'loss', 'val_loss', 'acc' and 'val_acc'.
    """
    epoch_axis = range(1, len(history['acc']) + 1)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 4))
    loss_ax, acc_ax = axes[0], axes[1]

    # Left panel: losses (markers only, no connecting line for training).
    loss_ax.plot(epoch_axis, history['loss'], color='navy', marker='o',
                 linestyle=' ', label='Training Loss')
    loss_ax.plot(epoch_axis, history['val_loss'], color='firebrick',
                 marker='*', label='Validation Loss')
    loss_ax.set_title('Training & Validation Loss')
    loss_ax.set_xlabel('Epochs')
    loss_ax.set_ylabel('Loss')
    loss_ax.legend(loc='best')
    loss_ax.grid(True)

    # Right panel: accuracies.
    acc_ax.plot(epoch_axis, history['acc'], color='navy', marker='o',
                ls=' ', label='Training Accuracy')
    acc_ax.plot(epoch_axis, history['val_acc'], color='firebrick',
                marker='*', label='Validation Accuracy')
    acc_ax.set_title('Training & Validation Accuracy')
    acc_ax.set_xlabel('Epochs')
    acc_ax.set_ylabel('Accuracy')
    acc_ax.legend(loc='best')
    acc_ax.grid(True)

    plt.show()
    plt.close()
def print_time_taken(start_time, end_time):
    """Print the elapsed wall-clock time between two timestamps (in seconds)."""
    SECS_PER_MIN = 60
    SECS_PER_HR = 60 * SECS_PER_MIN
    # Break the elapsed seconds down into hours / minutes / seconds.
    remaining = end_time - start_time
    hrs, remaining = divmod(remaining, SECS_PER_HR)
    mins, secs = divmod(remaining, SECS_PER_MIN)
    if hrs > 0:
        print('Time taken: %d hrs %d mins %d secs' % (hrs, mins, secs))
    elif mins > 0:
        print('Time taken: %d mins %d secs' % (mins, secs))
    elif secs > 1:
        print('Time taken: %d secs' % (secs))
    else:
        print('Time taken - less than 1 sec')
def get_commonname(idx):
    """Map a class index back to its label string via the module-level `labels`."""
    return labels[idx]
# +
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization,Activation,MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.datasets import mnist
from keras.models import load_model
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
from PIL import Image
# -
#data augmentation
datagen = ImageDataGenerator(
rotation_range=30,
zoom_range = 0.25,
width_shift_range=0.1,
height_shift_range=0.1)
# datagen = ImageDataGenerator(
# rotation_range=8,
# shear_range=0.3,
# zoom_range = 0.08,
# width_shift_range=0.08,
# height_shift_range=0.08)
# +
#create multiple cnn model for ensembling
#model 1
model = Sequential()
model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = (100, 100, 3)))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size = 3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size = 5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, kernel_size = 4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(num_classes, activation='softmax'))
# use adam optimizer and categorical cross entropy cost
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# +
# after each epoch decrease learning rate by 0.95
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)
# train
epochs = 50
j=0
start_time = time.time()
history = model.fit_generator(datagen.flow(train_data2, train_labels_cat2, batch_size=64),epochs = epochs, steps_per_epoch = train_data2.shape[0]/64,validation_data = (val_data, val_labels_cat), callbacks=[annealer], verbose=1)
end_time = time.time()
print_time_taken(start_time, end_time)
print("CNN {0:d}: Epochs={1:d}, Train accuracy={2:.5f}, Validation accuracy={3:.5f}".format(j+1,epochs,history.history['acc'][epochs-1],history.history['val_acc'][epochs-1]))
# -
show_plots(history.history)
#
# + active=""
#
# -
test_loss, test_accuracy = model.evaluate(test_data, test_labels_cat, batch_size=64)
print('Test loss: %.4f accuracy: %.4f' % (test_loss, test_accuracy))
im_list = [100,13]
for i in im_list:
# i = 1000 #index from test data to be used, change this other value to see a different image
img = test_data[i]
plt.imshow(img)
plt.show()
pred = model.predict_classes(img.reshape(-1,100,100,3))
actual = test_labels[i]
print(f'actual: {get_commonname(actual)}')
print(f'predicted: {get_commonname(pred)}')
# +
from keras.models import model_from_json
from keras.models import load_model
# serialize model to JSON
# the keras model which is trained is defined as 'model' in this example
model_json = model.to_json()
with open("model_4.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model_4.h5")
|
using cnn detect pulmonary disease in xray_varia ptb.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="RK255E7YoEIt"
# # DeepLabCut Toolbox
# https://github.com/AlexEMG/DeepLabCut
#
# This notebook demonstrates the necessary steps to use DeepLabCut
#
# Note: You can change the .yaml file to customize your own settings and features.
#
#
# + [markdown] colab_type="text" id="9Uoz9mdPoEIy"
# ## Create a new project
#
# It is always a good idea to keep the projects separate. This function creates a new project with subdirectories and a basic configuration file in the user defined directory otherwise the project is created in the current working directory.
#
# You can always add new videos to the project at any stage of the project.
#
# ### NOTE: USE PYTHON 3.6 AND REMEMBER TO CONDA INSTALL JUPYTER TO RUN INSIDE PARTICULAR ENVIRONMENT
# + colab={} colab_type="code" id="jqLZhp7EoEI0"
import deeplabcut
# -
# Change to your own working directory
import os
os.chdir('/home/donghan/DeepLabCut/data')
# Rotate all the behavior video in order to conveniently annotate
# +
import numpy as np
import cv2
import glob
os.chdir("/home/donghan/DeepLabCut/data")
#Working directory that stores video data
def rotate(image, angle, center=None, scale=1):
    """Rotate `image` by `angle` degrees around `center`.

    Parameters
    ----------
    image : ndarray
        Image as returned by cv2 (rows x cols, with or without channels;
        the original required exactly 3 dimensions).
    angle : float
        Rotation angle in degrees (positive = counter-clockwise).
    center : (x, y) tuple or None
        Pivot point; None (default) rotates about the image centre.
    scale : float
        Isotropic scale factor; 1 keeps the original size.

    Returns
    -------
    ndarray of the same width/height containing the rotated image.
    """
    rows, cols = image.shape[:2]
    # Fixed: use `is None` instead of `== None` — identity is the idiomatic
    # (and safe) test; `==` would attempt an element-wise comparison if a
    # numpy array were ever passed as center.
    if center is None:
        center = (cols / 2, rows / 2)
    # 2x3 affine matrix rotating `angle` degrees about `center`
    M = cv2.getRotationMatrix2D(center, angle, scale)
    # Apply the rotation; output canvas keeps the source dimensions
    dst = cv2.warpAffine(image, M, (cols, rows))
    return dst
def videorotate(filename, output_name, display_video = False):
    """Read `filename`, rotate every frame by -4.5 degrees, and write the
    result to ./rotated/<output_name> as an MJPG video.

    Parameters
    ----------
    filename : str
        Path of the source video.
    output_name : str
        File name for the rotated output (placed under ./rotated/).
    display_video : bool
        When True, also preview each rotated frame in a window
        (press Esc to stop early).
    """
    # capture the video so it can be read frame by frame
    cap = cv2.VideoCapture(filename)
    # extract the source video's frame size, codec and frame rate
    sz = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
          int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    # NOTE(review): fourcc is read but never used — the writer below always
    # encodes MJPG regardless of the source codec.
    fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # Make a directory to store the rotated videos; an existing directory
    # raises OSError, which is deliberately ignored.
    path = "./rotated"
    try:
        os.mkdir(path)
    except OSError:
        pass
    else:
        print ("Successfully created the directory %s " % path)
    # Name the output after the requested file name, inside ./rotated
    file = "./rotated/" + output_name
    out = cv2.VideoWriter(file, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, sz)
    # Read frames one by one, rotate each by -4.5 degrees and append it
    # to the output video.
    while(cap.isOpened()):
        ret,img = cap.read()
        try:
            img2 = rotate(img, -4.5)
            out.write(img2)
            if display_video == True:
                cv2.imshow('rotated video',img2)
                k=cv2.waitKey(30) & 0xff
                # Esc (key code 27) stops capturing early
                if k==27:
                    break
        except:
            # NOTE(review): this bare except doubles as the end-of-stream
            # exit — when cap.read() fails, img is presumably None and
            # rotate() raises, landing here. Checking `ret` explicitly
            # would be cleaner; confirm before changing.
            print (filename, 'successfully rotated!!!' )
            break
    # release the reader/writer handles and close any preview windows
    cap.release()
    out.release()
    cv2.destroyAllWindows()
# #Generate all rotating videos
# filenames = glob.glob('*.mp4') #Return the file name with .mp4 extention
# for i in filenames:
#     videorotate(i,os.path.splitext(i)[0] + " rotated.mp4")
# -
# ### Extract all mp4 files from rotated videos folder
#Extract mp4 files from video folder
from os import listdir
from os.path import isfile, join
# NOTE(review): os.chdir() returns None, so this first assignment to `cwd`
# is a no-op value; it is immediately overwritten by os.getcwd() below.
cwd = os.chdir("./rotated")
#we are using rotated videos
cwd = os.getcwd()
# Collect the names of all .mp4 files in the rotated-videos directory.
mp4files = [f for f in listdir(cwd) if isfile(join(cwd, f)) and os.path.splitext(f)[1] == ".mp4"]
#Get all mp4 files
# ### Starting a new project
#
# Note that if there is an existing folder, depends on the situation, you can either delete it or find an alternative new name.
# NOTE(review): an earlier cell already chdir'd into ./rotated, so this call
# descends one level further (./rotated/rotated) if run in sequence -- confirm
# the intended working directory before running.
os.chdir("./rotated")
# + colab={} colab_type="code" id="c9DjG55FoEI7"
task='Reaching' # Enter the name of your experiment Task
experimenter='Donghan' # Enter the name of the experimenter
video="1035 SI_A, Aug 15, 13 17 7 rotated.mp4" # Enter the paths of your videos you want to grab frames from.
path_config_file=deeplabcut.create_new_project(task,experimenter,video, working_directory='/home/donghan/DeepLabCut/data/rotated',copy_videos=True)
#change the working directory to where you want the folders created.
# The function returns the path, where your project is.
# You could also enter this manually (e.g. if the project is already created and you want to pick up, where you stopped...)
#path_config_file = '/home/Mackenzie/Reaching/config.yaml' # Enter the path of the config file that was just created from the above step (check the folder)
# -
# Hard-coded config path overrides the value returned above, so this cell can
# be run standalone after the project has been created once.
path_config_file = "/home/donghan/DeepLabCut/data/rotated/Reaching-Donghan-2019-06-21/config.yaml"
# + [markdown] colab_type="text" id="0yXW0bx1oEJA"
# ## Extract frames from videos
# A key point for a successful feature detector is to select diverse frames, which are typical for the behavior you study that should be labeled.
#
# This function selects N frames either uniformly sampled from a particular video (or folder) (algo=='uniform'). Note: this might not yield diverse frames, if the behavior is sparsely distributed (consider using kmeans), and/or select frames manually etc.
#
# Also make sure to get select data from different (behavioral) sessions and different animals if those vary substantially (to train an invariant feature detector).
#
# Individual images should not be too big (i.e. < 850 x 850 pixel). Although this can be taken care of later as well, it is advisable to crop the frames, to remove unnecessary parts of the frame as much as possible.
#
# Always check the output of cropping. If you are happy with the results proceed to labeling.
# + colab={} colab_type="code" id="t1ulumCuoEJC"
# %matplotlib inline
deeplabcut.extract_frames(path_config_file,algo='kmeans',crop=False, userfeedback=False) #there are other ways to grab frames, such as by clustering 'kmeans'; please see the paper.
#You can change the cropping to false, then delete the checkcropping part!
#userfeedback: ask if users would like to continue or stop
# + [markdown] colab_type="text" id="Gjn6ZDonoEJH"
# ## Label the extracted frames
# ### Remember to change the config.yaml, head and tail, size = 5
# Only videos in the config file can be used to extract the frames. Extracted labels for each video are stored in the project directory under the subdirectory **'labeled-data'**. Each subdirectory is named after the name of the video. The toolbox has a labeling toolbox which could be used for labeling.
# + colab={} colab_type="code" id="iyROSOiEoEJI"
# %matplotlib inline
# %gui wx
# Opens the wx-based labeling GUI for the extracted frames.
deeplabcut.label_frames(path_config_file)
# + [markdown] colab_type="text" id="vim95ZvkPSeN"
# **Check the labels**
#
# Checking if the labels were created and stored correctly is beneficial for training, since labeling is one of the most critical parts for creating the training dataset. The DeepLabCut toolbox provides a function `check_labels` to do so. It is used as follows:
# + colab={} colab_type="code" id="NwvgPJouPP2O"
deeplabcut.check_labels(path_config_file) #this creates a subdirectory with the frames + your labels
# + [markdown] colab_type="text" id="of87fOjgPqzH"
# If the labels need adjusting, you can use the refinement GUI to move them around! Check that out below.
# + [markdown] colab_type="text" id="xNi9s1dboEJN"
# ## Create a training dataset
# This function generates the training data information for DeepCut (which requires a mat file) based on the pandas dataframes that hold label information. The user can set the fraction of the training set size (from all labeled image in the hd5 file) in the config.yaml file. While creating the dataset, the user can create multiple shuffles.
#
# After running this script the training dataset is created and saved in the project directory under the subdirectory **'training-datasets'**
#
# This function also creates new subdirectories under **dlc-models** and appends the project config.yaml file with the correct path to the training and testing pose configuration file. These files hold the parameters for training the network. Such an example file is provided with the toolbox and named as **pose_cfg.yaml**.
#
# Now it is the time to start training the network!
# + colab={} colab_type="code" id="eMeUwgxPoEJP"
deeplabcut.create_training_dataset(path_config_file)
# + [markdown] colab_type="text" id="c4FczXGDoEJU"
# ## Start training - If you want to use a CPU, continue.
# ### If you want to use your GPU, you need to exit here and either work from the Docker container, your own TensorFlow installation in an Anaconda env
#
# This function trains the network for a specific shuffle of the training dataset.
# + colab={} colab_type="code" id="_pOvDq_2oEJW"
deeplabcut.train_network(path_config_file, shuffle=1, saveiters=200, displayiters=10,autotune = True)
#Other parameters include trainingsetindex=0,gputouse=None,max_snapshots_to_keep=5,autotune=False,maxiters=None
#Detailed function explanation can be found here https://github.com/AlexEMG/DeepLabCut/blob/efa95129061b1ba1535f7361fe76e9267568a156/deeplabcut/pose_estimation_tensorflow/training.py
# + [markdown] colab_type="text" id="xZygsb2DoEJc"
# ## Start evaluating
# This function evaluates a trained model for a specific shuffle/shuffles at a particular state or all the states on the data set (images)
# and stores the results as .csv file in a subdirectory under **evaluation-results**
# + colab={} colab_type="code" id="nv4zlbrnoEJg"
deeplabcut.evaluate_network(path_config_file)
# + [markdown] colab_type="text" id="OVFLSKKfoEJk"
# ## Start Analyzing videos
# This function analyzes the new video. The user can choose the best model from the evaluation results and specify the correct snapshot index for the variable **snapshotindex** in the **config.yaml** file. Otherwise, by default the most recent snapshot is used to analyse the video.
#
# The results are stored in hd5 file in the same directory where the video resides.
# + colab={} colab_type="code" id="Y_LZiS_0oEJl"
videofile_path = ["1035 SI_A, Aug 15, 13 17 7 rotated.mp4"] #Enter the list of videos to analyze.
deeplabcut.analyze_videos(path_config_file,videofile_path)
# + [markdown] colab_type="text" id="iGu_PdTWoEJr"
# ## Extract outlier frames [optional step]
# This is an optional step and is used only when the evaluation results are poor i.e. the labels are incorrectly predicted. In such a case, the user can use the following function to extract frames where the labels are incorrectly predicted. Make sure to provide the correct value of the "iterations" as it will be used to create the unique directory where the extracted frames will be saved.
# + colab={} colab_type="code" id="gkbaBOJVoEJs"
deeplabcut.extract_outlier_frames(path_config_file,['1035 SI_A, Aug 15, 13 17 7 rotated.mp4'])
# + [markdown] colab_type="text" id="8ib0uvhaoEJx"
# ## Refine Labels [optional step]
# Following the extraction of outlier frames, the user can use the following function to move the predicted labels to the correct location. Thus augmenting the training dataset.
# + colab={} colab_type="code" id="n_FpEXtyoEJy"
# %gui wx
# Opens the refinement GUI to correct the outlier-frame predictions.
deeplabcut.refine_labels(path_config_file)
# + colab={} colab_type="code" id="CHzstWr8oEJ2"
#Once all folders are relabeled, check them and advance. See how to check labels, above!
deeplabcut.merge_datasets(path_config_file)
# + [markdown] colab_type="text" id="QCHj7qyboEJ6"
# ## Create a new iteration of training dataset [optional step]
# Following the refine labels, append these frames to the original dataset to create a new iteration of training dataset.
# + colab={} colab_type="code" id="ytQoxIldoEJ7"
deeplabcut.create_training_dataset(path_config_file)
# + [markdown] colab_type="text" id="pCrUvQIvoEKD"
# ## Create labeled video
# This function is for visualization purposes and can be used to create a video in .mp4 format with labels predicted by the network. This video is saved in the same directory where the original video resides.
# + colab={} colab_type="code" id="6aDF7Q7KoEKE"
# videofile_path = ["1035 SI_A, Aug 15, 13 17 7 rotated.mp4"] #Enter the list of videos to analyze.
deeplabcut.create_labeled_video(path_config_file,videofile_path, draw_skeleton = True)
# + [markdown] colab_type="text" id="8GTiuJESoEKH"
# ## Plot the trajectories of the analyzed videos
# This function plots the trajectories of all the body parts across the entire video. Each body part is identified by a unique color.
# + colab={} colab_type="code" id="gX21zZbXoEKJ"
# %matplotlib notebook #for making interactive plots.
deeplabcut.plot_trajectories(path_config_file,videofile_path)
|
DeepLabCut/Deeplabcut Tutorial/Deeplabcut_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Generation
# In this notebook we will continue our exploration of image gradients using the deep model that was pretrained on TinyImageNet. We will explore various ways of using these image gradients to generate images. We will implement class visualizations, feature inversion, and DeepDream.
# +
# As usual, a bit of setup
import time, os, json
import numpy as np
# from scipy.misc import imread, imresize
from imageio import imread
from skimage.transform import resize
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image, preprocess_image
# %matplotlib inline
# Notebook-wide matplotlib defaults for the visualizations below.
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# -
# # TinyImageNet and pretrained model
# As in the previous notebook, load the TinyImageNet dataset and the pretrained model.
# Load TinyImageNet-100-A (mean-subtracted) and the pretrained CNN; both are
# used as module-level globals by the functions below.
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True)
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
# # Class visualization
# By starting with a random noise image and performing gradient ascent on a target class, we can generate an image that the network will recognize as the target class. This idea was first presented in [1]; [2] extended this idea by suggesting several regularization techniques that can improve the quality of the generated image.
#
# Concretely, let $I$ be an image and let $y$ be a target class. Let $s_y(I)$ be the score that a convolutional network assigns to the image $I$ for class $y$; note that these are raw unnormalized scores, not class probabilities. We wish to generate an image $I^*$ that achieves a high score for the class $y$ by solving the problem
#
# $$
# I^* = \arg\max_I s_y(I) + R(I)
# $$
#
# where $R$ is a (possibly implicit) regularizer. We can solve this optimization problem using gradient descent, computing gradients with respect to the generated image. We will use (explicit) L2 regularization of the form
#
# $$
# R(I) + \lambda \|I\|_2^2
# $$
#
# and implicit regularization as suggested by [2] by periodically blurring the generated image. We can solve this problem using gradient ascent on the generated image.
#
# In the cell below, complete the implementation of the `create_class_visualization` function.
#
# [1] <NAME>, <NAME>, and <NAME>. "Deep Inside Convolutional Networks: Visualising
# Image Classification Models and Saliency Maps", ICLR Workshop 2014.
#
# [2] Yosinski et al, "Understanding Neural Networks Through Deep Visualization", ICML 2015 Deep Learning Workshop
def create_class_visualization(target_y, model, **kwargs):
    """
    Perform optimization over the image to generate class visualizations.
    Inputs:
    - target_y: Integer in the range [0, 100) giving the target class
    - model: A PretrainedCNN that will be used for generation
    Keyword arguments:
    - learning_rate: Floating point number giving the learning rate
    - blur_every: An integer; how often to blur the image as a regularizer
    - l2_reg: Floating point number giving L2 regularization strength on the image;
      this is lambda in the equation above.
    - max_jitter: How much random jitter to add to the image as regularization
    - num_iterations: How many iterations to run for
    - show_every: How often to show the image
    Returns:
    - X: Generated image of shape (1, 3, 64, 64).
    """
    learning_rate = kwargs.pop('learning_rate', 10000)
    blur_every = kwargs.pop('blur_every', 1)
    l2_reg = kwargs.pop('l2_reg', 1e-6)
    max_jitter = kwargs.pop('max_jitter', 4)
    num_iterations = kwargs.pop('num_iterations', 100)
    show_every = kwargs.pop('show_every', 25)
    # Start from Gaussian noise in the network's input shape.
    X = np.random.randn(1, 3, 64, 64)
    for t in range(num_iterations):
        # As a regularizer, add random jitter to the image
        ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
        X = np.roll(np.roll(X, ox, -1), oy, -2)
        dX = None
        ############################################################################
        # TODO: Compute the image gradient dX of the image with respect to the     #
        # target_y class score. This should be similar to the fooling images. Also #
        # add L2 regularization to dX and update the image X using the image       #
        # gradient and the learning rate.                                          #
        ############################################################################
        scores, cache = model.forward(X, mode='test')
        # Upstream "gradient" that selects only the target class score.
        dscores = np.zeros_like(scores)
        dscores[range(dscores.shape[0]), target_y] = 1
        dX, grads = model.backward(dscores, cache)
        # d/dX of the L2 penalty lambda * ||X||^2.
        dX += 2 * l2_reg * X
        # Gradient *ascent*: increase the target class score.
        X += learning_rate * dX
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        # Undo the jitter
        X = np.roll(np.roll(X, -ox, -1), -oy, -2)
        # As a regularizer, clip the image to the valid (mean-subtracted) range.
        X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image'])
        # As a regularizer, periodically blur the image
        if t % blur_every == 0:
            X = blur_image(X)
        # Periodically show the image
        if t % show_every == 0:
            plt.imshow(deprocess_image(X, data['mean_image']))
            plt.gcf().set_size_inches(3, 3)
            plt.axis('off')
            plt.show()
    return X
# You can use the code above to generate some cool images! An example is shown below. Try to generate a cool-looking image. If you want you can try to implement the other regularization schemes from Yosinski et al, but it isn't required.
# Demo: synthesize an image the network classifies as class 43.
target_y = 43 # Tarantula
print(data['class_names'][target_y])
X = create_class_visualization(target_y, model, show_every=25)
# # Feature Inversion
# In an attempt to understand the types of features that convolutional networks learn to recognize, a recent paper [1] attempts to reconstruct an image from its feature representation. We can easily implement this idea using image gradients from the pretrained network.
#
# Concretely, given a image $I$, let $\phi_\ell(I)$ be the activations at layer $\ell$ of the convolutional network $\phi$. We wish to find an image $I^*$ with a similar feature representation as $I$ at layer $\ell$ of the network $\phi$ by solving the optimization problem
#
# $$
# I^* = \arg\min_{I'} \|\phi_\ell(I) - \phi_\ell(I')\|_2^2 + R(I')
# $$
#
# where $\|\cdot\|_2^2$ is the squared Euclidean norm. As above, $R$ is a (possibly implicit) regularizer. We can solve this optimization problem using gradient descent, computing gradients with respect to the generated image. We will use (explicit) L2 regularization of the form
#
# $$
# R(I') + \lambda \|I'\|_2^2
# $$
#
# together with implicit regularization by periodically blurring the image, as recommended by [2].
#
# Implement this method in the function below.
#
# [1] <NAME>, <NAME>, "Understanding Deep Image Representations by Inverting them", CVPR 2015
#
# [2] Yosinski et al, "Understanding Neural Networks Through Deep Visualization", ICML 2015 Deep Learning Workshop
def invert_features(target_feats, layer, model, **kwargs):
    """
    Perform feature inversion in the style of Mahendran and Vedaldi 2015, using
    L2 regularization and periodic blurring.
    Inputs:
    - target_feats: Image features of the target image, of shape (1, C, H, W);
      we will try to generate an image that matches these features
    - layer: The index of the layer from which the features were extracted
    - model: A PretrainedCNN that was used to extract features
    Keyword arguments:
    - learning_rate: The learning rate to use for gradient descent
    - num_iterations: The number of iterations to use for gradient descent
    - l2_reg: The strength of L2 regularization to use; this is lambda in the
      equation above.
    - blur_every: How often to blur the image as implicit regularization; set
      to 0 to disable blurring.
    - show_every: How often to show the generated image; set to 0 to disable
      showing intermediate results.
    Returns:
    - X: Generated image of shape (1, 3, 64, 64) that matches the target features.
    """
    learning_rate = kwargs.pop('learning_rate', 10000)
    num_iterations = kwargs.pop('num_iterations', 500)
    l2_reg = kwargs.pop('l2_reg', 1e-7)
    blur_every = kwargs.pop('blur_every', 1)
    show_every = kwargs.pop('show_every', 50)
    # Start from Gaussian noise in the network's input shape.
    X = np.random.randn(1, 3, 64, 64)
    for t in range(num_iterations):
        # Reconstruction loss is ||phi(X) - target||^2, so its gradient with
        # respect to the layer activations is 2 * (feats - target_feats).
        feats, cache = model.forward(X, end=layer, mode='test')
        dfeats = 2 * (feats - target_feats)
        dX, grads = model.backward(dfeats, cache)
        # Add the gradient of the L2 penalty lambda * ||X||^2 ...
        dX += 2 * l2_reg * X
        # ... and take a gradient *descent* step (we are minimizing).
        X -= learning_rate * dX
        # As a regularizer, clip the image to the valid (mean-subtracted) range.
        X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image'])
        # As a regularizer, periodically blur the image.
        if (blur_every > 0) and t % blur_every == 0:
            X = blur_image(X)
        if (show_every > 0) and (t % show_every == 0 or t + 1 == num_iterations):
            plt.imshow(deprocess_image(X, data['mean_image']))
            plt.gcf().set_size_inches(3, 3)
            plt.axis('off')
            plt.title('t = %d' % t)
            plt.show()
    # BUG FIX: the docstring promises X is returned and callers assign the
    # result (X = invert_features(...)); previously the function returned None.
    return X
# ### Shallow feature reconstruction
# After implementing the feature inversion above, run the following cell to try and reconstruct features from the fourth convolutional layer of the pretrained model. You should be able to reconstruct the features using the provided optimization parameters.
from imageio import imread
from skimage.transform import resize
# +
filename = 'sky.jpg'
layer = 3 # layers start from 0 so these are features after 4 convolutions

# BUG FIX: scipy.misc.imresize was removed and this notebook now imports
# skimage.transform.resize (see the cell above), but the call still used the
# undefined name `imresize` (NameError). preserve_range keeps the 0-255 pixel
# scale (resize would otherwise rescale to [0, 1] floats) and uint8 matches
# what imread/imresize produced.
img = resize(imread(filename), (64, 64), preserve_range=True).astype(np.uint8)
plt.imshow(img)
plt.gcf().set_size_inches(3, 3)
plt.title('Original image')
plt.axis('off')
plt.show()

# Preprocess the image before passing it to the network:
# subtract the mean, add a dimension, etc
img_pre = preprocess_image(img, data['mean_image'])

# Extract features from the image
feats, _ = model.forward(img_pre, end=layer)

# Invert the features
kwargs = {
    'num_iterations': 400,
    'learning_rate': 5000,
    'l2_reg': 1e-8,
    'show_every': 100,
    'blur_every': 10,
}
X = invert_features(feats, layer, model, **kwargs)
# -
# ### Deep feature reconstruction
# Reconstructing images using features from deeper layers of the network tends to give interesting results. In the cell below, try to reconstruct the best image you can by inverting the features after 7 layers of convolutions. You will need to play with the hyperparameters to try and get a good result.
#
# HINT: If you read the paper by Mahendran and Vedaldi, you'll see that reconstructions from deep features tend not to look much like the original image, so you shouldn't expect the results to look like the reconstruction above. You should be able to get an image that shows some discernable structure within 1000 iterations.
# +
filename = 'kitten.jpg'
layer = 6 # layers start from 0 so these are features after 7 convolutions

# BUG FIX: `imresize` is undefined here (the scipy.misc import was replaced
# by skimage.transform.resize earlier in the notebook). preserve_range +
# uint8 reproduce imresize's 0-255 output that the preprocessing expects.
img = resize(imread(filename), (64, 64), preserve_range=True).astype(np.uint8)
plt.imshow(img)
plt.gcf().set_size_inches(3, 3)
plt.title('Original image')
plt.axis('off')
plt.show()

# Preprocess the image before passing it to the network:
# subtract the mean, add a dimension, etc
img_pre = preprocess_image(img, data['mean_image'])

# Extract features from the image
feats, _ = model.forward(img_pre, end=layer)

# Invert the features
# You will need to play with these parameters.
kwargs = {
    'num_iterations': 1000,
    'learning_rate': 1e4,
    'l2_reg': 1e-12,
    'show_every': 400,
    'blur_every': 10,
}
X = invert_features(feats, layer, model, **kwargs)
# -
# # DeepDream
# In the summer of 2015, Google released a [blog post](http://googleresearch.blogspot.com/2015/06/inceptionism-going-deeper-into-neural.html) describing a new method of generating images from neural networks, and they later [released code](https://github.com/google/deepdream) to generate these images.
#
# The idea is very simple. We pick some layer from the network, pass the starting image through the network to extract features at the chosen layer, set the gradient at that layer equal to the activations themselves, and then backpropagate to the image. This has the effect of modifying the image to amplify the activations at the chosen layer of the network.
#
# For DeepDream we usually extract features from one of the convolutional layers, allowing us to generate images of any resolution.
#
# We can implement this idea using our pretrained network. The results probably won't look as good as Google's since their network is much bigger, but we should still be able to generate some interesting images.
def deepdream(X, layer, model, **kwargs):
    """
    Generate a DeepDream image.
    Inputs:
    - X: Starting image, of shape (1, 3, H, W)
    - layer: Index of layer at which to dream
    - model: A PretrainedCNN object
    Keyword arguments:
    - learning_rate: How much to update the image at each iteration
    - max_jitter: Maximum number of pixels for jitter regularization
    - num_iterations: How many iterations to run for
    - show_every: How often to show the generated image
    Returns:
    - X: The dreamed image (the caller's array is not modified).
    """
    # Work on a copy so the input image is left untouched.
    X = X.copy()
    learning_rate = kwargs.pop('learning_rate', 5.0)
    max_jitter = kwargs.pop('max_jitter', 16)
    num_iterations = kwargs.pop('num_iterations', 100)
    show_every = kwargs.pop('show_every', 25)
    for t in range(num_iterations):
        # As a regularizer, add random jitter to the image
        ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2)
        X = np.roll(np.roll(X, ox, -1), oy, -2)
        dX = None
        ############################################################################
        # TODO: Compute the image gradient dX using the DeepDream method. You'll   #
        # need to use the forward and backward methods of the model object to      #
        # extract activations and set gradients for the chosen layer. After        #
        # computing the image gradient dX, you should use the learning rate to     #
        # update the image X.                                                      #
        ############################################################################
        # Forward to the chosen layer, then backpropagate the activations
        # themselves as the upstream "gradient" -- this is the DeepDream trick:
        # it amplifies whatever the layer already responds to.
        activations, cache = model.forward(X, end=layer, mode='test')
        dactivations = activations
        dX, grads = model.backward(dactivations, cache)
        # Gradient ascent step on the image.
        X += learning_rate*dX
        ############################################################################
        #                             END OF YOUR CODE                             #
        ############################################################################
        # Undo the jitter
        X = np.roll(np.roll(X, -ox, -1), -oy, -2)
        # As a regularizer, clip the image to the valid per-channel pixel range.
        mean_pixel = data['mean_image'].mean(axis=(1, 2), keepdims=True)
        X = np.clip(X, -mean_pixel, 255.0 - mean_pixel)
        # Periodically show the image
        if t == 0 or (t + 1) % show_every == 0:
            img = deprocess_image(X, data['mean_image'], mean='pixel')
            plt.imshow(img)
            plt.title('t = %d' % (t + 1))
            plt.gcf().set_size_inches(8, 8)
            plt.axis('off')
            plt.show()
    return X
# # Generate some images!
# Try and generate a cool-looking DeepDeam image using the pretrained network. You can try using different layers, or starting from different images. You can reduce the image size if it runs too slowly on your machine, or increase the image size if you are feeling ambitious.
# +
def read_image(filename, max_size):
    """
    Read an image from disk and resize it so its larger side is max_size.

    Aspect ratio is preserved; returns a uint8 H x W x C array.
    """
    img = imread(filename)
    H, W, _ = img.shape
    # Scale the shorter side proportionally so the longer side == max_size.
    if H >= W:
        new_shape = (max_size, int(W * float(max_size) / H))
    else:
        new_shape = (int(H * float(max_size) / W), max_size)
    # BUG FIX: `imresize` is undefined (scipy.misc.imresize was removed; the
    # notebook imports skimage.transform.resize instead). preserve_range keeps
    # 0-255 pixel values and uint8 matches imresize's former output.
    return resize(img, new_shape, preserve_range=True).astype(np.uint8)
# Demo: dream on the kitten image at layer 7.
filename = 'kitten.jpg'
max_size = 256
img = read_image(filename, max_size)
plt.imshow(img)
plt.axis('off')
# Preprocess the image by converting to float, transposing,
# and performing mean subtraction.
img_pre = preprocess_image(img, data['mean_image'], mean='pixel')
out = deepdream(img_pre, 7, model, learning_rate=2000)
# -
|
assignment3/ImageGeneration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bayesian fixed effects meta analysis for WQC and BMI
#
# From a list of Pearson correlation coefficients and corresponding sample sizes, we are going to infer the population level true correlation coefficient.
#
# **Acknowledgement:** Development of the Bayesian meta analysis code greatly benefited from advice from <NAME>.
# +
import metaBayes.correlation as bm_corr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pymc3 as pm # optionally used for diagnosing the MCMC trace
# %config InlineBackend.figure_format = 'retina'
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial']
# -
pm.__version__
# %load_ext autoreload
# %autoreload 2
# Per-study Pearson correlations and sample sizes for the WQC/BMI analysis.
data_weight = pd.read_csv('data/meta_analysis/meta_analysis_correlations_weightloss.csv')
data_weight
# Fit the Bayesian fixed-effects model; returns per-study results plus
# prior/posterior traces and the pymc3 model object.
results, prior, posterior, model = bm_corr.fit(data_weight, type='fixed effects')
pm.traceplot(posterior, var_names='effect_size_population');
pm.plot_posterior(posterior, var_names='effect_size_population');
# +
# Forest-style summary plot; save the table and both figure formats.
bm_corr.plot(results, prior, posterior, figsize=(10, 6), height_ratios=[1, 1], sort_by='effect_size')
results.to_csv('output/meta analysis results - WCQ - fixed effects.csv')
plt.savefig('figs/meta_analysis_WCQ.pdf', bbox_inches='tight')
plt.savefig('img/meta_analysis_WCQ.png', bbox_inches='tight', dpi=300)
# -
|
bayesian meta analysis WCQ.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
import os
import pandas as pd
# Fixed seed so the random y-values are reproducible across runs.
np.random.seed(56)
# # Process:
# 1. Make Random Data
# 2. Create Trace
# 3. Put it inside Data List
# 4. Create Layout
# 5. Pass Data and Layout to figure object
# 6. Plot Figure Object
#
# ### Repeat this twice, adding new graphs to [data]
# 1
x_values = np.linspace(0,1,100)
y_values = np.random.randn(100)
# +
#2
trace = go.Scatter(x=x_values, y=y_values+5,
                   mode = 'markers',
                   name='markers')
#3
data = [trace]
#4
layout = go.Layout(title='Line Charts Boi')
#5
fig = go.Figure(data=data, layout=layout)
#6
# Renders the figure with plotly offline (writes an HTML file and opens it).
pyo.plot(fig)
# +
'''
Now Lets Conver the Scatter Plot to a Line Plot
'''
#2 -- same marker trace as before, plus a line trace of the raw values
trace0 = go.Scatter(x=x_values, y=y_values+5,
                    mode = 'markers',
                    name='markers')
trace1 = go.Scatter(x=x_values, y=y_values,
                    mode = 'lines', name='myLinesYo')
#3
data = [trace0, trace1]
#4
layout = go.Layout(title='Line Charts Boi')
#5
fig = go.Figure(data=data, layout=layout)
#6
pyo.plot(fig)
# +
'''
Now Lets Conver the Scatter Plot to a Line Plot
'''
#2 -- three traces: markers, lines, and combined lines+markers (offset
# vertically by +5 / 0 / -5 so they don't overlap)
trace0 = go.Scatter(x=x_values, y=y_values+5,
                    mode = 'markers',
                    name='markers')
trace1 = go.Scatter(x=x_values, y=y_values,
                    mode = 'lines', name='myLinesYo')
trace2 = go.Scatter(x=x_values, y=y_values-5,
                    mode = 'lines+markers', name='myFavLinesYo')
#3
data = [trace0, trace1, trace2]
#4
layout = go.Layout(title='Line Charts Boi')
#5
fig = go.Figure(data=data, layout=layout)
#6
pyo.plot(fig)
# -
# ## Now let's add real Census data!
#
# Process:
# 1. Load Data
# 2. Filter Columns and set Index
# 3. Feed Data to
# +
# Load the Census state population estimates shipped in ./data.
root = os.getcwd()
fn = 'nst-est2017-alldata.csv'
abs_path = os.path.join(root, 'data', fn)
df = pd.read_csv(abs_path)
print(df.shape)
print(df.columns)
df.head()
# +
# NOTE(review): DIVISION is compared as the string '1', which implies the CSV
# column is read with object dtype -- confirm against the file.
df2 = df[df['DIVISION'] == '1'] #Filter where DIVISION IS 1
df2.set_index('NAME', inplace=True)
#Grab Pop Columns (POPESTIMATE-style year columns)
pop_cols = [col for col in df2.columns if col.startswith('POP')]
df2 = df2[pop_cols]
df2.head()
# +
#Use List Comprehensions to make Traces Fast!
# One line trace per state (row of df2), x = year columns, y = populations.
data = [go.Scatter(x=df2.columns,
                   y=df2.loc[name],
                   mode='lines',
                   name=name) for name in df2.index ]
pyo.plot(data)
# -
|
3.2-whs-LineCharts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import os

# Resolve the absolute path of the harmonic-oscillator time series that
# lives next to this notebook.
base_path = os.getcwd()
relative_file_path = "harmonic_oscillator"
file_name = "time.txt"
absolute_file_path = os.path.abspath(os.path.join(base_path, relative_file_path, file_name))
print(absolute_file_path)

# Print the two lines of interest (0-based indices 25 and 29) and stop
# reading as soon as we are past the last one.
with open(absolute_file_path, "r") as file:
    for i, line in enumerate(file):
        if i in (25, 29):
            print(line)
        elif i > 29:
            break
|
docs/source/notebooks/potential-wall.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from arc import *
# +
import numpy as np
import pandas as pd
import os
import json
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
from pathlib import Path
# +
# Discrete 10-colour palette for ARC grids: colour index i maps to entry i.
cmap = colors.ListedColormap(
    ['#000000', '#0074D9','#FF4136','#2ECC40','#FFDC00',
     '#AAAAAA', '#F012BE', '#FF851B', '#7FDBFF', '#870C25'])
norm = colors.Normalize(vmin=0, vmax=9)
# 0:black, 1:blue, 2:red, 3:green, 4:yellow,
# 5:gray, 6:magenta, 7:orange, 8:sky, 9:brown
# Render the palette itself as a 1x10 strip for reference.
plt.figure(figsize=(5, 2), dpi=200)
plt.imshow([list(range(10))], cmap=cmap, norm=norm)
plt.xticks(list(range(10)))
plt.yticks([])
plt.show()
def plot_task(task):
    """Visualize an ARC task: one column per example, inputs on row 0 and
    expected outputs on row 1, coloured via the module-level `cmap`/`norm`.

    `task` is the parsed task JSON with "train"/"test" lists whose entries
    hold "input"/"output" matrices of colour indices 0-9.
    """
    # One column per example: train examples first, then test examples.
    n = len(task["train"]) + len(task["test"])
    # NOTE(review): if n == 1, plt.subplots returns a 1-D axes array and the
    # axs[0][fig_num] indexing below would fail — assumes every task has at
    # least two examples in total; confirm.
    fig, axs = plt.subplots(2, n, figsize=(4*n,8), dpi=50)
    plt.subplots_adjust(wspace=0, hspace=0)
    fig_num = 0
    for i, t in enumerate(task["train"]):
        t_in, t_out = np.array(t["input"]), np.array(t["output"])
        axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
        axs[0][fig_num].set_title(f'Train-{i} in')
        axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
        axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
        axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
        axs[1][fig_num].set_title(f'Train-{i} out')
        axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
        axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
        fig_num += 1
    for i, t in enumerate(task["test"]):
        t_in, t_out = np.array(t["input"]), np.array(t["output"])
        axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
        axs[0][fig_num].set_title(f'Test-{i} in')
        axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
        axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
        axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
        axs[1][fig_num].set_title(f'Test-{i} out')
        axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
        axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
        fig_num += 1
    plt.tight_layout()
    plt.show()
# -
# Colour-index <-> colour-name lookup tables matching the palette above.
num2color = ["black", "blue", "red", "green", "yellow", "gray", "magenta", "orange", "sky", "brown"]
color2num = {c: n for n, c in enumerate(num2color)}
# Search for program trees solving the training tasks.
# NOTE(review): `solve_all_tasks` comes from the `from arc import *` at the
# top of this notebook — confirm its signature (depth limit 6?).
all_solutions = solve_all_tasks(6, training='training')
# print(list(all_solutions.keys()))
for key, value in all_solutions.items():
    print(key)
    print(value[0])   # first/best solution found for this task
    print()
def check(task, imgTree):
    """Plot a task like `plot_task`, plus a third row showing the prediction
    produced by running the candidate program `imgTree` on each input.

    Relies on `compute` and `Image` from the `arc` star-import at the top.
    """
    # Wrap the solver: raw matrix in -> predicted matrix out.
    pred_func = lambda x: compute(imgTree, Image(x)).matrix
    n = len(task["train"]) + len(task["test"])
    # Rows: 0 = input, 1 = expected output, 2 = prediction.
    fig, axs = plt.subplots(3, n, figsize=(4*n,12), dpi=50)
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    fig_num = 0
    for i, t in enumerate(task["train"]):
        t_in, t_out = np.array(t["input"]), np.array(t["output"])
        t_pred = pred_func(t_in)
        axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
        axs[0][fig_num].set_title(f'Train-{i} in')
        axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
        axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
        axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
        axs[1][fig_num].set_title(f'Train-{i} out')
        axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
        axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
        axs[2][fig_num].imshow(t_pred, cmap=cmap, norm=norm)
        axs[2][fig_num].set_title(f'Train-{i} pred')
        axs[2][fig_num].set_yticks(list(range(t_pred.shape[0])))
        axs[2][fig_num].set_xticks(list(range(t_pred.shape[1])))
        fig_num += 1
    for i, t in enumerate(task["test"]):
        t_in, t_out = np.array(t["input"]), np.array(t["output"])
        t_pred = pred_func(t_in)
        axs[0][fig_num].imshow(t_in, cmap=cmap, norm=norm)
        axs[0][fig_num].set_title(f'Test-{i} in')
        axs[0][fig_num].set_yticks(list(range(t_in.shape[0])))
        axs[0][fig_num].set_xticks(list(range(t_in.shape[1])))
        axs[1][fig_num].imshow(t_out, cmap=cmap, norm=norm)
        axs[1][fig_num].set_title(f'Test-{i} out')
        axs[1][fig_num].set_yticks(list(range(t_out.shape[0])))
        axs[1][fig_num].set_xticks(list(range(t_out.shape[1])))
        axs[2][fig_num].imshow(t_pred, cmap=cmap, norm=norm)
        axs[2][fig_num].set_title(f'Test-{i} pred')
        axs[2][fig_num].set_yticks(list(range(t_pred.shape[0])))
        axs[2][fig_num].set_xticks(list(range(t_pred.shape[1])))
        fig_num += 1
# Render every solved task side by side with the predictions of its first solution.
for key in all_solutions:
    print(key)
    # NOTE(review): `get_data` and `training_path` are assumed to come from the
    # `from arc import *` at the top — confirm.
    task = get_data(str(training_path / key))
    check(task, all_solutions[key][0])
|
Viz.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook demonstrates how to use the rms simulator adapter
# +
import os
import matplotlib.pyplot as plt
# %matplotlib inline
from tests.common import run_minimal
from t3.common import IPYTHON_SIMULATOR_EXAMPLES_PATH
from t3.main import T3
from t3.simulate.rms_constantTP import RMSConstantTP
from arc.common import read_yaml_file
# -
# Directory containing the input file and the T3 iteration folders.
EXAMPLE_DIR = os.path.join(IPYTHON_SIMULATOR_EXAMPLES_PATH, 'rms_simulator_data')
# Read the input dictionary so T3 can be driven via its API.
minimal_input = os.path.join(EXAMPLE_DIR, 'input.yml')
input_dict = read_yaml_file(path=minimal_input)
input_dict['verbose'] = 10                 # most verbose logging
input_dict['project_directory'] = EXAMPLE_DIR
# Create the T3 instance that holds the configuration used by the RMS adapter.
t3 = T3(**input_dict)
t3.set_paths()
# Simulate and run sensitivity analysis with the RMS constant-T,P adapter.
# NOTE(review): `observable_list` is never defined in this notebook, so the
# call below raises a NameError as written — it must be defined (presumably a
# list of observable species labels) before this cell is run; confirm.
rms_simulator_adapter = RMSConstantTP(t3=t3.t3,
                                      rmg=t3.rmg,
                                      paths=t3.paths,
                                      logger=t3.logger,
                                      atol=t3.rmg['model']['atol'],
                                      rtol=t3.rmg['model']['rtol'],
                                      observable_list=observable_list,
                                      sa_atol=t3.t3['sensitivity']['atol'],
                                      sa_rtol=t3.t3['sensitivity']['rtol'],
                                      )
rms_simulator_adapter.simulate()
# Sensitivity coefficients in a standard dictionary format.
sa_dict = rms_simulator_adapter.get_sa_coefficients()
# Plot dln(species)/dln(k) over time for one species / rate-coefficient pair.
species = 'H(3)'
k = 5
plt.plot(sa_dict['time'], sa_dict['kinetics'][species][k])
plt.xlabel('time (s)')
plt.ylabel(f'dln({species})/dln(k{k})')
plt.title('Sensitivity over time')
|
ipython/simulator_adapter_examples/rms_simulator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# <br>
# # Painters Identification using ConvNets
# ### <NAME>
# <br>
#
#
#
#
# ## Index
#
# - [Building Convolutional Neural Networks](#convnets)
# - [Small ConvNets](#smallconvnets)
# - [Imports for Convnets](#importconvnets)
# - [Preprocessing](#keraspreprocessing)
# - [Training the model](#traincnn)
# - [Plotting the results](#plotting)
# - [Transfer learning: Using InceptionV3](#VGG16)
# - [Comments](#comments)
# - [References](#ref)
#
# <br>
# <br>
#
#
#
print('Created using Python', platform.python_version())
# ## Introduction
#
# The challenge of recognizing artists given their paintings has been, for a long time, far beyond the capability of algorithms. Recent advances in deep learning, specifically the development of convolutional neural networks, have made that task possible. One of the advantages of these methods is that, in contrast to several methods employed by art specialists, they are not invasive and do not interfere with the painting.
#
#
# ## Overview
#
# I used Convolutional Neural Networks (ConvNets) to identify the artist of a given painting. The dataset contains a minimum of 400 paintings per artist <br> from a set of 37 famous artists.
# <br><br>
# I trained a small ConvNet built from scratch, and also used transfer learning, fine-tuning the top layers of a deep pre-trained networks (VGG16).
# ## Problems with small datasets
# The number of training examples in our dataset is small (for image recognition standards). Therefore, making predictions with high accuracy avoiding overfitting becomes a difficult task. To build classification systems with the level of capability of current state-of-the-art models would need millions of training examples. Example of such models are the ImageNet models. Examples of these models include:
#
# - VGG16
# - VGG19
# - ResNet50
# - Inception V3
# - Xception
#
#
#
# ## Preprocessing
#
# The `Keras` class `keras.preprocessing.image.ImageDataGenerator` generates batches of image data with real-time data augmentation and defines the configuration for both image data preparation and image data augmentation. Data augmentation is particularly useful in cases like the present one, where the number of images in the training set is not large, and overfitting can become an issue.
#
# To create an augmented image generator we can follow these steps:
#
# - We must first create an instance i.e. an augmented image generator (using the command below) where several arguments can be chosen. These arguments will determine the alterations to be performed on the images during training:
#
# datagen = ImageDataGenerator(arguments)
#
# - To use `datagen` to create new images we call the function `fit_generator( )` with the desired arguments.
#
# I will quickly explain some possible arguments of `ImageDataGenerator`:
# - `rotation range` defines the amplitude that the images will be rotated randomly during training. Rotations aren't always useful. For example, in the MNIST dataset all images have normalized orientation, so random rotations during training are not needed. In our present case it is not clear how useful rotations are, so I will choose a small argument (instead of just setting it to zero).
# - `rotation_range`, `width_shift_range`, `height_shift_range` and `shear_range`: the ranges of random shifts and random shears should be the same in our case, since the images were resized to have the same dimensions.
# - I set `fill mode` to be `nearest` which means that pixels that are missing will be filled by the nearest ones.
# - `horizontal_flip`: horizontal (and vertical) flips can be useful here since in many examples in our dataset there is no clear definition of orientation (again the MNIST dataset is an example where flipping is not useful)
# - We can also standardize pixel values using the `featurewise_center` and `feature_std_normalization` arguments.
# ***
#
# ## Transfer Learning
# One way to circumvent this issue is to use 'Transfer Learning', where we use a pre-trained model, modify its final layers and apply it to our dataset. When the dataset is too small, these pre-trained models act as feature generators only (see discussion below). As will be illustrated later on, when the dataset in question has some reasonable size, one can drop some layers from the original model, stack a model on top of the network and perform some parameter fine-tuning.
#
# Before following this approach, I will, in the next section, build a small ConvNet "from scratch".
# +
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.preprocessing import image
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
from keras import applications
from keras.applications.imagenet_utils import preprocess_input
from imagenet_utils import decode_predictions
import math, cv2
folder_train = './train_toy_3/'
folder_test = './test_toy_3/'
# -
# Augmented training-image generator: each argument defines a random
# transformation applied on the fly during training (see discussion above).
datagen = ImageDataGenerator(
    featurewise_center=True,             # zero-center pixels dataset-wide (requires datagen.fit)
    featurewise_std_normalization=True,  # divide by the dataset std (requires datagen.fit)
    rotation_range=0.15,                 # small random rotations only
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale = 1./255,                    # map pixel values into [0, 1]
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,                # orientation is not meaningful for paintings
    fill_mode='nearest')                 # fill pixels exposed by transforms with nearest values
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.callbacks import EarlyStopping, Callback
K.image_data_format() # this means that "backend": "tensorflow". Channels are RGB
from keras import applications
from keras.utils.np_utils import to_categorical
import math, cv2
# ## Defining the new size of the image
#
# - The images from Wikiart.org had an extremely large size; I wrote a simple function `preprocess( )` (see the notebook about data analysis in this repo) to resize the images. In the next cell I resize them again and play with the size to see how it impacts accuracy.
# - The reason why cropping the image is partly justified is that I believe, the style of the artist is present everywhere in the painting, so cropping shouldn't cause major problems.
# Target image size.  Cropping/resizing is acceptable here on the assumption
# that an artist's style is present everywhere in the painting.
img_width, img_height = 120,120
# +
# Channel ordering depends on the Keras backend: Theano puts channels first,
# TensorFlow puts them last.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    print('Theano Backend')
else:
    input_shape = (img_width, img_height, 3)
    print('TensorFlow Backend')
input_shape
# +
# Count the images in the training and test sets by summing the file counts of
# each per-artist subfolder.  The original re-listed the parent directory on
# every loop iteration and concatenated paths with '+'; this lists each
# directory once and joins paths portably.
def _count_images(root):
    """Return the total number of entries across the immediate subfolders of *root*."""
    root = os.path.abspath(root)
    return sum(len(os.listdir(os.path.join(root, sub))) for sub in os.listdir(root))

nb_train_samples = _count_images(folder_train)
nb_train_samples

nb_test_samples = _count_images(folder_test)
nb_test_samples
# -
# ## Batches and Epochs:
#
# - Batch: a set of $N$ samples. The samples in a batch are processed independently, in parallel. If training, a batch results in only one update to the model (extracted from the docs).
# - Epoch: an arbitrary cutoff, generally defined as "one pass over the entire dataset", used to separate training into distinct phases, which is useful for logging and periodic evaluation. When using `evaluation_data` or `evaluation_split` with the `fit` method of Keras models, evaluation will be run at the end of every epoch (extracted from the docs).
# - Larger batch sizes: faster progress in training, but they don't always converge as fast.
# - Smaller batch sizes: train slower, but can converge faster. It's definitely problem dependent.
train_data_dir = os.path.abspath(folder_train)      # training root, one subfolder per artist
validation_data_dir = os.path.abspath(folder_test)  # test root, one subfolder per artist
nb_train_samples = nb_train_samples
nb_validation_samples = nb_test_samples
epochs = 100                  # upper bound; early stopping may end training sooner
batch_size = 16 # batch_size = 16
num_classes = len(os.listdir(os.path.abspath(folder_train)))   # one class per artist folder
print('The painters are',os.listdir(os.path.abspath(folder_train)))
# ### Class for early stopping
#
# Model stops training when 10 epochs do not show gain in accuracy.
# rdcolema
class EarlyStoppingByLossVal(Callback):
    """Custom Keras callback: stop training once the monitored value drops below a target."""
    def __init__(self, monitor='val_loss', value=0.45, verbose=0):
        # BUG FIX: the original called super(Callback, self).__init__(), which
        # skips Callback's own initializer; plain super() resolves correctly.
        super().__init__()
        self.monitor = monitor   # logged quantity to watch
        self.value = value       # stop as soon as monitor < value
        self.verbose = verbose
    def on_epoch_end(self, epoch, logs=None):
        # BUG FIX: mutable default argument `logs={}` replaced with None.
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            import warnings  # local import: `warnings` is not imported elsewhere in this notebook
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            # BUG FIX: without this return, the comparison below evaluated
            # `None < self.value` and raised a TypeError.
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True
# Built-in early stopping: halt after 10 epochs without val_loss improvement.
early_stopping = EarlyStopping(monitor='val_loss', patience=10, mode='auto') #
top_model_weights_path = 'bottleneck_fc_model.h5'   # where the trained top-model weights are saved
# ### Creating InceptionV3 model
#
# We now create the InceptionV3 model without the final fully-connected layers (setting `include_top=False`) and loading the ImageNet weights (by setting `weights ='imagenet`)
from keras.applications.inception_v3 import InceptionV3
# Build the network once and reuse it: each `applications.InceptionV3(...)`
# call reconstructs the whole model and reloads the ImageNet weights, so the
# original's three separate calls did that work three times.
model = applications.InceptionV3(include_top=False, weights='imagenet')
model.summary()
# summary() prints the architecture and returns None, hence NoneType here.
type(model.summary())
# ### Training and running images on InceptionV3
#
# We first create the generator. The generator is an iterator that generates batches of images when requested using e.g. `flow( )`.
# +
# Generator that streams the training images (rescaled to [0, 1]) in a fixed order.
datagen = ImageDataGenerator(rescale=1. / 255)
generator = datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,   # no labels: we only need the images for feature extraction
    shuffle=False)     # keep order so features stay aligned with generator.classes
nb_train_samples = len(generator.filenames)
num_classes = len(generator.class_indices)
# Number of batches required to cover every training sample once.
predict_size_train = int(math.ceil(nb_train_samples / batch_size))
print('Number of training samples:',nb_train_samples)
print('Number of classes:',num_classes)
# -
# ### Bottleneck features
#
# The extracted features, which are the last activation maps before the fully-connected layers in the pre-trained model, are called "bottleneck features". The function `predict_generator( )` generates predictions for the input samples from a data generator.
# Run every training image through InceptionV3; the resulting activation maps
# ("bottleneck features") are the inputs for the small classifier below.
bottleneck_features_train = model.predict_generator(generator, predict_size_train) # these are numpy arrays
bottleneck_features_train[0].shape
bottleneck_features_train.shape
# Cache the features on disk so training the top model doesn't redo this pass.
np.save('bottleneck_features_train.npy', bottleneck_features_train)
# Using `predict( )` we see that, indeed, the pre-trained network is able to identify some objects in the painting. The function `decode_predictions` decodes the results into a list of tuples of the form (class, description, probability). We see below that the model identifies the house in the image as a castle or mosque and shows correctly a non-zero probability of finding a seashore in the painting. In this case, the pre-trained network acts as a feature generator. (The text originally said `ResNet50`, but this notebook uses `InceptionV3`.)
# Repeating the steps for the validation data:
# +
# Same streaming setup for the validation images (unlabelled, fixed order).
generator = datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
nb_validation_samples = len(generator.filenames)
predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
print('Number of testing samples:',nb_validation_samples)
# +
# Extract and cache the validation bottleneck features as well.
bottleneck_features_validation = model.predict_generator(
    generator, predict_size_validation)
np.save('bottleneck_features_validation.npy', bottleneck_features_validation)
# -
# ### Training the fully-connected network (the top-model)
#
# We now load the features just obtained, get the class labels for the training set and convert the latter into categorial vectors:
# +
# Labelled generator over the training images.  shuffle=False keeps
# generator_top.classes aligned row-for-row with the cached bottleneck features.
datagen_top = ImageDataGenerator(rescale=1./255)
generator_top = datagen_top.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
nb_train_samples = len(generator_top.filenames)
num_classes = len(generator_top.class_indices)
# -
# Load the cached training features.
train_data = np.load('bottleneck_features_train.npy')
# One-hot encode the integer class labels.
train_labels = generator_top.classes
print('Classes before dummification:',train_labels)
train_labels = to_categorical(train_labels, num_classes=num_classes)
print('Classes after dummification:\n\n',train_labels)
# Repeat the process for the validation images.
# +
generator_top = datagen_top.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
nb_validation_samples = len(generator_top.filenames)
# -
validation_data = np.load('bottleneck_features_validation.npy')
validation_labels = generator_top.classes
validation_labels = to_categorical(validation_labels, num_classes=num_classes)
# ### Building the small FL model using bottleneck features as input
# +
# Small fully-connected classifier ("top model") trained on the cached
# InceptionV3 bottleneck features.
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
# model.add(Dense(1024, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu')) # Not valid for minimum = 500
model.add(Dropout(0.5))
# model.add(Dense(4, activation='relu')) # Not valid for minimum = 500
# model.add(Dropout(0.5))
# BUG FIX: the labels are one-hot vectors over num_classes mutually exclusive
# painters (to_categorical above), so the head must be softmax trained with
# categorical cross-entropy.  sigmoid + binary_crossentropy scores each class
# independently and reports a misleadingly high accuracy on multiclass labels.
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='Adam',
              loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
(eval_loss, eval_accuracy) = model.evaluate(
    validation_data, validation_labels,
    batch_size=batch_size, verbose=1)
print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
print("[INFO] Loss: {}".format(eval_loss))
# -
train_data.shape[1:]
# +
# model.evaluate(
# validation_data, validation_labels, batch_size=batch_size, verbose=1)
# +
# model.predict_classes(validation_data)
# +
# model.metrics_names
# +
#top_k_categorical_accuracy(y_true, y_pred, k=5)
# -
# ### Plotting the accuracy history
# +
plt.figure(1)
# Accuracy history of the top model (training vs validation curves).
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
#pylab.ylim([0.4,0.68])
plt.legend(['train', 'test'], loc='upper left')
# -
# Loss history on the second row of the same figure.
import pylab
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
pylab.xlim([0,60])
# pylab.ylim([0,1000])
plt.show()
import matplotlib.pyplot as plt
import pylab
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Standalone loss figure, saved to loss.png.
fig = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Classification Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
pylab.xlim([0,60])
# BUG FIX: the first curve is the *training* loss, not the test loss.
plt.legend(['Train', 'Validation'], loc='upper right')
fig.savefig('loss.png')
plt.show();
# +
# Standalone accuracy figure, saved to acc.png.
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
fig = plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
# BUG FIX: removed `plt.plot(figsize=(15,15))` — figsize is a Figure argument,
# not a Line2D property, so that call was at best a no-op and errors on
# current Matplotlib; size belongs in plt.figure(figsize=...).
plt.title('Classification Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
pylab.xlim([0,100])
# BUG FIX: only two curves are plotted, so the stray third legend entry
# ('Success Metric') labelled nothing; first curve is training accuracy.
plt.legend(['Train', 'Validation'], loc='lower right')
fig.savefig('acc.png')
plt.show();
# -
# !pwd
# ### Predictions
# BUG FIX: the original listdir line was missing the closing quote on the path
# string, which is a SyntaxError.
os.listdir(os.path.abspath('train_toy_3/Pierre-Auguste_Renoir'))
# Load one held-out painting both as a raw OpenCV image (for display) and as a
# normalized array shaped for the network.
image_path = os.path.abspath('test_toy_3/Pierre-Auguste_Renoir/91485.jpg')
orig = cv2.imread(image_path)
image = load_img(image_path, target_size=(120,120))
image
image = img_to_array(image)
image
image = image / 255.                   # same rescaling used during training
image = np.expand_dims(image, axis=0)  # add the batch dimension
image
# Build the feature extractor (InceptionV3 without its classification head),
# matching the network that produced the cached bottleneck features.
#model = applications.VGG16(include_top=False, weights='imagenet')
model = applications.InceptionV3(include_top=False, weights='imagenet')
# Bottleneck features of the single input image.
bottleneck_prediction = model.predict(image)
# Rebuild the top model with exactly the architecture it was trained with so
# the saved weights can be loaded below.
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
# model.add(Dense(1024, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu')) # Not valid for minimum = 500
model.add(Dropout(0.5))
# model.add(Dense(4, activation='relu')) # Not valid for minimum = 500
# model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='sigmoid'))
# +
model.load_weights(top_model_weights_path)
# Classify the bottleneck features with the trained top model.
class_predicted = model.predict_classes(bottleneck_prediction)
# +
inID = class_predicted[0]
class_dictionary = generator_top.class_indices
# Invert {class_name: index} to map the predicted index back to an artist name.
inv_map = {v: k for k, v in class_dictionary.items()}
label = inv_map[inID]
# Report the prediction.
print("Image ID: {}, Label: {}".format(inID, label))
# Overlay the predicted label on the original image and display it.
cv2.putText(orig, "Predicted: {}".format(label), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
painters-identification/notebooks/capstone-models-final-model-building.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="04dC_NGgzmcf" colab_type="code" colab={}
from IPython.display import JSON
from google.colab import output
from subprocess import getoutput
import os
def shell(command):
    """Run *command*; `cd` is handled in-process so the working directory
    persists between invocations (a `cd` inside getoutput's subprocess
    would be lost when the subprocess exits)."""
    parts = command.strip().split(maxsplit=1)
    # BUG FIX: the original `command.startswith('cd')` matched any command
    # beginning with the letters "cd" (e.g. "cdrecord") and raised IndexError
    # on a bare "cd"; a bare "cd" now goes to the home directory.
    if parts and parts[0] == 'cd':
        path = parts[1] if len(parts) > 1 else os.path.expanduser('~')
        os.chdir(path)
        return JSON([''])
    return JSON([getoutput(command)])
output.register_callback('shell', shell)
# + id="ZL-iY8n7zn_J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 367} cellView="both" outputId="960be0ba-a94e-4358-cd1c-b742064e0ec3"
#@title Colab Shell
# %%html
<div id=colab_shell></div>
<script src="https://code.jquery.com/jquery-latest.js"></script>
<script src="https://cdn.jsdelivr.net/npm/jquery.terminal/js/jquery.terminal.min.js"></script>
<link href="https://cdn.jsdelivr.net/npm/jquery.terminal/css/jquery.terminal.min.css" rel="stylesheet"/>
<script>
$('#colab_shell').terminal(async function(command) {
if (command !== '') {
try {
let res = await google.colab.kernel.invokeFunction('shell', [command])
let out = res.data['application/json'][0]
this.echo(new String(out))
} catch(e) {
this.error(new String(e));
}
} else {
this.echo('');
}
}, {
greetings: 'Welcome to Colab Shell',
name: 'colab_shell',
height: 350,
prompt: 'colab > '
});
# + id="NPFcQabP0VRE" colab_type="code" colab={}
|
colab-terminal.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from urllib.request import urlopen
from bs4 import BeautifulSoup
def getNgrams(content, n):
    """Return every n-gram of *content* as a list of token lists.

    Tokens come from a naive split on single spaces, so punctuation and the
    empty strings produced by repeated spaces are kept as-is.
    """
    tokens = content.split(' ')
    return [tokens[start:start + n] for start in range(len(tokens) - n + 1)]
# Fetch the Wikipedia article body and count its naive 2-grams.
html = urlopen('http://en.wikipedia.org/wiki/Python_(programming_language)')
bs = BeautifulSoup(html, 'html.parser')
content = bs.find('div', {'id':'mw-content-text'}).get_text()
ngrams = getNgrams(content, 2)
print(ngrams)
print('2-grams count is: '+str(len(ngrams)))
# +
import re
def getNgrams(content, n):
    """Return n-grams of *content* after stripping newlines, citation markers
    and non-ASCII characters, and dropping empty tokens."""
    # BUG FIX: the original pattern '[[\d+\]]' is a character class that
    # deletes every digit, '+', '[' and ']' anywhere in the text; the intent
    # is to remove Wikipedia-style citation markers such as "[1]".
    content = re.sub(r'\n|\[\d+\]', ' ', content)
    # Drop non-ASCII characters.
    content = bytes(content, 'UTF-8')
    content = content.decode('ascii', 'ignore')
    content = content.split(' ')
    content = [word for word in content if word != '']
    output = []
    for i in range(len(content)-n+1):
        output.append(content[i:i+n])
    return output
# -
html = urlopen('http://en.wikipedia.org/wiki/Python_(programming_language)')
bs = BeautifulSoup(html, 'html.parser')
content = bs.find('div', {'id':'mw-content-text'}).get_text()
ngrams = getNgrams(content, 2)
print(ngrams)
print('2-grams count is: '+str(len(ngrams)))
# +
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import string
def cleanSentence(sentence):
    """Tokenize *sentence* on spaces, trim punctuation/whitespace from each
    token, and drop single characters other than 'a'/'i' (any case)."""
    strip_chars = string.punctuation + string.whitespace
    trimmed = (word.strip(strip_chars) for word in sentence.split(' '))
    return [word for word in trimmed
            if len(word) > 1 or word.lower() in ('a', 'i')]
def cleanInput(content):
    """Normalize raw text (uppercase, ASCII-only, citation markers removed)
    and split it into cleaned sentences — each a list of tokens."""
    content = content.upper()
    # BUG FIX: '[[\d+\]]' was a character class deleting every digit,
    # bracket and '+' anywhere in the text; the intent is to remove
    # citation markers like "[1]".
    content = re.sub(r'\n|\[\d+\]', ' ', content)
    # Drop non-ASCII characters.
    content = bytes(content, "UTF-8")
    content = content.decode("ascii", "ignore")
    sentences = content.split('. ')
    return [cleanSentence(sentence) for sentence in sentences]
def getNgramsFromSentence(content, n):
    """Slide a window of width *n* over the token list and collect each window."""
    last_start = len(content) - n
    return [content[start:start + n] for start in range(last_start + 1)]
def getNgrams(content, n):
    """Clean *content* into sentences, then gather n-grams per sentence so
    that no n-gram spans a sentence boundary."""
    sentences = cleanInput(content)
    ngrams = []
    for sentence in sentences:
        ngrams += getNgramsFromSentence(sentence, n)
    return ngrams
# -
# Re-run the scrape through the cleaned pipeline and report the 2-gram count.
html = urlopen('http://en.wikipedia.org/wiki/Python_(programming_language)')
bs = BeautifulSoup(html, 'html.parser')
content = bs.find('div', {'id':'mw-content-text'}).get_text()
print(len(getNgrams(content, 2)))
# +
from collections import Counter
def getNgrams(content, n):
    """Return a Counter mapping each n-gram (tokens joined with spaces) to its
    frequency across the cleaned sentences of *content*."""
    content = cleanInput(content)
    ngrams = Counter()
    # NOTE: the original also accumulated an `ngrams_list` that was never
    # read or returned; the dead accumulator has been removed.
    for sentence in content:
        ngrams.update(' '.join(ngram) for ngram in getNgramsFromSentence(sentence, n))
    return ngrams
# -
print(getNgrams(content, 2))
|
Chapter08-CleaningYourDirtyData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Xarray-spatial
# ### User Guide: Surface tools
# -----
# With the Surface tools, you can quantify and visualize a terrain landform represented by a digital elevation model.
#
# Starting with a raster elevation surface, represented as an Xarray DataArray, these tools can help you identify some specific patterns that may not be readily apparent in the original surface. The return of each function is also an Xarray DataArray.
#
# [Hillshade](#Hillshade): Creates a shaded relief from a surface raster by considering the illumination source angle and shadows.
#
# [Slope](#Slope): Identifies the slope for each cell of a raster.
#
# [Curvature](#Curvature): Calculates the curvature of a raster surface.
#
# [Aspect](#Aspect): Derives the aspect for each cell of a raster surface.
#
# [Viewshed](#Viewshed): Determines visible locations in the input raster surface from a viewpoint with an optional observer height.
#
# -----------
#
# #### Let's use datashader to render our images...
# We'll need the basic Numpy and Pandas, as well as datashader,
# a data rasterization package highly compatible with Xarray-spatial.
# Along with the base package, we'll import several nested functions (shade, stack...)
# including Elevation, which we'll use below.
# +
import numpy as np
import pandas as pd
import datashader as ds
from datashader.transfer_functions import shade
from datashader.transfer_functions import stack
from datashader.transfer_functions import dynspread
from datashader.transfer_functions import set_background
from datashader.colors import Elevation
import xrspatial
# -
# ## Generate Terrain Data
#
# The rest of the geo-related functions focus on raster data, i.e. data that's been aggregated into the row-column grid of cells in a raster image. Datashader's Canvas object provides a convenient frame to set up a new raster, so we'll use that with our `generate_terrain` function to generate some fake terrain as an elevation raster. Once we have that, we'll use datashader's shade for easy visualization.
# +
from xrspatial import generate_terrain
# Rasterize a procedurally generated terrain onto an 800x600 canvas covering
# roughly the Web-Mercator extent, then display elevation as a linear
# black-to-white ramp.
W = 800
H = 600
cvs = ds.Canvas(plot_width=W, plot_height=H, x_range=(-20e6, 20e6), y_range=(-20e6, 20e6))
terrain = generate_terrain(canvas=cvs)
shade(terrain, cmap=['black', 'white'], how='linear')
# -
# The grayscale values in the image above show elevation, scaled linearly in black-to-white color intensity (with the large black areas indicating low elevation). This shows the data, but it would look more like a landscape if we map the lowest values to colors representing water, and the highest to colors representing mountaintops. Let's try the Elevation colormap we imported above:
shade(terrain, cmap=Elevation, how='linear')
# ## Hillshade
#
# [Hillshade](https://en.wikipedia.org/wiki/Terrain_cartography) is a technique used to visualize terrain as shaded relief by illuminating it with a hypothetical light source. The illumination value for each cell is determined by its orientation to the light source, which can be calculated from slope and aspect.
#
# Let's apply Hillshade to our terrain and visualize the result with shade.
# +
from xrspatial import hillshade

# Shaded relief: per-cell illumination from a hypothetical light source.
illuminated = hillshade(terrain)
hillshade_gray_white = shade(illuminated, cmap=['gray', 'white'], alpha=255, how='linear')
# Bare expression so the notebook displays the image.
hillshade_gray_white
# -
# Applying hillshade reveals a lot of detail in the 3D shape of the terrain.
#
# To add even more detail, we can add the Elevation colormapped terrain from earlier and combine it with the hillshade terrain using datashader's stack function.
# Combine the semi-transparent Elevation colormap with the hillshade layer.
terrain_elevation = shade(terrain, cmap=Elevation, alpha=128, how='linear')
stack(hillshade_gray_white, terrain_elevation)
# ## Slope
# [Slope](https://en.wikipedia.org/wiki/Slope) is the inclination of a surface.
# In geography, *slope* is the amount of change in elevation for an area in a terrain relative to its surroundings.
#
# Xarray-spatial's slope function returns the slope at each cell in degrees.
# Because Xarray-spatial is integrated with Xarray and Numpy, we can apply standard Numpy filters. For example, we can highlight only slopes in the [avalanche risk](http://wenatcheeoutdoors.org/2016/04/07/avalanche-abcs-for-snowshoers/) range of 25 - 50 degrees. (Note the use of risky.data since these are DataArrays).
# Stacking the resulting raster with the hillshaded and plain terrain ones from above gives an image with areas of avalanche risk neatly highlighted.
# +
from xrspatial import slope

risky = slope(terrain)
# Keep only slopes in the 25-50 degree avalanche-risk band; everything else
# becomes NaN so it is transparent when shaded. (.data is used because these
# are xarray DataArrays.)
risky.data = np.where(np.logical_and(risky.data > 25, risky.data < 50), 1, np.nan)

# Layer order: plain terrain, semi-transparent hillshade, red risk overlay.
stack(shade(terrain, cmap=['black', 'white'], how='linear'),
shade(illuminated, cmap=['black', 'white'], how='linear', alpha=128),
shade(risky, cmap='red', how='linear', alpha=200))
# -
# ## Curvature
# [Curvature](https://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/curvature.htm) is the second derivative of a surface's elevation, or the *slope-of-the-slope*; in other words, how fast the slope is increasing or decreasing as we move along a surface.
# - A positive curvature means the surface is curving up (upwardly convex) at that cell.
# - A negative curvature means the surface is curving down (downwardly convex) at that cell.
# - A curvature of 0 means the surface is straight and constant in whatever angle it's sloped towards.
#
# The Xarray-spatial curvature function returns a raster in units one hundredth (1/100) of the z-factor, or scaling factor (which you can set explicitly in generate _terrain as "zfactor").
# Reasonably expected values in the curvature raster for a hilly area (moderate relief) would be between -0.5 and 0.5, while for steep, rugged mountains (extreme relief) these can range as far as -4 and 4. For certain raster surfaces it is possible to go even larger than that.
#
# Let's generate a terrain with an appropriate z-factor and apply the curvature function to it. Then, we can apply some Numpy filtering (remember, we have access to all those functions) to highlight steeper and gentler curves in the slopes.
# Stacking these with the hillshaded and plain terrains gives us a fuller picture of the slopes.
# +
from xrspatial import curvature

# Regenerate terrain with an explicit z-factor so curvature units are predictable.
terrain_z_one = generate_terrain(canvas=cvs, zfactor=1)
curv = curvature(terrain_z_one)
curv_hi, curv_low = curv.copy(), curv.copy()
# Mask to the steep (1..4) and gentle (0.5..1) curvature bands; NaN elsewhere.
curv_hi.data = np.where(np.logical_and(curv_hi.data > 1, curv_hi.data < 4), 1, np.nan)
curv_low.data = np.where(np.logical_and(curv_low.data > 0.5, curv_low.data < 1), 1, np.nan)

# Red = high curvature, green = low, over hillshaded terrain.
stack(shade(terrain, cmap=['black', 'white'], how='linear'),
shade(illuminated, cmap=['black', 'white'], how='linear', alpha=128),
shade(curv_hi, cmap='red', how='log', alpha=200),
shade(curv_low, cmap='green', how='log', alpha=200))
# -
# ## Aspect
#
# [Aspect](https://en.wikipedia.org/wiki/Aspect_(geography)) is the orientation of a slope, measured clockwise in degrees from 0 to 360, where 0 is north-facing, 90 is east-facing, 180 is south-facing, and 270 is west-facing.
#
# The Xarray-spatial aspect function returns the aspect in degrees for each cell in an elevation terrain.
#
# We can apply aspect to our terrain, then use Numpy to filter out only slopes facing close to North. Then, we can stack that with the hillshaded and plain terrains.
# (Note: the printout images are from a North point-of-view.)
# +
from xrspatial import aspect

north_faces = aspect(terrain)
# North-facing slopes: aspect within 10 degrees of due north (0/360 wraps,
# hence the OR of > 350 and < 10). Non-matching cells become NaN.
north_faces.data = np.where(np.logical_or(north_faces.data > 350 ,
north_faces.data < 10), 1, np.nan)

stack(shade(terrain, cmap=['black', 'white'], how='linear'),
shade(illuminated, cmap=['black', 'white'], how='linear', alpha=128),
shade(north_faces, cmap=['aqua'], how='linear', alpha=100))
# -
# ## Viewshed
#
# The `xrspatial.viewshed` function operates on a given aggregate to calculate the viewshed (the visible cells in the raster) for a given viewpoint, or *observer location*.
#
# The visibility model is as follows: Two cells are visible to each other if the line of sight that connects their centers is not blocked at any point by another part of the terrain. If the line of sight does not pass through the cell center, elevation is determined using bilinear interpolation.
# ##### Simple Viewshed Example
#
# - The example below creates a datashader aggregate from a 2d normal distribution.
# - To calculate the viewshed, we need an observer location so we'll set up an aggregate for that as well.
# - Then, we can visualize all of that with hillshade, shade, and stack.
# - The observer location is indicated by the orange point in the upper-left of the plot.
# +
from xrspatial import viewshed

# Observer location (in the same coordinate space as the canvas below).
OBSERVER_X = -12.5
OBSERVER_Y = 10

canvas = ds.Canvas(plot_width=W, plot_height=H,
x_range=(-20, 20), y_range=(-20, 20))

# Rasterize a 2D normal distribution (10M samples) into a point-count grid;
# the counts act as the "elevation" surface for the viewshed demo.
normal_df = pd.DataFrame({
'x': np.random.normal(.5, 1, 10000000),
'y': np.random.normal(.5, 1, 10000000)
})
normal_agg = canvas.points(normal_df, 'x', 'y')
# viewshed interpolates elevations, so the integer counts become float64.
normal_agg.values = normal_agg.values.astype("float64")
normal_shaded = shade(normal_agg)

# Single-point aggregate marking the observer; dynspread grows it to a
# visible orange dot.
observer_df = pd.DataFrame({'x': [OBSERVER_X], 'y': [OBSERVER_Y]})
observer_agg = canvas.points(observer_df, 'x', 'y')
observer_shaded = dynspread(shade(observer_agg, cmap=['orange']),
threshold=1, max_px=4)

normal_illuminated = hillshade(normal_agg)
normal_illuminated_shaded = shade(normal_illuminated, cmap=['black', 'white'],
alpha=128, how='linear')

stack(normal_illuminated_shaded, observer_shaded)
# -
# ##### Calculate viewshed using the observer location
#
# Now we can apply viewshed to the normal_agg, with the observer_agg for the viewpoint. We can then visualize it and stack it with the hillshade and observer rasters.
# +
# Will take some time to run...
# The %time magic both times the call and binds `view` in the notebook scope.
# %time view = viewshed(normal_agg, x=OBSERVER_X, y=OBSERVER_Y)

# Red = cells visible from the observer; layered over hillshade + observer dot.
view_shaded = shade(view, cmap=['white', 'red'], alpha=128, how='linear')
stack(normal_illuminated_shaded, observer_shaded, view_shaded)
# -
# As you can see, the image highlights in red all points visible from the observer location marked with the orange dot. As one might expect, the areas behind the normal distribution *mountain* are blocked from the viewer.
# #### Viewshed on Terrain
# Now we can try using viewshed on our more complicated terrain.
#
# - We'll set up our terrain aggregate and apply hillshade and shade for easy visualization.
# - We'll also set up an observer location aggregate, setting the location to the center, at (x, y) = (0, 0).
# +
from xrspatial import viewshed

# Rebuild the full-size terrain raster for the viewshed-on-terrain demo.
cvs = ds.Canvas(plot_width=W, plot_height=H, x_range=(-20e6, 20e6), y_range=(-20e6, 20e6))
terrain = generate_terrain(canvas=cvs)
terrain_shaded = shade(terrain, cmap=Elevation, alpha=128, how='linear')
illuminated = hillshade(terrain)

# Observer placed at the center of the map.
OBSERVER_X = 0.0
OBSERVER_Y = 0.0
observer_df = pd.DataFrame({'x': [OBSERVER_X],'y': [OBSERVER_Y]})
observer_agg = cvs.points(observer_df, 'x', 'y')
observer_shaded = dynspread(shade(observer_agg, cmap=['orange']),
threshold=1, max_px=4)

stack(shade(illuminated, cmap=['black', 'white'], alpha=128, how='linear'),
terrain_shaded,
observer_shaded)
# -
# Now we can apply viewshed.
# - Notice the use of the `observer_elev` argument, which is the height of the observer above the terrain.
# +
# observer_elev raises the viewpoint 100 units above the terrain surface.
# %time view = viewshed(terrain, x=OBSERVER_X, y=OBSERVER_Y, observer_elev=100)

# Fuchsia = visible cells, over hillshade + elevation + observer marker.
view_shaded = shade(view, cmap='fuchsia', how='linear')
stack(shade(illuminated, cmap=['black', 'white'], alpha=128, how='linear'),
terrain_shaded,
view_shaded,
observer_shaded)
# -
# The fuchsia areas are those visible to an observer of the given height at the indicated orange location.
#
#
#
# ### References
# - An overview of the Surface toolset: https://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/an-overview-of-the-surface-tools.htm
# - <NAME>., and <NAME>., 1998. Principles of Geographical Information Systems (Oxford University Press, New York), p. 406.
# - Making Maps with Noise Functions: https://www.redblobgames.com/maps/terrain-from-noise/
# - How Aspect Works: http://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-aspect-works.htm#ESRI_SECTION1_4198691F8852475A9F4BC71246579FAA
|
examples/user_guide/1_Surface.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # SageMaker で scikit-learn コンテナを使った学習・推論を行う
#
# #### ノートブックに含まれる内容
#
# - scikit-learn を SageMaker で行うときの,基本的なやりかた
#
# #### ノートブックで使われている手法の詳細
#
# - アルゴリズム: DecisionTreeClassifier
# - データ: iris
# ## セットアップ
# +
import sagemaker
from sagemaker import get_execution_role

# Session object handles S3 uploads and job submission for this region/account.
sagemaker_session = sagemaker.Session()

# Get a SageMaker-compatible role used by this Notebook Instance.
role = get_execution_role()
# -
# ## 学習用データを S3 にアップロード
#
# SageMaker の学習時につかうデータは,S3 に置く必要があります.ここでは,ローカルにある iris データをいったん SageMaker SDK の `session` クラスにある `upload_data()` メソッドを使って,ノートブックインスタンスのローカルから S3 にアップロードします.
#
# デフォルトでは SageMaker は `sagemaker-{region}-{your aws account number}` というバケットを使用します.当該バケットがない場合には,自動で新しく作成します.`upload_data()` メソッドの引数に `bucket=XXXX` という形でデータを配置するバケットを指定することが可能です.
#
# 以下を実行する前に,**<span style="color: red;">5 行目の `data/scikit-byo-iris/XX` の `XX` を指定された適切な数字に変更</span>**してください
# +
import numpy as np
import os
from sklearn import datasets

# S3 key prefix under the default SageMaker bucket ('XX' must be replaced;
# see the instructions above this cell).
PREFIX = 'data/scikit-iris/XX'

# Load Iris dataset, then join labels and features (label becomes column 0).
iris = datasets.load_iris()
joined_iris = np.insert(iris.data, 0, iris.target, axis=1)

# Create directory and write csv (5 format specs: 1 label + 4 features).
os.makedirs('./data', exist_ok=True)
np.savetxt('./data/iris.csv', joined_iris, delimiter=',', fmt='%1.1f, %1.3f, %1.3f, %1.3f, %1.3f')

# Uploads the local 'data' dir to S3 and returns the resulting S3 URI.
train_input = sagemaker_session.upload_data('data', key_prefix=PREFIX)
# -
# ## モデルの学習を実行
#
# SageMaker で学習を行うために,SageMaker SDK で `Estimator` オブジェクトをつくります.このオブジェクトには,学習をおこなうために以下の設定が含まれます.その上で,`fit()` メソッドで学習を実施します.学習には 5 分程度時間がかかります.
#
# * __role__: ジョブを実行する IAM role
# * __instance count__: 学習ジョブに使うインスタンス数
# * __instance type__ 学習ジョブに使うインスタンスタイプ
# * __output path__: 学習の成果物が置かれる S3 の場所
# * __session__: すぐ上で作成した,SageMaker セッション
#
# scikit-learn コンテナを使う場合,基本的にはスクリプトの `__main__` 関数内に,学習処理をベタ書きすれば OK です.その際に,入力データの場所やモデルファイルを出力する場所などは,環境変数として SageMaker から引き渡されます.具体的な環境変数の一覧は以下のとおりです.
#
# * `SM_MODEL_DIR`: 出力モデルを配置する,コンテナ内のディレクトリのパスをさします.このパスにモデルファイルを出力しておけば,SageMaker が学習終了時にフォルダの中身を tar.gz にまとめて,S3 に出力してくれます.
# * `SM_OUTPUT_DIR`: モデルファイル以外の出力ファイルを置くためのディレクトリパスです.こちらも同様に,SageMaker が S3 にデータを出力します.
# * `SM_CHANNEL_TRAIN`: `fit()` を実行する際に指定するデータのうち,`train` タグのついた学習用データが置かれる,コンテナ内のディレクトリパスをさします
# * `SM_CHANNEL_TEST`: 上と同様に,`test` タグのついた検証用データのパスをさします.
#
# また `Estimator` オブジェクト作成時に,Hyperparameter として指定したものは,引数としてスクリプトに渡ってくるので,argparse パッケージを用いて取得可能です.
# こちらは `scikit_learn_iris.py` をご覧ください.
#
# もし追加のモジュールインストールが必要な場合には,`source_dir` 直下に `requirements.txt` を配置することで,コンテナ起動時にインストールされます.
# +
from sagemaker.sklearn.estimator import SKLearn

# Entry-point script (run inside the scikit-learn container) and its folder;
# a requirements.txt in source_dir would be installed at container start.
script_path = 'scikit_learn_iris.py'
source_dir='src/'

# NOTE(review): `train_instance_type` is the SageMaker Python SDK v1 parameter
# name; SDK v2 renamed it to `instance_type` — confirm the pinned SDK version.
sklearn = SKLearn(
entry_point=script_path,
source_dir=source_dir,
train_instance_type="ml.m4.xlarge",
role=role,
sagemaker_session=sagemaker_session,
hyperparameters={'max_leaf_nodes': 10})

# 'train' becomes the SM_CHANNEL_TRAIN channel inside the container.
sklearn.fit({'train': train_input})
# -
# ## モデルの推論を実行
#
# 推論を行うために,まず学習したモデルをデプロイします.`deploy()` メソッドでは,デプロイ先エンドポイントのインスタンス数,インスタンスタイプを指定します.また併せて,オプションで(リクエストで渡されるデータの)シリアライザと(レスポンスで返されるデータの)デシリアライザを指定することも可能です.モデルのデプロイには 10 分程度時間がかかります.
# Deploy the trained model to a real-time endpoint (takes ~10 minutes).
predictor = sklearn.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
# +
import pandas as pd
# 推論用のデータを準備
test_X = pd.DataFrame([[5.0, 3.2, 1.2, 4.3], [4.5, 2.3, 1.3, 0.3], [5.7, 2.8, 4.1, 1.3]])
print(test_X)
# -
# 推論を実行して,結果を表示
# Run inference against the deployed endpoint and show the predicted classes.
result = predictor.predict(test_X.values)
print(result)
# ## エンドポイントの削除
#
# 全て終わったら,エンドポイントを削除します.
# Tear down the endpoint so it stops incurring charges.
sklearn.delete_endpoint()
|
supported_frameworks/scikit_learn_iris/scikit_learn_iris.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ZmvoPsz11NKI"
import numpy as np
import pandas as pd
from scipy.stats import entropy
from scipy.io import arff
from google.colab import files
from tensorflow.keras.layers import GaussianNoise
from tensorflow.keras.layers import Dense,Input
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import os
from numpy.random import seed
from sklearn.preprocessing import minmax_scale
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
# deals with noisy data
# + colab={"base_uri": "https://localhost:8080/"} id="V7GrTiI9b7BI" outputId="06adc21e-a0fe-4d03-8d6d-2f965ef92cb6"
from google.colab import drive
drive.mount('/content/drive')
# + id="vjqL1slh1sAB" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="34a15dc9-5a4e-43ea-bfb2-e05aec6acac8"
# Load the UNSW-NB15 train/test splits. Neither CSV has a header row.
test_set=pd.read_csv('/content/UNSW_NB15_testing-set.csv',header=None)
train_set=pd.read_csv('/content/UNSW_NB15_training-set unlabd - UNSW_NB15_training-set.csv',header=None)
train_set.dropna(inplace=True,axis=1)#drop na's
# NOTE(review): dropna(axis=1) is applied to train_set only — confirm the
# test CSV never has empty columns, otherwise its shape could diverge.

# The CSV files have no column heads, so add them. Both splits share the same
# 44-column UNSW-NB15 schema; define it once instead of duplicating the list
# (the original repeated the 44 names verbatim for train and test).
UNSW_COLUMNS = [
    'dur', 'proto', 'service', 'state', 'spkts', 'dpkts', 'sbytes', 'dbytes',
    'rate', 'sttl', 'dttl', 'sload', 'dload', 'sloss', 'dloss', 'sinpkt',
    'dinpkt', 'sjit', 'djit', 'swin', 'stcpb', 'dtcpb', 'dwin', 'tcprtt',
    'synack', 'ackdat', 'smean', 'dmean', 'trans_depth', 'response_body_len',
    'ct_srv_src', 'ct_state_ttl', 'ct_dst_ltm', 'ct_src_dport_ltm',
    'ct_dst_sport_ltm', 'ct_dst_src_ltm', 'is_ftp_login', 'ct_ftp_cmd',
    'ct_flw_http_mthd', 'ct_src_ltm', 'ct_srv_dst', 'is_sm_ips_ports',
    'attack_cat',
    'label',
]
train_set.columns = UNSW_COLUMNS
test_set.columns = UNSW_COLUMNS

# Display the labelled training frame.
train_set
# + id="YRSyggsp6lkJ" colab={"base_uri": "https://localhost:8080/"} outputId="c7df8a83-bf00-47ee-b797-b32c42db9d92"
print('Train data shape', train_set.shape)
print('Test data shape', test_set.shape)
# + id="wKx-bZb3xaDu"
# Integer encodings for the categorical columns.
# NOTE(review): the name `map` shadows the Python builtin; harmless here since
# builtin map() is never called in this notebook, but a rename (state_map)
# would be cleaner.
#state
map = {'ACC':1,'CLO':2,'CON':3,'FIN':4,'INT':5,'REQ':6,'RST':7,'ECO':8,'no':9,'PAR':10,'URN':11}
#service
map2 ={'http':1,'ftp':2,'ftp-data':3,'smtp':4,'pop3':5,'dns':6,'snmp':7,'ssl':8,'dhcp':9,'irc':10,'radius':11,'ssh':12}
#protocol
map3 = {'udp': 1, 'arp': 2, 'tcp': 3, 'igmp': 4, 'ospf': 5, 'sctp': 6, 'gre': 7, 'ggp': 8, 'ip': 9, 'ipnip': 10, 'st2': 11, 'argus': 12, 'chaos': 13, 'egp': 14, 'emcon': 15, 'nvp': 16, 'pup': 17, 'xnet': 18, 'mux': 19, 'dcn': 20, 'hmp': 21, 'prm': 22, 'trunk-1': 23, 'trunk-2': 24, 'xns-idp': 25, 'leaf-1': 26, 'leaf-2': 27, 'irtp': 28, 'rdp': 29, 'netblt': 30, 'mfe-nsp': 31, 'merit-inp': 32, '3pc': 33, 'idpr': 34, 'ddp': 35, 'idpr-cmtp': 36, 'tp++': 37, 'ipv6': 38, 'sdrp': 39, 'ipv6-frag': 40, 'ipv6-route': 41, 'idrp': 42, 'mhrp': 43, 'i-nlsp': 44, 'rvd': 45, 'mobile': 46, 'narp': 47, 'skip': 48, 'tlsp': 49, 'ipv6-no': 50, 'any': 51, 'ipv6-opts': 52, 'cftp': 53, 'sat-expak': 54, 'ippc': 55, 'kryptolan': 56, 'sat-mon': 57, 'cpnx': 58, 'wsn': 59, 'pvp': 60, 'br-sat-mon': 61, 'sun-nd': 62, 'wb-mon': 63, 'vmtp': 64, 'ttp': 65, 'vines': 66, 'nsfnet-igp': 67, 'dgp': 68, 'eigrp': 69, 'tcf': 70, 'sprite-rpc': 71, 'larp': 72, 'mtp': 73, 'ax.25': 74, 'ipip': 75, 'aes-sp3-d': 76, 'micp': 77, 'encap': 78, 'pri-enc': 79, 'gmtp': 80, 'ifmp': 81, 'pnni': 82, 'qnx': 83, 'scps': 84, 'cbt': 85, 'bbn-rcc': 86, 'igp': 87, 'bna': 88, 'swipe': 89, 'visa': 90, 'ipcv': 91, 'cphb': 92, 'iso-tp4': 93, 'wb-expak': 94, 'sep': 95, 'secure-vmtp': 96, 'xtp': 97, 'il': 98, 'rsvp': 99, 'unas': 100, 'fc': 101, 'iso-ip': 102, 'etherip': 103, 'pim': 104, 'aris': 105, 'a/n': 106, 'ipcomp': 107, 'snp': 108, 'compaq-peer': 109, 'ipx-n-ip': 110, 'pgm': 111, 'vrrp': 112, 'l2tp': 113, 'zero': 114, 'ddx': 115, 'iatp': 116, 'stp': 117, 'srp': 118, 'uti': 119, 'sm': 120, 'smp': 121, 'isis': 122, 'ptp': 123, 'fire': 124, 'crtp': 125, 'crudp': 126, 'sccopmce': 127, 'iplt': 128, 'pipe': 129, 'sps': 130,'ib':131,'icmp':132,'rtp':133}
#attack_cat
map4={'Normal': 1, 'Reconnaissance': 2, 'Backdoor': 3, 'DoS': 4, 'Exploits': 5, 'Analysis': 6, 'Fuzzers': 7, 'Worms': 8, 'Shellcode': 9, 'Generic': 10}

# Encode the training set: '-' placeholders become NaN, each categorical value
# is replaced by its integer code, then rows still containing NaN are dropped.
df=pd.DataFrame(train_set)
df=df.replace({'-':np.nan})
df=df.replace(map3)
df=df.replace(map)
df=df.replace(map2)
df=df.replace(map4)
train_set=df
train_set.dropna(inplace=True,axis=0)
# + id="oq8FpW4e0dXI"
# Same encoding for the test set. NOTE(review): here the '-' -> NaN replace
# runs AFTER the category maps (the train cell does it first); none of the
# maps contain '-', so the result is identical, but the order should match
# for readability.
df=pd.DataFrame(test_set)
df=df.replace(map3)
df=df.replace(map)
df=df.replace(map2)
df=df.replace(map4)
df=df.replace({'-':np.nan})
test_set=df
test_set.dropna(inplace=True,axis=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="9SAu2Sxb0Htj" outputId="61aae62d-aae9-48a7-adc0-937ba6db70a9"
test_set
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="u46rLnB-39HU" outputId="aee24a59-9b63-41d3-942b-ae1b6009d176"
train_set
# + id="8ZOe6nlc1DXR"
# Separate the target column from the feature matrices.
# NOTE(review): 'attack_cat' remains among the features; it encodes the attack
# category and presumably correlates strongly with 'label' — confirm this is
# intended and not label leakage.
target = train_set['label']
test_target = test_set['label']
train_set.drop(['label'], axis=1, inplace=True)
test_set.drop(['label'], axis=1, inplace=True)
# + id="qcR23KrvNbY0" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="9bdb70f5-1808-4355-cc78-4e25fcd2e9c9"
from sklearn.preprocessing import StandardScaler
# Standardize features. The scaler is fit on the training set only and then
# applied to both sets, so no test statistics leak into training.
scaler = StandardScaler()
scaler.fit(train_set)
# transform() returns plain numpy arrays; from here on train/test_set are
# no longer DataFrames.
train_set = scaler.transform(train_set)
test_set = scaler.transform(test_set)
pd.DataFrame(train_set)
# + [markdown] id="1XoK4j2_7SYO"
# Stack Denoising AutoEncoder
# + id="yb2W8V1z6Pgy" colab={"base_uri": "https://localhost:8080/"} outputId="6804293a-fe1d-46ce-8b0e-8ef574ce1671"
#adding noise to the dataset
clean_data = train_set
mu, sigma = 0, 0.1
# Draw Gaussian noise shaped like the (scaled) training matrix itself instead
# of hard-coding (35179, 43); this keeps the cell correct if the number of
# rows surviving dropna() — or the feature count — ever changes.
noise = np.random.normal(mu, sigma, clean_data.shape)
print(noise[0:5])
# + id="sb3zT_T-65Ey" colab={"base_uri": "https://localhost:8080/"} outputId="ce25b6d8-03f8-4c36-f182-144fc9e87a57"
# Corrupt the clean inputs additively; the DAE is trained to undo this.
noisy_data = clean_data + noise
print(noisy_data[0:5])
print('train data shape', noisy_data.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="gdbu__d-o_jV" outputId="c24cb18f-f710-4a29-8d4d-0cf327e06b64"
pd.DataFrame(noisy_data)
# + [markdown] id="OTdz5Pvc8VAy"
# Denoising AutoEncoder 1
# + id="WoDNluxOOSoZ"
# Symmetric dense autoencoder:
# input_dim -> 35 -> 30 -> 25 -> 20 (bottleneck) -> 25 -> 30 -> 35 -> input_dim.
# NOTE(review): no activation argument is passed, so every Dense layer is
# linear (Keras default activation=None) — confirm that is intended.
input_dim = noisy_data.shape[1]
feature_dim = [35, 30, 25, 20]
inputs = Input(shape=(input_dim,))
encoded = inputs
encoded = Dense(feature_dim[0], kernel_initializer="uniform")(encoded)
encoded = Dense(feature_dim[1], kernel_initializer="uniform")(encoded)
encoded = Dense(feature_dim[2], kernel_initializer="uniform")(encoded)
encoded = Dense(feature_dim[3], kernel_initializer="uniform")(encoded)
decoded = encoded
decoded = Dense(feature_dim[2], kernel_initializer="uniform")(decoded)
decoded = Dense(feature_dim[1], kernel_initializer="uniform")(decoded)
decoded = Dense(feature_dim[0], kernel_initializer="uniform")(decoded)
decoded = Dense(input_dim, kernel_initializer="uniform")(decoded)
autoencoder = Model(inputs, decoded)
autoencoder.compile(optimizer='adadelta', loss='mse')
# + colab={"base_uri": "https://localhost:8080/"} id="PYA1P3P6tlKX" outputId="63cd8e62-5b89-4349-9f23-6564c302d1bd"
autoencoder.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="-ghMPz70X8QG" outputId="9a553ed5-b480-487c-c3a3-549d7275f9a1"
history = autoencoder.fit(noisy_data,train_set,epochs=50 ,shuffle = False).history
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="2MaB3K3Yo4VM" outputId="80d78c4b-5a74-4d96-9841-3b9b47ca4f23"
# Training-loss curve for the first denoising autoencoder.
plt.plot(history['loss'], linewidth=2, label='Train')
plt.legend(loc='upper right')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
#plt.ylim(ymin=0.70,ymax=1)
plt.show()
# + id="hOySbL_pPmuu"
# Encoder-only model: reuse the trained weights of the first autoencoder's
# four encoding layers to project inputs into the 20-dim bottleneck space.
# Fix: the original re-imported Sequential from the standalone `keras` package
# here, mixing it with tensorflow.keras Dense layers; the tensorflow.keras
# Sequential already imported at the top of this file is used instead.
featuremodel = Sequential()
featuremodel.add(Dense(feature_dim[0], input_shape=(input_dim,), weights=autoencoder.layers[1].get_weights()))
featuremodel.add(Dense(feature_dim[1], weights=autoencoder.layers[2].get_weights()))
featuremodel.add(Dense(feature_dim[2], weights=autoencoder.layers[3].get_weights()))
featuremodel.add(Dense(feature_dim[3], weights=autoencoder.layers[4].get_weights()))
featuremodel.compile(optimizer='adadelta', loss='mse')
# + [markdown] id="fbGA-u-A-giN"
# DAE 2
# + colab={"base_uri": "https://localhost:8080/"} id="6wW2ONRBbwjb" outputId="5a0b36ea-2424-473c-d308-0c9610637b3e"
# Encode the training data with the first DAE's encoder, then corrupt the
# encoded features for training the second DAE.
clean_data1 = featuremodel.predict(train_set)
mu, sigma = 0, 0.1
# Fix 1: size the noise from the encoded matrix itself instead of the
# hard-coded (35179, 20).
noise1 = np.random.normal(mu, sigma, clean_data1.shape)
# Fix 2: the original printed the stale first-stage `noise` here; show the
# noise actually generated for this stage.
print(noise1[0:5])
# + colab={"base_uri": "https://localhost:8080/"} id="XAnIuoglbwND" outputId="44a204a1-5fd2-42cf-ee01-2437d789735f"
noisy_data1 = clean_data1 + noise1
print(noisy_data1[0:5])
print('train data shape', noisy_data1.shape)
# + id="-_NOqzIycP4q"
# Second symmetric DAE operating on the 20-dim encoded features:
# input_dim1 -> 18 -> 14 -> 10 -> 6 (bottleneck) -> 10 -> 14 -> 18 -> input_dim1.
# NOTE(review): as with the first DAE, no activation is given, so all layers
# are linear.
input_dim1 = noisy_data1.shape[1]
feature_dim1 = [18, 14, 10, 6]
inputs1 = Input(shape=(input_dim1,))
encoded1 = inputs1
encoded1 = Dense(feature_dim1[0], kernel_initializer="uniform")(encoded1)
encoded1 = Dense(feature_dim1[1], kernel_initializer="uniform")(encoded1)
encoded1 = Dense(feature_dim1[2], kernel_initializer="uniform")(encoded1)
encoded1 = Dense(feature_dim1[3], kernel_initializer="uniform")(encoded1)
decoded1 = encoded1
decoded1 = Dense(feature_dim1[2], kernel_initializer="uniform")(decoded1)
decoded1 = Dense(feature_dim1[1], kernel_initializer="uniform")(decoded1)
decoded1 = Dense(feature_dim1[0], kernel_initializer="uniform")(decoded1)
decoded1 = Dense(input_dim1, kernel_initializer="uniform")(decoded1)
autoencoder1 = Model(inputs1, decoded1)
autoencoder1.compile(optimizer='adadelta', loss='mse')
# + colab={"base_uri": "https://localhost:8080/"} id="zsfTadcWt5Si" outputId="48b5edb9-5e26-4d86-8677-b8ffffeb9737"
autoencoder1.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="p_UiLSpVcudA" outputId="68fc55d5-7daf-48e4-a763-253639d20b6a"
history1 = autoencoder1.fit(noisy_data1,featuremodel.predict(train_set),epochs=50, shuffle = False).history
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="1Ql7SAE_pRCC" outputId="fe7a66ae-4e87-4f07-8eb2-e4439b203328"
# Training-loss curve for the second denoising autoencoder.
plt.plot(history1['loss'], linewidth=2, label='Train')
plt.legend(loc='upper right')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
#plt.ylim(ymin=0.70,ymax=1)
plt.show()
# + id="B17vrKRSdBXV"
# Second encoder-only model built from autoencoder1's trained encoding layers,
# projecting the 20-dim features down to 6 dims.
# NOTE(review): `featuremode2` is presumably a typo for `featuremodel2`; the
# name is kept because later cells reference it as-is.
featuremode2 = Sequential()
featuremode2.add(Dense(feature_dim1[0], input_shape=(input_dim1,), weights=autoencoder1.layers[1].get_weights()))
featuremode2.add(Dense(feature_dim1[1], weights=autoencoder1.layers[2].get_weights()))
featuremode2.add(Dense(feature_dim1[2], weights=autoencoder1.layers[3].get_weights()))
featuremode2.add(Dense(feature_dim1[3], weights=autoencoder1.layers[4].get_weights()))
featuremode2.compile(optimizer='adadelta', loss='mse')
# + colab={"base_uri": "https://localhost:8080/"} id="ByKssmAuSMit" outputId="afbdde75-46de-4a2a-d895-e2c131d1aee0"
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
# Linear SVM (hinge loss, L2 penalty) trained by SGD on the stacked-DAE
# features: raw input -> featuremodel (20 dims) -> featuremode2 (6 dims).
clf = SGDClassifier(loss="hinge", penalty="l2")
clf.fit(featuremode2.predict(featuremodel.predict(train_set)), target)
y_pred = clf.predict(featuremode2.predict(featuremodel.predict(test_set)))
print('Accuracy: {:.2f}'.format(accuracy_score(test_target, y_pred)))
# + id="bzI7UZcNU19u" colab={"base_uri": "https://localhost:8080/", "height": 392} outputId="af3f074c-b3bc-40df-e59f-af9e883bb08f"
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from sklearn.metrics import mean_squared_error
# Overlay predicted vs. actual labels across the test set, then report MSE.
# NOTE(review): assuming binary 0/1 labels, this MSE is simply the
# misclassification rate; the accuracy printed above is the clearer metric.
plot1 = plt.figure(figsize=(12,6))
plt.plot(y_pred, label='Predicted')
plt.plot(test_target, label='Actual')
plt.legend(prop={'size': 16})
plt.show()
print('Mean Squared Error :',mean_squared_error(test_target, y_pred))
|
Network Intrusion Detection using Deep Learning/NetworkIntrusionDetection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 02: Numbers and Strings Exercises
#
# These exercises are to help you become more familar working with numbers and strings in Python.
#
# ## Ex. 1: Guessing Game
#
# 1. Generate a random number between 1 and 10.
# 2. Ask the user to guess the number and tell them whether they guessed too high/low or guessed the number.
# 3. Keep the game going until the user guesses the number or types `exit` (hint: `while True:`)
# 4. Keep track of how many guesses the user takes and when the game ends, print this to the console.
# +
# Your solution goes here
# -
# ## Ex. 2: String List
#
# 1. Ask the user for a string (this can be anything) and print whether the string is a palindrome or not.
# +
# Your solution goes here
# -
# ## Ex. 3: Rock, Paper, Scissors
#
# 1. Make a two-player rock-paper-scissors game.
#
# Hint: ask for each player to input a play, compare, print a message of congratulations to the winner and ask if they want to start a new game. Remember: rock > scissors > paper > rock.
# +
# Your solution goes here
# -
# ## Ex. 4: Fibonacci
#
# 1. Write a program that asks the user how many Fibonacci numbers to generate and then generate that many.
# 2. Try and use functions to complete this task.
#
# Hint: the Fibonacci sequence is a sequence of numbers where the next number is the sum of the previous two. It begins at 1, then 1 again, then 1+1=2 so the next number is 2, 2+1=3 so then we move to 3, then 5, etc. so we have 1, 1, 2, 3, 5, 8, 13, ...
# +
# Your solution goes here
# -
# ## Ex. 5: Reverse word
#
# 1. Write a program (with functions) that gets a long string containing multiple words from the user.
# 2. Print the same string back to the user with the _words_ in the _reverse order._ For instance, "My name is Ethan" becomes "Ethan is name My".
# +
# Your solution goes here
# -
# ## Ex. 6: Password generator
#
# 1. Write a program that generates passwords. Be creative - use a mix of lowercase and uppercase letters, numbers and symbols.
# 2. Make sure passwords are random and a new one is generated each time you run the program.
# +
# Your solution goes here
# -
# ## Ex. 7: Check for Primality
#
# Every good prime number generator starts with code that can check whether a given number is prime or not (so it knows which numbers to output and which to ignore).
#
# 1. Ask the user for a number and determine whether it is prime or not.
# 2. To do this, you might write code that asks the user for a number and then prints out a list of all the numbers that divide evenly into that number (e.g. 18/9 gives a whole number, so 9 is a divisor of 18 and it would be printed).
# 3. Recall that a prime number's only divisors are 1 and itself - you can then perform a logical test to see whether this is true of a given number, thereby seeing if it is prime or not.
# +
# Your solution goes here
# -
|
02 Numbers and Strings/02_Exercises.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dictionaries
import unittest
# Create a function, `make_planet_gravities` that:
# - takes no arguments
# - returns a dictionary with the following keys
# ```
# sun, jupyter, saturn, uranus, neptune, earth, venus, mars, mercury, pluto
# ```
# - ...and the corresponding values (gravities in m/s^2):
# ```
# 274, 24.79, 10.445, 8.69, 11.15, 9.81, 8.872, 3.7, 3.7, 0.61
# ```
#
# - Call the function and store the result in a variable named `planet_gravities`.
# - Print the result to the terminal.
# +
def make_planet_gravities():
    """Return a dict mapping body name -> surface gravity in m/s^2."""
    bodies = ('sun', 'jupyter', 'saturn', 'uranus', 'neptune',
              'earth', 'venus', 'mars', 'mercury', 'pluto')
    gravities = (274, 24.79, 10.445, 8.69, 11.15, 9.81, 8.872, 3.7, 3.7, 0.61)
    return dict(zip(bodies, gravities))
# Build the mapping once and display it.
planet_gravities = make_planet_gravities()
print(planet_gravities)
# -
# Create a function, `get_keys` that:
# - takes a dictionary as an argument
# - returns all the keys of the dictionary as a list
#
# Input `planet_gravities` into this function and print the resultant list to the terminal.
# +
def get_keys(d):
    """Return all keys of dictionary *d* as a list, in insertion order.

    Idiom fix: iterating a dict yields its keys, so the list() constructor
    replaces the manual append loop.
    """
    return list(d)
print(get_keys(planet_gravities))
# -
# Create a function, `get_values` that:
# - takes a dictionary as an argument
# - returns all the values of the dictionary as a list
#
# Input `planet_gravities` into this function and print the resultant list to the terminal.
# +
def get_values(d):
    """Return all values of dictionary *d* as a list, in insertion order.

    Idiom fix: list(d.values()) replaces the manual append loop.
    """
    return list(d.values())
print(get_values(planet_gravities))
# -
# Create a function, `add_kv` (add key/value) that:
# - takes the following positional arguments:
# - dict : the input dictionary
# - string : the key to be added
# - float : the value to be added which corresponds to k
# - the function adds the key value pair to the input dictionary and returns _a copy_ of the original dictionary with the item added.
#
# Input `planet_gravities` into this function with a key of `charon` and a value of `0.279` and print the resultant dictionary to the terminal.
# +
def add_kv(d, k, v):
    """Return a shallow copy of *d* with the pair ``k: v`` added.

    The input dictionary *d* itself is left unmodified.
    """
    return {**d, k: v}
print(add_kv(planet_gravities, 'charon', 0.279))
# -
# Create a function, `rm_k` (remove key) that:
# - takes the following positional arguments:
# - dict : the input dictionary
# - string : the key to be removed
# - the function removes the key value pair from the input dictionary and returns _a copy_ of the original dictionary with the item removed.
#
# Input `planet_gravities` into this function with a key of `earth` and print the resultant dictionary to the terminal.
# +
def rm_k(d, k):
    """Return a shallow copy of *d* with key *k* removed.

    The input dictionary is left untouched; a missing key raises KeyError.
    Fix: the original returned a ``(removed_value, copy)`` tuple, but the
    exercise specification above asks for just the copy with the item removed.
    """
    dc = d.copy()
    del dc[k]  # KeyError here signals a missing key to the caller
    return dc
print(rm_k(planet_gravities, 'earth'))
# -
# Create a function, `scale_grav` that:
# - takes the following positional arguments:
# - dict : the input dictionary to be modified
# - float : a multiplier value
# - the function scales each value (gravity) by the input multiplier value and returns _a copy_ of the original dictionary with the scaled gravities.
#
# Input `planet_gravities` into this function with a multiplier of `1.5` and print the resultant dictionary to the terminal.
# +
def scale_grav(d, multiplier):
    """Return a copy of *d* with every value multiplied by *multiplier*.

    Keys are preserved; the original dictionary is not modified.
    """
    return {name: gravity * multiplier for name, gravity in d.items()}
print(scale_grav(planet_gravities, 1.5))
# -
# #### Unit Tests
# Run the cell below to determine if your functions were correctly written.
#
# You will see the following if all your functions were correctly written:
# ```
# ......
# Currently testing 'test_is_word_palindrome'...
# Currently testing 'test_make_unique'...
# Currently testing 'test_max_of_three'...
# Currently testing 'test_multiply_list'...
# Currently testing 'test_reverse_string'...
# Currently testing 'test_sum_of_list'...
#
# ----------------------------------------------------------------------
# Ran 6 tests in 0.008s
#
# OK
# ```
#
#
# If you have an error in your function, you will see an output similar to the following:
# ```
# ..F...
# Currently testing 'test_is_word_palindrome'...
# Currently testing 'test_make_unique'...
# Currently testing 'test_max_of_three'...
# Currently testing 'test_multiply_list'...
# Currently testing 'test_reverse_string'...
# Currently testing 'test_sum_of_list'...
#
# ======================================================================
# FAIL: test_max_of_three (__main__.TestAllFunctions)
# ----------------------------------------------------------------------
# Traceback (most recent call last):
# File "<ipython-input-10-e39454a386b1>", line 17, in test_max_of_three
# self.assertEqual(max_of_three(3, 6, -5), 6)
# AssertionError: -5 != 6
#
# ----------------------------------------------------------------------
# Ran 6 tests in 0.021s
#
# FAILED (failures=1)
# ```
#
# The error we've encountered in the above output occurred at test 3 of 6 (note how the `F` occurrs at the 3rd dot of 6 dots along the top of the output). Our function `test_max_of_three` returned a value of `-5` but the test we used expected a value of `6`. We will need to revisit our function `test_max_of_three` to ensure it returns a value of `6` when passed values of `3`, `6`, and `-5`.
# +
class TestAllFunctions(unittest.TestCase):
    """Unit tests for the dictionary exercise functions defined above."""
    def setUp(self):
        # Announce which test is about to run, then rebuild the fixture
        # dictionary so every test starts from unmodified data.
        print("Currently testing \'{0}\'...".format(self._testMethodName))
        self.planet_gravities = {
            'sun' : 274,
            'jupyter' : 24.79,
            'saturn' : 10.445,
            'uranus' : 8.69,
            'neptune' : 11.15,
            'earth' : 9.81,
            'venus' : 8.872,
            'mars' : 3.7,
            'mercury' : 3.7,
            'pluto' : 0.61
        }
    def test_make_planet_gravities(self):
        # The constructor function must reproduce the fixture exactly.
        self.assertEqual(make_planet_gravities(), self.planet_gravities)
    def test_get_keys(self):
        # Keys must come back as a list in insertion order.
        self.planet_keys = [
            'sun',
            'jupyter',
            'saturn',
            'uranus',
            'neptune',
            'earth',
            'venus',
            'mars',
            'mercury',
            'pluto'
        ]
        self.assertEqual(get_keys(self.planet_gravities), self.planet_keys)
    def test_get_values(self):
        # Values must come back as a list in insertion order.
        self.planet_values = [274, 24.79, 10.445, \
            8.69, 11.15, 9.81, 8.872, 3.7, 3.7, 0.61]
        self.assertEqual(get_values(self.planet_gravities), self.planet_values)
    def test_add_kv(self):
        # add_kv must return a copy containing the extra 'charon' entry.
        self.added_kv = {
            'sun': 274,
            'jupyter': 24.79,
            'saturn': 10.445,
            'uranus': 8.69,
            'neptune': 11.15,
            'earth': 9.81,
            'venus': 8.872,
            'mars': 3.7,
            'mercury': 3.7,
            'pluto': 0.61,
            'charon': 0.279
        }
        self.assertEqual(add_kv(self.planet_gravities, 'charon', 0.279), self.added_kv)
    def test_rm_k(self):
        # rm_k must return a (removed_value, remaining_dict) tuple.
        self.removed_k = (9.81, {
            'sun': 274,
            'jupyter': 24.79,
            'saturn': 10.445,
            'uranus': 8.69,
            'neptune': 11.15,
            'venus': 8.872,
            'mars': 3.7,
            'mercury': 3.7,
            'pluto': 0.61})
        self.assertEqual(rm_k(self.planet_gravities, 'earth'), self.removed_k)
    def test_scale_grav(self):
        # Every gravity must be multiplied by 1.5 (float artefacts expected).
        self.scaled_grav = {
            'sun': 411.0,
            'jupyter': 37.185,
            'saturn': 15.6675,
            'uranus': 13.035,
            'neptune': 16.725,
            'earth': 14.715,
            'venus': 13.308,
            'mars': 5.550000000000001,
            'mercury': 5.550000000000001,
            'pluto': 0.915
        }
        self.assertEqual(scale_grav(self.planet_gravities, 1.5), self.scaled_grav)
# Run inside the notebook kernel: argv/exit tweaks stop unittest from
# swallowing sys.argv or killing the kernel process.
unittest.main(argv=[''], verbosity=1, exit=False);
# -
|
dictionaries/dictionaries_solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vincev16/Linear-Algebra-58019/blob/main/(LA)%20Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yClj2NZS3d6X"
# ##Question 1. Create a Python code that displays a square matrix whose length is 5 (10 points)
# + colab={"base_uri": "https://localhost:8080/"} id="-HXiekja3XN5" outputId="482964b8-5126-4532-8cdc-0fe8e6864812"
import numpy as np

# Question 1: a 5x5 square matrix, every entry filled with the constant 2.
Matrix1 = np.full(shape=(5, 5), fill_value=2)
print(Matrix1)
# + [markdown] id="AZg-PpZf4rB2"
# ##Question 2. Create a Python code that displays a square matrix whose elements below the principal diagonal are zero (10 points)
# + colab={"base_uri": "https://localhost:8080/"} id="gwCzSjnI4qe6" outputId="156cf02f-e5d3-434e-d478-74039421745d"
# Question 2: numpy's triu() zeroes every element below the principal
# diagonal, giving an upper-triangular matrix.
Matrix2 = np.triu([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(Matrix2)

# Equivalent result, writing the sub-diagonal zeros by hand.
Matrix2_1 = np.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]])
print("\n")
print(Matrix2_1)
# + [markdown] id="csIXIM_j5tL-"
# ##Question 3. Create a Python code that displays a square matrix which is symmetrical (10 points)
# + colab={"base_uri": "https://localhost:8080/"} id="K8G-lrw95u1N" outputId="6e8ad7a1-b9ab-435e-f3dd-c263eaba6a6a"
# Question 3: a constant 3x3 matrix is symmetric by construction
# (it equals its own transpose).
Matrix3 = np.full(shape=(3, 3), fill_value=1)
print(Matrix3)
# + [markdown] id="FaCVF0u25xd_"
# ##Question 4. What is the inverse of matrix C? Show your solution by python coding. (20 points)
# + colab={"base_uri": "https://localhost:8080/"} id="9Q6OFfqX5yxs" outputId="5514d263-8b20-4296-cb25-bd092e7a3087"
# Question 4: the given matrix C.
Matrix4C = np.array([[1, 2, 3],
                     [2, 3, 3],
                     [3, 4, -2]])
print("Matrix C")
print(Matrix4C)

# np.linalg.inv computes C^-1 so that C @ C^-1 = I.
inverseC = np.linalg.inv(Matrix4C)
print("\nInversed Matrix C")
print(inverseC)
# + [markdown] id="-5jfQp6p5zDX"
# ##Question 5. What is the determinant of the given matrix in Question 4? Show your solution by python coding. (20 points)
# + colab={"base_uri": "https://localhost:8080/"} id="v85l-cAx52a8" outputId="9e90a35d-576e-40a7-ae7a-75f3e3a417d6"
# Question 5: determinant of matrix C (defined in the previous cell).
print("Matrix C") #displaying matrix C from the previous cell
print(Matrix4C)
print("\nDeterminant of Matrix C") #displaying the determinant of matrix
# round() strips the float noise from np.linalg.det's LU-based result.
print("|C| =",round(np.linalg.det(Matrix4C))) #using the numpy function to get the determinant
# + [markdown] id="eZ4nABPg52w6"
# ##Question 6. Find the roots of the linear equations by showing its python codes (30 points)
#
# 5X1 + 4X2 + X3 = 3.4
#
# 10X1 + 9X2 + 4X3 = 8.8
#
# 10X1 + 13X2 + 15X3 = 19.2
# + colab={"base_uri": "https://localhost:8080/"} id="TM1WUBBP55fW" outputId="451fcb87-2c5f-431a-d7de-773e93e4f2c6"
# Question 6: coefficient matrix A taken from the three linear equations.
Matrix6 = np.array([[5, 4, 1],
                    [10, 9, 4],
                    [10, 13, 15]])
print("Given Matrix A")
print(Matrix6)

# Invert A so the system A x = b can be solved as x = A^-1 b.
inverse6 = np.linalg.inv(Matrix6)
print("\nInversed Matrix A")
print(inverse6)

# Column vector b of the equations' constants.
MConstant = np.array([[3.4], [8.8], [19.2]])
print("\nMatrix B")
print(MConstant)

print("\n**To get the value or roots of the equation, proceed using Dot Product to Inversed Matrix A and Matrix B**")
# The roots: x = A^-1 . b
dotAB = np.dot(inverse6, MConstant)
print("\nDOT product of Inversed Matrix A and Matrix B")
print(dotAB)

print("\n**For checking if correct, proceed using Dot product to the solved dot product and Matrix A. The answer must be the same as Matrix B**")
# Sanity check: A . x must reproduce b.
check1 = np.dot(Matrix6, dotAB)
print(check1)
|
(LA) Midterm_Exam.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# I look at the formal uncertainty of the coordinates for radio sources.
# +
from astropy.table import Table, join
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Load the statistical information.
# +
# 4-step
# Load the per-solution time-series statistics (ascii logs, one per
# stepwise solution).
ts_stats4 = Table.read("../logs/ts_stat_nju_20210531.log", format="ascii")
# 8-step
ts_stats8 = Table.read("../logs/ts_stat_nju_20210608.log", format="ascii")
# 10-step
ts_stats10 = Table.read("../logs/ts_stat_nju_20210605.log", format="ascii")
# 20-step
ts_stats20 = Table.read("../logs/ts_stat_nju_20210609.log", format="ascii")
# -
# Histogram bin edges: 0 to 0.95 mas in 0.05 mas steps.
bin_array = np.arange(0, 100, 5) * 0.01
# +
# Four stacked panels (4-, 8-, 10-, 20-step solutions), each overlaying
# the R.A. (red) and Dec. (blue) median-formal-error histograms; shared
# x/y axes keep the panels directly comparable.
fig, (ax0, ax1, ax2, ax3) = plt.subplots(figsize=(4, 8),
                                         nrows=4,
                                         sharex=True,
                                         sharey=True)
ax0.hist(ts_stats4["med_err_ra"],
         bins=bin_array,
         edgecolor="r",
         fill=False,
         label="R.A.")
ax0.hist(ts_stats4["med_err_dec"],
         bins=bin_array,
         edgecolor="b",
         fill=False,
         label="Dec.")
ax1.hist(ts_stats8["med_err_ra"],
         bins=bin_array,
         edgecolor="r",
         fill=False,
         label="R.A.")
ax1.hist(ts_stats8["med_err_dec"],
         bins=bin_array,
         edgecolor="b",
         fill=False,
         label="Dec.")
ax2.hist(ts_stats10["med_err_ra"],
         bins=bin_array,
         edgecolor="r",
         fill=False,
         label="R.A.")
ax2.hist(ts_stats10["med_err_dec"],
         bins=bin_array,
         edgecolor="b",
         fill=False,
         label="Dec.")
ax3.hist(ts_stats20["med_err_ra"],
         bins=bin_array,
         edgecolor="r",
         fill=False,
         label="R.A.")
ax3.hist(ts_stats20["med_err_dec"],
         bins=bin_array,
         edgecolor="b",
         fill=False,
         label="Dec.")
ax3.set_xlabel("$\\sigma$ (mas)", fontsize=12)
ax0.set_ylabel("Nb Sources", fontsize=12)
ax1.set_ylabel("Nb Sources", fontsize=12)
ax2.set_ylabel("Nb Sources", fontsize=12)
ax3.set_ylabel("Nb Sources", fontsize=12)
# Tag each panel with its solution name at a fixed data-space position.
loc_x, loc_y = 0.4, 1000
ax0.text(loc_x, loc_y, " 4-step")
ax1.text(loc_x, loc_y, " 8-step")
ax2.text(loc_x, loc_y, "10-step")
ax3.text(loc_x, loc_y, "20-step")
ax0.legend(loc="upper right")
plt.tight_layout()
# -
# Also format the output in a table.
icrf3_def = Table.read("../data/icrf3sx-def-sou.txt", format="ascii")
def simple_sigma_stats(ts_stats, icrf3_def):
    """Summarise median coordinate uncertainties for one solution.

    Computes the median of the ``med_err_ra`` / ``med_err_dec`` columns
    for the whole sample and for the ICRF3-defining subset, and returns
    them concatenated as one fixed-width text line (all-RA, all-Dec,
    def-RA, def-Dec).
    """
    # Restrict to defining sources via a join on the IERS designation.
    def_subset = join(icrf3_def, ts_stats, keys="iers_name")
    cols = ("med_err_ra", "med_err_dec")
    all_medians = [np.median(ts_stats[c]) for c in cols]
    def_medians = [np.median(def_subset[c]) for c in cols]
    fmt = "{:8.3f} {:8.3f} "
    return fmt.format(*all_medians) + fmt.format(*def_medians)
line4 = simple_sigma_stats(ts_stats4, icrf3_def)
line8 = simple_sigma_stats(ts_stats8, icrf3_def)
line10 = simple_sigma_stats(ts_stats10, icrf3_def)
line20 = simple_sigma_stats(ts_stats20, icrf3_def)
print("=======================================================")
print(" All ICRF3 defining")
print(" ----------------- ------------------")
print("Solution RA DEC RA DEC ")
print("R.A.")
print(" 4-step sol {:s}".format(line4))
print(" 8-step sol {:s}".format(line8))
print("10-step sol {:s}".format(line10))
print("20-step sol {:s}".format(line20))
print("-------------------------------------------------------")
print("Unit: mas")
print("The values given are median.")
|
progs/error-distribution-of-coordinate-time-series.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Lab 10
# 1. item 1
# 2. item 2
# 3. item 3
# Sanity-check cell: printing and variable rebinding.
print("hello world")
# [jmu](https://www.jmu.edu/index.shtml)
a = 3
print(a)
a = 5
print(a)
# %matplotlib inline
# +
import numpy as np
import matplotlib.pyplot as plt
# Sample one full period of cosine and sine at 256 evenly spaced points
# and plot both curves on the same axes.
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C)
plt.plot(X, S)
plt.show()
# -
|
Lab 10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2VRipkaPZSvM"
# **Setup PySpark**
# + id="8JdYdFWfZTiS" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620225245141, "user_tz": 240, "elapsed": 35291, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="db3f3199-1ff5-4d25-c9c5-e02f873ca4aa"
import os
# NOTE: lines beginning '# !' / '# %' are jupytext-escaped notebook shell
# magics; in the notebook they form the bodies of the if-statements below.
# This file is therefore not runnable as plain Python.
#Install JAVA JDK
# !apt-get update
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
#Install PySpark
sparkfile = 'spark-3.1.1-bin-hadoop2.7.tgz'
sparkfile_unzip = 'spark-3.1.1-bin-hadoop2.7'
if not os.path.isfile(sparkfile):
# !wget -q http://downloads.apache.org/spark/spark-3.1.1/$sparkfile
if not os.path.isdir(sparkfile_unzip):
# !tar xf $sparkfile
#Setup resources path
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-3.1.1-bin-hadoop2.7"
#Find spark install and locate
# !pip install -q findspark
import findspark
findspark.init()
# + [markdown] id="FnGA4reTZFvL"
# **Cloning the data from github repo.**
# + colab={"base_uri": "https://localhost:8080/"} id="dg4mvXCvY0YO" executionInfo={"status": "ok", "timestamp": 1620225257956, "user_tz": 240, "elapsed": 48091, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="b150df0f-5372-4f1e-c08e-7359486ebe0c"
import os
# Clone the NYC-Crime dataset repo on first run; on later runs just pull
# updates (the '# !' / '# %' lines are jupytext-escaped notebook magics).
git_folder = 'NYC-Crime'
if not os.path.isdir(git_folder):
# !git clone https://github.com/duketran1996/NYC-Crime.git
else:
# %cd NYC-Crime/
# !git pull
# %cd ..
# + [markdown] id="XMcY67hAZKbN"
# **Create spark session**
# + id="QHSqeQRibhQQ" executionInfo={"status": "ok", "timestamp": 1620225263779, "user_tz": 240, "elapsed": 53910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql.functions import *
# Local Spark session for the analysis; web UI pinned to port 4050.
sc = SparkSession.builder\
    .master("local")\
    .appName("Crime Data Analysis")\
    .config('spark.ui.port', '4050')\
    .getOrCreate()
# + [markdown] id="yJKCGuRsqyrN"
# **Load data set as SQLContext**
# + id="RMUS_Tn_q2_k" executionInfo={"status": "ok", "timestamp": 1620225272562, "user_tz": 240, "elapsed": 62690, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import glob
# One cleaned CSV per year (2016-2020); sorting pairs each file with the
# matching name in df_name_list.
files_list = sorted(glob.glob('./NYC-Crime/clean-dataset/*.csv'))
df_name_list = ['CrimeSixteen', 'CrimeSeventeen', 'CrimeEighteen', 'CrimeNineteen', 'CrimeTwenty']
sqlContext = SQLContext(sc)
for i in range(0, len(files_list)):
    # Register each year's data as a SQL temp table named after its year.
    df = sqlContext.read.csv(files_list[i], header=True)
    sqlContext.registerDataFrameAsTable(df, df_name_list[i])
# + [markdown] id="TU1K7a18S2Co"
# **Analysis 1: Overall look between 2016 to 2020**
# + [markdown] id="wNMS4DV-FSrH"
# **Crime rate differences between 2016 to 2020 based on month**
# + id="6kYbml0zFVbz" executionInfo={"status": "ok", "timestamp": 1620225273775, "user_tz": 240, "elapsed": 63899, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Monthly arrest counts per year; result rows are ordered chronologically
# by month via the numeric re-parse in ORDER BY.
crime_years_monthly_list = []
for item in df_name_list:
    query = "SELECT date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'yyyy') as YEAR, date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'MMM') as MONTH, COUNT(*) as TOTAL_CRIME FROM " + item + " GROUP BY MONTH, YEAR HAVING MONTH != 'null' ORDER BY date_format(to_date(MONTH, 'MMM'), 'MM')"
    crime_years_monthly_list.append(sqlContext.sql(query))
# for item in crime_years_monthly_list:
# item.show()
# + [markdown] id="X3LN5rNxVuqX"
# **Convert to pandas dataframe.**
# + id="1ehaZDiy-za9" executionInfo={"status": "ok", "timestamp": 1620225307196, "user_tz": 240, "elapsed": 97316, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Materialise each Spark result as a pandas DataFrame for plotting.
crime_years_monthly_pandas_list = []
for item in crime_years_monthly_list:
    crime_years_monthly_pandas_list.append(item.toPandas())
# + [markdown] id="HJx2TWR4Vz5i"
# **Get month list for sort.**
# + colab={"base_uri": "https://localhost:8080/"} id="bFhHeq9B-sMK" executionInfo={"status": "ok", "timestamp": 1620225307200, "user_tz": 240, "elapsed": 97306, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="114f1d39-93a7-4b10-b610-e6f54abd30ce"
# Chronological month labels, reused below as the chart's axis sort order.
month_list = crime_years_monthly_pandas_list[0]['MONTH'].values
print(month_list)
# + [markdown] id="zMRXMRQhV3Uj"
# **Concat all dataframe vertically.**
# + id="CSNKbOYOS_wV" executionInfo={"status": "ok", "timestamp": 1620225307201, "user_tz": 240, "elapsed": 97304, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import pandas as pd
# Stack the per-year frames into one long DataFrame (index values repeat).
crime_years_monthly_pandas_reduce = pd.concat(crime_years_monthly_pandas_list)
# + [markdown] id="VX76HhrOV97_"
# **Visualize crime rate based on month.**
# + colab={"base_uri": "https://localhost:8080/", "height": 731} id="Z54Le5tGUgr-" executionInfo={"status": "ok", "timestamp": 1620225307649, "user_tz": 240, "elapsed": 97739, "user": {"displayName": "Duc Tran", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="b90742bd-3a52-4aca-bbea-7a9d9e6ce482"
import altair as alt
# Interactive Altair line chart: one line per year; hovering highlights
# the nearest month (rule + point + count label).
line = alt.Chart(crime_years_monthly_pandas_reduce).mark_line().encode(
    x=alt.X('MONTH', title='Month', sort=month_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
    color = alt.Color('YEAR', legend=alt.Legend( title='Years')),
)
# Single-selection bound to mouseover on the MONTH field.
nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['MONTH'], empty='none')
# Invisible points across the chart that carry the selection.
selectors = alt.Chart(crime_years_monthly_pandas_reduce).mark_point().encode(
    x=alt.X('MONTH', title='Month', sort=month_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
    opacity=alt.value(0),
).add_selection(
    nearest
)
points = line.mark_point().encode(
    opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5, fontSize=14, fontWeight=400).encode(
    text=alt.condition(nearest, 'TOTAL_CRIME', alt.value(' ')),
)
# Draw a rule at the location of the selection
rules = alt.Chart(crime_years_monthly_pandas_reduce).mark_rule(color='gray').encode(
    x=alt.X('MONTH', title='Month', sort=month_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
).transform_filter(
    nearest
)
# Put the five layers into a chart and bind the data
crime_rate_month_graph = alt.layer(
    line, selectors, points, text, rules
).configure_title(
    fontSize=20,
    font='Courier',
    anchor='middle',
    color='gray'
).configure_axis(
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).configure_axisY(
    titlePadding=14,
).configure_axisX(
    titlePadding=14,
).configure_view(
    strokeOpacity=0
).configure_legend(
    strokeColor='gray',
    fillColor='#EEEEEE',
    padding=20,
    cornerRadius=10,
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).properties(
    width = 600,
    height = 600,
    title='NYC Crime Rate 2016-2020 Monthly'
)
crime_rate_month_graph
# + [markdown] id="U5GeDANvnevw"
# **Crime rate differences between 2016 to 2020 based on datename**
# + id="r4nAFXckgfSj" executionInfo={"status": "ok", "timestamp": 1620225307986, "user_tz": 240, "elapsed": 98074, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Arrest counts per day-of-week ('EEEE' = full day name) per year.
crime_years_datename_list = []
for item in df_name_list:
    query = "SELECT date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'yyyy') as YEAR, date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'EEEE') as DATENAME, COUNT(*) as TOTAL_CRIME FROM " + item + " GROUP BY DATENAME, YEAR HAVING DATENAME != 'null'"
    crime_years_datename_list.append(sqlContext.sql(query))
#for item in crime_years_datename_list:
#item.show()
# + [markdown] id="LacqkhMBnj3J"
# **Convert to pandas dataframe.**
# + id="GNgyvpx0g7jy" executionInfo={"status": "ok", "timestamp": 1620225329562, "user_tz": 240, "elapsed": 119647, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Materialise each Spark result as a pandas DataFrame.
crime_years_datename_pandas_list = []
for item in crime_years_datename_list:
    crime_years_datename_pandas_list.append(item.toPandas())
# + [markdown] id="B-g7d5OFnpik"
# **Combine all dataframe vertically.**
# + id="4mnk20WQhEOI" executionInfo={"status": "ok", "timestamp": 1620225329569, "user_tz": 240, "elapsed": 119651, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import pandas as pd
# Stack the per-year frames into one long DataFrame.
crime_years_datename_pandas_reduce = pd.concat(crime_years_datename_pandas_list)
# + [markdown] id="Eqi7UdifnvvP"
# **Get datename list in sorted order.**
# + id="x7uDoN_SjfOd" executionInfo={"status": "ok", "timestamp": 1620225329570, "user_tz": 240, "elapsed": 119649, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
datename_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
# + [markdown] id="M9VOVamOny-q"
# **Visualize crime rate based on datename.**
# + colab={"base_uri": "https://localhost:8080/", "height": 772} id="IRppaBQ-hS2E" executionInfo={"status": "ok", "timestamp": 1620225329572, "user_tz": 240, "elapsed": 119635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="4774438e-79da-41bd-9d77-7e3a11e99f9d"
import altair as alt
# Same interactive layered chart as the monthly one, keyed on day-of-week.
line = alt.Chart(crime_years_datename_pandas_reduce).mark_line().encode(
    x=alt.X('DATENAME', title='Day', sort=datename_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
    color = alt.Color('YEAR', legend=alt.Legend( title='Years')),
)
nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['DATENAME'], empty='none')
# Invisible points that carry the hover selection.
selectors = alt.Chart(crime_years_datename_pandas_reduce).mark_point().encode(
    x=alt.X('DATENAME', title='Day', sort=datename_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
    opacity=alt.value(0),
).add_selection(
    nearest
)
points = line.mark_point().encode(
    opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5, fontSize=14, fontWeight=400).encode(
    text=alt.condition(nearest, 'TOTAL_CRIME', alt.value(' ')),
)
# Draw a rule at the location of the selection
rules = alt.Chart(crime_years_datename_pandas_reduce).mark_rule(color='gray').encode(
    x=alt.X('DATENAME', title='Day', sort=datename_list),
    y=alt.Y('TOTAL_CRIME', title='Total',),
).transform_filter(
    nearest
)
# Put the five layers into a chart and bind the data
crime_rate_datename_graph = alt.layer(
    line, selectors, points, text, rules
).configure_title(
    fontSize=20,
    font='Courier',
    anchor='middle',
    color='gray'
).configure_axis(
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).configure_axisY(
    titlePadding=14,
).configure_axisX(
    titlePadding=14,
).configure_view(
    strokeOpacity=0
).configure_legend(
    strokeColor='gray',
    fillColor='#EEEEEE',
    padding=20,
    cornerRadius=10,
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).properties(
    width = 600,
    height = 600,
    title='NYC Crime Rate 2016-2020 Datename'
)
crime_rate_datename_graph
# + [markdown] id="WKkEnuZtrJD1"
# **Crime rate difference between 2016-2020 based on borough.**
# + colab={"base_uri": "https://localhost:8080/"} id="HJ4ZR6eRp-it" executionInfo={"status": "ok", "timestamp": 1620225344937, "user_tz": 240, "elapsed": 134987, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="582d9e35-b857-4b1b-84f1-cefb4a30823d"
# Arrest counts per borough code (ARREST_BORO) per year.
crime_years_borough_list = []
for item in df_name_list:
    query = "SELECT date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'yyyy') as YEAR, ARREST_BORO, COUNT(*) as TOTAL_CRIME FROM " + item + " GROUP BY ARREST_BORO, YEAR HAVING YEAR != 'null' AND ARREST_BORO != 'null' ORDER BY ARREST_BORO"
    crime_years_borough_list.append(sqlContext.sql(query))
for item in crime_years_borough_list:
    item.show()
# + [markdown] id="31OCGZnKwVtM"
# **Convert to pandas dataframe.**
# + id="oNgylliTwGXO" executionInfo={"status": "ok", "timestamp": 1620225362478, "user_tz": 240, "elapsed": 152525, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Materialise each Spark result as a pandas DataFrame.
crime_years_borough_pandas_list = []
for item in crime_years_borough_list:
    crime_years_borough_pandas_list.append(item.toPandas())
# + [markdown] id="qq3spe1twkwL"
# **Combine all dataframe vertically.**
# + id="FsFyIL54wnwQ" executionInfo={"status": "ok", "timestamp": 1620225362479, "user_tz": 240, "elapsed": 152523, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import pandas as pd
# Stack the per-year frames into one long DataFrame.
crime_years_borough_pandas_reduce = pd.concat(crime_years_borough_pandas_list)
# + [markdown] id="5CdKF4Cfwss2"
# **Visualize crime rate based on borough.**
# + colab={"base_uri": "https://localhost:8080/", "height": 779} id="0S9Nl0OCwzpi" executionInfo={"status": "ok", "timestamp": 1620225363111, "user_tz": 240, "elapsed": 153137, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="31fb5f94-cea1-46f6-dd3f-a9d06ec33e59"
import altair as alt
# Same interactive layered chart, keyed on borough code.
line = alt.Chart(crime_years_borough_pandas_reduce).mark_line().encode(
    x=alt.X('ARREST_BORO', title='Borough'),
    y=alt.Y('TOTAL_CRIME', title='Total',),
    color = alt.Color('YEAR', legend=alt.Legend( title='Years')),
)
nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['ARREST_BORO'], empty='none')
# Invisible points that carry the hover selection.
selectors = alt.Chart(crime_years_borough_pandas_reduce).mark_point().encode(
    x=alt.X('ARREST_BORO'),
    y=alt.Y('TOTAL_CRIME'),
    opacity=alt.value(0),
).add_selection(
    nearest
)
points = line.mark_point().encode(
    opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5, fontSize=14, fontWeight=400).encode(
    text=alt.condition(nearest, 'TOTAL_CRIME', alt.value(' ')),
)
# Draw a rule at the location of the selection
rules = alt.Chart(crime_years_borough_pandas_reduce).mark_rule(color='gray').encode(
    x=alt.X('ARREST_BORO'),
    y=alt.Y('TOTAL_CRIME'),
).transform_filter(
    nearest
)
# Put the five layers into a chart and bind the data
crime_rate_borough_graph = alt.layer(
    line, selectors, points, text, rules
).configure_title(
    fontSize=20,
    font='Courier',
    anchor='middle',
    color='gray'
).configure_axis(
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).configure_axisY(
    titlePadding=14,
).configure_axisX(
    titlePadding=14,
).configure_view(
    strokeOpacity=0
).configure_legend(
    strokeColor='gray',
    fillColor='#EEEEEE',
    padding=20,
    cornerRadius=10,
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).properties(
    width = 600,
    height = 600,
    title='NYC Crime Rate 2016-2020 Borough'
)
crime_rate_borough_graph
# + [markdown] id="bqTv2633y5gQ"
# **Normalize data with borough population.**
# + colab={"base_uri": "https://localhost:8080/", "height": 824} id="icaRHI9By_Si" executionInfo={"status": "ok", "timestamp": 1620225363112, "user_tz": 240, "elapsed": 153123, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="2364f139-0d0d-4ff4-86b7-ca29ac0ef795"
import pandas as pd
# 2020 borough population figures from NYC Open Data.
pop_df=pd.read_csv('https://data.cityofnewyork.us/resource/xywu-7bv9.csv')
pop_twenty = pop_df[['borough', '_2020']]
# NOTE(review): this join aligns rows by positional index, not by borough
# identity -- ARREST_BORO codes (e.g. B/K/M/Q/S) are never matched to the
# 'borough' names, so rows may pair with the wrong population; verify.
crime_pop_borough = crime_years_borough_pandas_reduce.join(pop_twenty, lsuffix='ARREST_BORO', rsuffix='borough')
# Crimes per 1000 residents; note rounding happens BEFORE the x1000 scale,
# limiting the result's precision to 0.1.
crime_pop_borough['norm'] = (crime_pop_borough['TOTAL_CRIME'] / crime_pop_borough['_2020']).round(4) * 1000
crime_pop_borough
# + colab={"base_uri": "https://localhost:8080/", "height": 779} id="1aeU2UHtEp7j" executionInfo={"status": "ok", "timestamp": 1620225363112, "user_tz": 240, "elapsed": 153108, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="79f96619-5767-45e3-ece8-72d94a5bf790"
import altair as alt
# Same interactive layered chart, using the population-normalised rate.
line = alt.Chart(crime_pop_borough).mark_line().encode(
    x=alt.X('ARREST_BORO', title='Borough'),
    y=alt.Y('norm', title='Total',),
    color = alt.Color('YEAR', legend=alt.Legend( title='Years')),
)
nearest = alt.selection(type='single', nearest=True, on='mouseover', fields=['ARREST_BORO'], empty='none')
# Invisible points that carry the hover selection.
selectors = alt.Chart(crime_pop_borough).mark_point().encode(
    x=alt.X('ARREST_BORO'),
    y=alt.Y('norm'),
    opacity=alt.value(0),
).add_selection(
    nearest
)
points = line.mark_point().encode(
    opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5, fontSize=14, fontWeight=400).encode(
    text=alt.condition(nearest, 'norm', alt.value(' ')),
)
# Draw a rule at the location of the selection
rules = alt.Chart(crime_pop_borough).mark_rule(color='gray').encode(
    x=alt.X('ARREST_BORO'),
    y=alt.Y('norm'),
).transform_filter(
    nearest
)
# Put the five layers into a chart and bind the data
crime_rate_borough_norm_graph = alt.layer(
    line, selectors, points, text, rules
).configure_title(
    fontSize=20,
    font='Courier',
    anchor='middle',
    color='gray'
).configure_axis(
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).configure_axisY(
    titlePadding=14,
).configure_axisX(
    titlePadding=14,
).configure_view(
    strokeOpacity=0
).configure_legend(
    strokeColor='gray',
    fillColor='#EEEEEE',
    padding=20,
    cornerRadius=10,
    titleFontSize=14,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).properties(
    width = 600,
    height = 600,
    title='NYC Crime Rate 2016-2020 Per 1000 population'
)
crime_rate_borough_norm_graph
# + [markdown] id="pB5RICRHGzKa"
# **Analysis 2: Top list of crime 2016 to 2020**
# + [markdown] id="GIqmppFvIq3i"
# **Top 5 crime each year.**
# + colab={"base_uri": "https://localhost:8080/"} id="LOeLtwK7G4TD" executionInfo={"status": "ok", "timestamp": 1620225376049, "user_tz": 240, "elapsed": 166030, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="ee110bb9-a4d2-4956-c401-577d98e9a4e2"
# Top five offence descriptions per year, ranked by arrest count.
crime_top_ofns_list = []
for item in df_name_list:
    query = "SELECT date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'yyyy') as YEAR, OFNS_DESC, COUNT(OFNS_DESC) as TOTAL_CRIME FROM " + item + " GROUP BY OFNS_DESC, YEAR HAVING YEAR != 'null' AND OFNS_DESC != 'null' ORDER BY TOTAL_CRIME DESC LIMIT 5"
    crime_top_ofns_list.append(sqlContext.sql(query))
for item in crime_top_ofns_list:
    item.show()
# + [markdown] id="VNOXxshqIvqF"
# **Convert to pandas dataframe.**
# + id="QSRvvZMBIy-U" executionInfo={"status": "ok", "timestamp": 1620225387716, "user_tz": 240, "elapsed": 177694, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Materialise each Spark result as a pandas DataFrame.
crime_top_ofns_pandas_list = []
for item in crime_top_ofns_list:
    crime_top_ofns_pandas_list.append(item.toPandas())
# + [markdown] id="Fqm2mwhmI9ns"
# **Combine all dataframe vertically.**
# + id="Iy6flm_UI9w7" executionInfo={"status": "ok", "timestamp": 1620225387722, "user_tz": 240, "elapsed": 177698, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import pandas as pd
crime_top_ofns_pandas_reduce = pd.concat(crime_top_ofns_pandas_list)
# + colab={"base_uri": "https://localhost:8080/", "height": 649} id="xu2Ctb8qJSe_" executionInfo={"status": "ok", "timestamp": 1620225387722, "user_tz": 240, "elapsed": 177688, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="611f3699-424e-4d6c-9633-00ad7c60e83c"
import altair as alt
# Bubble chart: offense on x, year on y; bubble size and colour redundantly
# encode the arrest count so the chart stays readable without colour.
alt.Chart(crime_top_ofns_pandas_reduce).mark_circle().encode(
    x=alt.X('OFNS_DESC', title='Offense Description'),
    y=alt.Y('YEAR', title='Year'),
    # Size range is in square pixels (area), per Altair's Size scale.
    size=alt.Size('TOTAL_CRIME', legend=alt.Legend(title='Total count'), scale=alt.Scale(range=[100, 2000])),
    color=alt.Color('TOTAL_CRIME', legend=alt.Legend(title='Total count'), scale=alt.Scale(scheme='oranges')),
    tooltip='TOTAL_CRIME'
).properties(
    width=600,
    height=300,
    title='NYC Top 5 Crime 2016-2020'
).configure_title(
    fontSize=20,
    font='Courier',
    anchor='middle',
    color='gray'
).configure_axis(
    titleFontSize=14,
    grid=False,
    labelFontSize=12,
    labelPadding=14,
    labelLimit=300,
).configure_axisY(
    titlePadding=60,
).configure_axisX(
    titlePadding=80,
).configure_view(
    # Hide the default chart border.
    strokeOpacity=0
)
# + [markdown] id="vylqaBKqOVBU"
# **Top 5 crime locations for robbery and burglary.**
# + colab={"base_uri": "https://localhost:8080/"} id="Jyxoyn_6OoJC" executionInfo={"status": "ok", "timestamp": 1620225399600, "user_tz": 240, "elapsed": 189556, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="5d7cc8b6-31f7-424e-d9de-26a17e33d116"
# For each per-year table, find the 5 exact coordinates (Lat/Long pairs) with
# the most BURGLARY or ROBBERY arrests.
crime_top_loc_list = []
for item in df_name_list:
    query = "SELECT date_format(to_date(ARREST_DATE, 'MM/dd/yyyy'), 'yyyy') as YEAR, Latitude as Lat, Longitude as Long, COUNT(Longitude) as TOTAL FROM " + item + " WHERE OFNS_DESC = 'BURGLARY' OR OFNS_DESC = 'ROBBERY' GROUP BY Lat, Long, YEAR ORDER BY COUNT(Longitude) DESC LIMIT 5"
    crime_top_loc_list.append(sqlContext.sql(query))
# Show each year's result for a quick sanity check.
for item in crime_top_loc_list:
    item.show()
# + [markdown] id="UxxRfGRjQCBC"
# **Convert to pandas dataframe.**
# + id="VEBZbtk9QE0l" executionInfo={"status": "ok", "timestamp": 1620225408333, "user_tz": 240, "elapsed": 198287, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
# Materialise every Spark result as a pandas DataFrame (needed by folium).
crime_top_loc_pandas_list = [spark_df.toPandas() for spark_df in crime_top_loc_list]
# + [markdown] id="nmGr_8jaQKLX"
# **Combine all dataframe vertically.**
# + id="xVtR9tFBQOMC" executionInfo={"status": "ok", "timestamp": 1620225408334, "user_tz": 240, "elapsed": 198285, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}}
import pandas as pd
crime_top_loc_pandas_reduce = pd.concat(crime_top_loc_pandas_list)
# + [markdown] id="nrrJsnrUQy2z"
# **Visualize area.**
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="r3vxkCmJQ4CU" executionInfo={"status": "ok", "timestamp": 1620225409999, "user_tz": 240, "elapsed": 199941, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgMz7Fhfk13xHl7DFWMCqhtQd8oNS99ygVlL3EL=s64", "userId": "13435084732873011643"}} outputId="ae6453f7-3a26-4fe6-8db2-e2a0b5353ffd"
import folium
from folium.plugins import MarkerCluster
from folium.features import GeoJsonTooltip
# Base map centred on midtown Manhattan.
nyc_rob_area = folium.Map(location=[40.75,-73.98], tiles='cartodbpositron', zoom_start=12)
# NOTE(review): these three arrays are computed but never used below — the
# MarkerCluster call recomputes the same expressions inline; cleanup candidates.
location_list = crime_top_loc_pandas_reduce[['Lat', 'Long']].values.astype(float)
total_list = crime_top_loc_pandas_reduce['TOTAL'].values.astype(str)
year_list = crime_top_loc_pandas_reduce['YEAR'].values.astype(str)
# NYC zip-code polygons drawn as a translucent overlay; hovering shows the
# post-office name and borough from the GeoJSON properties.
folium.Choropleth(
    geo_data='https://raw.githubusercontent.com/fedhere/PUI2015_EC/master/mam1612_EC/nyc-zip-code-tabulation-areas-polygons.geojson',
    fill_color="#658cbb",
    fill_opacity=0.5,
    line_opacity=0.5,
).geojson.add_child(GeoJsonTooltip(fields=['PO_NAME', 'borough'], sticky=False)).add_to(nyc_rob_area)
# One clustered marker per hotspot with a year/total popup.
# NOTE(review): `icons` is passed the raw YEAR Series rather than folium Icon
# objects — confirm this renders as intended.
cluster = MarkerCluster(locations=crime_top_loc_pandas_reduce[['Lat', 'Long']].values.astype(float),icons=crime_top_loc_pandas_reduce['YEAR'],overlay=True,control=True,popups="Year: " + crime_top_loc_pandas_reduce['YEAR'] + '\n' + 'Total: ' + crime_top_loc_pandas_reduce['TOTAL'].values.astype(str))
nyc_rob_area.add_child(cluster)
|
google-colab-analysis/nyc-crime-data-analysis-five-years.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="sV8g4V38oRq7"
# # **K-Mean Clustering**
# + [markdown] id="8Q5RYGr6obm8"
#
# **Overview**
#
# Online retail is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retail.The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers.
#
# We will be using the online retail transnational dataset to build an RFM clustering and choose the best set of customers.
# + id="tRptr92goyyN" executionInfo={"status": "ok", "timestamp": 1640657320288, "user_tz": -420, "elapsed": 1247, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#Importing Libraries
import pandas as pd
# For Visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# To Scale our data
from sklearn.preprocessing import scale
# To perform KMeans clustering
from sklearn.cluster import KMeans
# To perform Hierarchical clustering
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import cut_tree
# + [markdown] id="ZyTt5rxdo52F"
# **Let's look at KMeans package help to better understand the KMeans implementation in Python using SKLearn**
# + id="04J--PZRo1yK" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640657345563, "user_tz": -420, "elapsed": 398, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="1db10c45-416a-497c-ce4f-7f0240f97776"
# Print sklearn's KMeans API docs (parameters, attributes, methods).
help(KMeans)
# + [markdown] id="6U420-EppJzm"
# ### **Reading the Data Set**
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 74} id="TtxpdeCnzuE7" executionInfo={"status": "ok", "timestamp": 1640658192928, "user_tz": -420, "elapsed": 660805, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="317d1e14-26dd-401b-8eea-174e6da9780d"
from google.colab import files
# Interactive Colab file picker; blocks until the dataset is uploaded
# (read back as "Online+Retail.csv" in the next cell).
uploaded = files.upload()
# + id="tltY0yCzpBZR" executionInfo={"status": "ok", "timestamp": 1640658193943, "user_tz": -420, "elapsed": 1024, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#reading Dataset
# Latin-1 encoding because the export contains non-UTF-8 product descriptions.
retail = pd.read_csv("Online+Retail.csv", sep = ',',encoding = "ISO-8859-1", header= 0)
# parse date
# InvoiceDate arrives as "dd-mm-YYYY HH:MM" text; convert to datetime64.
retail['InvoiceDate'] = pd.to_datetime(retail['InvoiceDate'], format = "%d-%m-%Y %H:%M")
# + [markdown] id="o7QRcFrSpXEH"
# ### **Data quality check and cleaning**
# + id="kHczDQSgpTRi" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640658415542, "user_tz": -420, "elapsed": 380, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="fb9c588b-83ba-4abb-c4ff-8cf90f5c0776"
# Let's look top 5 rows
retail.head()
# + id="HmOtsyHwpfRt" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640658417824, "user_tz": -420, "elapsed": 439, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="1a98e6c3-8a71-40ed-e50d-fb13c3d55d28"
#Sanity Check
# Shape, numeric summary, and dtype/null info. In a notebook cell only the
# last expression's output is displayed.
retail.shape
retail.describe()
retail.info()
# + id="KjeAAzBTpj8N" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640658426040, "user_tz": -420, "elapsed": 927, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="a7163f1f-a150-4cc9-b8fd-348172cbcdfd"
#Na Handling
# Any nulls at all, total null cells, and per-column null percentage.
retail.isnull().values.any()
retail.isnull().values.sum()
retail.isnull().sum()*100/retail.shape[0]
# + id="e4JhAL0Fprvu" executionInfo={"status": "ok", "timestamp": 1640658430131, "user_tz": -420, "elapsed": 570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#dropping the na cells
# Drop every row that contains at least one missing value.
order_wise = retail.dropna()
# + id="skFQzNnDpxHi" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640658452944, "user_tz": -420, "elapsed": 366, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="88507375-1784-41c8-ea46-801a64de0a0c"
#Sanity check
# Confirm the drop worked: new shape and zero remaining nulls.
order_wise.shape
order_wise.isnull().sum()
# + [markdown] id="bTUUeTLjp7P3"
# ### **Extracting R(Recency), F(Frequency), M(Monetary) columns form the data that we imported in.**
# + id="FETDTr9kp2_K" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640658467472, "user_tz": -420, "elapsed": 365, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="628ad861-bca9-44f8-912f-28d559883b17"
#RFM implementation
# Extracting amount by multiplying quantity and unit price and saving the data into amount variable.
# Line total = Quantity * UnitPrice; keeps order_wise's index so the later
# axis=1 concat aligns row-for-row.
amount = pd.DataFrame(order_wise.Quantity * order_wise.UnitPrice, columns = ["Amount"])
amount.head()
# + [markdown] id="lvWq2mS5qLXQ"
# ### **Monetary Value**
# + id="ucUXorP7qY6I" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640658494065, "user_tz": -420, "elapsed": 333, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="b18bf5ed-4c14-4447-b144-89cc23283d2a"
#merging amount in order_wise
# Column-wise concat; indexes align because amount was built from order_wise.
order_wise = pd.concat(objs = [order_wise, amount], axis = 1, ignore_index = False)
#Monetary Function
# Finding total amount spent per customer
monetary = order_wise.groupby("CustomerID").Amount.sum()
monetary = monetary.reset_index()
monetary.head()
# + [markdown] id="qiNnERr1q2pA"
# **If in the above result you get a column with name level_1, uncomment the below code and run it, else ignore it and keeping moving.**
# + id="fm1h6eSHqvge" executionInfo={"status": "ok", "timestamp": 1640658506770, "user_tz": -420, "elapsed": 534, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#monetary.drop(['level_1'], axis = 1, inplace = True)
#monetary.head()
# + [markdown] id="K5qg_PYMrCOh"
# ### **Frequency Value**
# + id="xZSbICIXq-00" executionInfo={"status": "ok", "timestamp": 1640658549755, "user_tz": -420, "elapsed": 348, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#Frequency function
# Keep only the columns needed to count orders per customer.
frequency = order_wise[['CustomerID', 'InvoiceNo']]
# + id="IvmrSv8vrF0c" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640658552478, "user_tz": -420, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="8b50f5fe-34cd-4d54-ab5b-c290aa6b9f10"
# Getting the count of orders made by each customer based on customer ID.
# Note this counts invoice *lines* per customer (InvoiceNo is not
# deduplicated, so a multi-line order contributes once per line).
k = frequency.groupby("CustomerID").InvoiceNo.count()
k = pd.DataFrame(k)
k = k.reset_index()
k.columns = ["CustomerID", "Frequency"]
k.head()
# + [markdown] id="Wvn3bkDirWv_"
# **Merging Amount and Frequency columns**
# + id="aHHAFUiarTMO" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640658557732, "user_tz": -420, "elapsed": 374, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="1e2afe80-c5fe-4d9b-ea65-f6b738b535fb"
#creating master dataset
# Per-customer Amount + Frequency via inner join on CustomerID.
# NOTE(review): `master` is not referenced by any later cell in this
# notebook (RFM is rebuilt from k/monetary/df directly).
master = monetary.merge(k, on = "CustomerID", how = "inner")
master.head()
# + [markdown] id="Wg0DE471rh8U"
# ### **Recency Value**
# + id="_z06NjibremZ" executionInfo={"status": "ok", "timestamp": 1640658616835, "user_tz": -420, "elapsed": 910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
# NOTE(review): this cell is superseded — the next cell recomputes both
# variables from scratch before using them.
recency = order_wise[['CustomerID','InvoiceDate']]
maximum = max(recency.InvoiceDate)
# + id="DQFoEVoDr4LW" colab={"base_uri": "https://localhost:8080/", "height": 310} executionInfo={"status": "ok", "timestamp": 1640659208566, "user_tz": -420, "elapsed": 2746, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="62fcad30-ef6d-4680-87eb-a3040f19228e"
#Generating recency function
# Filtering data for customerid and invoice_date
recency = order_wise[['CustomerID','InvoiceDate']]
# Finding max data
maximum = max(recency.InvoiceDate)
# Adding one more day to the max data, so that the max date will have 1 as the difference and not zero.
maximum = maximum + pd.DateOffset(days=1)
# Per-row "time since purchase" as a timedelta.
# NOTE(review): `recency` is a slice of order_wise, so this column assignment
# can trigger pandas' SettingWithCopyWarning; .copy() above would be safer.
recency['Diff'] = maximum - recency.InvoiceDate
recency.head()
# + id="gHqD1GRbr9gl" executionInfo={"status": "ok", "timestamp": 1640659219452, "user_tz": -420, "elapsed": 422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
# recency by customerid
# Grouped view; the next cells preview and then take the per-customer minimum Diff.
a = recency.groupby('CustomerID')
# + id="eeynDT-JsE6d" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640659224863, "user_tz": -420, "elapsed": 335, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="96d519b3-3e27-4ef8-a976-488e2ea005ef"
a.Diff.min()
# + id="vMwEHdBjsJwt" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640659340007, "user_tz": -420, "elapsed": 354, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="6d5f32d8-0407-47ca-8010-3efd0544fe58"
#Dataframe merging by recency
# Per-customer recency = smallest Diff, i.e. time since their latest purchase.
df = pd.DataFrame(recency.groupby('CustomerID').Diff.min())
df = df.reset_index()
df.columns = ["CustomerID", "Recency"]
df.head()
# + [markdown] id="hqT7KEZ-sRgT"
# ### **RFM combined DataFrame**
# + id="-raM07s2sOXg" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640659353878, "user_tz": -420, "elapsed": 356, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="205c1eb6-54cc-435e-b53a-41db8a5bb4ce"
#Combining all recency, frequency and monetary parameters
# Join the three per-customer tables into one frame; resulting column order
# is CustomerID, Frequency, Amount, Recency.
RFM = k.merge(monetary, on = "CustomerID")
RFM = RFM.merge(df, on = "CustomerID")
RFM.head()
# + [markdown] id="kpgfZX69sc4P"
# ### **Outlier Treatment**
# + id="kvqknnLIsZDD" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1640659358909, "user_tz": -420, "elapsed": 370, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="1a7969c7-a8d1-4ba0-d087-3e505bfb13d9"
# outlier treatment for Amount
def _iqr_filter(frame, col):
    """Keep only rows of *frame* whose *col* lies inside the boxplot whisker
    range [Q1 - 1.5*IQR, Q3 + 1.5*IQR].

    Works for numeric and timedelta columns alike (quantile and comparison
    are defined for both). Returns a filtered view; does not mutate *frame*.
    """
    q1 = frame[col].quantile(0.25)
    q3 = frame[col].quantile(0.75)
    iqr = q3 - q1
    return frame[(frame[col] >= q1 - 1.5 * iqr) & (frame[col] <= q3 + 1.5 * iqr)]

plt.boxplot(RFM.Amount)
RFM = _iqr_filter(RFM, "Amount")
# + id="EsS7VKnispY7" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1640659363241, "user_tz": -420, "elapsed": 371, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="e82a9107-6a81-4621-c0ab-0ad200a84285"
# outlier treatment for Frequency
plt.boxplot(RFM.Frequency)
RFM = _iqr_filter(RFM, "Frequency")
# + id="TZ1dr6YesyRM" colab={"base_uri": "https://localhost:8080/", "height": 276} executionInfo={"status": "ok", "timestamp": 1640659366271, "user_tz": -420, "elapsed": 347, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="394a2a2f-2121-451d-bd71-98fbcffef70e"
# outlier treatment for Recency (a timedelta column at this point)
plt.boxplot(RFM.Recency)
RFM = _iqr_filter(RFM, "Recency")
# + id="JOJ_4e7vs-LG" colab={"base_uri": "https://localhost:8080/", "height": 676} executionInfo={"status": "ok", "timestamp": 1640659369358, "user_tz": -420, "elapsed": 367, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="253813f9-7e48-40d4-d83f-ad9ac82eefd8"
# Preview the filtered RFM table.
RFM.head(20)
# + [markdown] id="TGvH0RL-tMyo"
# ### **Scaling the RFM data**
# + id="oZ3mOUJetIrb" executionInfo={"status": "ok", "timestamp": 1640659383428, "user_tz": -420, "elapsed": 330, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
# standardise all parameters
# Drop the identifier (not a feature) and convert the timedelta Recency to
# integer days so all three columns are numeric before scaling.
RFM_norm1 = RFM.drop("CustomerID", axis=1)
RFM_norm1.Recency = RFM_norm1.Recency.dt.days
from sklearn.preprocessing import StandardScaler
# Z-score each column; fit_transform returns a plain numpy array.
standard_scaler = StandardScaler()
RFM_norm1 = standard_scaler.fit_transform(RFM_norm1)
# + id="jGSMKtLytYBE" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640659386913, "user_tz": -420, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="822a868b-3813-436f-a452-338ad5a1a1fa"
# Back to a DataFrame. Column order matches RFM after dropping CustomerID
# (Frequency, Amount, Recency — see how RFM was assembled earlier).
RFM_norm1 = pd.DataFrame(RFM_norm1)
RFM_norm1.columns = ['Frequency','Amount','Recency']
RFM_norm1.head()
# + [markdown] id="ya4lrCgbHBsv"
# ## Hopkins Statistics:
# The Hopkins statistic, is a statistic which gives a value which indicates the cluster tendency, in other words: how well the data can be clustered.
#
# - If the value is between {0.01, ...,0.3}, the data is regularly spaced.
#
# - If the value is around 0.5, it is random.
#
# - If the value is between {0.7, ..., 0.99}, it has a high tendency to cluster.
#
# Some usefull links to understand Hopkins Statistics:
# - [WikiPedia](https://en.wikipedia.org/wiki/Hopkins_statistic)
# - [Article](http://www.sthda.com/english/articles/29-cluster-validation-essentials/95-assessing-clustering-tendency-essentials/)
# + id="ihsCbMpbtiOX" executionInfo={"status": "ok", "timestamp": 1640662527784, "user_tz": -420, "elapsed": 368, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
from sklearn.neighbors import NearestNeighbors
from random import sample
from numpy.random import uniform
import numpy as np
from math import isnan
def hopkins(X, seed=None):
    """Estimate the Hopkins statistic (cluster tendency) of *X*.

    Compares nearest-neighbour distances of points drawn uniformly inside the
    data's bounding box (ujd) against nearest-neighbour distances of a 10%
    random sample of real points to the rest of the data (wjd):

        H = sum(ujd) / (sum(ujd) + sum(wjd))

    H near 0.5 suggests random data; H near 1 suggests strong clustering.

    Parameters
    ----------
    X : DataFrame or 2-D array-like, shape (n, d)
    seed : int or None
        Optional RNG seed for a reproducible estimate (the statistic is
        stochastic by construction).

    Returns
    -------
    float in [0, 1], or 0 when the statistic is undefined (degenerate data).
    """
    # Local import keeps the function self-contained; cKDTree answers the
    # same Euclidean nearest-neighbour queries as sklearn's NearestNeighbors.
    from scipy.spatial import cKDTree

    data = np.asarray(X, dtype=float)
    n, d = data.shape
    m = max(1, int(0.1 * n))  # sample 10% of the points, at least one
    rng = np.random.default_rng(seed)

    tree = cKDTree(data)
    lo = data.min(axis=0)
    hi = data.max(axis=0)
    picked = rng.choice(n, size=m, replace=False)

    ujd = []
    wjd = []
    for idx in picked:
        # Distance from a uniform random point to its *nearest* real point.
        # (Bug fix: the previous version took the second-nearest neighbour
        # here, which biases H upward.)
        u = rng.uniform(lo, hi, d)
        u_dist, _ = tree.query(u, k=1)
        ujd.append(float(u_dist))
        # Distance from a sampled real point to its nearest *other* point
        # (k=2 because the closest hit is the point itself, at distance 0).
        w_dist, _ = tree.query(data[idx], k=2)
        wjd.append(float(w_dist[1]))

    denom = sum(ujd) + sum(wjd)
    if denom == 0 or isnan(denom):
        # All points coincide (or NaNs crept in): statistic is undefined.
        return 0
    return sum(ujd) / denom
# + id="-m3Y45Uptxa_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640662602760, "user_tz": -420, "elapsed": 936, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="4dd1f641-e75b-42c5-8b90-562049641fb1"
hopkins(RFM_norm1)
# + [markdown] id="Q6krxMwut-rq"
# ### **K-Means with some K**
# + id="I7flLWhht3l1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1640662623062, "user_tz": -420, "elapsed": 924, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="4192a2c8-a3e8-4d70-c30d-4bad1b6488a4"
# Kmeans with K=5
# Fit the model used for the cluster summaries below (k=5, examined against
# the silhouette/elbow analyses in the next cells); capped at 50 iterations.
model_clus5 = KMeans(n_clusters = 5, max_iter=50)
model_clus5.fit(RFM_norm1)
# + [markdown] id="Enba6IaquUtb"
# Silhouette Analysis: silhouette score = (p − q) / max(p, q)
# p is the mean distance to the points in the nearest cluster that the data point is not a part of
# q is the mean intra-cluster distance to all the points in its own cluster.
# The value of the silhouette score range lies between -1 to 1.
# A score closer to 1 indicates that the data point is very similar to other data points in the cluster,
# A score closer to -1 indicates that the data point is not similar to the data points in its cluster.
# + id="mQMH-df8uNhx" executionInfo={"status": "ok", "timestamp": 1640662640581, "user_tz": -420, "elapsed": 13785, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
from sklearn.metrics import silhouette_score
# Silhouette analysis over k = 2..14.
# NOTE(review): the list is named sse_ but it holds (k, silhouette score)
# pairs, not sums of squared errors; kept as-is because the next cell reads it.
sse_ = []
for k in range(2, 15):
    kmeans = KMeans(n_clusters=k).fit(RFM_norm1)
    sse_.append([k, silhouette_score(RFM_norm1, kmeans.labels_)])
# + id="qn7HOR9yuejp" colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1640662646582, "user_tz": -420, "elapsed": 417, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="19c2b86f-5e07-4310-d378-c9f311bd0acb"
plt.plot(pd.DataFrame(sse_)[0], pd.DataFrame(sse_)[1]);
# + [markdown] id="2Eduwn8buubr"
# ### **Sum of Squared Distances**
# + id="AiW8_aphul6h" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1640662679551, "user_tz": -420, "elapsed": 18925, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="b0ad74a1-7c7b-46b4-e297-5ac321334dfb"
# sum of squared distances
# Elbow analysis: model inertia (within-cluster sum of squares) for k = 1..20.
ssd = []
for num_clusters in list(range(1,21)):
    model_clus = KMeans(n_clusters = num_clusters, max_iter=50)
    model_clus.fit(RFM_norm1)
    ssd.append(model_clus.inertia_)
# NOTE: x-axis is the 0-based list position, i.e. k-1, not k itself.
plt.plot(ssd)
# + id="CsRVCWq5u2qP" executionInfo={"status": "ok", "timestamp": 1640662733292, "user_tz": -420, "elapsed": 344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
# analysis of clusters formed
# Reset to a clean 0..n-1 index so the positional concat with the label
# Series below lines the rows up correctly.
RFM.index = pd.RangeIndex(len(RFM.index))
# Attach the cluster label from the k=5 model fitted earlier.
RFM_km = pd.concat([RFM, pd.Series(model_clus5.labels_)], axis=1)
RFM_km.columns = ['CustomerID', 'Frequency', 'Amount', 'Recency', 'ClusterID']
# Recency is still a timedelta in RFM; convert to integer days for averaging.
RFM_km.Recency = RFM_km.Recency.dt.days
# Per-cluster means of each RFM metric.
km_clusters_amount = pd.DataFrame(RFM_km.groupby(["ClusterID"]).Amount.mean())
km_clusters_frequency = pd.DataFrame(RFM_km.groupby(["ClusterID"]).Frequency.mean())
km_clusters_recency = pd.DataFrame(RFM_km.groupby(["ClusterID"]).Recency.mean())
# + id="XuKfCaGSvDEN" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640662736615, "user_tz": -420, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="ee4cdb10-d11d-402b-fe47-4161238b13b6"
# One summary row per cluster: mean Amount, Frequency and Recency.
# NOTE(review): reuses the name `df` (which previously held per-customer Recency).
df = pd.concat([pd.Series([0,1,2,3,4]), km_clusters_amount, km_clusters_frequency, km_clusters_recency], axis=1)
df.columns = ["ClusterID", "Amount_mean", "Frequency_mean", "Recency_mean"]
df.head()
# + id="I4JyJqKxvUxs" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640662741976, "user_tz": -420, "elapsed": 387, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="e8d7d0ca-f571-49fb-b83c-c18fd89b02a0"
# Mean transaction amount per K-Means cluster.
sns.barplot(x=df.ClusterID, y=df.Amount_mean)
# + id="kZbcoWjSvaui" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640662754430, "user_tz": -420, "elapsed": 357, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="bf4dc447-c85f-4828-ba93-cf0b5aebbb0c"
# Mean purchase frequency per cluster. (Fix: this cell previously plotted
# Amount_mean a second time; Frequency matches the hierarchical section's
# Amount/Frequency/Recency sequence.)
sns.barplot(x=df.ClusterID, y=df.Frequency_mean)
# + id="mc5AKYzivk8q" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640662757258, "user_tz": -420, "elapsed": 375, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="d349ce5b-1382-4ede-cf69-cfaf26b85bff"
# Mean recency (days since last purchase) per cluster.
sns.barplot(x=df.ClusterID, y=df.Recency_mean)
# + [markdown] id="tQ-kLfrrvyef"
# <hr>
# + [markdown] id="ISzaF-EPv8QP"
# ### **Heirarchical Clustering**
# + id="NMXDvYSov5W_" colab={"base_uri": "https://localhost:8080/", "height": 268} executionInfo={"status": "ok", "timestamp": 1640662861098, "user_tz": -420, "elapsed": 90177, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="fe4340a0-59e5-4535-d892-d8362662753b"
# hierarchical clustering
# Single linkage (merge on minimum pairwise distance) — tends to produce
# chained, stringy clusters.
mergings = linkage(RFM_norm1, method = "single", metric='euclidean')
dendrogram(mergings)
plt.show()
# + id="B3RV5slfwFPv" colab={"base_uri": "https://localhost:8080/", "height": 268} executionInfo={"status": "ok", "timestamp": 1640662983801, "user_tz": -420, "elapsed": 96299, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="ae776c17-1bb6-4241-e395-49c56e1aefd6"
# Complete linkage (merge on maximum pairwise distance) — more compact
# clusters; this merge tree is the one consumed by cut_tree below.
mergings = linkage(RFM_norm1, method = "complete", metric='euclidean')
dendrogram(mergings)
plt.show()
# + id="UOWYkVIvwUSm" executionInfo={"status": "ok", "timestamp": 1640663177471, "user_tz": -420, "elapsed": 786, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
# Cut the complete-linkage tree into 5 flat clusters; reshape to a 1-D label
# Series aligned positionally with RFM's 0..n-1 index.
clusterCut = pd.Series(cut_tree(mergings, n_clusters = 5).reshape(-1,))
RFM_hc = pd.concat([RFM, clusterCut], axis=1)
RFM_hc.columns = ['CustomerID', 'Frequency', 'Amount', 'Recency', 'ClusterID']
# + id="Ly--RTv6wXJu" executionInfo={"status": "ok", "timestamp": 1640663181158, "user_tz": -420, "elapsed": 422, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}}
#summarise
# Same per-cluster summary as the K-Means section, now on hierarchical labels.
RFM_hc.Recency = RFM_hc.Recency.dt.days
km_clusters_amount = pd.DataFrame(RFM_hc.groupby(["ClusterID"]).Amount.mean())
km_clusters_frequency = pd.DataFrame(RFM_hc.groupby(["ClusterID"]).Frequency.mean())
km_clusters_recency = pd.DataFrame(RFM_hc.groupby(["ClusterID"]).Recency.mean())
# + id="pcXlOh0pwf4_" colab={"base_uri": "https://localhost:8080/", "height": 206} executionInfo={"status": "ok", "timestamp": 1640663183919, "user_tz": -420, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="4ca6c26a-da6c-445c-c145-adea78a2ebaa"
# Per-cluster summary for the hierarchical labels (again reusing the name `df`).
df = pd.concat([pd.Series([0,1,2,3,4]), km_clusters_amount, km_clusters_frequency, km_clusters_recency], axis=1)
df.columns = ["ClusterID", "Amount_mean", "Frequency_mean", "Recency_mean"]
df.head()
# + id="Ng7zQr9Qwky4" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640663196856, "user_tz": -420, "elapsed": 806, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="f44b52d2-f7df-4f58-e804-1736309255c3"
#plotting barplot
sns.barplot(x=df.ClusterID, y=df.Amount_mean)
# + id="PclaCUwEwtSs" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640663200420, "user_tz": -420, "elapsed": 400, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="26d026cf-7819-47cf-cc50-17370d7b7fd3"
sns.barplot(x=df.ClusterID, y=df.Frequency_mean)
# + id="6Ak0gfkyw_Pi" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1640663212910, "user_tz": -420, "elapsed": 374, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04579009044830588081"}} outputId="0b35206c-77da-4b90-d851-371138d4362a"
sns.barplot(x=df.ClusterID, y=df.Recency_mean)
# + id="QcIbsR2GJd5L"
|
Unsupervised/K-mean Clustering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# +
import sagemaker
from sagemaker import get_execution_role
import json
import boto3
# SageMaker session plus the notebook instance's IAM role; all artifacts for
# this exercise live under s3://mastering-ml-aws/chapter2/blazingtext.
sess = sagemaker.Session()
role = get_execution_role()
bucket = "mastering-ml-aws"
prefix = "chapter2/blazingtext"
# +
from os.path import expanduser
SRC_PATH = expanduser("~") + '/SageMaker/mastering-ml-on-aws/chapter2/'
# BlazingText supervised format: each line is "__label__<id> <text>".
# Label 0 comes from dem.txt, label 1 from gop.txt (presumably Democrat vs
# Republican tweets — confirm against the book's chapter 2 data).
with open(SRC_PATH + 'dem.txt', 'r') as file:
    dem_text = ["__label__0 " + line.strip('\n') for line in file]
with open(SRC_PATH + 'gop.txt', 'r') as file:
    gop_text = ["__label__1 " + line.strip('\n') for line in file]
corpus = dem_text + gop_text
from sklearn.model_selection import train_test_split
# 75/25 split with a fixed seed for reproducibility.
corpus_train, corpus_test = train_test_split(corpus, test_size=0.25, random_state=42)
# -
corpus_train_txt = "\n".join(corpus_train)
corpus_test_txt = "\n".join(corpus_test)
# Write the splits to local files that get uploaded to S3 below.
with open('tweets.train', 'w') as file:
    file.write(corpus_train_txt)
with open('tweets.test', 'w') as file:
    file.write(corpus_test_txt)
print(corpus_train_txt[:300])
# +
# Upload the locally written corpus files to S3, one key prefix per channel.
train_path = prefix + '/train'
validation_path = prefix + '/validation'
sess.upload_data(path='tweets.train', bucket=bucket, key_prefix=train_path)
sess.upload_data(path='tweets.test', bucket=bucket, key_prefix=validation_path)
# Full S3 URIs consumed by the training-channel definitions below.
s3_train_data = 's3://{}/{}'.format(bucket, train_path)
s3_validation_data = 's3://{}/{}'.format(bucket, validation_path)
# +
# Resolve the AWS-managed BlazingText container image for us-east-1.
container = sagemaker.amazon.amazon_estimator.get_image_uri('us-east-1', "blazingtext", "latest")
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
# -
# Configure the BlazingText estimator.
# NOTE(review): train_instance_count/train_instance_type/... are SageMaker
# SDK v1 keyword names; SDK v2 renamed them (instance_count, instance_type,
# volume_size, max_run) — confirm which SDK version this notebook pins.
bt_model = sagemaker.estimator.Estimator(container,
                                         role,
                                         train_instance_count=1,
                                         train_instance_type='ml.c4.4xlarge',
                                         train_volume_size = 30,
                                         train_max_run = 360000,
                                         input_mode= 'File',
                                         output_path=s3_output_location,
                                         sagemaker_session=sess)
# +
# Supervised text classification; word_ngrams=2 adds bigram features.
# early_stopping is disabled, so patience/min_epochs have no effect here.
bt_model.set_hyperparameters(mode="supervised",
                            epochs=10,
                            min_count=3,
                            learning_rate=0.05,
                            early_stopping=False,
                            patience=5,
                            min_epochs=5,
                            word_ngrams=2)
# Train/validation channels point at the plain-text files uploaded above.
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
                                        content_type='text/plain', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated',
                                             content_type='text/plain', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data}
# -
# Launches the managed training job; blocks and streams logs until done.
bt_model.fit(inputs=data_channels, logs=True)
# Stand up a real-time inference endpoint for the trained model.
predictor = bt_model.deploy(initial_instance_count = 1,instance_type = 'ml.m4.xlarge')
# List the artifacts the training job wrote to S3.
# !aws s3 ls --recursive s3://mastering-ml-aws/chapter2/blazingtext
# +
# Strip the leading label token plus its trailing space ("__label__X " is
# 11 characters) so the endpoint sees raw tweet text only.
corpus_test_no_labels = [tweet[11:] for tweet in corpus_test]
payload = {"instances" : corpus_test_no_labels}
response = predictor.predict(json.dumps(payload))
predictions = json.loads(response)
print(json.dumps(predictions, indent=2))
# -
# Top predicted class for each test tweet.
predicted_labels = [p['label'][0] for p in predictions]
predicted_labels[:4]
# Ground truth: the first 10 characters of each line are the "__label__X" tag.
actual_labels = [line[:10] for line in corpus_test]
actual_labels[:4]
# Element-wise comparison of truth vs. prediction.
matches = [truth == guess for truth, guess in zip(actual_labels, predicted_labels)]
matches[:4]
# Accuracy on the held-out set.
matches.count(True) / len(matches)
|
Chapter02/train_blazingtext.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jonkrohn/ML-foundations/blob/master/notebooks/1-intro-to-linear-algebra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="aTOLgsbN69-P"
# # Intro to Linear Algebra
# + [markdown] id="yqUB9FTRAxd-"
# This topic, *Intro to Linear Algebra*, is the first in the *Machine Learning Foundations* series.
#
# It is essential because linear algebra lies at the heart of most machine learning approaches and is especially predominant in deep learning, the branch of ML at the forefront of today’s artificial intelligence advances. Through the measured exposition of theory paired with interactive examples, you’ll develop an understanding of how linear algebra is used to solve for unknown values in high-dimensional spaces, thereby enabling machines to recognize patterns and make predictions.
#
# The content covered in *Intro to Linear Algebra* is itself foundational for all the other topics in the Machine Learning Foundations series and it is especially relevant to *Linear Algebra II*.
# + [markdown] id="d4tBvI88BheF"
# Over the course of studying this topic, you'll:
#
# * Understand the fundamentals of linear algebra, a ubiquitous approach for solving for unknowns within high-dimensional spaces.
#
# * Develop a geometric intuition of what’s going on beneath the hood of machine learning algorithms, including those used for deep learning.
# * Be able to more intimately grasp the details of machine learning papers as well as all of the other subjects that underlie ML, including calculus, statistics, and optimization algorithms.
# + [markdown] id="Z68nQ0ekCYhF"
# **Note that this Jupyter notebook is not intended to stand alone. It is the companion code to a lecture or to videos from <NAME>'s [Machine Learning Foundations](https://github.com/jonkrohn/ML-foundations) series, which offer detail on the following:**
#
# *Segment 1: Data Structures for Algebra*
#
# * What Linear Algebra Is
# * A Brief History of Algebra
# * Tensors
# * Scalars
# * Vectors and Vector Transposition
# * Norms and Unit Vectors
# * Basis, Orthogonal, and Orthonormal Vectors
# * Arrays in NumPy
# * Matrices
# * Tensors in TensorFlow and PyTorch
#
# *Segment 2: Common Tensor Operations*
#
# * Tensor Transposition
# * Basic Tensor Arithmetic
# * Reduction
# * The Dot Product
# * Solving Linear Systems
#
# *Segment 3: Matrix Properties*
#
# * The Frobenius Norm
# * Matrix Multiplication
# * Symmetric and Identity Matrices
# * Matrix Inversion
# * Diagonal Matrices
# * Orthogonal Matrices
#
# + [markdown] id="2khww76J5w9n"
# ## Segment 1: Data Structures for Algebra
#
# **Slides used to begin segment, with focus on introducing what linear algebra is, including hands-on paper and pencil exercises.**
# + [markdown] id="NgGMhK4B51oe"
# ### Scalars (Rank 0 Tensors) in Base Python
# + id="ZXnTHDn_EW6b" outputId="c50e094c-88df-47e1-8a65-bccaa9a06397" colab={"base_uri": "https://localhost:8080/", "height": 35}
# A scalar (rank-0 tensor) in base Python is just a numeric literal.
x = 25
x
# + id="VF8Jam76R4KJ" outputId="4d482a0b-3bb8-49a6-c706-8425b753fcf7" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Base Python offers only coarse numeric types (int, float); for finer
# control (e.g., int16, uint8) we need NumPy or another numeric library.
type(x)
# + id="ZBzYlL0mRd-P"
y = 3
# + id="1i-hW0bcReyy" outputId="12481726-7547-46d1-ee8f-44d549cff349" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Adding two ints yields an int.
py_sum = x + y
py_sum
# + id="CpyUxB6XRk6y" outputId="8032fac6-0d7b-479e-e927-5870aac34037" colab={"base_uri": "https://localhost:8080/", "height": 35}
type(py_sum)
# + id="V2UiLj-JR8Ij" outputId="1135c322-bc15-4f79-9bbc-22f543ae5e6d" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Mixing a float operand with an int promotes the result to float.
x_float = 25.0
float_sum = x_float + y
float_sum
# + id="ikOwjp6ASCaf" outputId="bca9e3d2-48a9-4e60-92cb-4d9f9176a24c" colab={"base_uri": "https://localhost:8080/", "height": 35}
type(float_sum)
# + [markdown] id="SgUvioyUz8T2"
# ### Scalars in PyTorch
#
# * PyTorch and TensorFlow are the two most popular *automatic differentiation* libraries (a focus of the [*Calculus I*](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/3-calculus-i.ipynb) and [*Calculus II*](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/4-calculus-ii.ipynb) subjects in the *ML Foundations* series) in Python, itself the most popular programming language in ML
# * PyTorch tensors are designed to be pythonic, i.e., to feel and behave like NumPy arrays
# * The advantage of PyTorch tensors relative to NumPy arrays is that they can easily be used for operations on GPU (see [here](https://pytorch.org/tutorials/beginner/examples_tensor/two_layer_net_tensor.html) for example)
# * Documentation on PyTorch tensors, including available data types, is [here](https://pytorch.org/docs/stable/tensors.html)
# + id="A9Hhazt2zKeD"
import torch
# + id="a211IRW_0-iY" outputId="45b60d66-79cd-4d5a-defd-5e1715fda4a3" colab={"base_uri": "https://localhost:8080/", "height": 35}
# A rank-0 (scalar) PyTorch tensor.
x_pt = torch.tensor(25) # type specification optional, e.g.: dtype=torch.float16
x_pt
# + id="LvxzMa_HhUNB" outputId="920a7f66-ed4f-4d69-e5f3-7e189b43c9d4" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Scalars have an empty shape, torch.Size([]).
x_pt.shape
# + [markdown] id="eUyuZXlWS8T9"
# ### Scalars in TensorFlow (version 2.0 or later)
#
# Tensors created with a wrapper, all of which [you can read about here](https://www.tensorflow.org/guide/tensor):
#
# * `tf.Variable`
# * `tf.constant`
# * `tf.placeholder`
# * `tf.SparseTensor`
#
# Most widely-used is `tf.Variable`, which we'll use here.
#
# As with TF tensors, in PyTorch we can similarly perform operations, and we can easily convert to and from NumPy arrays
#
# Also, a full list of tensor data types is available [here](https://www.tensorflow.org/api_docs/python/tf/dtypes/DType).
# + id="CHBYse_MEqZM"
import tensorflow as tf
# + id="sDv92Nh-NSOU" outputId="0939c0f4-73c9-4f95-e3f1-b7b4f4840887" colab={"base_uri": "https://localhost:8080/", "height": 35}
# A rank-0 (scalar) TF Variable with an explicit 16-bit integer dtype.
x_tf = tf.Variable(25, dtype=tf.int16) # dtype is optional
x_tf
# + id="EmPMBIV9RQjS" outputId="733b58c7-d4d4-490b-e15c-63e529c10d3a" colab={"base_uri": "https://localhost:8080/", "height": 35}
x_tf.shape
# + id="mEILtO9pPctO"
y_tf = tf.Variable(3, dtype=tf.int16)
# + id="dvvWuaw6Ph_D" outputId="900ba114-72ae-4b8e-9965-e8f9d4679b80" colab={"base_uri": "https://localhost:8080/", "height": 35}
# The + operator is overloaded for TF tensors...
x_tf + y_tf
# + id="JZVhRnX9RUGW" outputId="dbf98349-e71d-4e10-c41e-79869b4215b3" colab={"base_uri": "https://localhost:8080/", "height": 35}
# ...equivalently, the explicit tf.add() op.
tf_sum = tf.add(x_tf, y_tf)
tf_sum
# + id="sVbMxT1Ey6Y3" outputId="301dc4a8-fd03-4b0b-a48c-efcd363e90bb" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf_sum.numpy() # note that NumPy operations automatically convert tensors to NumPy arrays, and vice versa
# + id="LXpv69t0y-f6" outputId="df9c28d5-815b-4395-d2cf-9fda2dcae001" colab={"base_uri": "https://localhost:8080/", "height": 35}
type(tf_sum.numpy())
# + id="VszuTUAg1uXk" outputId="9d53b9d4-e826-4b87-f162-8ce5a5a37e9f" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Same value, float dtype instead of int.
tf_float = tf.Variable(25, dtype=tf.float16)
tf_float
# + [markdown] id="B5VRGo1H6010"
# **Return to slides here.**
# + [markdown] id="4CURG9Er6aZI"
# ### Vectors (Rank 1 Tensors) in NumPy
# + id="2GfV14gThgO0"
import numpy as np
# + id="T9ME4kBr4wg0" outputId="32a25d00-af04-41d4-d76f-ec80169d5a55" colab={"base_uri": "https://localhost:8080/", "height": 35}
# A vector (rank-1 tensor); dtype may be given explicitly, e.g. dtype=np.float16.
x = np.array([25, 2, 5])
x
# + id="ZuotxmlZL2wp" outputId="af9064c2-1a1f-4543-d980-5ff61077eb11" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Number of elements.
len(x)
# + id="OlPYy6GOaIVy" outputId="f6688591-ee75-4931-f9ba-10a9b346b165" colab={"base_uri": "https://localhost:8080/", "height": 35}
# 1-D arrays have a single-axis shape, (3,).
x.shape
# + id="sWbYGwObcgtK" outputId="ad98d98c-4789-425c-81ff-6bd24b20ac3f" colab={"base_uri": "https://localhost:8080/", "height": 35}
type(x)
# + id="ME_xuvD_oTPg" outputId="b2c1da3b-0c0b-44bb-ddc2-47b851dde60c" colab={"base_uri": "https://localhost:8080/", "height": 35}
# NumPy indexing is zero-based.
x[0]
# + id="hXmBHZQ-nxFw" outputId="73cd0776-0b4e-48a4-a305-2ea681338032" colab={"base_uri": "https://localhost:8080/", "height": 35}
type(x[0])
# + [markdown] id="NiEofCzYZBrQ"
# ### Vector Transposition
# + id="hxGFNDx6V95l" outputId="8f4c407e-2865-44c1-8c6a-46b80ebc76f7" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Transposing a plain 1-D array is a no-op...
x_t = x.T
x_t
# + id="_f8E9ExDWw4p" outputId="d19351bf-a4dc-47e9-cc33-e85de2e2214e" colab={"base_uri": "https://localhost:8080/", "height": 35}
x_t.shape
# + id="AEd8jB7YcgtT" outputId="15219d6b-5cae-4b1f-b7fb-d006c46e8741" colab={"base_uri": "https://localhost:8080/", "height": 35}
# ...but it does have an effect if we use nested "matrix-style" brackets,
# which make a 1x3 row vector:
y = np.array([[25, 2, 5]])
y
# + id="UHQd92oRcgtV" outputId="5a147118-eb9d-40ce-f5e7-290351a7d1ac" colab={"base_uri": "https://localhost:8080/", "height": 35}
y.shape
# + id="SPi1JqGEXXUc" outputId="52569899-b00f-48ac-869d-fc5dff289850" colab={"base_uri": "https://localhost:8080/", "height": 69}
# A matrix with a length-1 dimension can be transposed, which is
# mathematically equivalent to transposing the vector:
y_t = y.T
y_t
# + id="6rzUv762Yjis" outputId="48bb8d18-46fc-4964-fec9-6a7c60c364f8" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Column vector: 3 rows, 1 column.
y_t.shape
# + id="xVnQMLOrYtra" outputId="2d0a58b7-4fc2-499f-a20c-0c7fd2838e3d" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Transposing again recovers the original row vector.
y_t.T
# + id="QIAA2NLRZIXC" outputId="9e3765e8-b61d-43f8-834f-7b7fabc5ae88" colab={"base_uri": "https://localhost:8080/", "height": 35}
y_t.T.shape
# + [markdown] id="Voj26mSpZLuh"
# ### Zero Vectors
#
# Have no effect if added to another vector
# + id="-46AbOdkZVn_" outputId="07bfd104-0bc6-4419-b9ee-76572d2cbe76" colab={"base_uri": "https://localhost:8080/", "height": 35}
z = np.zeros(3)
z
# + [markdown] id="c6xyYiwSnSGC"
# ### Vectors in PyTorch and TensorFlow
# + id="s2TGDeqXnitZ" outputId="1ab85cfe-877b-40b0-e1c7-25afe688b8be" colab={"base_uri": "https://localhost:8080/", "height": 35}
# The same rank-1 tensor as a PyTorch tensor...
x_pt = torch.tensor([25, 2, 5])
x_pt
# + id="-0jbHgc5iijG" outputId="aae6921a-56ae-4aa8-85b0-2d12a72dd8d3" colab={"base_uri": "https://localhost:8080/", "height": 35}
# ...and as a TensorFlow Variable.
x_tf = tf.Variable([25, 2, 5])
x_tf
# + [markdown] id="rTDDta1Ro4Pf"
# **Return to slides here.**
# + [markdown] id="8fU5qVTI6SLD"
# ### $L^2$ Norm
# + id="lLc2FbGG6SLD" outputId="4dd92b40-2a72-47ef-af1f-49b14b5e7ccc" colab={"base_uri": "https://localhost:8080/", "height": 35}
x
# + id="AN43hsl86SLG" outputId="266c26a7-071e-414b-fa9f-d6a7326a38ee" colab={"base_uri": "https://localhost:8080/", "height": 35}
# L2 norm by hand: square root of the sum of squared elements.
(25**2 + 2**2 + 5**2)**(1/2)
# + id="D9CyWo-l6SLI" outputId="0e276447-a60a-4552-e1a6-102a04eaee35" colab={"base_uri": "https://localhost:8080/", "height": 35}
# np.linalg.norm() defaults to the L2 norm for vectors.
np.linalg.norm(x)
# + [markdown] id="TNEMRi926SLK"
# So, if units in this 3-dimensional vector space are meters, then the vector $x$ has a length of 25.6m
# + [markdown] id="ugQC6k4h6SLK"
# **Return to slides here.**
# + [markdown] id="PwiRlMuC6SLK"
# ### $L^1$ Norm
# + id="lcYKyc5H6SLL" outputId="8ce1f46c-44fc-499a-8e3d-50e66cf77e03" colab={"base_uri": "https://localhost:8080/", "height": 35}
x
# + id="8jNb6nYl6SLM" outputId="a4fba066-c60f-4130-d21a-4c738035d3a9" colab={"base_uri": "https://localhost:8080/", "height": 35}
# L1 norm: sum of absolute values of the elements.
np.abs(25) + np.abs(2) + np.abs(5)
# + [markdown] id="WTPz0EBSAVee"
# **Return to slides here.**
# + [markdown] id="lQP73B916SLP"
# ### Squared $L^2$ Norm
# + id="Qv1ouJ8r6SLP" outputId="20a94c42-92d9-4be8-df83-6f11ffd95c1e" colab={"base_uri": "https://localhost:8080/", "height": 35}
x
# + id="eG3WSB5R6SLT" outputId="36a92da7-7750-412d-a1a2-862edeb1a8fb" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Squared L2 norm: same sum of squares, without the square root.
(25**2 + 2**2 + 5**2)
# + id="bXwzSudS6SLV" outputId="1770e805-d21a-480c-d9a7-39f140d02d26" colab={"base_uri": "https://localhost:8080/", "height": 35}
# we'll cover tensor multiplication more soon but to prove point quickly:
# the squared L2 norm equals the dot product of x with itself.
np.dot(x, x)
# + [markdown] id="q3CIH9ba6SLX"
# **Return to slides here.**
# + [markdown] id="BHWxVPFC6SLX"
# ### Max Norm
# + id="vO-zfvDG6SLX" outputId="921ee98f-0cbe-4e7f-aa53-96ff4daa03e0" colab={"base_uri": "https://localhost:8080/", "height": 35}
x
# + id="vXXLgbyW6SLZ" outputId="0bd62923-316f-444b-bee9-f185d0f22e9c" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Max (L-infinity) norm: largest absolute element value.
np.max([np.abs(25), np.abs(2), np.abs(5)])
# + [markdown] id="3MVTsXA8nNR0"
# **Return to slides here.**
# + [markdown] id="JzKlIpYZcgt9"
# ### Orthogonal Vectors
# + id="4jHg9La-cgt9" outputId="792186ab-6ad2-4eb1-db56-aee0933d53f5" colab={"base_uri": "https://localhost:8080/", "height": 35}
# The standard basis vectors of 2-D space are mutually orthogonal.
i = np.array([1, 0])
i
# + id="3FyLhPK3cguA" outputId="45abe1c6-fd5f-45d7-859c-003aa63922f1" colab={"base_uri": "https://localhost:8080/", "height": 35}
j = np.array([0, 1])
j
# + id="7eQtKhaDcguC" outputId="d4a9cf18-ce1a-4f24-ad36-3e2e3f0a4389" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Orthogonality means a zero dot product (more on the dot op shortly).
np.dot(i, j)
# + [markdown] id="C6eMVPu4nNR7"
# **Return to slides here.**
# + [markdown] id="mK3AZH53o8Br"
# ### Matrices (Rank 2 Tensors) in NumPy
# + id="stk57cmaESW1" outputId="eb73d052-475f-4ad8-fb21-5c5a8d7d55a1" colab={"base_uri": "https://localhost:8080/", "height": 69}
# Use array() with nested brackets:
X = np.array([[25, 2], [5, 26], [3, 7]])
X
# + id="IhDL4L8S6SLc" outputId="410f5a81-7e81-4d02-f45a-730b3eb02585" colab={"base_uri": "https://localhost:8080/", "height": 35}
X.shape
# + id="q3oyaAK36SLe" outputId="651bc3ba-bd8f-4fc6-cda0-f4655d6b4c5b" colab={"base_uri": "https://localhost:8080/", "height": 35}
X.size
# + id="YN9CHzja6SLg" outputId="5ad63eef-b908-4166-b491-0a9e8150933a" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Select left column of matrix X (zero-indexed)
X[:,0]
# + id="ih7nh4qC6SLi" outputId="b2d04fbe-a084-45d0-9663-e3856cd10515" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Select middle row of matrix X:
X[1,:]
# + id="pg7numxP6SLl" outputId="97bf3551-ded1-42a4-8467-9dae260c4631" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Another slicing-by-index example:
X[0:2, 0:2]
# + [markdown] id="HGEfZiBb6SLt"
# ### Matrices in PyTorch
# + id="-bibT9ye6SLt" outputId="423afc74-541d-43bc-dc54-0469f8e3aa19" colab={"base_uri": "https://localhost:8080/", "height": 69}
X_pt = torch.tensor([[25, 2], [5, 26], [3, 7]])
X_pt
# + id="TBPu1L7P6SLv" outputId="8e2779a7-c264-404f-d73e-364352c828ea" colab={"base_uri": "https://localhost:8080/", "height": 35}
X_pt.shape # more pythonic
# + id="4mTj56M16SLw" outputId="6a521308-dc8b-4624-b7ef-a881def82992" colab={"base_uri": "https://localhost:8080/", "height": 35}
X_pt[1,:]
# + [markdown] id="E026fQlD6SLn"
# ### Matrices in TensorFlow
# + id="1gtGH6oA6SLn" outputId="38ab0608-0efd-45dc-df29-3ded6d68e60a" colab={"base_uri": "https://localhost:8080/", "height": 87}
X_tf = tf.Variable([[25, 2], [5, 26], [3, 7]])
X_tf
# + id="4CV_KiTP6SLp" outputId="0ab1fade-f5ee-499e-b365-30ed7ac8ec13" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.rank(X_tf)
# + id="vUsce8tC6SLq" outputId="cc0525bd-30c2-4d49-e992-20ec8c957663" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.shape(X_tf)
# + id="QNpfvNPj6SLr" outputId="8b1778a5-def0-4423-97d9-9609b8fb4369" colab={"base_uri": "https://localhost:8080/", "height": 35}
X_tf[1,:]
# + [markdown] id="CodS4evY6SLy"
# **Return to slides here.**
# + [markdown] id="cMpfujF_6SLy"
# ### Higher-Rank Tensors
#
# As an example, rank 4 tensors are common for images, where each dimension corresponds to:
#
# 1. Number of images in training batch, e.g., 32
# 2. Image height in pixels, e.g., 28 for [MNIST digits](http://yann.lecun.com/exdb/mnist/)
# 3. Image width in pixels, e.g., 28
# 4. Number of color channels, e.g., 3 for full-color images (RGB)
# + id="KSZlICRR6SL1"
# Rank-4 zero tensor shaped like an image batch: (batch, height, width, channels).
images_pt = torch.zeros([32, 28, 28, 3])
# + id="6Dqj0vmh6SL2"
# images_pt
# + id="7TASTVD96SLy"
# Same shape in TensorFlow.
images_tf = tf.zeros([32, 28, 28, 3])
# + id="ftOliyru6SL0" outputId="dc3e4f19-f94a-4064-f858-70b38d8b512a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
images_tf
# + [markdown] id="O3sgkdXZ6SL3"
# **Return to slides here.**
# + [markdown] id="lmG3LEZK6SL4"
# ## Segment 2: Common Tensor Operations
# + [markdown] id="iSHGMCxd6SL4"
# ### Tensor Transposition
# + id="1YN1narR6SL4" outputId="cce2d74b-1452-4e0e-a776-481519e25849" colab={"base_uri": "https://localhost:8080/", "height": 69}
X
# + id="5hf3M_NL6SL5" outputId="255d21fb-01fa-432f-9153-40eaa578369b" colab={"base_uri": "https://localhost:8080/", "height": 52}
X.T
# + id="vyBFN_4g6SL9" outputId="76fe1dc3-b4fa-452e-c635-0307cb398811" colab={"base_uri": "https://localhost:8080/", "height": 52}
X_pt.T
# + id="K2DuDJc_6SL6" outputId="ef49d15b-1166-4675-e0ef-c94712a975ca" colab={"base_uri": "https://localhost:8080/", "height": 69}
tf.transpose(X_tf) # less Pythonic
# + [markdown] id="Hp9P1jx76SL_"
# ### Basic Arithmetical Properties
# + [markdown] id="WxaImEUc6SMA"
# Adding or multiplying with scalar applies operation to all elements and tensor shape is retained:
# + id="yhXGETii6SMA" outputId="d891311d-5dbe-492c-f76d-ce9dae389ec7" colab={"base_uri": "https://localhost:8080/", "height": 69}
X*2
# + id="KnPULtDO6SMC" outputId="e44738ed-5bdd-47bb-9917-8c7ca3599b99" colab={"base_uri": "https://localhost:8080/", "height": 69}
X+2
# + id="MkfC0Gsb6SMD" outputId="05d7e1fc-d097-4701-8bee-892bff259f1b" colab={"base_uri": "https://localhost:8080/", "height": 69}
X*2+2
# + id="04bIDpGj6SMH" outputId="ac1779cf-a951-48f3-9f0c-275f8115cad4" colab={"base_uri": "https://localhost:8080/", "height": 69}
X_pt*2+2 # Python operators are overloaded; could alternatively use torch.mul() or torch.add()
# + id="2oRBSmRL6SMI" outputId="71b70d3d-d457-4537-d0e2-2c48c2a3cff7" colab={"base_uri": "https://localhost:8080/", "height": 69}
torch.add(torch.mul(X_pt, 2), 2)
# + id="OMSb9Otd6SMF" outputId="e0a21743-6f42-4eb7-f80a-cca9eb97b773" colab={"base_uri": "https://localhost:8080/", "height": 87}
X_tf*2+2 # Operators likewise overloaded; could equally use tf.multiply() tf.add()
# + id="5ya2xZ4u6SMG" outputId="a19592c9-c5f6-4cab-b9d1-d12c28bc3649" colab={"base_uri": "https://localhost:8080/", "height": 87}
tf.add(tf.multiply(X_tf, 2), 2)
# + [markdown] id="wt8Ls4076SMK"
# If two tensors have the same size, operations are often by default applied element-wise. This is **not matrix multiplication**, which we'll cover later, but is rather called the **Hadamard product** or simply the **element-wise product**.
#
# The mathematical notation is $A \odot X$
# + id="KUMyU1t46SMK" outputId="ba6f26d2-90b0-47a5-e465-2cb1c8793fdd" colab={"base_uri": "https://localhost:8080/", "height": 69}
X
# + id="UNIbp0P36SML" outputId="d03d1dfe-46c9-46f7-8e3c-750bc18000f0" colab={"base_uri": "https://localhost:8080/", "height": 69}
A = X+2
A
# + id="HE9xPWPdcgu4" outputId="def498dc-945d-45b2-b762-bc2c713a6f43" colab={"base_uri": "https://localhost:8080/", "height": 69}
A + X
# + id="xKyCwGia6SMP" outputId="609718a8-971f-4af4-ce42-f9476331968a" colab={"base_uri": "https://localhost:8080/", "height": 69}
A * X
# + id="B5jXGIBp6SMT"
A_pt = X_pt + 2
# + id="A7k6yxu36SMU" outputId="a5777c5c-9e5e-4303-f87b-c6c44ab8176f" colab={"base_uri": "https://localhost:8080/", "height": 69}
A_pt + X_pt
# + id="r8vOul0m6SMW" outputId="90df4cdb-5ef0-4908-a0d9-0cf4108d1da5" colab={"base_uri": "https://localhost:8080/", "height": 69}
A_pt * X_pt
# + id="rQcBMSb76SMQ"
A_tf = X_tf + 2
# + id="x6s1wtNj6SMR" outputId="c1d6d8b7-fba6-4eee-d777-2bf89b720a27" colab={"base_uri": "https://localhost:8080/", "height": 87}
A_tf + X_tf
# + id="J1D7--296SMS" outputId="753e01ae-2e79-466b-e2bf-c268f852b93b" colab={"base_uri": "https://localhost:8080/", "height": 87}
A_tf * X_tf
# + [markdown] id="FE5f-FEq6SMY"
# ### Reduction
# + [markdown] id="WPJ9FVQF6SMY"
# Calculating the sum across all elements of a tensor is a common operation. For example:
#
# * For vector ***x*** of length *n*, we calculate $\sum_{i=1}^{n} x_i$
# * For matrix ***X*** with *m* by *n* dimensions, we calculate $\sum_{i=1}^{m} \sum_{j=1}^{n} X_{i,j}$
# + id="rXi2stvz6SMZ" outputId="4c248789-9012-40be-9dd9-6dca86bd34f5" colab={"base_uri": "https://localhost:8080/", "height": 69}
X
# + id="W9FKaJbf6SMZ" outputId="f48cc4ef-9600-4143-bc4c-75f7abb67426" colab={"base_uri": "https://localhost:8080/", "height": 35}
X.sum()
# + id="3y9aw7t66SMc" outputId="99267afb-2a29-4678-ee5c-566aa43f00ce" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.sum(X_pt)
# + id="wcjRtFml6SMb" outputId="ab0cd6d8-21f7-48ef-9dc3-84033754357d" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.reduce_sum(X_tf)
# + id="awjH9bOz6SMc" outputId="615450d2-dec2-47a8-eccc-052955cdacbc" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Can also be done along one specific axis alone, e.g.:
X.sum(axis=0) # summing all rows
# + id="n2SASjsn6SMd" outputId="cd452f2d-b6bb-4d0c-e2a9-aa4fd44c1d15" colab={"base_uri": "https://localhost:8080/", "height": 35}
X.sum(axis=1) # summing all columns
# + id="uVnSxvSJ6SMh" outputId="0de2981c-64fe-4321-a9fe-0e2ffe6b1631" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.sum(X_pt, 0)
# + id="IO8drxz36SMe" outputId="1cab75c2-65b4-4465-b4bc-7df791db35f6" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.reduce_sum(X_tf, 1)
# + [markdown] id="gdAe8S4A6SMj"
# Many other operations can be applied with reduction along all or a selection of axes, e.g.:
#
# * maximum
# * minimum
# * mean
# * product
#
# They're fairly straightforward and used less often than summation, so you're welcome to look them up in library docs if you ever need them.
# + [markdown] id="r2eW8S_46SMj"
# ### The Dot Product
# + [markdown] id="LImETgD76SMj"
# If we have two vectors (say, ***x*** and ***y***) with the same length *n*, we can calculate the dot product between them. This is annotated several different ways, including the following:
#
# * $x \cdot y$
# * $x^Ty$
# * $\langle x,y \rangle$
#
# Regardless which notation you use (I prefer the first), the calculation is the same; we calculate products in an element-wise fashion and then sum reductively across the products to a scalar value. That is, $x \cdot y = \sum_{i=1}^{n} x_i y_i$
#
# The dot product is ubiquitous in deep learning: It is performed at every artificial neuron in a deep neural network, which may be made up of millions (or orders of magnitude more) of these neurons.
# + id="HveIE3IDcgvP" outputId="d6056ded-4523-4109-e56a-fdba2ef91227" colab={"base_uri": "https://localhost:8080/", "height": 35}
x
# + id="3ZjkZcvVcgvQ" outputId="8d67a2ff-d796-4bd6-a96e-b83b597d528c" colab={"base_uri": "https://localhost:8080/", "height": 35}
y = np.array([0, 1, 2])
y
# + id="Xu8z0QB0cgvR" outputId="9d6fda4e-382f-4745-c116-011b364c594a" colab={"base_uri": "https://localhost:8080/", "height": 35}
25*0 + 2*1 + 5*2
# + id="ThehRrr8cgvS" outputId="99b12cd0-cba9-430d-9368-528cfd8a6641" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.dot(x, y)
# + id="J5Zdua4xcgvT" outputId="1b174741-ebed-447d-a431-06d7b6d5b8c7" colab={"base_uri": "https://localhost:8080/", "height": 35}
x_pt
# + id="b3vEdroXcgvU" outputId="1dcb5039-8bc3-4fbc-e14c-388b601d6435" colab={"base_uri": "https://localhost:8080/", "height": 35}
y_pt = torch.tensor([0, 1, 2])
y_pt
# + id="F741E5imcgvV" outputId="fc22ab84-5a9d-4b37-b6fe-8614e51c73f9" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.dot(x_pt, y_pt)
# + id="-W5loHc8cgvX" outputId="d36c8a1b-b3a1-46a1-80d5-caf6c07caf09" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.dot(torch.tensor([25, 2, 5.]), torch.tensor([0, 1, 2.]))
# + id="jUwKBiqzcgvY" outputId="74280945-b0f3-422e-af4d-a40923b59a66" colab={"base_uri": "https://localhost:8080/", "height": 35}
x_tf
# + id="Xqt3Rac7cgvZ" outputId="60312b2e-7552-4b2d-c3cb-13109a9cec61" colab={"base_uri": "https://localhost:8080/", "height": 35}
y_tf = tf.Variable([0, 1, 2])
y_tf
# + id="x4pgc5JEcgvc" outputId="0ff9d6ef-edd1-403d-bb8a-df9439c4a4ae" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.reduce_sum(tf.multiply(x_tf, y_tf))
# + [markdown] id="mSmvC1cc6SMj"
# **Return to slides here.**
# + [markdown] id="bYDhomCP6SMj"
# ## Segment 3: Matrix Properties
# + [markdown] id="-HGU_an66SMk"
# ### Frobenius Norm
# + id="pNQHvAqN6SMk" outputId="5e98f76b-0887-4afe-f891-f492f187f9b5" colab={"base_uri": "https://localhost:8080/", "height": 52}
X = np.array([[1, 2], [3, 4]])
X
# + id="T-q-Tzn26SMm" outputId="691a1dc3-35de-475b-9f27-407320d6dffc" colab={"base_uri": "https://localhost:8080/", "height": 35}
(1**2 + 2**2 + 3**2 + 4**2)**(1/2)
# + id="YVG8qiFw6SMn" outputId="6111d4d8-a2a6-4a22-e302-b054bfc6fa6f" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.linalg.norm(X) # same function as for vector L2 norm
# + id="FPnBflKVxyik"
X_pt = torch.tensor([[1, 2], [3, 4.]]) # torch.norm() supports floats only
# + id="NCdTShVyx8z0" outputId="2bf30be6-58b8-465a-8968-e07ea228b0d3" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.norm(X_pt)
# + id="blezf9fLx_nD"
X_tf = tf.Variable([[1, 2], [3, 4.]]) # tf.norm() also supports floats only
# + id="LiCQzyf6ySCZ" outputId="9bcb3f20-c996-49bd-9a18-21a53fd8fa68" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.norm(X_tf)
# + [markdown] id="4c6rjVAf6SMo"
# **Return to slides here.**
# + [markdown] id="OLN-MMIe6SMo"
# ### Matrix Multiplication (with a Vector)
# + id="XJw0j8cr6SMo" outputId="1d4d3bcc-7c51-4071-d135-735a6e27020d" colab={"base_uri": "https://localhost:8080/", "height": 69}
A = np.array([[3, 4], [5, 6], [7, 8]])
A
# + id="zZQ1Aupc6SMq" outputId="31862d41-db28-40c3-daa7-a248340891d9" colab={"base_uri": "https://localhost:8080/", "height": 35}
b = np.array([1, 2])
b
# + id="ZbeVtNyW6SMq" outputId="f86a9235-0580-4f13-c1d5-d3619b8b8fdf" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.dot(A, b) # even though technically dot products are between vectors only
# + id="srVI55X96SMu" outputId="bdf8bced-e4c5-4587-93e3-bfb257af8e7b" colab={"base_uri": "https://localhost:8080/", "height": 69}
A_pt = torch.tensor([[3, 4], [5, 6], [7, 8]])
A_pt
# + id="5SDn71Xc6SMv" outputId="b4d95099-ed65-4d2c-916c-2db177eab698" colab={"base_uri": "https://localhost:8080/", "height": 35}
b_pt = torch.tensor([1, 2])
b_pt
# + id="OIeoJlsh6SMx" outputId="8c794bad-928e-4a44-e075-2772a245f4ef" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.matmul(A_pt, b_pt) # like np.dot(), automatically infers dims in order to perform dot product, matvec, or matrix multiplication
# + id="pnob9GkB6SMs" outputId="50235b0f-41d3-4a81-a40d-e70c3697fd91" colab={"base_uri": "https://localhost:8080/", "height": 87}
A_tf = tf.Variable([[3, 4], [5, 6], [7, 8]])
A_tf
# + id="vYtWxf8K6SMt" outputId="68aeb6f6-05bb-44ae-e5f2-e4532bede6b5" colab={"base_uri": "https://localhost:8080/", "height": 35}
b_tf = tf.Variable([1, 2])
b_tf
# + id="NGBImWRH6SMt" outputId="44b217b8-c736-42b8-a07b-e5ddc544e279" colab={"base_uri": "https://localhost:8080/", "height": 35}
tf.linalg.matvec(A_tf, b_tf)
# + [markdown] id="kzjZmdRR6SMy"
# **Return to slides here.**
# + [markdown] id="21ySqay36SM5"
# ### Matrix Multiplication (with Two Matrices)
# + id="0YRG1Ig2cgvo" outputId="3746b0bd-b7ff-4430-cd13-a588862989b6" colab={"base_uri": "https://localhost:8080/", "height": 69}
A
# + id="DyOEZk_c6SM5" outputId="b1520650-0a9f-4996-ebba-a26026b881a7" colab={"base_uri": "https://localhost:8080/", "height": 52}
B = np.array([[1, 9], [2, 0]])
B
# + id="SfKuNxH-6SM6" outputId="d7b44626-2887-41bd-f381-f1ddcecdabc7" colab={"base_uri": "https://localhost:8080/", "height": 69}
np.dot(A, B)
# + [markdown] id="WcnQMF0s6SNB"
# Note that matrix multiplication is not "commutative" (i.e., $AB \neq BA$) so uncommenting the following line will throw a size mismatch error:
# + id="_mwBGOXO6SNB"
# np.dot(B, A)
# + id="JrrvPoNE6SM9" outputId="95af7679-aa4b-423c-c46b-658aba212137" colab={"base_uri": "https://localhost:8080/", "height": 52}
B_pt = torch.from_numpy(B) # much cleaner than TF conversion
B_pt
# + id="Z6PfwCvX6SM-" outputId="2ed88b4d-be43-4412-e94a-f3a81458e656" colab={"base_uri": "https://localhost:8080/", "height": 52}
# another neat way to create the same tensor with transposition:
B_pt = torch.tensor([[1, 2], [9, 0]]).T
B_pt
# + id="16ZNRaVe6SM_" outputId="38d41772-1cdd-4a49-9eb1-31f5f25874e2" colab={"base_uri": "https://localhost:8080/", "height": 69}
torch.matmul(A_pt, B_pt) # no need to change functions, unlike in TF
# + id="rkymNjE46SM8" outputId="42fb3eac-f4cc-4093-cd07-c3ce9ce2c56e" colab={"base_uri": "https://localhost:8080/", "height": 69}
B_tf = tf.convert_to_tensor(B, dtype=tf.int32)
B_tf
# + id="rslTzFRk6SM8" outputId="be5ffdeb-4196-4cad-bf1a-ef6f0dce6a12" colab={"base_uri": "https://localhost:8080/", "height": 87}
tf.matmul(A_tf, B_tf)
# + [markdown] id="0eBiTmPp6SNC"
# **Return to slides here.**
# + [markdown] id="L2H9F-DQ6SMz"
# ### Symmetric Matrices
# + id="5YsPoWo76SMz" outputId="3cb55090-90e0-4c0d-be4a-ab62a928754d" colab={"base_uri": "https://localhost:8080/", "height": 69}
# A symmetric matrix: equal to its own transpose
X_sym = np.array([[0, 1, 2], [1, 7, 8], [2, 8, 9]])
X_sym
# + id="Skg1wSQVcgv2" outputId="0dd4aa0a-92bf-4e22-e371-ae0d4c49ad8f" colab={"base_uri": "https://localhost:8080/", "height": 69}
X_sym.T
# + id="Jv40-i9H6SM1" outputId="f5b75bbf-a3a3-4ea0-95ea-7c0d72543f1d" colab={"base_uri": "https://localhost:8080/", "height": 69}
X_sym.T == X_sym  # element-wise comparison: all True for a symmetric matrix
# + [markdown] id="QZFoUFkq6SM2"
# **Return to slides here.**
# + [markdown] id="Mq_c3ftZ6SM2"
# ### Identity Matrices
# + id="KVSNbH-Z6SM2" outputId="7c514f4a-569d-4eaf-93fd-65b7d17e89b4" colab={"base_uri": "https://localhost:8080/", "height": 69}
# 3x3 identity matrix (the name `I` is conventional math notation, though linters flag it)
I = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
I
# + id="wcoPDhvR6SM3" outputId="b1fd7e3d-2936-4a57-cf0c-3b28c797166c" colab={"base_uri": "https://localhost:8080/", "height": 35}
x_pt = torch.tensor([25, 2, 5])
x_pt
# + id="tuA4RsMv6SM4" outputId="f427e618-f184-4a2b-8f46-ae568e61b550" colab={"base_uri": "https://localhost:8080/", "height": 35}
torch.matmul(I, x_pt)  # multiplying by the identity leaves the vector unchanged
# + [markdown] id="bgDiOYLk6SM5"
# **Return to slides here.**
# + [markdown] id="3S_6Yfdkcgv7"
# ### Answers to Matrix Multiplication Qs
# + id="pINsKNxH6SNC" outputId="d28d530a-e1bf-4c41-945d-4131b2a0b1f1" colab={"base_uri": "https://localhost:8080/", "height": 69}
# 3x3 matrix for the exercise
M_q = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
M_q
# + id="gfjWd8OO6SNE" outputId="1957427d-cd41-4aed-99e9-7e4700cd4385" colab={"base_uri": "https://localhost:8080/", "height": 69}
# Build a 3x2 matrix by transposing a 2x3 literal
V_q = torch.tensor([[-1, 1, -2], [0, 1, 2]]).T
V_q
# + id="boSkaV2M6SNF" outputId="048362f1-00a3-4643-fffc-5030f8dfca11" colab={"base_uri": "https://localhost:8080/", "height": 69}
torch.matmul(M_q, V_q)  # (3x3) @ (3x2) -> 3x2 result
# + [markdown] id="slSNKUcN6SNG"
# ### Matrix Inversion
# + id="EW0i5ZRk6SNG" outputId="fe9eee64-a7d9-425f-de24-ca2cc2bd967e" colab={"base_uri": "https://localhost:8080/", "height": 52}
X = np.array([[4, 2], [-5, -3]])
X
# + id="hTYpxaWR6SNI" outputId="7735d415-c3de-4f62-a3e1-4222ae14f23e" colab={"base_uri": "https://localhost:8080/", "height": 52}
Xinv = np.linalg.inv(X)
Xinv
# + id="Q5sQqFaz6SNK" outputId="5528e276-f35b-4771-f21b-fd45031372b3" colab={"base_uri": "https://localhost:8080/", "height": 35}
y = np.array([4, -7])
y
# + id="PK7m6F1I6SNL" outputId="750ef1a3-5388-4a23-dd66-551f069bf60f" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Solve Xw = y via the explicit inverse (for teaching; np.linalg.solve(X, y)
# is the numerically preferred route in production code)
w = np.dot(Xinv, y)
w
# + [markdown] id="fyBOHgdccgwD"
# Show that $y = Xw$:
# + id="SVBojjwacgwD" outputId="82f96043-b6d9-442f-934f-954cf4ee1021" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.dot(X, w)  # should reproduce y
# + [markdown] id="14uhePTna7ZV"
# In PyTorch and TensorFlow:
# + id="7qo-SAvaansp" outputId="7f9f8bc5-7274-4e0d-8cff-424fdfe55098" colab={"base_uri": "https://localhost:8080/", "height": 52}
torch.inverse(torch.tensor([[4, 2], [-5, -3.]])) # float type
# + id="uqtyF3Jqaz4l" outputId="d29a0288-631d-4a92-afde-7459874ec735" colab={"base_uri": "https://localhost:8080/", "height": 69}
tf.linalg.inv(tf.Variable([[4, 2], [-5, -3.]])) # also float
# + [markdown] id="AMxROg326SNN"
# **Return to slides here.**
# + [markdown] id="N8ZxpgcN6SNO"
# ### Matrix Inversion Where No Solution
# + id="RYORHY4E6SNO" outputId="da133687-ddc4-46d2-c228-65998b054ada" colab={"base_uri": "https://localhost:8080/", "height": 52}
# The second row is 2x the first, so the matrix is rank-deficient (det = 0)
X = np.array([[-4, 1], [-8, 2]])
X
# + id="o7GcISdI6SNP"
# Uncommenting the following line results in a "singular matrix" error
# Xinv = np.linalg.inv(X)
# + [markdown] id="uk485dwoI021"
# Feel free to try inverting a non-square matrix; this will throw an error too.
|
notebooks/1-intro-to-linear-algebra.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="bqzDofBT-Bmp"
# Different success indexes' calculation for classification algorithm/ <NAME> (206760274) & <NAME> (208774026)
#
# ---
#
#
#
# The data "digits" has 10 classes, 10 digits and 64 characters.
#
# https://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_digits.html
#
# https://www.datacamp.com/community/tutorials/understanding-logistic-regression-python
# + id="mVz47xT4989z" executionInfo={"status": "ok", "timestamp": 1631964595499, "user_tz": -180, "elapsed": 1448, "user": {"displayName": "\u05e2\u05e0\u05d1\u05e8 \u05e9\u05de\u05d9\u05d9\u05d4", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "02964396275905919772"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from google.colab import drive
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
import seaborn as sn
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# + [markdown] id="F8V2mpvPRsHU"
# Bring the data from sklearn:
# + colab={"base_uri": "https://localhost:8080/"} id="oMhHOxvJRsty" executionInfo={"status": "ok", "timestamp": 1631964741097, "user_tz": -180, "elapsed": 261, "user": {"displayName": "\u05e2\u05e0\u05d1\u05e8 \u05e9\u05de\u05d9\u05d9\u05d4", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "02964396275905919772"}} outputId="5371ea20-4277-4077-b977-8e41c3213a93"
# Load the full 10-class digits dataset: samples are flattened pixel feature vectors
data, labels = load_digits(return_X_y = True)
(n_samples, n_features), n_digits = data.shape, np.unique(labels).size
print(f"# digits: {n_digits}; # samples: {n_samples}; # features {n_features}")
#n_samples, n_features =
print(labels.shape)
print(data.shape)
# + [markdown] id="rv5AuT6J_VAV"
# Here we chose 2 classes and separated them by logic_regression:
# + id="mQt9IMgG_bk-" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627758068430, "user_tz": -180, "elapsed": 394, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhAAlScA5LNx4a_8K3ad_Q_ugHl3nMQR0bCFHq7=s64", "userId": "14654848345289467053"}} outputId="9f589b53-6840-43f9-b062-8145c5dd7338"
# Reload only two digit classes (n_class=2) so the task is binary classification
data, labels = load_digits(return_X_y = True, n_class = 2)
(n_samples, n_features), n_digits = data.shape, np.unique(labels).size
# Normalize the data
scaler = StandardScaler()
normalized_data = scaler.fit_transform(data)
# Separate the data into 2 groups: train and test
# NOTE(review): data[0] is the first sample's raw pixel values, not header text;
# using it as DataFrame column labels "works" but looks unintended — confirm.
header_x = data[0]
X_scaled_df = pd.DataFrame(normalized_data, columns = header_x)
X_train, X_test, y_train, y_test = train_test_split(X_scaled_df, labels, test_size = 0.25, random_state = 0)
logistic_regression = LogisticRegression()
logistic_regression.fit(X_train, y_train)
# + [markdown] id="2ZlfrDOn_cU9"
# Printing the confusion matrix:
# + id="XGyP0cm5_nxT" colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"status": "ok", "timestamp": 1627758068431, "user_tz": -180, "elapsed": 18, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhAAlScA5LNx4a_8K3ad_Q_ugHl3nMQR0bCFHq7=s64", "userId": "14654848345289467053"}} outputId="8bbe41ef-703b-420f-c405-df4e9a35eed6"
y_pred = logistic_regression.predict(X_test)
# NOTE(review): this rebinds (shadows) sklearn.metrics.confusion_matrix imported above;
# later cells rely on this DataFrame, so the name is kept.
confusion_matrix = pd.crosstab(y_test, y_pred, rownames=['Actual'], colnames=['Predicted'])
# print(y_test)
# print(y_pred)
sn.heatmap(confusion_matrix, annot=True)
print('Accuracy: ', metrics.accuracy_score(y_test, y_pred))
plt.show()
# + [markdown] id="OXbKP-Q6_2QG"
# For different thresholds, calculate the TPR and the FPR (Draw the graphs of the ROC (Receiver Operating Characteristic), random and model with different thresholds):
# + id="unOXYk5DAFD0" colab={"base_uri": "https://localhost:8080/", "height": 332} executionInfo={"status": "ok", "timestamp": 1627758069075, "user_tz": -180, "elapsed": 652, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhAAlScA5LNx4a_8K3ad_Q_ugHl3nMQR0bCFHq7=s64", "userId": "14654848345289467053"}} outputId="94517781-8889-450e-f3b4-a656a4ac2b27"
thresholds = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# pd.crosstab puts Actual on rows and Predicted on columns, and a DataFrame is
# indexed column-first: confusion_matrix[predicted][actual].
TP = confusion_matrix[1][1]  # predicted 1, actual 1
FN = confusion_matrix[0][1]  # predicted 0, actual 1 (fixed: was [1][0], which is FP)
FP = confusion_matrix[1][0]  # predicted 1, actual 0 (fixed: was [0][1], which is FN)
TN = confusion_matrix[0][0]  # predicted 0, actual 0
TPR = TP / (TP + FN)  # sensitivity / recall
FPR = FP / (FP + TN)  # fall-out
print(TPR)
print(FPR)
# Draw the graph for the current TPR & FPR points: ROC
metrics.plot_roc_curve(logistic_regression, X_train, y_train)
# We tried to understand different thresholds...
# Calculate additional 10 points for each threshold from the relevant array, and draw the graph they create ON THE SAME AXIS SYSTEM
# y_pred = logistic_regression.predict_proba(X_test)
# for thresh in range(len(thresholds)):
#   # temp array of binary in y_pred length
#   binary_pred = [None] * len(y_pred)
#   for pred in range(len(y_pred)):
#     # print(y_pred[pred])
#     if y_pred[pred][0] > thresholds[thresh]:
#       binary_pred[pred] = 1
#     else:
#       binary_pred[pred] = 0
#   # confusion matrix for binary array
#   confusion_matrix = pd.crosstab(y_test, binary_pred, rownames=['Actual'], colnames=['Predicted'])
# x=fpr y=tpr for cm for each tresh
# save in array and then figure this array
# figure all the dots
# + [markdown] id="TDXzU8xdAKyn"
# Calculate the AUC (Area Under Curve):
# + id="KfoSMAx7AUpl" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1627758069076, "user_tz": -180, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhAAlScA5LNx4a_8K3ad_Q_ugHl3nMQR0bCFHq7=s64", "userId": "14654848345289467053"}} outputId="ad47e5c8-9b75-4ba8-a8ac-8207508387ee"
# Calculate an integral for the graph ROC model (the one with the curve) and print the result
# NOTE(review): y_pred here holds hard 0/1 labels, so this is a single-threshold AUC;
# roc_auc_score is normally fed probability scores (predict_proba[:, 1]) — confirm intent.
metrics.roc_auc_score(y_test,y_pred)
|
Classification Algorithms/Classification Algorithms.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from thermo import *
from thermo.identifiers import ChemicalMetadataDB
from numpy.testing import assert_allclose
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem.rdMolDescriptors import CalcMolFormula
# Index only the anion TSV; element entries and the default user databases are skipped
db = ChemicalMetadataDB(elements=False, main_db=('anion_db_20171029.tsv'),
                        user_dbs=[]) # user_dbs=['/home/caleb/Documents/University/CHE3123/thermo/thermo/Identifiers/Cation db.tsv']
def formula_to_charge(formula):
    """Return the (negative) integer charge encoded after the first '-' of an
    anion formula string, e.g. 'SO4-2' -> -2. A bare trailing '-' or no
    charge suffix at all is read as a single negative charge, -1."""
    parts = formula.split('-')
    if len(parts) > 1 and parts[1]:
        return -int(parts[1])
    return -1
# [(i.formula, formula_to_charge(i.formula)) for i in db.CAS_index.values()]
def formula_variations_ion(formula, charge):
    """Generate the common textual spellings of an ion's formula plus charge,
    e.g. ('AgH2O2-', -1) -> ['AgH2O2-', 'AgH2O2-1', 'AgH2O2(-1)',
    'AgH2O2(1-)', 'AgH2O2(-)']. Only the text before the first '-' of the
    input formula is kept as the base."""
    base = formula.split('-')[0]
    magnitude = abs(charge)
    dashes = '-' * magnitude
    return [base + dashes,
            base + str(charge),
            '%s(-%d)' % (base, magnitude),
            '%s(%d-)' % (base, magnitude),
            '%s(%s)' % (base, dashes)]
# formula_variations_ion('AgH2O2-', -1)
# 259 in that book vs 339 here.
len(db.CAS_index)  # number of anions indexed by CAS
# -
# Parse the tab-separated 'Original metadata.csv' into a dict keyed by CAS number.
data = {}
with open('Original metadata.csv') as f:
    f.readline()  # skip the header row
    for line in f.readlines():
        # 6-field rows carry a molecular weight; 5-field rows lack one
        if len(line.split('\t')) == 6:
            name, name2, CAS, formula, charge, MW = line.split('\t')
        else:
            name, name2, CAS, formula, charge = line.split('\t')
            MW = 0  # sentinel: "MW unknown", checked as d['MW'] downstream
        data[CAS] = {'Name': name, 'Name2': name2, 'formula': formula, 'charge':int(charge), 'MW': float(MW)}
# data
# +
# Build a synonyms map per CAS: metadata names, formula spelling variants,
# and hand-curated fixups, then dump it to JSON.
good_syns = {CAS:{'synonyms': []} for CAS, d in data.items()}
for CAS, d in data.items():
    if d['MW']:
        good_syns[CAS]['synonyms'].append(d['Name2'])
for CAS, d in data.items():
    good_syns[CAS]['synonyms'].extend(formula_variations_ion(d['formula'], d['charge']))
# Add spelling variants derived from the database's own formulas
for CAS, d in db.CAS_index.items():
    CAS = d.CASs
    syns = formula_variations_ion(d.formula, formula_to_charge(d.formula))
    if CAS in good_syns:
        # only add variants not already present
        syns = [i for i in syns if i not in good_syns[CAS]['synonyms']]
        good_syns[CAS]['synonyms'].extend(syns)
    else:
        good_syns[CAS] = {}
        good_syns[CAS]['synonyms'] = syns
good_syns['338-70-5']['synonyms'].append('oxalate')
# good_syns['18500-32-8']['synonyms'].append('H2NNH3+')
# 'H2P2O7-2 is in there as P2O7H2-2 - look into unique searching by formula? Should be possible.
# 34175-11-6 'HS2O4-', as HO4S2-
# 14102-45-5 is 'H2AsO3-',
# HP2O7-3 as well
# 44030-61-7 is 'HPO3F-', here's a good case to the search
# OCN NCO- 661-20-1
# 920-52-5 'C2O4H-' already done
# NOTE(review): the next line is a no-op expression (its value is discarded) — confirm intent
['CH3COO-', 'CHOO-']
# Hand-specified ion records for species the automatic lookup misses
ClO2m = {'formula': 'ClO2-', 'MW': 67.448, 'pubchem': 197148, 'smiles': '[O-]Cl=O',
         'inchikey': 'QBWCMBCROVPCKQ-UHFFFAOYSA-M', 'inchi': 'InChI=1S/ClHO2/c2-1-3/h(H,2,3)/p-1'}
# Get the rest from the api
# Key problem - if there's no mol downloaded, there's a no go.
ClO3m = {'formula': 'ClO3-', 'pubchem': 104770}
Br3m = {'pubchem': 77881}
BrO3m = {'pubchem': 84979}
AlOm = {'smiles': '[Al-].[O]', 'MW': 42.981, 'formula': 'AlO-'} # No inchi
BrO4m = {'pubchem': 5460630}
IO4m = {'pubchem': 167232}
B4O7m = {'pubchem': 91932232}
HF2m = {'pubchem': 21864337}
SiF6m = {'pubchem': 28117}
B4O7m = {'pubchem': 91932232}
N2O2m = {'pubchem': 4686309}
SbF6m = {'pubchem': 3868826}
IO3m = {'pubchem': 84927}
AgO2H2m = {'smiles': '[Ag+1].[OH-].[OH-]', 'formula': 'H2AgO2-1', 'MW': 124.8752}
CrO4H4m = {'smiles': '[OH-].[OH-].[OH-].[OH-].[Cr+3]', 'formula': 'CrH4O4-', 'MW': 120.024} # read wrong no pubchem
TiO5H5 = {'smiles': '[OH-].[OH-].[OH-].[OH-].[Ti+4]', 'MW': 132.902, 'formula': 'TiH5O5-'} # read wrong no pubchem
FeO4H4m3 = {'smiles': '[OH-].[OH-].[OH-].[OH-].[Fe+1]', 'formula': 'FeH4O4-3', 'MW': 123.873} # read wrong no pubchem
FeO4H4m = {'smiles': '[OH-].[OH-].[OH-].[OH-].[Fe+3]', 'formula': 'FeH4O4-', 'MW': 123.873} # read wrong no pubchem
FeO4H4m2 = {'smiles': '[OH-].[OH-].[OH-].[OH-].[Fe+2]', 'formula': 'FeH4O4-2', 'MW': 123.873} # read wrong no pubchem
FeO3H3m = {'smiles': '[OH-].[OH-].[OH-].[Fe+2]', 'formula': 'FeH3O3-', 'MW': 106.866}
FeOH2m = {'formula': 'FeH2O-', 'MW': 73.86028, 'smiles': '[H-].[OH-].[Fe+1]'}
HB4O7m = {'formula': 'HB4O7-', 'MW': 156.24774} # no smiles available?
H2P2m = {'formula': 'H2P2-', 'MW': molecular_weight(nested_formula_parser('H2P2-'))}
# NOTE(review): key '12258-53-6' (B4O7m) appears twice below; the duplicate is
# harmless (same value) but should be removed
custom_ions = {'14998-27-7': ClO2m, '14866-68-3': ClO3m, '14522-80-6': Br3m, '15541-45-4': BrO3m,
               '12758-12-2': AlOm, '16474-32-1': BrO4m, '15056-35-6': IO4m, '12258-53-6': B4O7m,
               '18130-74-0': HF2m, '17084-08-1': SiF6m, '12258-53-6': B4O7m, '15435-66-2': N2O2m,
               '17111-95-4': SbF6m, '15454-31-6': IO3m, '12258-16-1': AgO2H2m, '97775-49-0':CrO4H4m,
               '119046-04-7': TiO5H5, '29145-79-7': FeO4H4m, '150393-25-2': FeO4H4m3,
               '73128-36-6': FeO4H4m2, '70756-39-7': FeO3H3m, '150381-43-4':FeOH2m,
               '12447-33-5': HB4O7m, '107596-48-5': H2P2m }
# 15454-31-6
# Merge the hand-specified records over (or into) the synonym map
for CAS, d in custom_ions.items():
    if CAS in good_syns:
        good_syns[CAS].update(d)
    else:
        good_syns[CAS] = d
import json
# NOTE(review): 'synoynms' typo is preserved — downstream consumers may expect this exact name
f = open('Good synoynms by CAS2.json', 'w')
json.dump(good_syns, f, indent=2, separators=(',', ': '), sort_keys=True)
f.close()
# -
from collections import Counter
# Count how often each (deduplicated-per-entry) name appears across the db,
# to check for duplicate names between entries
ns = []
for i in db.CAS_index.values():
    ns.extend(list(set(i.all_names)))
Counter(ns).most_common(20)
# No dup names :)
# len(ns)
# len(a.CAS_index), len(a.pubchem_index)
# TODO oxalate goes to the one without Hs, 71081 338-70-5 C2O4-2
# None of the charges are wrong?
# Verify the RDKit formal charge of each database entry matches the metadata charge.
for CAS, d in data.items():
    chem = db.search_CAS(CAS)
    if not chem:
        # FIX: this report was dead code behind an unconditional `continue`
        print('NOTINDB', CAS)
        continue
    mol = Chem.MolFromSmiles(chem.smiles)
    if mol is None:
        # FIX: likewise previously unreachable
        print('CANTREADMOL', CAS)
        continue
    # print(Chem.MolToSmiles(mol))
    charge = Chem.GetFormalCharge(mol)
    # replaces try/assert/bare-except; same printed output on a mismatch
    if charge != d['charge']:
        print('F:', charge, d['charge'], CAS)
# +
# Chem.GetFormalCharge(Chem.MolFromSmiles('[SbH6+3]'))
# a.search_CAS('16971-29-2').InChI, a.search_CAS('16971-29-2').formula, a.search_CAS('16971-29-2').smiles
# -
# +
# mol = Chem.MolFromMolFile('mol/14695-95-5.mol')
# mol = Chem.MolFromMolFile('/tmp/399316.mol')
# # # mol = Chem.MolFromSmiles('[Sb+3]')
# # # When read, 1 atom
# # Chem.MolToSmiles(mol, allHsExplicit=True)
# # mol.GetNumAtoms()
# mw = Descriptors.MolWt(mol)
# formula = CalcMolFormula(mol)
# mw, formula
# +
# Most of the MW ones fail due to having added extra hydrogens???? OR MW?
# Cross-check database molecular weights against the metadata values (0.3 tolerance).
for CAS, d in data.items():
    chem = db.search_CAS(CAS)
    if not chem or d['MW'] == 0:
        continue  # skip entries missing from the db or with unknown MW
    try:
        assert_allclose(chem.MW, d['MW'], atol=0.3)
    except AssertionError:  # was a bare `except:`, which also hid typos and KeyboardInterrupt
        print('F:', CAS, chem.MW, d['MW'], chem)
# Plenty of outstanding work here and with the charges.
# 20561-39-1 astatine mos stable.
# 16518-47-1 Arsenate (AsO43-), dihydrogen (8CI,9CI), must be a typo - clearly weight is higher.
# 14493-01-7 is fine, confirmed.
# 19469-81-9 H4Al is simply wrong.
# 14897-04-2 is fine
# 34786-97-5 is fine, Vanadate (V(OH)2O21-), their error
# 14333-20-1 Pertechnetate is fine
# 16844-87-4 Arsenate (AsO43-), monohydrogen is fine
# 26450-38-4 their typo for sure
# 15390-83-7 is fine for sure, their bad
# -
|
scifinder-anions/.ipynb_checkpoints/Compare db anions-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="uRLPr0TnIAHO"
BRANCH = 'v1.0.0b2'
# + id="o_0K1lsW1dj9"
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell
# install NeMo
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]
# + id="dzqD2WDFOIN-"
from nemo.utils.exp_manager import exp_manager
from nemo.collections import nlp as nemo_nlp
import os
import wget
import torch
import pytorch_lightning as pl
from omegaconf import OmegaConf
# + [markdown] id="daYw_Xll2ZR9"
# # Task Description
# Given a question and a context both in natural language, predict the span within the context with a start and end position which indicates the answer to the question.
# For every word in our training dataset we’re going to predict:
# - likelihood this word is the start of the span
# - likelihood this word is the end of the span
#
# We are using a pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) encoder with 2 span prediction heads for prediction start and end position of the answer. The span predictions are token classifiers consisting of a single linear layer.
# + [markdown] id="ZnuziSwJ1yEB"
# # Dataset
# This model expects the dataset to be in [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) format, e.g. a JSON file for each dataset split.
# In the following we will show example for a training file. Each title has one or multiple paragraph entries, each consisting of the text - "context", and question-answer entries. Each question-answer entry has:
# * a question
# * a globally unique id
# * a boolean flag "is_impossible" which shows if the question is answerable or not
# * in case the question is answerable one answer entry, which contains the text span and its starting character index in the context. If not answerable, the "answers" list is empty
#
# The evaluation files (for validation and testing) follow the above format except for it can provide more than one answer to the same question.
# The inference file follows the above format except for it does not require the "answers" and "is_impossible" keywords.
#
# + [markdown] id="TXFORGBv2Jqu"
#
#
# ```
# {
# "data": [
# {
# "title": "Super_Bowl_50",
# "paragraphs": [
# {
# "context": "Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season. The American Football Conference (AFC) champion Denver Broncos defeated the National Football Conference (NFC) champion Carolina Panthers 24\u201310 to earn their third Super Bowl title. The game was played on February 7, 2016, at Levi's Stadium in the San Francisco Bay Area at Santa Clara, California. As this was the 50th Super Bowl, the league emphasized the \"golden anniversary\" with various gold-themed initiatives, as well as temporarily suspending the tradition of naming each Super Bowl game with Roman numerals (under which the game would have been known as \"Super Bowl L\"), so that the logo could prominently feature the Arabic numerals 50.",
# "qas": [
# {
# "question": "Where did Super Bowl 50 take place?",
# "is_impossible": "false",
# "id": "56be4db0acb8001400a502ee",
# "answers": [
# {
# "answer_start": "403",
# "text": "Santa Clara, California"
# }
# ]
# },
# {
# "question": "What was the winning score of the Super Bowl 50?",
# "is_impossible": "true",
# "id": "56be4db0acb8001400a502ez",
# "answers": [
# ]
# }
# ]
# }
# ]
# }
# ]
# }
# ...
# ```
#
#
# + [markdown] id="SL58EWkd2ZVb"
# ## Download the data
# + [markdown] id="THi6s1Qx2G1k"
# In this notebook we are going download the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset to showcase how to do training and inference. There are two datasets, SQuAD1.0 and SQuAD2.0. SQuAD 1.1, the previous version of the SQuAD dataset, contains 100,000+ question-answer pairs on 500+ articles. SQuAD2.0 dataset combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones.
#
#
# To download both datasets, we use [NeMo/examples/nlp/question_answering/get_squad.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/question_answering/get_squad.py).
#
#
#
# + id="tv3qXTTR_hBk"
# set the following paths
DATA_DIR = "PATH_TO_DATA"  # where the SQuAD data will be downloaded
WORK_DIR = "PATH_TO_CHECKPOINTS_AND_LOGS"  # checkpoints, logs and helper scripts
# + id="qcz3Djem_hBn"
## download get_squad.py script to download and preprocess the SQuAD data
os.makedirs(WORK_DIR, exist_ok=True)
if not os.path.exists(WORK_DIR + '/get_squad.py'):
    print('Downloading get_squad.py...')
    wget.download('https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b2/examples/nlp/question_answering/get_squad.py', WORK_DIR)
else:
    print ('get_squad.py already exists')
# + id="mpzsC41t_hBq"
# download and preprocess the data
# ! python $WORK_DIR/get_squad.py --destDir $DATA_DIR
# + [markdown] id="m_HLLl6t_hBs"
# after execution of the above cell, your data folder will contain a subfolder "squad" with the following 4 files for training and evaluation
# - v1.1/train-v1.1.json
# - v1.1/dev-v1.1.json
# - v2.0/train-v2.0.json
# - v2.0/dev-v2.0.json
# + id="qYHcfxPL_hBt"
# ! ls -LR {DATA_DIR}/squad
# + [markdown] id="bdpikZVreLlI"
# ## Data preprocessing
#
# The input into the model is the concatenation of two tokenized sequences:
# " [CLS] query [SEP] context [SEP]".
# This is the tokenization used for BERT, i.e. [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) Tokenizer, which uses the [Google's BERT vocabulary](https://github.com/google-research/bert). This tokenizer is configured with `model.tokenizer.tokenizer_name=bert-base-uncased` and is automatically instantiated using [Huggingface](https://huggingface.co/)'s API.
# The benefit of this tokenizer is that this is compatible with a pretrained BERT model, from which we can finetune instead of training the question answering model from scratch. However, we also support other tokenizers, such as `model.tokenizer.tokenizer_name=sentencepiece`. Unlike the BERT WordPiece tokenizer, the [SentencePiece](https://github.com/google/sentencepiece) tokenizer model needs to be first created from a text file.
# See [02_NLP_Tokenizers.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/main/tutorials/nlp/02_NLP_Tokenizers.ipynb) for more details on how to use NeMo Tokenizers.
# + [markdown] id="0q7Y7nyW_hBv"
# # Data and Model Parameters
#
# + [markdown] id="B0b0Tn8M_hBv"
# Note, this is only an example to showcase usage and is not optimized for accuracy. In the following, we will download and adjust the model configuration to create a toy example, where we only use a small fraction of the original dataset.
#
# In order to train the full SQuAD model, leave the model parameters from the configuration file unchanged. This sets NUM_SAMPLES=-1 to use the entire dataset, which will slow down performance significantly. We recommend to use bash script and multi-GPU to accelerate this.
#
# + id="n8HZrDmr12_-"
# This is the model configuration file that we will download, do not change this
MODEL_CONFIG = "question_answering_squad_config.yaml"
# model parameters, play with these
BATCH_SIZE = 12
# NOTE(review): MAX_SEQ_LENGTH is defined but never written into the config below — confirm intended
MAX_SEQ_LENGTH = 384
# specify BERT-like model, you want to use
PRETRAINED_BERT_MODEL = "bert-base-uncased"
TOKENIZER_NAME = "bert-base-uncased" # tokenizer name
# Number of data examples used for training, validation, test and inference
TRAIN_NUM_SAMPLES = VAL_NUM_SAMPLES = TEST_NUM_SAMPLES = 5000
INFER_NUM_SAMPLES = 5
TRAIN_FILE = f"{DATA_DIR}/squad/v1.1/train-v1.1.json"
VAL_FILE = f"{DATA_DIR}/squad/v1.1/dev-v1.1.json"
TEST_FILE = f"{DATA_DIR}/squad/v1.1/dev-v1.1.json"
INFER_FILE = f"{DATA_DIR}/squad/v1.1/dev-v1.1.json"
INFER_PREDICTION_OUTPUT_FILE = "output_prediction.json"
INFER_NBEST_OUTPUT_FILE = "output_nbest.json"
# training parameters
LEARNING_RATE = 0.00003
# number of epochs
MAX_EPOCHS = 1
# + [markdown] id="daludzzL2Jba"
# # Model Configuration
# + [markdown] id="_whKCxfTMo6Y"
# The model is defined in a config file which declares multiple important sections. They are:
# - **model**: All arguments that will relate to the Model - language model, span prediction, optimizer and schedulers, datasets and any other related information
#
# - **trainer**: Any argument to be passed to PyTorch Lightning
# + id="T1gA8PsJ13MJ"
# download the model's default configuration file
config_dir = WORK_DIR + '/configs/'
os.makedirs(config_dir, exist_ok=True)
if not os.path.exists(config_dir + MODEL_CONFIG):
    print('Downloading config file...')
    # BUG FIX: the URL was a plain string, so the literal text "{MODEL_CONFIG}"
    # was requested instead of the actual config file name (compare the
    # hardcoded get_squad.py download above); it is now an f-string.
    wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b2/examples/nlp/question_answering/conf/{MODEL_CONFIG}', config_dir)
else:
    print ('config file is already exists')
# + id="mX3KmWMvSUQw"
# this line will print the entire default config of the model
config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'
print(config_path)
config = OmegaConf.load(config_path)  # parse the YAML into an OmegaConf config object
print(OmegaConf.to_yaml(config))
# + [markdown] id="ZCgWzNBkaQLZ"
# ## Setting up data within the config
#
# Among other things, the config file contains dictionaries called dataset, train_ds and validation_ds, test_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.
#
# Specify data paths using `model.train_ds.file`, `model.valuation_ds.file` and `model.test_ds.file`.
#
# Let's now add the data paths to the config.
# + id="LQHCJN-ZaoLp"
# Point the train/validation/test dataset configs at the downloaded SQuAD files
config.model.train_ds.file = TRAIN_FILE
config.model.validation_ds.file = VAL_FILE
config.model.test_ds.file = TEST_FILE
# Cap the number of examples per split (set -1 in the config for the full dataset)
config.model.train_ds.num_samples = TRAIN_NUM_SAMPLES
config.model.validation_ds.num_samples = VAL_NUM_SAMPLES
config.model.test_ds.num_samples = TEST_NUM_SAMPLES
config.model.tokenizer.tokenizer_name = TOKENIZER_NAME
# + [markdown] id="nB96-3sTc3yk"
# # Building the PyTorch Lightning Trainer
#
# NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem!
#
# Let's first instantiate a Trainer object!
# + id="knF6QeQQdMrH"
# lets modify some trainer configs
# checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
config.trainer.gpus = cuda
# fp16 only makes sense on GPU; fall back to fp32 on CPU
config.trainer.precision = 16 if torch.cuda.is_available() else 32
# For mixed precision training, use precision=16 and amp_level=O1
config.trainer.max_epochs = MAX_EPOCHS
# Remove distributed training flags if only running on a single GPU or CPU
config.trainer.accelerator = None
print("Trainer config - \n")
print(OmegaConf.to_yaml(config.trainer))
trainer = pl.Trainer(**config.trainer)
# + [markdown] id="8IlEMdVxdr6p"
# # Setting up a NeMo Experiment¶
#
# NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it!
# + id="8uztqGAmdrYt"
config.exp_manager.exp_dir = WORK_DIR
# exp_manager wires up logging/checkpointing and returns the experiment directory
exp_dir = exp_manager(trainer, config.get("exp_manager", None))
# the exp_dir provides a path to the current experiment for easy access
exp_dir = str(exp_dir)
# + [markdown] id="D4jy28fbjekD"
# # Using an Out-Of-Box Model
# + id="Ins2ZzJckKKo"
# list available pretrained models
nemo_nlp.models.QAModel.list_available_models()
# + id="iFnzHvkVk-S5"
# load pretained model
pretrained_model_name="BERTBaseUncasedSQuADv1.1"
# downloads (if needed) and instantiates the named checkpoint
model = nemo_nlp.models.QAModel.from_pretrained(model_name='BERTBaseUncasedSQuADv1.1')
# + [markdown] id="6FI_nQsJo_11"
# # Model Training
# + [markdown] id="8tjLhUvL_o7_"
# Before initializing the model, we might want to modify some of the model configs.
# + id="Xeuc2i7Y_nP5"
# complete list of supported BERT-like models
nemo_nlp.modules.get_pretrained_lm_models_list()
# + id="RK2xglXyAUOO"
# add the specified above model parameters to the config
config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL
config.model.train_ds.batch_size = BATCH_SIZE
config.model.validation_ds.batch_size = BATCH_SIZE
config.model.test_ds.batch_size = BATCH_SIZE
config.model.optim.lr = LEARNING_RATE
print("Updated model config - \n")
print(OmegaConf.to_yaml(config.model))
# + id="NgsGLydWo-6-"
# initialize the model
# (the datasets will be prepared for training and evaluation during initialization)
model = nemo_nlp.models.QAModel(cfg=config.model, trainer=trainer)
# + [markdown] id="kQ592Tx4pzyB"
# ## Monitoring Training Progress
# Optionally, you can create a Tensorboard visualization to monitor training progress.
# + id="mTJr16_pp0aS"
# Detect whether we are running inside Google Colab
try:
    from google import colab
    COLAB_ENV = True
except (ImportError, ModuleNotFoundError):
    COLAB_ENV = False
# Load the TensorBoard notebook extension
if COLAB_ENV:
    # %load_ext tensorboard
    # %tensorboard --logdir {exp_dir}
    pass  # FIX: the magics above are comments in .py form, which left this suite empty and unparseable
else:
    print("To use tensorboard, please use this notebook in a Google Colab environment.")
# + id="hUvnSpyjp0Dh"
# start the training
trainer.fit(model)  # runs for config.trainer.max_epochs epochs
# + [markdown] id="JxBiIKMlH8yv"
# After training for 1 epochs, exact match on the evaluation data should be around 59.2%, F1 around 70.2%.
# + [markdown] id="ynCLBmAWFVsM"
# # Evaluation
#
# To see how the model performs, let’s run evaluation on the test dataset.
# + id="XBMCoXAKFtSd"
model.setup_test_data(test_data_config=config.model.test_ds)  # attach the test split
trainer.test(model)  # evaluate the trained model on the test dataset
# + [markdown] id="VPdzJVAgSFaJ"
# # Inference
#
# To use the model for creating predictions, let’s run inference on the unlabeled inference dataset.
# + id="DQhsamclRtxJ"
# # store test prediction under the experiment output folder
output_prediction_file = f"{exp_dir}/{INFER_PREDICTION_OUTPUT_FILE}"
output_nbest_file = f"{exp_dir}/{INFER_NBEST_OUTPUT_FILE}"
# Run inference on the first INFER_NUM_SAMPLES examples; also writes both output files
all_preds, all_nbests = model.inference(file=INFER_FILE, batch_size=5, num_sample=INFER_NUM_SAMPLES, output_nbest_file=output_nbest_file, output_prediction_file=output_prediction_file)
# + id="sQpRIOaM_hCQ"
# Print every non-empty predicted answer
for question_id, answer in all_preds.items():
    if answer != "empty":
        print(f"Question ID: {question_id}, answer: {answer}")
#The prediction file contains the predicted answer to each question id for the first TEST_NUM_SAMPLES.
# ! python -m json.tool $WORK_DIR/${exp_dir}/$INFER_PREDICTION_OUTPUT_FILE
# + [markdown] id="ref1qSonGNhP"
# If you have NeMo installed locally, you can also train the model with
# [NeMo/examples/nlp/question_answering/get_squad.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/question_answering/question_answering_squad.py).
#
# To run training script, use:
#
# `python question_answering_squad.py model.train_ds.file=TRAIN_FILE model.validation_ds.file=VAL_FILE model.test_ds.file=TEST_FILE`
#
# To improve the performance of the model, train with multi-GPU and a global batch size of 24. So if you use 8 GPUs with `trainer.gpus=8`, set `model.train_ds.batch_size=3`
|
tutorials/nlp/Question_Answering_Squad.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Challenge Part 1: Get weather description and precipitation for each city
#Import dependencies
from citipy import citipy
import numpy as np
import pandas as pd
# +
# Draw 1500 random latitudes and longitudes uniformly covering the globe.
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
# Pair the two samples element-wise (lazy iterator of (lat, lng) tuples)...
lats_lngs = zip(lats, lngs)
# ...and materialise the pairs into a concrete list of coordinate tuples.
coordinates = [pair for pair in lats_lngs]
# Initialize empty list to store city names
cities = []
# Use citipy.nearest_city to get the name of the nearest city for each
# (latitude, longitude) pair
for coordinate in coordinates:
    city = citipy.nearest_city(coordinate[0], coordinate[1]).city_name
    # Check that the city name is unique; if true add it to the cities list
    # (many random ocean points map to the same coastal city)
    if city not in cities:
        cities.append(city)
len(cities)
# -
# Import requests, datetime, weather_api_key
import requests
from datetime import datetime
# weather_api_key must be defined in a local config.py (not checked in)
from config import weather_api_key
# Create base url for request: current-weather endpoint, imperial units;
# the per-city query string ("&q=<city>") is appended later
url = f'http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID={weather_api_key}'
# +
# Retrieve current weather for every candidate city from the OpenWeatherMap
# API, collecting the needed fields into `city_data` (a list of dicts).
# Cities the API does not recognise are logged and skipped.
city_data = []
# Print beginning of logging (fixed typo: was "Begging Data Retrieval")
print('Beginning Data Retrieval ')
print('---------------------------')
# Create counters
record_count = 1
set_count = 1
# Loop through all cities in cities list
for i, city in enumerate(cities):
    # Group cities in sets of 50
    if (i % 50 == 0 and i >= 50):
        set_count += 1
        record_count = 1
    # Create endpoint url (spaces are not valid in a URL query string)
    city_url = url + "&q=" + city.replace(" ", "+")
    # Log the record number, set number, and city
    print(f'Recording {record_count} of {set_count}|{city}')
    record_count += 1
    # Run API request for each city
    try:
        # Parse the JSON and retrieve the data
        city_weather = requests.get(city_url).json()
        # Parse out the needed data.
        city_lat = city_weather["coord"]["lat"]
        city_lng = city_weather["coord"]["lon"]
        city_max_temp = city_weather["main"]["temp_max"]
        city_humidity = city_weather["main"]["humidity"]
        city_clouds = city_weather["clouds"]["all"]
        city_wind = city_weather["wind"]["speed"]
        city_country = city_weather["sys"]["country"]
        city_weather_description = city_weather["weather"][0]["description"]
        # try to get the rainfall and snowfall in the last hour, insert 0 if none
        try:
            city_rain = city_weather["rain"]["1h"]
        except KeyError:
            city_rain = 0
        try:
            city_snow = city_weather["snow"]["1h"]
        except KeyError:
            city_snow = 0
        # Convert the date to ISO standard.
        # city_date = datetime.utcfromtimestamp(city_weather["dt"]).strftime('%Y-%m-%d %H:%M:%S')
        # Append the city information as a dictionary into city_data list.
        city_data.append({"City": city.title(),
                          "Country": city_country,
                          "Lat": city_lat,
                          "Lng": city_lng,
                          "Max Temp": city_max_temp,
                          "Humidity": city_humidity,
                          "Cloudiness": city_clouds,
                          "Wind Speed": city_wind,
                          "Description": city_weather_description,
                          "Rain (inches)": city_rain,
                          "Snow (inches)": city_snow})
    # Skip the city on any lookup failure (missing JSON keys for unknown
    # cities, request errors). `except Exception` instead of a bare
    # `except:` so KeyboardInterrupt/SystemExit still propagate.
    except Exception:
        print('City not found, skipping...')
# Print completion statement
print('----------------------------')
print('Data Retrieval Complete ')
print('----------------------------')
len(city_data)
# +
# store results as a dataframe
city_data_df = pd.DataFrame(city_data)
# Save the data frame to a csv file. A forward slash keeps the path portable:
# the original 'data\WeatherPy_database.csv' embeds a literal backslash on
# POSIX (writing an oddly named file in the CWD instead of into data/) and
# '\W' is an invalid escape sequence in Python source.
city_data_df.to_csv('data/WeatherPy_database.csv', index_label='City ID')
# +
# Find the cities where it is raining
raining_cities = city_data_df.loc[city_data_df['Rain (inches)'] > 0]
len(raining_cities)
# Find the cities where it is snowing
snowing_cities = city_data_df.loc[city_data_df['Snow (inches)'] > 0]
len(snowing_cities)
|
Weather_Database.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# #### This notebook analyses the viscosity parameter for WCVI NEMO model.
#
# The runs with constant values of eddy laplacian viscosity are analysed at first and then the model is tested against the Smagorinksi viscosity which varies in our domain based on the relative sizes of the grid cells.
#
#
# #### Smagorinsky is used only for lateral diffusion of momentum (smag_coeff = 4), whereas for tracers a constant Laplacian diffusivity of 1000 is chosen and an eddy-induced velocity coefficient of 50
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import netCDF4 as nc
import cmocean.cm as cm
from salishsea_tools import (
viz_tools,
nc_tools,
)
from IPython.display import display, Math, Latex
# %matplotlib inline
# ### Question: When we are using a constant eddy viscosity where do the veloctites go to high values and what numbers do we get for the max velocity?
#
# To this extent we will analyse the 3 reduced viscosity runs (reduced from 1000)
# +
# Open the hourly model output for the three constant-viscosity runs.
# Grid suffixes: _T = tracer grid, _U/_V = zonal/meridional velocity grids.
# Paths are on the local /ocean filesystem and are not portable.
visc_500_T = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_500_30_day_run/WCVI_1h_20160402_20160509_grid_T.nc')
visc_500_U = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_500_30_day_run/WCVI_1h_20160402_20160509_grid_U.nc')
visc_500_V = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_500_30_day_run/WCVI_1h_20160402_20160509_grid_V.nc')
visc_225_T = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_225_30_day_run/WCVI_1h_20160402_20160509_grid_T.nc')
visc_225_U = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_225_30_day_run/WCVI_1h_20160402_20160509_grid_U.nc')
visc_225_V = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_225_30_day_run/WCVI_1h_20160402_20160509_grid_V.nc')
# The viscosity-125 run crashed; only ~3 days of output exist
visc_125_crash_T = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_125/WCVI_1h_20160402_20160405_grid_T.nc')
visc_125_crash_U = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_125/WCVI_1h_20160402_20160405_grid_U.nc')
visc_125_crash_V = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_125/WCVI_1h_20160402_20160405_grid_V.nc')
# +
# Load full (t, z, y, x) velocity fields for the viscosity-500 run
U_vel_500 = visc_500_U.variables['vozocrtx'][:]
V_vel_500 = visc_500_V.variables['vomecrty'][:]
u_unstgg_500, v_unstagg_500 = viz_tools.unstagger(U_vel_500, V_vel_500)
# BUG FIX: horizontal speed is sqrt(u^2 + v^2); the original summed u^2
# twice and ignored the meridional component entirely, so the maxima
# quoted in the surrounding notes were computed from u alone.
speed_500 = np.sqrt(np.square(u_unstgg_500) + np.square(v_unstagg_500))
print(np.max(speed_500), np.min(speed_500))
# Locate the (time, depth, y, x) indices where the maximum speed occurs
(t, z, y, x) = np.where(speed_500 == np.max(speed_500))
print(t, z, y, x)
# -
# ### For the 500 run the max velocity (1.33 m/sec) were at the surface at (y,x) = (97,35)
# +
# Load full (t, z, y, x) velocity fields for the viscosity-225 run
U_vel_225 = visc_225_U.variables['vozocrtx'][:]
V_vel_225 = visc_225_V.variables['vomecrty'][:]
u_unstgg_225, v_unstagg_225 = viz_tools.unstagger(U_vel_225, V_vel_225)
# BUG FIX: horizontal speed is sqrt(u^2 + v^2); the original summed u^2
# twice (v was never used), so the quoted maximum is from u alone.
speed_225 = np.sqrt(np.square(u_unstgg_225) + np.square(v_unstagg_225))
print(np.max(speed_225), np.min(speed_225))
# Locate the (time, depth, y, x) indices where the maximum speed occurs
(t, z, y, x) = np.where(speed_225 == np.max(speed_225))
print(t, z, y, x)
# -
# ### For the 225 run the max velocity (6.05 m/sec) were at the surface at (y,x) = (97,37)
# +
# Load full (t, z, y, x) velocity fields for the crashed viscosity-125 run
U_vel_125 = visc_125_crash_U.variables['vozocrtx'][:]
V_vel_125 = visc_125_crash_V.variables['vomecrty'][:]
u_unstgg_125, v_unstagg_125 = viz_tools.unstagger(U_vel_125, V_vel_125)
# BUG FIX: horizontal speed is sqrt(u^2 + v^2); the original summed u^2
# twice (v was never used), so the quoted maximum is from u alone.
speed_125 = np.sqrt(np.square(u_unstgg_125) + np.square(v_unstagg_125))
print(np.max(speed_125), np.min(speed_125))
# Locate the (time, depth, y, x) indices where the maximum speed occurs
(t, z, y, x) = np.where(speed_125 == np.max(speed_125))
print(t, z, y, x)
# -
# ### For the run crash of viscosity 125, the max velocities go to 13.9753 at the depth level 18 (out of 27) that too at the western boundary (to the top)
# Open the hourly output for the Smagorinsky-viscosity run (T/U/V grids)
visc_smag_T = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_eiv_50_smag_4_30_day_run/WCVI_1h_20160402_20160509_grid_T.nc')
visc_smag_U = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_eiv_50_smag_4_30_day_run/WCVI_1h_20160402_20160509_grid_U.nc')
visc_smag_V = nc.Dataset('/ocean/ssahu/CANYONS/Results/NEMO_trial_viscosity/viscosity_eiv_50_smag_4_30_day_run/WCVI_1h_20160402_20160509_grid_V.nc')
# +
U_vel_smag = visc_smag_U.variables['vozocrtx'][:]
V_vel_smag = visc_smag_V.variables['vomecrty'][:]
u_unstgg_smag, v_unstagg_smag = viz_tools.unstagger(U_vel_smag, V_vel_smag)
# BUG FIX: horizontal speed is sqrt(u^2 + v^2); the original summed u^2
# twice (v was never used), so the quoted maximum is from u alone.
speed_smag = np.sqrt(np.square(u_unstgg_smag) + np.square(v_unstagg_smag))
print(np.max(speed_smag), np.min(speed_smag))
# Locate the (time, depth, y, x) indices where the maximum speed occurs
(t, z, y, x) = np.where(speed_smag == np.max(speed_smag))
print(t, z, y, x)
# -
# #### The run with smagorinksi viscosity has max horizontal speeds (0.55 m/sec) and it occurs at the surface
|
WCVI_viscosity_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
# Print interpreter and library versions for reproducibility
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
# -
# Load a previously trained Keras model from disk and sanity-check it on a
# dummy all-ones batch of shape (1, 28, 28) — one 28x28 grayscale image.
loaded_keras_model = keras.models.load_model(
    './graph_def_and_weights/fashion_mnist_model.h5')
loaded_keras_model(np.ones((1, 28, 28)))
# Wrap the model in a tf.function and trace it into a concrete function
# with a fixed input signature taken from the model's first input tensor.
run_model = tf.function(lambda x : loaded_keras_model(x))
keras_concrete_func = run_model.get_concrete_function(
    tf.TensorSpec(loaded_keras_model.inputs[0].shape,
                  loaded_keras_model.inputs[0].dtype))
# Call the concrete function to verify the trace works end to end
keras_concrete_func(tf.constant(np.ones((1, 28, 28), dtype=np.float32)))
|
JupyterNotebookCode/to_concrete_function.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: testenv
# language: python
# name: testenv
# ---
# + [markdown] tags=[]
# # Basic FULMAR usage. Using K2-109 as an example
# This system contains two known exoplanets. For more information, have a look at [Barros et al. 2017](https://www.aanda.org/articles/aa/pdf/2017/12/aa31276-17.pdf)
#
# Let's start by importing common modules
# -
import fulmar
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from transitleastsquares import transit_mask
# ## Define target
lc_targ = fulmar.target('K2-109') # aka. HD 106315
# ### Update stellar parameters if necessary
# +
# K2-109 parameters from Barros et al. 2017 (https://www.aanda.org/articles/aa/pdf/2017/12/aa31276-17.pdf)
# Stellar radius and mass with their +/- uncertainties
# (presumably in solar units — see the cited paper)
lc_targ.R_star = 1.296
lc_targ.R_star_max = 0.058
lc_targ.R_star_min = 0.058
lc_targ.M_star = 1.091
lc_targ.M_star_max = 0.036
lc_targ.M_star_min = 0.036
# -
# ## Build the lightcurve
lc_targ.build_lightcurve(author='EVEREST')
# ### Alternatively, build the lightcurve from files, for example data reduced with POLAR
# NOTE: `your_files_here` is a template placeholder — replace it with real
# file paths before running this cell, otherwise it raises a NameError.
filelist = [your_files_here]
lc_targ.build_lightcurve(filelist=filelist, author='AUTHOR')
# ### Plot the light curve for a quick visualisation
# +
plt.plot(lc_targ.ts_stitch.time.value, lc_targ.ts_stitch['flux'],'k.', markersize=1.8, alpha = 0.25)
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
plt.title(lc_targ.K2 + ' Lightcurve')
# plt.xlim(2350,2356)
# plt.ylim(0.9985,1.0015)
plt.show()
# -
# ### Mask Outliers
# m1 keeps the inliers: ~m1 is plotted below as the 4-sigma outliers, and
# ts_stitch[m1] at the end retains the non-outlier points
m1 = lc_targ.mask_outliers(sigma=4)
# #### Plot the light curve with outliers highlighted
# +
plt.plot(lc_targ.ts_stitch.time.value, lc_targ.ts_stitch['flux'],'k.', markersize=1.8, alpha = 0.25)
plt.plot(lc_targ.ts_stitch.time.value[~m1], lc_targ.ts_stitch['flux'][~m1],'o', color='xkcd:orange', markersize=8, alpha = 0.5)
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
# plt.xlim(2350,2356)
# plt.ylim(0.985,1.015)
plt.show()
# -
# #### Remove the outliers if necessary.
lc_targ.ts_stitch = lc_targ.ts_stitch[m1]
# ## Clean the light curve / correct activity
# ### Using a Savitzky-Golay filter:
# +
# Flatten the stitched light curve: window length wl=37 samples / 18 h time
# window, 2nd-order polynomial, iterating 3 times with 3-sigma rejection.
lc_targ.clean_subt_activity_flatten(sigma=3,
                                    wl=37,
                                    time_window=18*u.h,
                                    polyorder=2,
                                    return_trend=False,
                                    remove_outliers=True,
                                    break_tolerance=5,
                                    niters=3,
                                    mask=None)
# You might face issues with Astropy >= 5.0 where flatten gives TypeError: "cannot write to unmasked output"
# The issue comes from Lightkurve and a pull request has been made: https://github.com/lightkurve/lightkurve/pull/1162/commits/05034f4bbd8e6a40f8efd9cfa1f108a36ddc0872
# -
# ### Alternatively, using GP:
# Gaussian-process based activity removal on 60-min binned data; tune/draws/
# chains control the sampling — presumably an MCMC fit, see FULMAR docs.
lc_targ.clean_subt_activity_GP(
    timeseries=None,
    bin_duration=60 * u.min,
    period_min=0.2,
    period_max=100.0,
    tune=2500,
    draws=2500,
    chains=2,
    target_accept=0.95,
    ncores=None,
    return_trend=False,
    remove_outliers=True,
    sigma_out=3,
    mask=None)
# ### Plot the corrected light curve
# +
plt.plot(lc_targ.ts_clean.time.value, lc_targ.ts_clean['flux'],'k.', markersize=1.8, alpha = 0.25)
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
# plt.xlim(2350,2356)
# plt.ylim(0.985,1.015)
plt.show()
# -
# -
# #### Looks like there's something wrong at the beginning of our data. For now, let's mask it out.
# Mask selecting the suspect early section (times < 2752.5) so the transit
# searches below can exclude it
debut_mask = np.array(lc_targ.ts_clean.time.value < 2752.5)
# ## Look for transits using TLS
# ### Look for the first exoplanet
perio_results1 = lc_targ.tls_periodogram(cleaned=True, n_transits_min=2, mask=debut_mask)
# #### Plot the TLS periodogram
fulmar.perioplot(perio_results1, lc_targ.K2, folder="K2-109/", pl_n=1, maxper=None, savefig=False)
# #### Plot the resulting model to have a quick visual check
fulmar.modelplot(lc_targ.tls_results)
# #### Optimize the transit parameters. **_Be careful it assumes circular orbit_**
p, t0, dur, depth, ab, flat_samps = fulmar.params_optimizer(lc_targ.ts_clean, lc_targ.tls_results.period, lc_targ.tls_results.T0, 1-lc_targ.tls_results.depth, lc_targ.ab, lc_targ.R_star, lc_targ.K2, tran_window=0.25, ncores=None, mask=None)
# #### Mask intransit data
intransit = transit_mask(lc_targ.ts_clean.time.value, p, 1.3*dur, t0) # True when datapoints are in transit
# #### Combine intransit mask with the initial mask
p1_mask = np.logical_or(debut_mask, intransit)
# #### Plot the lightcurve with transits highlighted
# +
plt.plot(lc_targ.ts_clean.time.value, lc_targ.ts_clean['flux'],'k.', markersize=1.8, alpha = 0.25)
plt.plot(lc_targ.ts_clean.time.value[intransit], lc_targ.ts_clean['flux'][intransit],'o', color='xkcd:green', markersize=4.8, alpha = 0.5)
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
#plt.xlim(2749,2752)
# plt.ylim(0.985,1.015)
plt.show()
# -
# ### Look for the second exoplanet
# Search again with the first planet's transits (and the bad start) masked
perio_results2 = lc_targ.tls_periodogram(cleaned=True, n_transits_min=2, period_min=0.9, mask=p1_mask)
# #### Plot the TLS periodogram
fulmar.perioplot(perio_results2, lc_targ.K2, folder="K2-109/", pl_n=2, maxper=None, savefig=False)
# perioplot(results, lc_targ.K2, folder="K2-109/", num=1, maxper=50, savefig=False)
fulmar.modelplot(lc_targ.tls_results)
# #### Visualize where the transits lay in the lightcurve
# +
plt.plot(lc_targ.ts_clean.time.value[~debut_mask], lc_targ.ts_clean['flux'][~debut_mask],'ko', markersize=1.8, alpha = 0.2)
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
#plt.xlim(2749,2752)
# plt.ylim(0.985,1.015)
# Short green ticks: first detection; taller orange ticks: second detection
for n in perio_results1.transit_times:
    plt.axvline(n, alpha=0.4, lw=2, ymax=0.15, color='xkcd:green')
for n in perio_results2.transit_times:
    plt.axvline(n, alpha=0.3, lw=2, ymax=0.45, color='xkcd:orange')
plt.title(lc_targ.K2)
plt.ylim(0.9985,1.00028)
plt.show()
# -
# #### Optimize the transit parameters. **_Be careful it still assumes circular orbit_**
# + tags=[]
p_1, t0_1, dur_1, depth_1, ab_1, flat_samps_1 = fulmar.params_optimizer(lc_targ.ts_clean, lc_targ.tls_results.period, lc_targ.tls_results.T0, 1-lc_targ.tls_results.depth, lc_targ.ab, lc_targ.R_star, lc_targ.K2, tran_window=0.25, ncores=None, mask=~p1_mask)
# -
# #### Mask intransit data
intransit2 = transit_mask(lc_targ.ts_clean.time.value, p_1, 1.3*dur_1, t0_1) # True when datapoints are in transit
# #### Combine intransit mask with the initial mask
p2_mask = np.logical_or(p1_mask, intransit2)
# #### Plot the lightcurve with transits highlighted
# +
plt.plot(lc_targ.ts_clean.time.value[~debut_mask], lc_targ.ts_clean['flux'][~debut_mask],'k.', markersize=1.8, alpha = 0.25)
plt.plot(lc_targ.ts_clean.time.value[intransit2], lc_targ.ts_clean['flux'][intransit2],'o', color='xkcd:orange', markersize=4.8, alpha = 0.25, label='K2-109b')
plt.plot(lc_targ.ts_clean.time.value[intransit], lc_targ.ts_clean['flux'][intransit],'o', color='xkcd:green', markersize=4.8, alpha = 0.25, label='K2-109c')
plt.xlabel('Time [{}]'.format(str(lc_targ.ts_stitch.time.format).upper()))
plt.ylabel('Flux')
#plt.xlim(2749,2752)
# plt.ylim(0.985,1.015)
plt.title(lc_targ.K2)
plt.legend()
plt.show()
# -
# ### Look for a possible third exoplanet ?
perio_results3 = lc_targ.tls_periodogram(cleaned=True, n_transits_min=2, mask=p2_mask)
# #### Plot the TLS periodogram
fulmar.perioplot(perio_results3, lc_targ.K2, folder="K2-109/", pl_n=3, maxper=None, savefig=False)
# perioplot(results, lc_targ.K2, folder="K2-109/", num=1, maxper=50, savefig=False)
fulmar.modelplot(lc_targ.tls_results)
perio_results3.SDE, perio_results3.FAP
# **SDE < 9**, which is the normally accepted threshold for a detection. We can end it there, as we can assume the system only contains two planets.
print("In the paper, K2-109b period is announced to be 9.55237 days, in our quick look we found it at {0:.5f}.\nRegarding K2-109c, the paper announces 21.05704 days, we found {1:.5f}. \nNot bad !".format(p_1, p))
# ## Print version of Fulmar
fulmar.utils.print_version()
|
docs/source/example_K2-109.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Application Example
# ### Step 1: Load basic python libraries
# This is used to display images within the browser
# %matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
import dicom as pydicom # library to load dicom images
# cPickle only exists on Python 2; fall back to the pure-Python pickle
try:
    import cPickle as pickle
except:
    import pickle
from sklearn.preprocessing import StandardScaler
import nibabel as nib
# ### Step 2: Load the classifier and the images
# # load a classifier that has been saved in pickle form
# with open('my_dumped_classifier.pkl', 'rb') as fid:
#     gnb_loaded = cPickle.load(fid)
# Unpickle the pre-trained SVM classifier from the working directory.
# NOTE(review): unpickling only works if the training sklearn version is
# compatible with the one installed here — confirm before deploying.
with open('RBF SVM.pkl', 'rb') as fid:
    classifier = pickle.load(fid)
print (dir(classifier))
# # Step 3: Load the unknown image and perform the segmetnation
CurrentDir= os.getcwd()
# Print current directory
print (CurrentDir)
# Get parent directory
print(os.path.abspath(os.path.join(CurrentDir, os.pardir)))
# Create the file paths. The images are contained in a subfolder called Data
# of the parent directory.
PostName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), "Data", 'POST.nii.gz') )
PreName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), "Data", 'PRE.nii.gz') )
FLAIRName = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), "Data", 'FLAIR.nii.gz') )
GT = os.path.abspath(os.path.join(os.path.abspath(os.path.join(CurrentDir, os.pardir)), "Data", 'GroundTruth.nii.gz') )
# read Pre in--we assume that all images are same x,y dims
Pre = nib.load(PreName)
# Pre is a class containing the image data among other information;
# get_data() extracts the voxel array
Pre=Pre.get_data()
xdim = np.shape(Pre)[0]
ydim = np.shape(Pre)[1]
zdim = np.shape(Pre)[2]
# Printing the dimensions of an image
print ('Dimensions')
print (xdim,ydim,zdim)
# Make space for a 2-channel feature image: one slice each from the
# post- and pre-contrast volumes.
ArrayDicom = np.zeros((xdim, ydim, 2), dtype=Pre.dtype)
# Take the central slice (z=55) of the pre-contrast volume
Pre=Pre[:,:,55]
# BUG FIX: load the post-contrast volume BEFORE it is used. The original
# assigned `ArrayDicom[:, :, 0] = Post/...` before `Post` was defined,
# which raises a NameError at runtime.
Post = nib.load(PostName)
# Post is a class containing the image data among other information
Post=Post.get_data()
Post= Post[:,:,55]
# Channel 0 = post-contrast, channel 1 = pre-contrast, each normalised by
# the mean of its non-zero voxels. TODO(review): confirm this channel
# order matches the feature order the classifier was trained with.
ArrayDicom[:, :, 0] = Post/ np.mean(Post[np.nonzero(Post)])
ArrayDicom[:, :, 1] = Pre/ np.mean(Pre[np.nonzero(Pre)])
# # Step 4: Use the pretrained classifier to perform segmentation
# #### Reshape the data
print ('Shape before reshape')
print (np.shape(ArrayDicom))
# Flatten the 2-channel slice to (n_pixels, 2): one sample per pixel with
# two intensity features
ArrayDicom=ArrayDicom.reshape(-1,2)
print ('Shape after reshape')
print (np.shape(ArrayDicom))
# #### Apply trained classifier
# ArrayDicom = StandardScaler().fit_transform(ArrayDicom)
Labels=classifier.predict(ArrayDicom)
print (Labels)
# #### Visualize results
print (np.mean(Labels[np.nonzero(Labels)]))
print (np.shape(Labels))
# reshape the flat label vector back to the image grid
Labels=Labels.reshape(240,240)
Post=Post.reshape(240,240)
Pre=Pre.reshape(240,240)
# Show post-contrast, pre-contrast and predicted label map side by side
f, (ax1,ax2,ax3)=plt.subplots(1,3)
ax1.imshow(np.rot90(Post[:, :],3), cmap=plt.cm.gray)
ax1.axis('off')
ax2.imshow(np.rot90(Pre[:, :],3), cmap=plt.cm.gray)
ax2.axis('off')
ax3.imshow(np.rot90(Labels[:, :,],3), cmap=plt.cm.jet)
ax3.axis('off')
|
notebooks/Module 6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/AbdelMahm/FSR/blob/master/IDDLO-29-20/Notebooks/Logistic_Regression.ipynb"><img src="https://colab.research.google.com/img/colab_favicon_256px.png" />Run in Google Colab</a>
# </td>
# </table>
# # Logistic Regression
# +
import sys
import urllib.request
import os
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import sklearn
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# -
# ## Part1: Logistic Regression
# In this part of the exercise, you will build a logistic regression model to predict whether a student gets admitted into a university.
# Suppose that you are the administrator of a university department and you want to determine each applicant's chance of admission based on their results on two exams. You have historical data from previous applicants that you can use as a training set for logistic regression. For each training example, you have the applicant's scores on two exams and the admissions decision.
# Your task is to build a classification model that estimates an applicant's probability of admission based the scores from those two exams.
# ### Visualizing the data
# Before starting to implement any learning algorithm, it is always good to visualize the data if possible. In the first part, the code will load the data and display it on a 2-dimensional plot where the axes are the two exam scores, and the positive and negative examples are shown with different marker colors.
import urllib.request
# Download the two exercise datasets into ./datasets/ (re-downloads on
# every run; makedirs is idempotent)
data_path = os.path.join("datasets", "")
download_path = "https://raw.githubusercontent.com/AbdelMahm/FSR/master/IDDLO-29-20/Notebooks/datasets/"
os.makedirs(data_path, exist_ok=True)
for filename in ("log_reg_data1.csv", "log_reg_data2.csv"):
    print("Downloading", filename)
    url = download_path + filename
    urllib.request.urlretrieve(url, data_path + filename)
#load data: columns score1, score2 and the 0/1 label 'admitted'
data_exam = pd.read_csv(data_path + '/log_reg_data1.csv')
data_exam.head()
# ### Get the parameters of the model
# +
# X: the two exam scores, y: the 0/1 admission label (both column-shaped)
X = np.c_[data_exam[["score1","score2"]]]
y = np.c_[data_exam["admitted"]]
(m,n) = X.shape
# display all examples
fig = plt.figure()
plt.title('Student scores')
plt.xlabel('score 1')
plt.ylabel('score 2')
plt.scatter(X[:,0],X[:,1], c=y.ravel())
plt.show()
#add a column of 1s to X (not needed: sklearn fits the intercept itself)
#X = np.insert(X, 0, values=1, axis=1)
# -
# $w_j$ = clf.coef_, $w_0$ = clf.intercept_
# +
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y.ravel())
#print model parameters
print("w0 =", clf.intercept_[0], ", w1 = ", clf.coef_[0][0], ", w2 = ", clf.coef_[0][1])
# -
# ### Plot the decision boundary
# The decision boundary correspends to the value $y = 0.5$. We can write $x_2$ in terms of $x_1$ by solving the following equation:
# $$ 0.5 = w_0 + w_1*x_1 + w_2*x_2 $$
# +
fig = plt.figure()
ax = plt.axes()
plt.title('Students Classification')
plt.xlabel('score 1')
plt.ylabel('score 2')
plt.scatter(X[:,0], X[:,1], c=y.ravel())
#generate new points to plot a decision boundary line
x1_vals = np.linspace(min(X[:,1]), max(X[:,1]), 1000)
# the boundary is the line at y = 0.5 if y in {0,1} or y = 0 if y in {-1,1},
# sklearn converts y to the range {-1,1}
# So, we can then write x2 in terms of x1 using: (w0 + w1*x1 + w2*x2 = 0)
x2_vals = -(clf.intercept_[0] + clf.coef_[0][0]*x1_vals) / clf.coef_[0][1]
# plot the line
plt.plot(x1_vals, x2_vals)
plt.show()
# -
# ### Accuracy of the model
# the score function measures how well the learned model predicts on a given set.
# +
#prediction probability of one example (the 5th example)
clf.predict_proba(X[5:6,:]) # the two probabilities sums up to 1.
#predicted class of an example (class with max probability)
clf.predict(X[5:6,:])
#prediction accuracy on the training set X
clf.score(X, y)
# -
# ## Part 2: Regularized logistic regression
#
# In this part of the exercise, you will implement regularized logistic regression using the ridge method to predict whether microchips from a fabrication plant passes quality assurance (QA). During QA, each microchip goes through various tests to ensure it is functioning correctly.
# Suppose you are the product manager of the factory and you have the test results for some microchips on two different tests. From these two tests, you would like to determine whether the microchips should be accepted or rejected. To help you make the decision, you have a dataset of test results on past microchips, from which you can build a logistic regression model.
# ### Load and Visualize the data
# Similarly to the previous part, we will load and plot the data of the two QA test scores. The positive (y = 1, accepted) and negative (y = 0, rejected) examples are shown with different markers.
# Load the microchip QA dataset: columns test1, test2 and 0/1 'accepted'
data_microchip = pd.read_csv('datasets/log_reg_data2.csv')
data_microchip.head()
# +
X = np.c_[data_microchip[["test1","test2"]]]
y = np.c_[data_microchip["accepted"]]
(m,n) = X.shape
# +
# Keep each test score as its own 1-D array for plotting
X1 = X[:,0]
X2 = X[:,1]
# display
fig = plt.figure()
plt.title('Microchips tests')
plt.xlabel('test 1')
plt.ylabel('test 2')
plt.scatter(X1,X2, c=y.ravel())
plt.show()
# -
# ### Feature mapping
# The scatter plot shows that our dataset cannot be separated into positive and negative examples by a straight-line through the plot. Therefore, a straightforward application of logistic regression will not perform well on this dataset since logistic regression will only be able to find a linear decision boundary.
#
# One way to fit the data better is to create more features from each data point. Sklearn provide you with such transformation. PolynomialFeatures allow you to map the features into all polynomial terms of $x_1$ and $x_2$ up to the order power $order$:
# $$(1, x_1, x_2, x_1^2, x_2^2, x_1x_2, x_1^3, x_1^2x_2, x_2^2x_1, x_2^3, ..., x_2^{order})$$
# +
from sklearn.preprocessing import PolynomialFeatures
# Map the 2 raw features to all polynomial terms up to degree `order`
order = 30
poly = PolynomialFeatures(order)
Xmap = poly.fit_transform(X)
print(X.shape)
print(Xmap.shape)
# -
# As a result of a six order power mapping (order=6), our vector of two features (the scores on two QA tests) has been transformed into a 28-dimensional vector. A logistic regression classifier trained on this higher-dimension feature vector will have a more complex decision boundary and will appear nonlinear when drawn in our 2-dimensional plot.
# ### fit a logistic regression model to the polynomial features
# Very large C (10^7) means almost no regularisation
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial', C=10**7).fit(Xmap, y.ravel())
w_star = clf.coef_[0]
# ### Plot the decision boundary
# +
def get_boundary(u, v, theta, order):
    """Evaluate the decision function theta . phi([u_i, v_j]) on a grid.

    Returns a len(u) x len(v) array whose zero contour is the decision
    boundary of a logistic-regression model with coefficients `theta`
    trained on PolynomialFeatures of degree `order`.
    """
    # Build the feature mapper and coefficient vector once, instead of
    # re-creating them for every grid point as the original did; also
    # drops the unused `uv` temporary.
    poly = PolynomialFeatures(order)
    theta = np.array(theta)
    boundary = np.zeros(shape=(len(u), len(v)))
    for i in range(len(u)):
        for j in range(len(v)):
            poly_map = poly.fit_transform([np.array([u[i], v[j]])])
            boundary[i, j] = poly_map[0].dot(theta)
    return boundary
#plot data and boundary
fig = plt.figure()
# Evaluate the decision function on a 50x50 grid covering the data range
u = np.linspace(-1.1, 1.1, 50)
v = np.linspace(-1.1, 1.1, 50)
boundary = get_boundary(u, v, w_star, order)
plt.title('microchips')
plt.xlabel('test 1')
plt.ylabel('test 2')
plt.scatter(X1,X2, c=y.ravel())
# the zero level set of the decision function is the decision boundary
plt.contour(u, v, boundary, 0, colors='red')
plt.legend()
plt.show()
# -
# ### Evaluating the regularized logistic regression
clf.score(Xmap, y)
# ## Tuning the hyper-parameters
# Try tuning the two hyper-parameters ($C$ and the polynome order) and see how the decision boundary and the model's accuracy evolve.
#
# ### Use a grid search
# +
# Grid search over regularisation strength C (10^0 .. 10^9) and polynomial
# order (1 .. 20), recording training accuracy for each combination.
acc = np.zeros((10, 20))
C_range = list(10**x for x in range (0, 10))
for idx, c in enumerate(C_range):
    # progress indicator: one index per C value
    print(idx, sep='.', end='', flush=True)
    for order in range(1,21):
        poly = PolynomialFeatures(order)
        Xmap = poly.fit_transform(X)
        # y.ravel() for consistency with the earlier fits: sklearn expects
        # a 1-D label array, and the column vector triggers a
        # DataConversionWarning.
        clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial', C=c).fit(Xmap, y.ravel())
        acc[idx,order-1] = clf.score(Xmap, y)
# -
# ### get $\lambda^*$ and $order^*$ (those maximizing the accuracy)
# +
from numpy import unravel_index
# Convert the flat argmax of the accuracy matrix back to (C index, order index)
acc_max_idx = unravel_index(acc.argmax(), acc.shape)
print(acc_max_idx)
print(acc[acc_max_idx[0], acc_max_idx[1]])
c_star = C_range[acc_max_idx[0]]
# NOTE(review): row index i of `acc` holds order i+1, so this is off by one
# from the actual best order — confirm intent
order_star = acc_max_idx[1]
print("c_star = ", c_star, ", order_star = ", order_star)
# Heat map of training accuracy over the (C, order) grid
fig = plt.figure()
fig.clf()
ax = fig.add_subplot(1,1,1)
img = ax.imshow(acc, interpolation='nearest', vmin=0.0, vmax=1.0)
fig.colorbar(img)
plt.show()
# -
# ### plot data and boundary
# +
# Refit with the selected hyper-parameters and draw the resulting boundary
fig = plt.figure()
u = np.linspace(-1.1, 1.1, 50)
v = np.linspace(-1.1, 1.1, 50)
poly = PolynomialFeatures(order_star)
Xmap = poly.fit_transform(X)
clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial', C=c_star).fit(Xmap, y)
theta_star = clf.coef_[0]
boundary_green = get_boundary(u, v, theta_star, order_star)
plt.title('score=%f' %clf.score(Xmap, y))
plt.xlabel('test 1')
plt.ylabel('test 2')
plt.scatter(X1,X2, c=y.ravel())
plt.contour(u, v, boundary_green, 0, colors='green')
plt.legend()
plt.show()
# -
# ## Exercise
# +
#1) use pipelines
#1) try GridSearch and Randomised Search
#2) try SVM with different Kernels
#3) try GridSearch and Randomised Search
# +
#1
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
#from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint as sp_randint
# Synthetic classification problem for the pipeline demo
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,random_state=0)
# Scale then classify, as a single estimator
pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
# The pipeline can be used as any other estimator
# and avoids leaking the test set into the train set
pipe.fit(X_train, y_train)
# (this bare Pipeline(...) expression constructs a throwaway object and is a no-op)
Pipeline(steps=[('scaler', StandardScaler()), ('svc', SVC())])
pipe.score(X_test, y_test)
# +
#2
""""Search for the Parameters consists of following:
an estimator - in our case it is RandomForestRegressor
a parameter space - this the grid we have passed onto the GridSearchCV
a method for searching or sampling candidates - we have seen GridsearchCV and other one is RandomizedSearchCV
a cross-validation scheme
a scoring function - evaluates the parameters
GridSearchCV is an exhaustive search or say it is a brute force technique.
RandomizedSearchCV as the name mentions does a randomized search over parameters, where each setting is sampled from a distribution over possible parameter values. Which is far more efficient in searching the parameter values for fine tuning # Randomized Search
Grid Search:
The above models were run with the default parameters determined by the LogisticRegression. we can improve the model by tuning the hyperparameters of the model?
To achieve this, we define a “grid” of parameters that we would want to test out in the model and select the best model using GridSearchCV.
"""
grid={"C":np.logspace(-3,3,7), "penalty":["l1","l2"]}# l1 lasso l2 ridge
# Use liblinear: it supports both l1 and l2 penalties, whereas the default
# lbfgs solver rejects l1, making every 'l1' grid point fail to fit.
logreg=LogisticRegression(solver="liblinear")
logreg_cv=GridSearchCV(logreg,grid,cv=10)
logreg_cv.fit(X_train,y_train)
# (fixed typo in the output label: was "hpyerparameters")
print("tuned hyperparameters :(best parameters) ",logreg_cv.best_params_)
print("accuracy :",logreg_cv.best_score_)
# +
#3
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from sklearn.model_selection import KFold, cross_val_score

data_exam.head()
X = np.c_[data_exam[["score1","score2"]]]
y = np.c_[data_exam["admitted"]]
# define a seed for reproducibility
seed = 1
# Splitting data into the training and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = seed)
# Define scoring method
scoring = 'accuracy'
# Trying linear, RBF and sigmoid kernels
names = ['SVM Linear', 'SVM RBF', 'SVM Sigmoid']
Classifiers = [
    svm.SVC(kernel = 'linear'),
    svm.SVC(kernel = 'rbf'),
    svm.SVC(kernel = 'sigmoid')
]
models = zip(names, Classifiers)
names = []
result = []
# 10-fold cross-validated accuracy for each kernel on the training split
for name, model in models:
    kfold = KFold(n_splits = 10, random_state = 1, shuffle=True)
    cv_results = cross_val_score(model, X_train, y_train, cv = kfold, scoring = 'accuracy')
    result.append(cv_results)
    names.append(name)
    msg = "{0}: {1} ({2})".format(name, cv_results.mean(), cv_results.std())
    print(msg)
# Model evaluation on the held-out test split
models = zip(names, Classifiers)
for name, model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(name)
    print(accuracy_score(y_test, y_pred))
    print(classification_report(y_test, y_pred))
# Fix: the closing summary below was an *unterminated* triple-quoted string,
# which made the whole cell a SyntaxError.
"""Support Vector Machine with 'linear' and RBF kernels performed best with F1_score = 0.88 on testing data,
while SVM with sigmoid kernel has an F1_score = 0.56 on testing data.
"""
# -
|
Logistic_reg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="W1McFcDOVx-a"
# Imports
# + id="SpFE0ICxUZjy"
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import statistics
from scipy import stats # statistic
# + [markdown] id="aak6N4H2V7Pl"
# Load CSV
# + id="379xB7tYV6G8" colab={"base_uri": "https://localhost:8080/"} outputId="1677ef5d-217b-416a-c8b2-9bc24cf7196a"
# change to your file location
# Fix: the separator must be passed as the keyword `sep=`; positional use was
# deprecated and is rejected by pandas >= 2.0.
df_log = pd.read_csv('/content/drive/MyDrive/Škola/DM/[projekt]/logs5.csv', sep=';')
df = pd.read_csv('/content/drive/MyDrive/Škola/DM/[projekt]/parametricke_odhady.csv', sep=';')
# empty dict to save created crosstables
dfDict = {}
# + colab={"base_uri": "https://localhost:8080/"} id="y8c69Kj-SxSV" outputId="362feb02-8384-48ab-d263-8bb707b7b180"
# Fix: head must be *called* — the bare attribute displayed the bound method
# object instead of the first rows.
df_log.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="LEemyKunSp8V" outputId="226ab199-a3c6-4595-f093-c1184de756b3"
df.head(5)
# + [markdown] id="C_aZKiMSaiaS"
# Crosstable for crisis = 0
# + id="g6Mmy3q5alBZ"
# Weeks x categories contingency table of log rows outside the crisis period
df1 = df_log[(df_log['crisis'] == 0)]
crosstable = pd.crosstab(df1['week'], df1['category'], values=df1['crisis'], margins=True,
                         dropna=False, aggfunc='count').reset_index().fillna(0)
# Add crisis for 0 crosstable into dict
dfDict['0'] = crosstable
# + [markdown] id="dSeuhuOsXNZ0"
# Crosstable for crisis = 1
# + id="6H_8VBhmXWfk"
df1 = df_log[(df_log['crisis'] == 1)]
crosstable = pd.crosstab(df1['week'], df1['category'], values=df1['crisis'], margins=True,
                         dropna=False, aggfunc='count').reset_index().fillna(0)
# Fix: DataFrame.append() was removed in pandas 2.0 — add the missing week-53
# row with pd.concat instead.
missing_week = pd.DataFrame([{'week': 53, 'Business Conditions': 0, 'Pillar3 disclosure requirements': 0, 'Pillar3 related': 0, 'Pricing List': 0, 'Reputation': 0, 'We support..': 0, 'All': 0}])
crosstable = pd.concat([crosstable, missing_week], ignore_index=True)
# Add crisis for 1 crosstable into dict
dfDict['1'] = crosstable
# + [markdown] id="PtnywonssqHe"
# Create function for graph estimates creation
# + id="MW-aa1musl4j"
def create_graph(df_graph, title):
    """Plot the six category columns of *df_graph* against week as line traces.

    df_graph must contain a '0_week' column plus the six category columns
    created by the estimation loops below. The traces are added in the same
    order (and with the same display names) as the original hand-written
    version, so legends and colours are unchanged.
    """
    # (column, trace-name) pairs, in the original trace order — replaces six
    # copy-pasted add_trace blocks.
    series = [
        ('1_pricing', 'Pricing_List'),
        ('2_reputation', 'Reputation'),
        ('3_business', 'Business_Conditions'),
        ('4_related', 'Pillar3_related'),
        ('5_disclosure', 'Pillar3_disclosure_requirements'),
        ('6_weSupport', 'We_support'),
    ]
    fig = go.Figure()
    for column, trace_name in series:
        fig.add_trace(go.Scatter(x=df_graph['0_week'], y=df_graph[column],
                                 mode='lines',
                                 name=trace_name))
    fig.update_layout(title=title)
    fig.show()
# + [markdown] id="vkLzEHKftIFc"
# Create array of new category names
# + id="8yKtcPN6tHo7"
# Category column names shared by the helper functions and estimation loops
# below; the numeric prefixes keep the columns in a fixed order.
categories = ['1_pricing', '2_reputation', '3_business', '4_related', '5_disclosure', '6_weSupport']
# + [markdown] id="Fw512zBkt53e"
# Create function to add extreme values
# + id="j7T6Tj0Dt5V0"
def add_extreme_values(df1):
    """Add per-category mean ± 2·stdev control-limit columns to *df1* in place.

    For every name in the module-level ``categories`` list, creates
    ``<name>_max`` and ``<name>_min`` columns holding the scalar upper/lower
    extreme bounds for that category's values.
    """
    for category in categories:
        values = df1[category].tolist()
        # Hoisted: mean and stdev were previously each computed twice per category.
        # NOTE(review): np.nanmean ignores NaN but statistics.stdev does not —
        # confirm these columns are NaN-free, otherwise stdev raises/propagates.
        center = np.nanmean(values)
        spread = 2 * statistics.stdev(values)
        df1[category + "_max"] = center + spread
        df1[category + "_min"] = center - spread
# + [markdown] id="_53W5qUMuquR"
# Create function for graph differences creation
# + id="Jlg8i_RduxMU"
def create_figure(df1, crisis):
    """Build a line chart of weekly differences with ±2σ extreme bands.

    For each category plots the difference series in its own colour plus the
    precomputed ``_max``/``_min`` extreme-value bands in light grey dash-dot.
    Returns the plotly figure (caller decides when to show it).
    """
    color_map = {'1_pricing': 'blue', '2_reputation': 'green', '3_business': 'red', '4_related': 'yellow', '5_disclosure': 'violet', '6_weSupport': 'brown'}
    fig = go.Figure()
    weeks = df1['0_week']
    for category in categories:
        # main difference series, then its upper and lower extreme bands
        fig.add_trace(go.Scatter(x=weeks, y=df1[category], mode='lines', name=category, line=dict(color=color_map[category])))
        for bound_suffix in ('_max', '_min'):
            fig.add_trace(go.Scatter(x=weeks, y=df1[category + bound_suffix], mode='lines', name='ext ' + category, line=dict(color="lightgrey", dash="dashdot")))
    fig.update_layout(title='Visualization of differences - ' + crisis, xaxis_title='week', yaxis_title='differences')
    return fig
# + [markdown] id="XbW2l7qz_3dB"
# Create function to create figures for logits and return them
# + id="H_8Pg214-2oQ"
def create_figure_logits(df1, df1_logits, crisis):
    """Overlay theoretical (dash-dot) and empirical (solid) logits per category.

    df1 holds the model logits, df1_logits the empirical logits; both share a
    '0_week' column. The last entry of ``categories`` ('6_weSupport') is the
    reference category and therefore has no logit, so it is skipped.
    Returns the plotly figure.
    """
    color_map = {'1_pricing': 'blue', '2_reputation': 'green', '3_business': 'red', '4_related': 'yellow', '5_disclosure': 'violet'}
    fig = go.Figure()
    weeks = df1['0_week']
    for category in categories[:-1]:
        colour = color_map[category]
        fig.add_trace(go.Scatter(x=weeks, y=df1[category], mode='lines', name='teo ' + category, line=dict(color=colour, dash="dashdot")))
        fig.add_trace(go.Scatter(x=weeks, y=df1_logits[category], mode='lines', name='emp ' + category, line=dict(color=colour)))
    fig.update_layout(title='Visualization of logits differences - ' + crisis, xaxis_title='week', yaxis_title='Logits')
    return fig
# + [markdown] id="AuIlADr_dGJz"
# ## After crisis
# + id="vsSrBel3TOIy"
dataframe_collection = {}
# Fix: per-row DataFrame.append() was removed in pandas 2.0 (and re-allocated
# the frame on every iteration). Rows are buffered in plain lists and each
# table is materialised once after the loop.
rows_logits = []        # theoretical logits per week
rows_estimates = []     # model probability estimates per week
rows_evaluation = []    # expected counts = estimate * weekly total
rows_differences = []   # observed minus expected counts
rows_era = []           # empirical relative abundance (observed shares)
rows_elogits = []       # empirical logits from the observed shares
# Cycle through weeks
for x in range(0, 54):
    # Create logits estimates (cubic polynomial in week; crisis = 0 has no crisis term)
    logit_pricing = df.at[0, 'Intercept'] + df.at[0, 'week']*x + df.at[0, 'week_sq']*(x*x) + df.at[0, 'week_cb']*(x*x*x)
    logit_reputation = df.at[1, 'Intercept'] + df.at[1, 'week']*x + df.at[1, 'week_sq']*(x*x) + df.at[1, 'week_cb']*(x*x*x)
    logit_business = df.at[2, 'Intercept'] + df.at[2, 'week']*x + df.at[2, 'week_sq']*(x*x) + df.at[2, 'week_cb']*(x*x*x)
    logit_related = df.at[3, 'Intercept'] + df.at[3, 'week']*x + df.at[3, 'week_sq']*(x*x) + df.at[3, 'week_cb']*(x*x*x)
    logit_disclosure = df.at[4, 'Intercept'] + df.at[4, 'week']*x + df.at[4, 'week_sq']*(x*x) + df.at[4, 'week_cb']*(x*x*x)
    # Reference category share (multinomial-logit denominator)
    reference_web = 1 / (1 + np.exp(logit_pricing) + np.exp(logit_reputation) + np.exp(logit_business) + np.exp(logit_related) + np.exp(logit_disclosure))
    # Create estimates (probability of each category for week x)
    estimate_pricing = np.exp(logit_pricing) * reference_web
    estimate_reputation = np.exp(logit_reputation) * reference_web
    estimate_business = np.exp(logit_business) * reference_web
    estimate_related = np.exp(logit_related) * reference_web
    estimate_disclosure = np.exp(logit_disclosure) * reference_web
    # Create evaluation (expected counts, crisis = 0 crosstable)
    crosstable = dfDict['0']
    crosstable = crosstable[(crosstable['week'] == x)]
    crosstable_all = crosstable.iloc[0]['All']
    evaluation_pricing = estimate_pricing * crosstable_all
    evaluation_reputation = estimate_reputation * crosstable_all
    evaluation_business = estimate_business * crosstable_all
    evaluation_related = estimate_related * crosstable_all
    evaluation_disclosure = estimate_disclosure * crosstable_all
    evaluation_weSupport = reference_web * crosstable_all
    # Differences: observed count minus expected count
    dij_pricing = crosstable.iloc[0]['Pricing List'] - (estimate_pricing * crosstable_all)
    dij_reputation = crosstable.iloc[0]['Reputation'] - (estimate_reputation * crosstable_all)
    dij_business = crosstable.iloc[0]['Business Conditions'] - (estimate_business * crosstable_all)
    dij_related = crosstable.iloc[0]['Pillar3 related'] - (estimate_related * crosstable_all)
    dij_disclosure = crosstable.iloc[0]['Pillar3 disclosure requirements'] - (estimate_disclosure * crosstable_all)
    dij_weSupport = crosstable.iloc[0]['We support..'] - (reference_web * crosstable_all)
    # Empirical relative abundance (guard against an empty week)
    if crosstable_all == 0:
        era_pricing = 0
        era_reputation = 0
        era_business = 0
        era_related = 0
        era_disclosure = 0
        era_weSupport = 0
    else:
        era_pricing = crosstable.iloc[0]['Pricing List'] / crosstable_all
        era_reputation = crosstable.iloc[0]['Reputation'] / crosstable_all
        era_business = crosstable.iloc[0]['Business Conditions'] / crosstable_all
        era_related = crosstable.iloc[0]['Pillar3 related'] / crosstable_all
        era_disclosure = crosstable.iloc[0]['Pillar3 disclosure requirements'] / crosstable_all
        era_weSupport = crosstable.iloc[0]['We support..'] / crosstable_all
    # Empirical logits (undefined when the reference share is zero)
    if era_weSupport == 0:
        elogits_pricing = 0
        elogits_reputation = 0
        elogits_business = 0
        elogits_related = 0
        elogits_disclosure = 0
    else:
        elogits_pricing = np.log(era_pricing / era_weSupport)
        elogits_reputation = np.log(era_reputation / era_weSupport)
        elogits_business = np.log(era_business / era_weSupport)
        elogits_related = np.log(era_related / era_weSupport)
        elogits_disclosure = np.log(era_disclosure / era_weSupport)
    # Buffer one row per table for this week
    rows_logits.append({'0_week': x, '1_pricing': logit_pricing, '2_reputation': logit_reputation, '3_business': logit_business, '4_related': logit_related, '5_disclosure': logit_disclosure})
    rows_estimates.append({'0_week': x, '1_pricing': estimate_pricing, '2_reputation': estimate_reputation, '3_business': estimate_business, '4_related': estimate_related, '5_disclosure': estimate_disclosure, '6_weSupport': reference_web})
    rows_evaluation.append({'0_week': x, '1_pricing': evaluation_pricing, '2_reputation': evaluation_reputation, '3_business': evaluation_business, '4_related': evaluation_related, '5_disclosure': evaluation_disclosure, '6_weSupport': evaluation_weSupport})
    rows_differences.append({'0_week': x, '1_pricing': dij_pricing, '2_reputation': dij_reputation, '3_business': dij_business, '4_related': dij_related, '5_disclosure': dij_disclosure, '6_weSupport': dij_weSupport})
    rows_era.append({'0_week': x, '1_pricing': era_pricing, '2_reputation': era_reputation, '3_business': era_business, '4_related': era_related, '5_disclosure': era_disclosure, '6_weSupport': era_weSupport})
    rows_elogits.append({'0_week': x, '1_pricing': elogits_pricing, '2_reputation': elogits_reputation, '3_business': elogits_business, '4_related': elogits_related, '5_disclosure': elogits_disclosure})
# Materialise the output tables once (same column names/order as before)
df_logits = pd.DataFrame(rows_logits)
df_estimates = pd.DataFrame(rows_estimates)
df_evaluation = pd.DataFrame(rows_evaluation)
df_differences = pd.DataFrame(rows_differences)
df_era = pd.DataFrame(rows_era)
df_elogits = pd.DataFrame(rows_elogits)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="E1Aysec7daR4" outputId="d6946d88-3530-4035-8617-ab4114ac67e3"
# Preview the first rows of each table built by the loop above (crisis = 0).
print("Logits")
df_logits.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="4GSgXiz5iwEj" outputId="91295c9a-d13a-4678-b8a4-3964ff5c52cc"
print("Estimates")
df_estimates.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="mroZFwSggDSn" outputId="0c4f71d5-918d-4dec-d6ac-c7241997f387"
print("Evaluation")
df_evaluation.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="i6GtWKgDrNRq" outputId="2ac2e40c-fc10-442d-876d-8afd2635cff8"
print("Differences")
df_differences.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="MrS9ChZu2uJR" outputId="acb5ce1c-44ef-45cf-a75c-3c29a4964e1d"
print("Empirical relative abundance")
df_era.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="qNzUDotZ87MS" outputId="d5e721f4-b0c3-4bac-f7c8-ca71a353270f"
print("Empirical logits")
df_elogits.head(5)
# + [markdown] id="u3XSuHIa5rxS"
# Print WilcoxonResult without Crisis
# + colab={"base_uri": "https://localhost:8080/"} id="vh8M-T734WeG" outputId="20fb77e7-a5bc-4c1a-fffc-774227a28a48"
# Paired Wilcoxon signed-rank test: theoretical estimates vs empirical shares.
for category in categories:
    print(stats.wilcoxon(df_estimates[category], df_era[category]))
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="ogIRPTF2suE8" outputId="c7b0d419-5345-4645-bd57-e990401cf3dd"
# After crisis
create_graph(df_estimates, 'Estimates after crisis')
# + [markdown] id="sGbx5aBuzAIX"
# Create graph after crisis
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="P19FEl5wxyvR" outputId="b7592ac1-4500-41bf-cc10-d90de9282768"
# Add extreme values to each category in dataframe (mean +/- 2*stdev bands)
add_extreme_values(df_differences)
fig = create_figure(df_differences, 'after crisis')
fig.show()
# + [markdown] id="pPyWEW4aBBgD"
# Create graph after crisis for empirical logits
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="nTr2SKKUAGt8" outputId="6012b20d-4d3c-49cd-f88a-715b2e6024dd"
# The returned figure is the cell's last expression, so the notebook renders it.
create_figure_logits(df_logits, df_elogits, 'after crisis')
# + [markdown] id="f18twF8edgJQ"
# ## During crisis
#
# + id="jMaXeY4xdDsH"
dataframe_collection = {}
# Fix: per-row DataFrame.append() was removed in pandas 2.0 (and re-allocated
# the frame on every iteration). Rows are buffered in plain lists and each
# table is materialised once after the loop.
rows_logits_crisis = []        # theoretical logits per week (crisis = 1)
rows_estimates_crisis = []     # model probability estimates per week
rows_evaluation_crisis = []    # expected counts = estimate * weekly total
rows_differences_crisis = []   # observed minus expected counts
rows_era_crisis = []           # empirical relative abundance
rows_elogits_crisis = []       # empirical logits
# Cycle through weeks
for x in range(0, 54):
    # Create logits estimates (cubic polynomial in week plus the crisis term)
    logit_pricing_crisis = df.at[0, 'Intercept'] + df.at[0, 'week']*x + df.at[0, 'week_sq']*(x*x) + df.at[0, 'week_cb']*(x*x*x) + df.at[0, 'crisis']
    logit_reputation_crisis = df.at[1, 'Intercept'] + df.at[1, 'week']*x + df.at[1, 'week_sq']*(x*x) + df.at[1, 'week_cb']*(x*x*x) + df.at[1, 'crisis']
    logit_business_crisis = df.at[2, 'Intercept'] + df.at[2, 'week']*x + df.at[2, 'week_sq']*(x*x) + df.at[2, 'week_cb']*(x*x*x) + df.at[2, 'crisis']
    logit_related_crisis = df.at[3, 'Intercept'] + df.at[3, 'week']*x + df.at[3, 'week_sq']*(x*x) + df.at[3, 'week_cb']*(x*x*x) + df.at[3, 'crisis']
    logit_disclosure_crisis = df.at[4, 'Intercept'] + df.at[4, 'week']*x + df.at[4, 'week_sq']*(x*x) + df.at[4, 'week_cb']*(x*x*x) + df.at[4, 'crisis']
    # Reference category share (multinomial-logit denominator)
    reference_web_crisis = 1 / (1 + np.exp(logit_pricing_crisis) + np.exp(logit_reputation_crisis) + np.exp(logit_business_crisis) + np.exp(logit_related_crisis) + np.exp(logit_disclosure_crisis))
    # Create estimates (probability of each category for week x)
    estimate_pricing_crisis = np.exp(logit_pricing_crisis) * reference_web_crisis
    estimate_reputation_crisis = np.exp(logit_reputation_crisis) * reference_web_crisis
    estimate_business_crisis = np.exp(logit_business_crisis) * reference_web_crisis
    estimate_related_crisis = np.exp(logit_related_crisis) * reference_web_crisis
    estimate_disclosure_crisis = np.exp(logit_disclosure_crisis) * reference_web_crisis
    # Create evaluation (expected counts, crisis = 1 crosstable)
    crosstable = dfDict['1']
    crosstable = crosstable[(crosstable['week'] == x)]
    crosstable_all = crosstable.iloc[0]['All']
    evaluation_pricing_crisis = estimate_pricing_crisis * crosstable_all
    evaluation_reputation_crisis = estimate_reputation_crisis * crosstable_all
    evaluation_business_crisis = estimate_business_crisis * crosstable_all
    evaluation_related_crisis = estimate_related_crisis * crosstable_all
    evaluation_disclosure_crisis = estimate_disclosure_crisis * crosstable_all
    evaluation_weSupport_crisis = reference_web_crisis * crosstable_all
    # Differences: observed count minus expected count
    dij_pricing_crisis = crosstable.iloc[0]['Pricing List'] - (estimate_pricing_crisis * crosstable_all)
    dij_reputation_crisis = crosstable.iloc[0]['Reputation'] - (estimate_reputation_crisis * crosstable_all)
    dij_business_crisis = crosstable.iloc[0]['Business Conditions'] - (estimate_business_crisis * crosstable_all)
    dij_related_crisis = crosstable.iloc[0]['Pillar3 related'] - (estimate_related_crisis * crosstable_all)
    dij_disclosure_crisis = crosstable.iloc[0]['Pillar3 disclosure requirements'] - (estimate_disclosure_crisis * crosstable_all)
    dij_weSupport_crisis = crosstable.iloc[0]['We support..'] - (reference_web_crisis * crosstable_all)
    # Empirical relative abundance (guard against an empty week)
    if crosstable_all == 0:
        era_pricing_crisis = 0
        era_reputation_crisis = 0
        era_business_crisis = 0
        era_related_crisis = 0
        era_disclosure_crisis = 0
        era_weSupport_crisis = 0
    else:
        era_pricing_crisis = crosstable.iloc[0]['Pricing List'] / crosstable_all
        era_reputation_crisis = crosstable.iloc[0]['Reputation'] / crosstable_all
        era_business_crisis = crosstable.iloc[0]['Business Conditions'] / crosstable_all
        era_related_crisis = crosstable.iloc[0]['Pillar3 related'] / crosstable_all
        era_disclosure_crisis = crosstable.iloc[0]['Pillar3 disclosure requirements'] / crosstable_all
        era_weSupport_crisis = crosstable.iloc[0]['We support..'] / crosstable_all
    # Empirical logits (undefined when the reference share is zero)
    if era_weSupport_crisis == 0:
        elogits_pricing_crisis = 0
        elogits_reputation_crisis = 0
        elogits_business_crisis = 0
        elogits_related_crisis = 0
        elogits_disclosure_crisis = 0
    else:
        elogits_pricing_crisis = np.log(era_pricing_crisis / era_weSupport_crisis)
        elogits_reputation_crisis = np.log(era_reputation_crisis / era_weSupport_crisis)
        elogits_business_crisis = np.log(era_business_crisis / era_weSupport_crisis)
        elogits_related_crisis = np.log(era_related_crisis / era_weSupport_crisis)
        elogits_disclosure_crisis = np.log(era_disclosure_crisis / era_weSupport_crisis)
    # Buffer one row per table for this week (logits row keeps the extra
    # '6_weSupport' column present in the original version)
    rows_logits_crisis.append({'0_week': x, '1_pricing': logit_pricing_crisis, '2_reputation': logit_reputation_crisis, '3_business': logit_business_crisis, '4_related': logit_related_crisis, '5_disclosure': logit_disclosure_crisis, '6_weSupport': reference_web_crisis})
    rows_estimates_crisis.append({'0_week': x, '1_pricing': estimate_pricing_crisis, '2_reputation': estimate_reputation_crisis, '3_business': estimate_business_crisis, '4_related': estimate_related_crisis, '5_disclosure': estimate_disclosure_crisis, '6_weSupport': reference_web_crisis})
    rows_evaluation_crisis.append({'0_week': x, '1_pricing': evaluation_pricing_crisis, '2_reputation': evaluation_reputation_crisis, '3_business': evaluation_business_crisis, '4_related': evaluation_related_crisis, '5_disclosure': evaluation_disclosure_crisis, '6_weSupport': evaluation_weSupport_crisis})
    rows_differences_crisis.append({'0_week': x, '1_pricing': dij_pricing_crisis, '2_reputation': dij_reputation_crisis, '3_business': dij_business_crisis, '4_related': dij_related_crisis, '5_disclosure': dij_disclosure_crisis, '6_weSupport': dij_weSupport_crisis})
    rows_era_crisis.append({'0_week': x, '1_pricing': era_pricing_crisis, '2_reputation': era_reputation_crisis, '3_business': era_business_crisis, '4_related': era_related_crisis, '5_disclosure': era_disclosure_crisis, '6_weSupport': era_weSupport_crisis})
    rows_elogits_crisis.append({'0_week': x, '1_pricing': elogits_pricing_crisis, '2_reputation': elogits_reputation_crisis, '3_business': elogits_business_crisis, '4_related': elogits_related_crisis, '5_disclosure': elogits_disclosure_crisis})
# Materialise the output tables once (same column names/order as before)
df_logits_crisis = pd.DataFrame(rows_logits_crisis)
df_estimates_crisis = pd.DataFrame(rows_estimates_crisis)
df_evaluation_crisis = pd.DataFrame(rows_evaluation_crisis)
df_differences_crisis = pd.DataFrame(rows_differences_crisis)
df_era_crisis = pd.DataFrame(rows_era_crisis)
df_elogits_crisis = pd.DataFrame(rows_elogits_crisis)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="2I5duyYecFHa" outputId="1dfc8053-d683-432e-ec49-74329c04ac78"
# Preview the first rows of each crisis-period table built above.
print("Logits during crisis")
df_logits_crisis.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="ue21gp5ejqkN" outputId="3c3ff116-482b-40f7-94fe-9e6fd65de90d"
print("Estimates during crisis")
df_estimates_crisis.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="D_4b33-fhGyL" outputId="d88cff68-d706-49fe-8a4e-9742291bc049"
print("Evaluation during crisis")
df_evaluation_crisis.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="vRQYiv8YsKf7" outputId="96502beb-dcdf-40d3-91ea-26cc670cebde"
print("Differences during crisis")
df_differences_crisis.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="vYRtOS943yl7" outputId="dcda31a3-e69a-41ae-ea62-bbb6e6815be2"
print("Empirical relative abundance during crisis")
df_era_crisis.head(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="po3R7h279lBV" outputId="33e00b0a-a85e-4ff6-8058-a96ecbf89195"
print("Empirical logits during crisis")
df_elogits_crisis.head(5)
# + [markdown] id="1rFrEx6F5lVq"
# Print WilcoxonResult during Crisis
# + colab={"base_uri": "https://localhost:8080/"} id="zfc__SOp5HZQ" outputId="99f62fc7-67b5-40f7-ea74-fc0f252e6437"
# Paired Wilcoxon signed-rank test of theoretical vs empirical shares (crisis).
for category in categories:
    print(stats.wilcoxon(df_estimates_crisis[category], df_era_crisis[category]))
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="ucRNF4m6nxA3" outputId="df3bc166-bb90-431e-8721-ca3c1f2f4bde"
# During crisis
create_graph(df_estimates_crisis, 'Estimates during crisis')
# + [markdown] id="XPnKeqX6zmlt"
# Create graph during crisis
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="uZO3Efo4zI-A" outputId="8f09b756-8e99-4c69-cfb2-f54e5b1545d9"
# Add extreme values to each category in dataframe during crisis
add_extreme_values(df_differences_crisis)
fig = create_figure(df_differences_crisis, 'during crisis')
fig.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="YALj3P1PBMpw" outputId="72de0126-31c9-410a-b4f9-ef4c3dd7b196"
create_figure_logits(df_logits_crisis, df_elogits_crisis, 'during crisis')
|
projekt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1> Preprocessing using Dataflow </h1>
#
# This notebook illustrates:
# <ol>
# <li> Creating datasets for Machine Learning using Dataflow
# </ol>
# <p>
# While Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming.
# !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# + deletable=true editable=true jupyter={"outputs_hidden": false}
pip install --user apache-beam[gcp]==2.16.0
# -
# Run the command again if you are getting oauth2client error.
#
# Note: You may ignore the following responses in the cell output above:
#
# ERROR (in Red text) related to: witwidget-gpu, fairing
#
# WARNING (in Yellow text) related to: hdfscli, hdfscli-avro, pbr, fastavro, gen_client
#
# <b>Restart</b> the kernel before proceeding further.
#
# Make sure the Dataflow API is enabled by going to this [link](https://console.developers.google.com/apis/api/dataflow.googleapis.com). Ensure that you've installed Beam by importing it and printing the version number.
# Ensure the right version of Tensorflow is installed.
# !pip freeze | grep tensorflow==2.1
import apache_beam as beam
print(beam.__version__)
# You may receive a `UserWarning` about the Apache Beam SDK for Python 3 as not being yet fully supported. Don't worry about this.
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# -
# <h2> Create ML dataset using Dataflow </h2>
# Let's use Cloud Dataflow to read in the BigQuery data, do some preprocessing, and write it out as CSV files.
# In this case, I want to do some preprocessing, modifying data so that we can simulate what is known if no ultrasound has been performed.
# Note that after you launch this, the actual processing is happening on the cloud. Go to the GCP webconsole to the Dataflow section and monitor the running job. It took about 20 minutes for me.
# <p>
# If you wish to continue without doing this step, you can copy my preprocessed output:
# <pre>
# gsutil -m cp -r gs://cloud-training-demos/babyweight/preproc gs://your-bucket/
# </pre>
# But if you do this, you also have to use my TensorFlow model since yours might expect the fields in a different order
# +
import datetime, os
def to_csv(rowdict):
    """Yield CSV line(s) for one BigQuery natality row (Dataflow lab skeleton).

    Emits two CSV strings per input row — one simulating "no ultrasound"
    (sex unknown, plurality coarsened) and one with full ultrasound info —
    each suffixed with a hash key of the data columns.

    NOTE(review): TODO #1 below is an unfinished lab step; with CSV_COLUMNS
    left as None, the join over CSV_COLUMNS raises TypeError at runtime.
    """
    # Local imports keep the function self-contained when shipped to workers.
    import hashlib
    import copy
    # TODO #1:
    # Pull columns from BQ and create line(s) of CSV input
    CSV_COLUMNS = None  # placeholder — must become the list of column names
    # Create synthetic data where we assume that no ultrasound has been performed
    # and so we don't know sex of the baby. Let's assume that we can tell the difference
    # between single and multiple, but that the errors rates in determining exact number
    # is difficult in the absence of an ultrasound.
    no_ultrasound = copy.deepcopy(rowdict)
    w_ultrasound = copy.deepcopy(rowdict)
    no_ultrasound['is_male'] = 'Unknown'
    if rowdict['plurality'] > 1:
        no_ultrasound['plurality'] = 'Multiple(2+)'
    else:
        no_ultrasound['plurality'] = 'Single(1)'
    # Change the plurality column to strings
    w_ultrasound['plurality'] = ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)'][rowdict['plurality'] - 1]
    # Write out two rows for each input row, one with ultrasound and one without
    for result in [no_ultrasound, w_ultrasound]:
        data = ','.join([str(result[k]) if k in result else 'None' for k in CSV_COLUMNS])
        key = hashlib.sha224(data.encode('utf-8')).hexdigest() # hash the columns to form a key
        yield str('{},{}'.format(data, key))
def preprocess(in_test_mode):
    """Build train/eval CSV datasets from BigQuery natality data with Apache Beam.

    in_test_mode=True runs a small local DirectRunner job into ./preproc;
    otherwise a Dataflow job writes under gs://BUCKET/babyweight/preproc/.

    NOTE(review): this is a lab skeleton — TODO Task #2 below leaves the
    BigQuery read step as ``>> None``, so the pipeline cannot run until the
    read transform is filled in.
    """
    import shutil, os, subprocess
    # Unique job name so repeated launches don't collide in Dataflow
    job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    if in_test_mode:
        print('Launching local job ... hang on')
        OUTPUT_DIR = './preproc'
        shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
        os.makedirs(OUTPUT_DIR)
    else:
        print('Launching Dataflow job {} ... hang on'.format(job_name))
        OUTPUT_DIR = 'gs://{0}/babyweight/preproc/'.format(BUCKET)
        try:
            # Best-effort cleanup of a previous run's output
            subprocess.check_call('gsutil -m rm -r {}'.format(OUTPUT_DIR).split())
        except:  # NOTE(review): bare except deliberately ignores "nothing to delete"
            pass
    options = {
        'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
        'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        'job_name': job_name,
        'project': PROJECT,
        'region': REGION,
        'teardown_policy': 'TEARDOWN_ALWAYS',
        'no_save_main_session': True,
        'num_workers': 4,
        'max_num_workers': 5
    }
    opts = beam.pipeline.PipelineOptions(flags = [], **options)
    if in_test_mode:
        RUNNER = 'DirectRunner'
    else:
        RUNNER = 'DataflowRunner'
    p = beam.Pipeline(RUNNER, options = opts)
    # hashmonth gives a deterministic year/month fingerprint used below to
    # split rows into train (3/4 of months) and eval (1/4 of months).
    query = """
SELECT
  weight_pounds,
  is_male,
  mother_age,
  plurality,
  gestation_weeks,
  FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
  publicdata.samples.natality
WHERE year > 2000
AND weight_pounds > 0
AND mother_age > 0
AND plurality > 0
AND gestation_weeks > 0
AND month > 0
    """
    if in_test_mode:
        query = query + ' LIMIT 100'
    for step in ['train', 'eval']:
        if step == 'train':
            selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query)
        else:
            selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query)
        (p
         ## TODO Task #2: Modify the Apache Beam pipeline such that the first part of the pipe reads the data from BigQuery
         | '{}_read'.format(step) >> None
         | '{}_csv'.format(step) >> beam.FlatMap(to_csv)
         | '{}_out'.format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, '{}.csv'.format(step))))
        )
    job = p.run()
    if in_test_mode:
        # Block until the local run finishes; Dataflow runs are monitored in the console
        job.wait_until_finish()
        print("Done!")

# TODO Task #3: Once you have verified that the files produced locally are correct, change in_test_mode to False
# to execute this in Cloud Dataflow
preprocess(in_test_mode = False)
# -
# The above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and <b>wait for the job to finish</b> before you run the following step.
# + language="bash"
# gsutil ls gs://${BUCKET}/babyweight/preproc/*-00000*
# -
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
courses/machine_learning/deepdive/06_structured/labs/4_preproc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (tensorflow)
# language: python
# name: tensorflow
# ---
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_10_5_temporal_cnn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # T81-558: Applications of Deep Neural Networks
# **Module 10: Time Series in Keras**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 10 Material
#
# * Part 10.1: Time Series Data Encoding for Deep Learning [[Video]](https://www.youtube.com/watch?v=dMUmHsktl04&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_1_timeseries.ipynb)
# * Part 10.2: Programming LSTM with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=wY0dyFgNCgY&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_2_lstm.ipynb)
# * Part 10.3: Text Generation with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=6ORnRAz3gnA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_3_text_generation.ipynb)
# * Part 10.4: Image Captioning with Keras and TensorFlow [[Video]](https://www.youtube.com/watch?v=NmoW_AYWkb4&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_4_captioning.ipynb)
# * **Part 10.5: Temporal CNN in Keras and TensorFlow** [[Video]](https://www.youtube.com/watch?v=i390g8acZwk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_10_5_temporal_cnn.ipynb)
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
# Running the following code will map your GDrive to ```/content/drive```.
# Detect whether we are running inside Google Colab.
try:
    # %tensorflow_version 2.x
    # NOTE(review): the Colab magic above survives the jupytext export only as
    # a comment, so this try body cannot raise here and the except branch is
    # effectively unreachable outside a live Colab notebook.
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
# # Part 10.5: Temporal CNN in Keras and TensorFlow
#
# Traditionally, Convolutional Neural Networks (CNNs) are used for image classification problems and Long Short-Term Memory (LSTM) networks have been used for time series. However, recent research has shown CNNs to be very effective at time-series problems.
# +
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D
import numpy as np

max_features = 4  # token values 0,1,2,3 (total of 4)
# Six training sequences of length 6, one channel each
x = [
    [[0],[1],[1],[0],[0],[0]],
    [[0],[0],[0],[2],[2],[0]],
    [[0],[0],[0],[0],[3],[3]],
    [[0],[2],[2],[0],[0],[0]],
    [[0],[0],[3],[3],[0],[0]],
    [[0],[0],[0],[0],[1],[1]]
]
x = np.array(x,dtype=np.float32)
y = np.array([1,2,3,2,3,1],dtype=np.int32)

# Convert y to one-hot dummy variables, reshaped to match the conv output (N,1,4)
y2 = np.zeros((y.shape[0], max_features),dtype=np.float32)
y2[np.arange(y.shape[0]), y] = 1.0
y2 = np.asarray(y2).astype('float32').reshape((-1,1,4))
print(y2)

print('Build model...')
model = Sequential()
# The kernel spans the whole sequence, so the conv output has temporal length 1
model.add(Conv1D(128, kernel_size=x.shape[1], input_shape=(None, 1)))
model.add(Dense(4, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(x,y2,epochs=200)
pred = model.predict(x)
# Fix: pred has shape (N, 1, 4); the original argmax over axis=1 reduced the
# singleton axis and always produced zeros. Reduce the class axis instead and
# flatten to one label per sample.
predict_classes = np.argmax(pred, axis=-1).flatten()
# Fix: the prints passed the format string and the array as two separate
# arguments (never formatting), and "Expected classes" printed the
# *predictions* instead of the true labels.
print("Predicted classes: {}".format(predict_classes))
print("Expected classes: {}".format(y))
# +
def runit(model, inp):
    """Run *model* on a single input sequence and return the predicted class index."""
    batch = np.array(inp, dtype=np.float32)
    scores = model.predict(batch)
    return np.argmax(scores[0])
# Sanity check: this sequence contains 1's, so the model should predict class 1.
print( runit( model, [[[0],[1],[1],[0],[0],[0]]] ))
# -
# ### Sun Spots Example - CNN
#
# An example of CNN regression to predict sunspots. The data files needed for this example can be found at the following location.
#
# * [Sunspot Data Files](http://www.sidc.be/silso/datafiles#total)
# * [Download Daily Sunspots](http://www.sidc.be/silso/INFO/sndtotcsv.php) - 1/1/1818 to now.
#
# The following code is used to load the sunspot file:
#
# +
import pandas as pd
import os
# Replace the following path with your own file. It can be downloaded from:
# http://www.sidc.be/silso/INFO/sndtotcsv.php
path = "./data/"
filename = os.path.join(path,"SN_d_tot_V2.0.csv")
# Column layout of the SILSO daily sunspot CSV.  A raw value of -1 marks
# a missing observation and is parsed as NaN via na_values.
names = ['year', 'month', 'day', 'dec_year', 'sn_value' , 'sn_error', 'obs_num']
df = pd.read_csv(filename,sep=';',header=None,names=names,na_values=['-1'], index_col=False)
# Show the head and tail of the frame for a quick visual check.
print("Starting file:")
print(df[0:10])
print("Ending file:")
print(df[-10:])
# -
# As you can see, there is quite a bit of missing data near the end of the file. We would like to find the starting index where the missing data no longer occurs. This is somewhat sloppy; it would be better to find a use for the data between missing values. However, the point of this example is to show how to use a CNN with a somewhat simple time series.
# Find the last row with no observations (obs_num == 0) and start one row
# beyond it, so the retained series has no missing data.
start_id = max(df[df['obs_num'] == 0].index.tolist())+1 # Find the last zero and move one beyond
print(start_id)
# BUG FIX: take an explicit copy of the slice.  The next cell assigns a
# column (df['sn_value'] = ...); doing that on a pandas view raises
# SettingWithCopyWarning and may silently fail to modify the data.
df = df[start_id:].copy() # Trim the rows that have missing observations
# +
# Convert sunspot counts to float and split chronologically: years before
# 2000 are used for training, 2000 onward are held out for testing.
df['sn_value'] = df['sn_value'].astype(float)
train_mask = df['year'] < 2000
df_train = df[train_mask]
df_test = df[df['year'] >= 2000]

spots_train = df_train['sn_value'].tolist()
spots_test = df_test['sn_value'].tolist()

print("Training set has {} observations.".format(len(spots_train)))
print("Test set has {} observations.".format(len(spots_test)))
# +
import numpy as np

def to_sequences(seq_size, obs):
    """Convert a flat series into overlapping (window, next-value) pairs.

    Parameters
    ----------
    seq_size : int
        Length of each input window.
    obs : sequence of float
        The raw time-series observations.

    Returns
    -------
    tuple of np.ndarray
        ``x`` with shape (n, seq_size, 1): each row is one window of
        consecutive observations; ``y`` with shape (n,): the observation
        immediately following each window.
    """
    x = []
    y = []
    # BUG FIX: the original loop ignored the seq_size parameter and read
    # the global SEQUENCE_SIZE instead; behavior is unchanged for the
    # existing callers, which pass SEQUENCE_SIZE as seq_size.
    # NOTE(review): the -1 drops the final usable window; kept as-is to
    # preserve the original output shapes.
    for i in range(len(obs)-seq_size-1):
        window = obs[i:(i+seq_size)]
        after_window = obs[i+seq_size]
        window = [[value] for value in window]
        x.append(window)
        y.append(after_window)
    return np.array(x),np.array(y)
# Window length: each prediction uses the previous 25 daily observations.
SEQUENCE_SIZE = 25
x_train,y_train = to_sequences(SEQUENCE_SIZE,spots_train)
x_test,y_test = to_sequences(SEQUENCE_SIZE,spots_test)
print("Shape of training set: {}".format(x_train.shape))
print("Shape of test set: {}".format(x_test.shape))
# -
# Display the training windows (bare expression -> notebook cell output).
x_train
# +
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Dropout, MaxPooling1D, Flatten
from tensorflow.keras.callbacks import EarlyStopping
import numpy as np

print('Build model...')
# BUG FIX: the original called Sequential() twice in a row; the first
# instance was immediately discarded.  One model is enough.
model = Sequential()
# Two stacked 1-D convolutions over the SEQUENCE_SIZE-step window, then
# pool, flatten, and regress the next value through a small dense head.
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(SEQUENCE_SIZE,1)))
model.add(Conv1D(filters=32, kernel_size=3, activation='relu'))
model.add(Dropout(0.2))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(50, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Stop training once validation loss fails to improve by 1e-3 for 50
# consecutive epochs, and roll back to the best weights seen.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=50, verbose=1, mode='auto', restore_best_weights=True)
print('Train...')

model.fit(x_train,y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000)
# +
from sklearn import metrics

# Report held-out RMSE: the square root of sklearn's mean squared error
# between the model's predictions and the true test targets.
# (Duplicate commented-out `from sklearn import metrics` line removed.)
pred = model.predict(x_test)
score = np.sqrt(metrics.mean_squared_error(pred.flatten(),y_test))
print("Score (RMSE): {}".format(score))
# -
|
t81_558_class_10_5_temporal_cnn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Entities Recognition
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/entities](https://github.com/huseinzol05/Malaya/tree/master/session/entities)
# +
# Render the pre-generated NER accuracy chart inline in the notebook.
from IPython.core.display import Image, display
display(Image('ner-accuracy.png', width=500))
# -
# #### Concat
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 1.00 1.00 498279
# event 0.98 0.99 0.99 2217
# law 0.99 0.99 0.99 1610
# location 0.99 1.00 1.00 20194
# organization 0.99 0.99 0.99 26093
# person 1.00 0.99 1.00 43377
# quantity 1.00 1.00 1.00 13180
# time 0.99 1.00 0.99 12750
#
# avg / total 1.00 1.00 1.00 617700
# ```
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 1.00 1.00 498587
# event 0.98 0.99 0.98 2212
# law 1.00 0.99 0.99 1746
# location 0.99 1.00 1.00 20387
# organization 0.99 1.00 1.00 25376
# person 1.00 1.00 1.00 43158
# quantity 1.00 1.00 1.00 13581
# time 0.99 1.00 0.99 12653
#
# avg / total 1.00 1.00 1.00 617700
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 1.00 1.00 497138
# event 0.99 0.99 0.99 2331
# law 0.99 0.99 0.99 1872
# location 0.99 1.00 1.00 20671
# organization 0.99 1.00 0.99 25942
# person 0.99 1.00 1.00 43511
# quantity 1.00 1.00 1.00 13376
# time 1.00 1.00 1.00 12859
#
# avg / total 1.00 1.00 1.00 617700
# ```
# #### Entity-Network
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 1.00 1.00 497198
# event 0.98 0.95 0.96 2381
# law 0.99 0.97 0.98 1881
# location 0.99 0.99 0.99 20305
# organization 0.99 0.98 0.98 26036
# person 0.99 0.99 0.99 43470
# quantity 0.99 0.99 0.99 13608
# time 0.98 0.99 0.98 12821
#
# avg / total 1.00 1.00 1.00 617700
# ```
# #### CRF
#
# ```text
# precision recall f1-score support
#
# quantity 0.991 0.991 0.991 13891
# location 0.987 0.989 0.988 20798
# time 0.987 0.977 0.982 13264
# person 0.993 0.987 0.990 43590
# organization 0.974 0.973 0.973 25426
# event 0.995 0.983 0.989 2417
# law 0.994 0.988 0.991 1686
#
# avg / total 0.987 0.983 0.985 121072
# ```
# #### Attention
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 1.00 1.00 497073
# event 0.99 0.97 0.98 2426
# law 1.00 0.99 0.99 1806
# location 1.00 1.00 1.00 20176
# organization 1.00 1.00 1.00 26044
# person 1.00 1.00 1.00 44346
# quantity 1.00 1.00 1.00 13155
# time 0.99 1.00 1.00 12674
#
# avg / total 1.00 1.00 1.00 617700
# ```
# ## Language Detection
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/language-detection](https://github.com/huseinzol05/Malaya/tree/master/session/language-detection)
# Render the language-detection accuracy chart inline.
display(Image('language-detection-accuracy.png', width=500))
# #### XGB
#
# ```text
# precision recall f1-score support
#
# OTHER 0.98 0.99 0.99 9424
# eng 1.00 0.99 0.99 9972
# ind 1.00 0.99 0.99 11511
# zlm 1.00 1.00 1.00 10679
#
# micro avg 0.99 0.99 0.99 41586
# macro avg 0.99 0.99 0.99 41586
# weighted avg 0.99 0.99 0.99 41586
# ```
# #### Multinomial
#
# ```text
# precision recall f1-score support
#
# OTHER 1.00 0.97 0.99 9424
# eng 0.99 1.00 0.99 9972
# ind 1.00 1.00 1.00 11511
# zlm 0.99 1.00 0.99 10679
#
# micro avg 0.99 0.99 0.99 41586
# macro avg 0.99 0.99 0.99 41586
# weighted avg 0.99 0.99 0.99 41586
# ```
# #### SGD
#
# ```text
# precision recall f1-score support
#
# OTHER 0.97 0.99 0.98 9424
# eng 0.99 0.99 0.99 9972
# ind 1.00 0.99 0.99 11511
# zlm 1.00 1.00 1.00 10679
#
# micro avg 0.99 0.99 0.99 41586
# macro avg 0.99 0.99 0.99 41586
# weighted avg 0.99 0.99 0.99 41586
# ```
# #### Deep learning
#
# ```text
# precision recall f1-score support
#
# other 1.00 0.99 0.99 9445
# english 1.00 1.00 1.00 9987
# indonesian 1.00 1.00 1.00 11518
# malay 1.00 1.00 1.00 10636
#
# micro avg 1.00 1.00 1.00 41586
# macro avg 1.00 1.00 1.00 41586
# weighted avg 1.00 1.00 1.00 41586
# ```
# ## POS Recognition
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/pos](https://github.com/huseinzol05/Malaya/tree/master/session/pos)
# Render the POS-tagging accuracy chart inline.
display(Image('pos-accuracy.png', width=500))
# #### Concat
#
# ```text
# precision recall f1-score support
#
# ADJ 0.99 1.00 1.00 22663
# ADP 1.00 1.00 1.00 60199
# ADV 1.00 1.00 1.00 23633
# AUX 1.00 1.00 1.00 5249
# CCONJ 1.00 1.00 1.00 18485
# DET 1.00 1.00 1.00 19849
# NOUN 1.00 1.00 1.00 135031
# NUM 1.00 1.00 1.00 21842
# PART 1.00 1.00 1.00 2900
# PRON 1.00 1.00 1.00 23908
# PROPN 1.00 1.00 1.00 113206
# SCONJ 1.00 0.99 1.00 7304
# SYM 1.00 1.00 1.00 1205
# VERB 1.00 1.00 1.00 61222
# X 0.97 0.99 0.98 154
#
# avg / total 1.00 1.00 1.00 516850
# ```
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# ADJ 0.99 0.99 0.99 22879
# ADP 1.00 1.00 1.00 60063
# ADV 1.00 0.99 1.00 23653
# AUX 1.00 1.00 1.00 5295
# CCONJ 1.00 1.00 1.00 18395
# DET 1.00 1.00 1.00 20088
# NOUN 1.00 1.00 1.00 134736
# NUM 1.00 1.00 1.00 21938
# PART 0.99 1.00 0.99 3093
# PRON 1.00 1.00 1.00 24060
# PROPN 1.00 1.00 1.00 112859
# SCONJ 0.99 0.99 0.99 7445
# SYM 0.99 0.99 0.99 1219
# VERB 1.00 1.00 1.00 60937
# X 0.98 0.97 0.98 190
#
# avg / total 1.00 1.00 1.00 516850
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# ADJ 0.99 0.99 0.99 22649
# ADP 1.00 1.00 1.00 60088
# ADV 0.99 1.00 0.99 24031
# AUX 1.00 1.00 1.00 5279
# CCONJ 1.00 1.00 1.00 18469
# DET 1.00 1.00 1.00 20053
# NOUN 1.00 1.00 1.00 134614
# NUM 1.00 1.00 1.00 21703
# PART 1.00 0.99 0.99 2956
# PRON 1.00 1.00 1.00 23786
# PROPN 1.00 1.00 1.00 113689
# SCONJ 0.99 0.99 0.99 7315
# SYM 1.00 1.00 1.00 1189
# VERB 1.00 1.00 1.00 60827
# X 0.97 0.99 0.98 202
#
# avg / total 1.00 1.00 1.00 516850
# ```
# #### CRF
#
# ```text
# precision recall f1-score support
#
# PRON 0.998 0.997 0.998 47911
# DET 0.990 0.993 0.991 39932
# NOUN 0.988 0.988 0.988 270045
# VERB 0.997 0.997 0.997 122015
# PROPN 0.989 0.988 0.988 225893
# ADP 0.997 0.998 0.997 120358
# ADV 0.992 0.991 0.991 47753
# CCONJ 0.997 0.998 0.997 36696
# NUM 0.993 0.995 0.994 43748
# ADJ 0.985 0.988 0.986 45244
# PART 0.992 0.995 0.993 5975
# AUX 1.000 1.000 1.000 10505
# SCONJ 0.994 0.987 0.990 14798
# SYM 0.998 0.997 0.998 2483
#
# micro avg 0.992 0.992 0.992 1033356
# macro avg 0.994 0.994 0.994 1033356
# weighted avg 0.992 0.992 0.992 1033356
# ```
# #### Entity-network
#
# ```text
# precision recall f1-score support
#
# ADJ 0.98 0.98 0.98 22626
# ADP 0.99 0.99 0.99 60045
# ADV 0.97 0.98 0.98 23537
# AUX 0.99 0.99 0.99 5195
# CCONJ 0.99 0.99 0.99 18357
# DET 0.99 0.99 0.99 19762
# NOUN 0.99 0.99 0.99 134505
# NUM 0.99 0.99 0.99 22083
# PART 0.97 0.97 0.97 2924
# PRON 0.99 0.99 0.99 23783
# PROPN 0.99 0.99 0.99 114144
# SCONJ 0.96 0.95 0.95 7534
# SYM 0.97 0.98 0.97 1335
# VERB 0.99 0.99 0.99 60834
# X 0.93 0.68 0.79 186
#
# avg / total 0.99 0.99 0.99 516850
# ```
# ## Sentiment Analysis
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/sentiment](https://github.com/huseinzol05/Malaya/tree/master/session/sentiment)
# Render the sentiment-analysis accuracy chart inline.
display(Image('sentiment-accuracy.png', width=500))
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# negative 0.79 0.82 0.80 70381
# positive 0.79 0.76 0.78 64624
#
# avg / total 0.79 0.79 0.79 135005
# ```
# #### Fast-text-char
#
# ```text
# precision recall f1-score support
#
# negative 0.78 0.80 0.79 70256
# positive 0.77 0.75 0.76 63766
#
# avg / total 0.77 0.77 0.77 134022
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# negative 0.79 0.80 0.80 70329
# positive 0.78 0.77 0.78 64676
#
# avg / total 0.79 0.79 0.79 135005
# ```
# #### Multinomial
#
# ```text
# precision recall f1-score support
#
# negative 0.78 0.84 0.81 70720
# positive 0.80 0.74 0.77 64129
#
# micro avg 0.79 0.79 0.79 134849
# macro avg 0.79 0.79 0.79 134849
# weighted avg 0.79 0.79 0.79 134849
# ```
# #### Self-Attention
#
# ```text
# precision recall f1-score support
#
# negative 0.77 0.82 0.80 70708
# positive 0.79 0.74 0.76 64297
#
# avg / total 0.78 0.78 0.78 135005
# ```
# #### XGB
#
# ```text
# precision recall f1-score support
#
# negative 0.81 0.80 0.81 70356
# positive 0.79 0.80 0.79 64493
#
# micro avg 0.80 0.80 0.80 134849
# macro avg 0.80 0.80 0.80 134849
# weighted avg 0.80 0.80 0.80 134849
# ```
# #### BERT
#
# ```text
# precision recall f1-score support
#
# negative 0.85 0.79 0.82 69590
# positive 0.79 0.85 0.82 63296
#
# avg / total 0.82 0.82 0.82 132886
# ```
# ## Toxicity Analysis
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/toxic](https://github.com/huseinzol05/Malaya/tree/master/session/toxic)
#
# Labels are,
# ```python
# {0: 'toxic', 1: 'severe_toxic', 2: 'obscene', 3: 'threat', 4: 'insult', 5: 'identity_hate'}
# ```
# Render the toxicity-analysis accuracy chart inline.
display(Image('toxic-accuracy.png', width=500))
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# toxic 0.77 0.67 0.72 3650
# severe_toxic 0.45 0.40 0.43 395
# obscene 0.82 0.65 0.73 1985
# threat 0.54 0.30 0.38 142
# insult 0.71 0.62 0.66 1856
# identity_hate 0.65 0.35 0.45 357
#
# avg / total 0.75 0.62 0.68 8385
# ```
# #### Fast-text-char
#
# ```text
# precision recall f1-score support
#
# toxic 0.83 0.56 0.67 3654
# severe_toxic 0.47 0.22 0.30 387
# obscene 0.84 0.54 0.65 1985
# threat 0.48 0.17 0.25 120
# insult 0.73 0.44 0.55 1862
# identity_hate 0.44 0.18 0.26 326
#
# avg / total 0.77 0.49 0.60 8334
# ```
# #### Logistic Regression
#
# ```text
# precision recall f1-score support
#
# 0 0.98 0.27 0.43 805
# 1 0.50 0.02 0.04 88
# 2 0.99 0.30 0.46 460
# 3 0.00 0.00 0.00 32
# 4 0.87 0.22 0.35 420
# 5 0.00 0.00 0.00 68
#
# avg / total 0.88 0.24 0.38 1873
# ```
# #### Multinomial
#
# ```text
# precision recall f1-score support
#
# 0 0.81 0.52 0.63 805
# 1 0.44 0.35 0.39 88
# 2 0.76 0.49 0.59 460
# 3 0.00 0.00 0.00 32
# 4 0.68 0.47 0.56 420
# 5 0.15 0.09 0.11 68
#
# avg / total 0.71 0.47 0.56 1873
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# toxic 0.77 0.70 0.74 3678
# severe_toxic 0.58 0.14 0.23 430
# obscene 0.80 0.66 0.72 2014
# threat 0.53 0.21 0.30 127
# insult 0.72 0.60 0.65 1905
# identity_hate 0.67 0.27 0.38 338
#
# avg / total 0.75 0.62 0.67 8492
# ```
# #### Self-Attention
#
# ```text
# precision recall f1-score support
#
# toxic 0.80 0.53 0.64 3806
# severe_toxic 0.55 0.17 0.26 417
# obscene 0.80 0.55 0.65 2106
# threat 0.43 0.02 0.05 122
# insult 0.73 0.46 0.56 1989
# identity_hate 0.54 0.12 0.20 343
#
# avg / total 0.76 0.48 0.58 8783
# ```
# #### BERT
#
# ```text
# precision recall f1-score support
#
# toxic 0.81 0.71 0.76 3744
# severe_toxic 0.55 0.17 0.26 413
# obscene 0.84 0.69 0.76 2101
# threat 0.68 0.43 0.53 120
# insult 0.78 0.62 0.69 1964
# identity_hate 0.71 0.41 0.52 359
#
# avg / total 0.79 0.64 0.71 8701
# ```
# ## Subjectivity Analysis
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/subjectivity](https://github.com/huseinzol05/Malaya/tree/master/session/subjectivity)
# Render the subjectivity-analysis accuracy chart inline.
display(Image('subjectivity-accuracy.png', width=500))
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# negative 0.90 0.68 0.77 975
# positive 0.75 0.93 0.83 1018
#
# avg / total 0.82 0.81 0.80 1993
# ```
# #### Fast-text-char
#
# ```text
# precision recall f1-score support
#
# negative 0.89 0.88 0.88 992
# positive 0.88 0.90 0.89 1001
#
# avg / total 0.89 0.89 0.89 1993
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# negative 0.79 0.84 0.82 998
# positive 0.83 0.78 0.80 995
#
# avg / total 0.81 0.81 0.81 1993
# ```
# #### Multinomial
#
# ```text
# precision recall f1-score support
#
# negative 0.91 0.85 0.88 999
# positive 0.86 0.92 0.89 994
#
# micro avg 0.89 0.89 0.89 1993
# macro avg 0.89 0.89 0.89 1993
# weighted avg 0.89 0.89 0.89 1993
# ```
# #### Self-Attention
#
# ```text
# precision recall f1-score support
#
# negative 0.84 0.70 0.76 1023
# positive 0.73 0.86 0.79 970
#
# avg / total 0.79 0.78 0.77 1993
#
# ```
# #### Xgboost
#
# ```text
#
# precision recall f1-score support
#
# negative 0.86 0.85 0.85 1003
# positive 0.85 0.86 0.85 990
#
# micro avg 0.85 0.85 0.85 1993
# macro avg 0.85 0.85 0.85 1993
# weighted avg 0.85 0.85 0.85 1993
# ```
# #### BERT
#
# ```text
# precision recall f1-score support
#
# negative 0.94 0.91 0.92 983
# positive 0.91 0.94 0.93 1010
#
# avg / total 0.93 0.93 0.93 1993
# ```
# ## Emotion Analysis
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/emotion](https://github.com/huseinzol05/Malaya/tree/master/session/emotion)
# Render the emotion-analysis accuracy chart inline.
display(Image('emotion-accuracy.png', width=500))
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# anger 0.91 0.92 0.92 14943
# fear 0.87 0.86 0.87 7630
# joy 0.94 0.89 0.92 16570
# love 0.94 0.92 0.93 15729
# sadness 0.73 0.91 0.81 19849
# surprise 0.77 0.47 0.58 9383
#
# avg / total 0.86 0.86 0.85 84104
# ```
# #### Fast-text-char
#
# ```text
# precision recall f1-score support
#
# anger 0.89 0.90 0.90 15061
# fear 0.83 0.83 0.83 7552
# joy 0.87 0.89 0.88 16575
# love 0.89 0.90 0.90 15635
# sadness 0.72 0.78 0.75 19640
# surprise 0.67 0.53 0.59 9592
#
# avg / total 0.82 0.82 0.82 84055
#
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# anger 0.93 0.90 0.91 14883
# fear 0.89 0.83 0.86 7680
# joy 0.93 0.89 0.91 16640
# love 0.91 0.94 0.92 15621
# sadness 0.78 0.79 0.78 19766
# surprise 0.66 0.72 0.69 9514
#
# avg / total 0.86 0.85 0.85 84104
# ```
# #### Multinomial
#
# ```text
# precision recall f1-score support
#
# anger 0.84 0.83 0.83 14746
# fear 0.83 0.44 0.58 7661
# joy 0.74 0.87 0.80 16560
# love 0.87 0.79 0.83 15829
# sadness 0.61 0.86 0.71 19839
# surprise 0.77 0.27 0.39 9467
#
# avg / total 0.76 0.74 0.72 84102
# ```
# #### Self-attention
#
# ```text
# precision recall f1-score support
#
# anger 0.90 0.90 0.90 14869
# fear 0.83 0.85 0.84 7682
# joy 0.87 0.90 0.89 16658
# love 0.92 0.90 0.91 15767
# sadness 0.77 0.74 0.76 19866
# surprise 0.64 0.67 0.66 9262
#
# avg / total 0.83 0.83 0.83 84104
# ```
# #### Xgboost
#
# ```text
# precision recall f1-score support
#
# anger 0.91 0.90 0.91 14898
# fear 0.86 0.84 0.85 7589
# joy 0.89 0.91 0.90 16554
# love 0.91 0.92 0.91 15694
# sadness 0.73 0.73 0.73 19869
# surprise 0.57 0.57 0.57 9498
#
# avg / total 0.82 0.82 0.82 84102
# ```
# #### BERT
#
# ```text
# precision recall f1-score support
#
# anger 0.94 0.92 0.93 14790
# fear 0.88 0.87 0.87 7527
# joy 0.92 0.93 0.92 16669
# love 0.96 0.91 0.94 15651
# sadness 0.74 0.95 0.83 20004
# surprise 0.86 0.44 0.58 9463
#
# avg / total 0.88 0.87 0.86 84104
#
# ```
# ## Similarity
#
# Trained on 80% of dataset, tested on 20% of dataset. All training sessions stored in [session/similarity](https://github.com/huseinzol05/Malaya/tree/master/session/similarity)
# Render the similarity-model accuracy chart inline.
display(Image('similarity-accuracy.png', width=500))
# #### bahdanau
#
# ```text
# precision recall f1-score support
#
# not similar 0.83 0.83 0.83 31524
# similar 0.71 0.71 0.71 18476
#
# avg / total 0.79 0.79 0.79 50000
# ```
# #### self-attention
#
# ```text
# precision recall f1-score support
#
# not similar 0.81 0.83 0.82 31524
# similar 0.70 0.67 0.68 18476
#
# avg / total 0.77 0.77 0.77 50000
# ```
# #### dilated-cnn
#
# ```text
# precision recall f1-score support
#
# not similar 0.82 0.82 0.82 31524
# similar 0.69 0.69 0.69 18476
#
# avg / total 0.77 0.77 0.77 50000
# ```
# #### bert
#
# ```text
# precision recall f1-score support
#
# not similar 0.86 0.86 0.86 50757
# similar 0.77 0.76 0.76 30010
#
# avg / total 0.83 0.83 0.83 80767
# ```
# ## Dependency parsing
#
# Trained on 90% of dataset, tested on 10% of dataset. All training sessions stored in [session/dependency](https://github.com/huseinzol05/Malaya/tree/master/session/dependency)
# Render the dependency-parsing accuracy chart inline.
display(Image('dependency-accuracy.png', width=500))
# #### Bahdanau
#
# ```text
# precision recall f1-score support
#
# PAD 1.0000 1.0000 1.0000 843055
# acl 0.9406 0.9296 0.9351 2983
# advcl 0.8924 0.8613 0.8766 1175
# advmod 0.9549 0.9482 0.9515 4712
# amod 0.9296 0.9100 0.9197 4135
# appos 0.9312 0.9570 0.9439 2488
# aux 1.0000 1.0000 1.0000 5
# case 0.9809 0.9823 0.9816 10557
# cc 0.9676 0.9795 0.9735 3170
# ccomp 0.8598 0.8045 0.8312 404
# compound 0.9201 0.9464 0.9331 6605
# compound:plur 0.9597 0.9630 0.9613 594
# conj 0.9600 0.9572 0.9586 4158
# cop 0.9670 0.9720 0.9695 966
# csubj 0.8929 0.8333 0.8621 30
# csubj:pass 0.8000 0.6667 0.7273 12
# dep 0.8189 0.9259 0.8691 459
# det 0.9558 0.9369 0.9463 4041
# fixed 0.9337 0.8953 0.9141 535
# flat 0.9724 0.9714 0.9719 10479
# iobj 0.9286 0.7222 0.8125 18
# mark 0.9210 0.9491 0.9349 1376
# nmod 0.9355 0.9324 0.9340 3921
# nsubj 0.9430 0.9538 0.9484 6345
# nsubj:pass 0.9458 0.9053 0.9251 1985
# nummod 0.9762 0.9787 0.9775 3854
# obj 0.9495 0.9465 0.9480 5162
# obl 0.9458 0.9543 0.9500 5599
# parataxis 0.9268 0.8283 0.8748 367
# punct 0.9978 0.9968 0.9973 16549
# root 0.9743 0.9643 0.9693 5037
# xcomp 0.8878 0.9039 0.8958 1217
#
# avg / total 0.9953 0.9953 0.9953 951993
#
# precision recall f1-score support
#
# 0 1.0000 1.0000 1.0000 843055
# 1 0.9718 0.9633 0.9675 5037
# 2 0.9604 0.9459 0.9531 4285
# 3 0.9474 0.9557 0.9515 4971
# 4 0.9575 0.9647 0.9611 6594
# 5 0.9534 0.9665 0.9599 5880
# 6 0.9648 0.9632 0.9640 6037
# 7 0.9512 0.9654 0.9582 5548
# 8 0.9611 0.9623 0.9617 5542
# 9 0.9729 0.9498 0.9612 4877
# 10 0.9614 0.9621 0.9617 4559
# 11 0.9495 0.9588 0.9541 4316
# 12 0.9547 0.9573 0.9560 3698
# 13 0.9664 0.9506 0.9584 3600
# 14 0.9652 0.9590 0.9621 3294
# 15 0.9619 0.9541 0.9580 3179
# 16 0.9604 0.9573 0.9589 3117
# 17 0.9634 0.9587 0.9610 2831
# 18 0.9406 0.9594 0.9499 2392
# 19 0.9657 0.9582 0.9619 2176
# 20 0.9656 0.9615 0.9635 2102
# 21 0.9523 0.9577 0.9550 1960
# 22 0.9519 0.9586 0.9552 1859
# 23 0.9605 0.9555 0.9580 1732
# 24 0.9649 0.9474 0.9561 1540
# 25 0.9399 0.9503 0.9451 1349
# 26 0.9680 0.9333 0.9503 1199
# 27 0.9246 0.9604 0.9422 1111
# 28 0.9491 0.9561 0.9526 956
# 29 0.9578 0.9646 0.9612 989
# 30 0.9365 0.9513 0.9438 1007
# 31 0.9483 0.9592 0.9537 784
# 32 0.9352 0.9545 0.9448 726
# 33 0.9468 0.9290 0.9378 690
# 34 0.9575 0.9464 0.9519 690
# 35 0.9480 0.9231 0.9354 533
# 36 0.9532 0.9432 0.9481 475
# 37 0.9511 0.9340 0.9425 500
# 38 0.9455 0.9139 0.9294 418
# 39 0.9326 0.9708 0.9513 342
# 40 0.9361 0.9338 0.9350 408
# 41 0.9260 0.9602 0.9428 352
# 42 0.9649 0.9615 0.9632 286
# 43 0.9418 0.9487 0.9453 273
# 44 0.9125 0.9389 0.9255 311
# 45 0.9406 0.9556 0.9480 315
# 46 0.9703 0.9655 0.9679 203
# 47 0.9662 0.9542 0.9602 240
# 48 0.9065 0.9065 0.9065 214
# 49 0.9455 0.9720 0.9585 214
# 50 0.9315 0.9189 0.9252 148
# 51 0.9356 0.9265 0.9310 204
# 52 0.9257 0.9580 0.9416 143
# 53 0.9496 0.9231 0.9362 143
# 54 0.9381 0.9430 0.9406 193
# 55 0.9837 0.9237 0.9528 131
# 56 0.8532 0.9688 0.9073 96
# 57 0.9604 0.9510 0.9557 102
# 58 0.9633 0.9459 0.9545 111
# 59 0.9870 0.8837 0.9325 86
# 60 1.0000 0.9559 0.9774 68
# 61 0.9429 0.9519 0.9474 104
# 62 0.9726 0.8875 0.9281 80
# 63 0.9459 0.9589 0.9524 73
# 64 0.9385 0.9531 0.9457 64
# 65 1.0000 0.8833 0.9381 60
# 66 0.8676 0.9516 0.9077 62
# 67 0.9020 0.8519 0.8762 54
# 68 0.9683 0.9242 0.9457 66
# 69 0.9474 0.9351 0.9412 77
# 70 0.8406 0.8923 0.8657 65
# 71 0.9474 0.9818 0.9643 55
# 72 0.9722 0.9459 0.9589 37
# 73 0.9796 0.9600 0.9697 50
# 74 0.9630 0.9630 0.9630 27
# 75 0.9750 1.0000 0.9873 39
# 76 0.9655 1.0000 0.9825 28
# 77 0.9655 0.9333 0.9492 30
# 78 1.0000 1.0000 1.0000 24
# 79 0.9677 1.0000 0.9836 30
# 80 0.9608 0.9074 0.9333 54
# 81 0.9167 1.0000 0.9565 11
# 82 0.9074 0.9423 0.9245 52
# 83 0.9259 1.0000 0.9615 25
# 84 0.9677 1.0000 0.9836 30
# 85 1.0000 1.0000 1.0000 14
# 86 1.0000 0.9412 0.9697 34
# 87 1.0000 1.0000 1.0000 22
# 88 1.0000 1.0000 1.0000 8
# 89 1.0000 1.0000 1.0000 14
# 90 1.0000 1.0000 1.0000 18
# 91 0.9677 0.8824 0.9231 34
# 92 0.8182 1.0000 0.9000 9
# 93 0.9444 0.9444 0.9444 18
# 94 1.0000 0.9444 0.9714 18
# 95 0.9259 0.9615 0.9434 26
# 96 1.0000 1.0000 1.0000 8
# 97 1.0000 1.0000 1.0000 2
# 98 1.0000 1.0000 1.0000 16
# 99 0.9697 0.8649 0.9143 37
# 100 1.0000 1.0000 1.0000 2
# 101 1.0000 1.0000 1.0000 44
# 102 1.0000 1.0000 1.0000 15
# 103 0.8889 1.0000 0.9412 8
# 104 0.8269 0.9773 0.8958 44
# 105 1.0000 1.0000 1.0000 6
# 106 1.0000 1.0000 1.0000 7
# 107 1.0000 1.0000 1.0000 10
# 108 0.9412 1.0000 0.9697 32
# 109 1.0000 1.0000 1.0000 13
# 110 1.0000 1.0000 1.0000 9
# 111 1.0000 1.0000 1.0000 1
# 112 1.0000 0.7826 0.8780 23
# 113 1.0000 1.0000 1.0000 16
# 114 0.8333 1.0000 0.9091 5
# 115 1.0000 1.0000 1.0000 1
# 116 0.9130 0.9545 0.9333 22
# 117 1.0000 1.0000 1.0000 5
# 118 0.0000 0.0000 0.0000 0
# 119 1.0000 1.0000 1.0000 3
# 120 1.0000 1.0000 1.0000 15
# 122 1.0000 1.0000 1.0000 8
# 123 1.0000 1.0000 1.0000 4
# 125 1.0000 1.0000 1.0000 10
# 126 1.0000 1.0000 1.0000 2
# 129 1.0000 1.0000 1.0000 8
# 133 1.0000 1.0000 1.0000 4
# 135 1.0000 1.0000 1.0000 3
# 136 1.0000 1.0000 1.0000 2
# 139 1.0000 1.0000 1.0000 1
# 142 1.0000 1.0000 1.0000 2
# 146 1.0000 1.0000 1.0000 1
# 151 1.0000 1.0000 1.0000 1
#
# avg / total 0.9951 0.9951 0.9951 951993
# ```
# #### Luong
#
# ```text
# precision recall f1-score support
#
# PAD 1.0000 1.0000 1.0000 840905
# acl 0.9249 0.9392 0.9320 3094
# advcl 0.8952 0.8478 0.8709 1209
# advmod 0.9629 0.9475 0.9551 4952
# amod 0.9288 0.9246 0.9267 4218
# appos 0.9535 0.9204 0.9367 2426
# aux 1.0000 1.0000 1.0000 1
# case 0.9796 0.9795 0.9796 10991
# cc 0.9686 0.9739 0.9713 3298
# ccomp 0.8426 0.8501 0.8463 447
# compound 0.9170 0.9477 0.9321 6787
# compound:plur 0.9428 0.9744 0.9584 626
# conj 0.9539 0.9581 0.9560 4251
# cop 0.9625 0.9809 0.9716 993
# csubj 0.9655 0.8750 0.9180 32
# csubj:pass 1.0000 0.9167 0.9565 12
# dep 0.8905 0.8320 0.8603 518
# det 0.9503 0.9364 0.9433 4088
# fixed 0.9113 0.8899 0.9005 554
# flat 0.9596 0.9792 0.9693 10272
# iobj 1.0000 0.6000 0.7500 15
# mark 0.9396 0.9217 0.9305 1417
# nmod 0.9086 0.9475 0.9277 4155
# nsubj 0.9524 0.9547 0.9535 6483
# nsubj:pass 0.9402 0.9108 0.9252 1916
# nummod 0.9747 0.9761 0.9754 4022
# obj 0.9559 0.9468 0.9513 5337
# obl 0.9622 0.9242 0.9428 5727
# parataxis 0.8072 0.8910 0.8470 376
# punct 0.9972 0.9984 0.9978 16581
# root 0.9646 0.9688 0.9667 5037
# xcomp 0.9225 0.8364 0.8774 1253
#
# avg / total 0.9950 0.9950 0.9950 951993
#
# precision recall f1-score support
#
# 0 1.0000 1.0000 1.0000 840905
# 1 0.9709 0.9726 0.9717 5037
# 2 0.9310 0.9534 0.9420 4271
# 3 0.9543 0.9485 0.9514 5148
# 4 0.9587 0.9514 0.9551 6220
# 5 0.9471 0.9631 0.9550 5984
# 6 0.9593 0.9585 0.9589 5827
# 7 0.9597 0.9554 0.9576 5789
# 8 0.9657 0.9527 0.9592 5559
# 9 0.9548 0.9517 0.9532 5088
# 10 0.9565 0.9492 0.9528 4427
# 11 0.9458 0.9631 0.9544 4280
# 12 0.9584 0.9540 0.9562 3910
# 13 0.9481 0.9586 0.9533 3791
# 14 0.9385 0.9563 0.9473 3272
# 15 0.9577 0.9389 0.9482 3306
# 16 0.9383 0.9560 0.9471 3023
# 17 0.9629 0.9417 0.9522 2815
# 18 0.9384 0.9548 0.9465 2409
# 19 0.9463 0.9391 0.9427 2103
# 20 0.9349 0.9617 0.9481 2166
# 21 0.9712 0.9354 0.9530 2090
# 22 0.9525 0.9450 0.9487 1763
# 23 0.9512 0.9512 0.9512 1742
# 24 0.9624 0.9475 0.9549 1619
# 25 0.9439 0.9460 0.9449 1333
# 26 0.9584 0.9333 0.9457 1260
# 27 0.9443 0.9231 0.9336 1158
# 28 0.9384 0.9414 0.9399 955
# 29 0.9313 0.9417 0.9365 1080
# 30 0.9332 0.9323 0.9327 1004
# 31 0.9240 0.9404 0.9322 789
# 32 0.9500 0.9226 0.9361 762
# 33 0.9292 0.9502 0.9396 843
# 34 0.9553 0.9468 0.9510 677
# 35 0.9284 0.9396 0.9339 662
# 36 0.9238 0.9287 0.9262 561
# 37 0.9213 0.9152 0.9183 448
# 38 0.8978 0.9114 0.9045 395
# 39 0.8991 0.9114 0.9052 440
# 40 0.9262 0.9446 0.9353 505
# 41 0.9289 0.9098 0.9193 388
# 42 0.9544 0.9181 0.9359 342
# 43 0.9119 0.9308 0.9212 289
# 44 0.9106 0.9006 0.9056 362
# 45 0.8525 0.9091 0.8799 286
# 46 0.9283 0.8859 0.9066 263
# 47 0.9068 0.8924 0.8995 316
# 48 0.9282 0.9095 0.9188 199
# 49 0.9648 0.9202 0.9419 238
# 50 0.9274 0.9583 0.9426 120
# 51 0.9167 0.9585 0.9371 241
# 52 0.9507 0.9415 0.9461 205
# 53 0.9248 0.9179 0.9213 134
# 54 0.9200 0.9306 0.9253 173
# 55 0.9329 0.8910 0.9115 156
# 56 0.9073 0.8954 0.9013 153
# 57 0.9304 0.9469 0.9386 113
# 58 0.9417 0.9576 0.9496 118
# 59 0.8947 0.8500 0.8718 100
# 60 0.9770 0.8095 0.8854 105
# 61 0.8020 0.9576 0.8729 165
# 62 0.8767 0.8889 0.8828 72
# 63 0.9355 0.8365 0.8832 104
# 64 0.8852 0.8308 0.8571 65
# 65 0.9375 0.8955 0.9160 67
# 66 0.8690 0.8588 0.8639 85
# 67 0.9839 0.8472 0.9104 72
# 68 0.9223 0.9500 0.9360 100
# 69 0.9367 0.9250 0.9308 80
# 70 0.8442 0.9701 0.9028 67
# 71 0.8462 0.8462 0.8462 65
# 72 0.9200 0.8734 0.8961 79
# 73 0.8909 0.8596 0.8750 57
# 74 0.9487 0.8810 0.9136 42
# 75 0.9296 0.8919 0.9103 74
# 76 0.8333 0.9677 0.8955 31
# 77 0.8056 0.9062 0.8529 32
# 78 0.8750 0.8077 0.8400 26
# 79 0.7636 0.9333 0.8400 45
# 80 0.9180 0.8889 0.9032 63
# 81 0.7188 0.8214 0.7667 28
# 82 0.8983 0.9298 0.9138 57
# 83 1.0000 0.8571 0.9231 28
# 84 0.8605 0.9487 0.9024 39
# 85 0.9474 0.9474 0.9474 19
# 86 0.8919 0.9706 0.9296 34
# 87 0.9231 0.8571 0.8889 14
# 88 0.9474 0.7826 0.8571 23
# 89 1.0000 0.8571 0.9231 14
# 90 0.8929 0.8621 0.8772 29
# 91 0.8462 0.9429 0.8919 35
# 92 0.9333 0.7568 0.8358 37
# 93 0.7895 0.8333 0.8108 18
# 94 1.0000 0.8000 0.8889 20
# 95 0.9048 0.9500 0.9268 20
# 96 0.9412 0.9412 0.9412 17
# 97 0.9583 1.0000 0.9787 23
# 98 0.9000 1.0000 0.9474 9
# 99 1.0000 0.9643 0.9818 28
# 100 0.8333 1.0000 0.9091 5
# 101 1.0000 0.9231 0.9600 13
# 102 1.0000 1.0000 1.0000 13
# 103 0.8750 1.0000 0.9333 14
# 104 1.0000 0.9231 0.9600 26
# 105 1.0000 0.9167 0.9565 12
# 106 0.9444 0.8500 0.8947 20
# 107 1.0000 0.8571 0.9231 21
# 108 1.0000 1.0000 1.0000 20
# 109 1.0000 1.0000 1.0000 6
# 110 0.8750 1.0000 0.9333 7
# 111 1.0000 1.0000 1.0000 4
# 112 0.9200 0.9583 0.9388 24
# 113 0.8889 1.0000 0.9412 8
# 114 1.0000 0.6667 0.8000 3
# 115 1.0000 1.0000 1.0000 5
# 116 0.9474 0.8571 0.9000 21
# 117 0.6667 1.0000 0.8000 2
# 119 1.0000 1.0000 1.0000 3
# 120 0.8824 0.9375 0.9091 16
# 121 1.0000 0.8000 0.8889 5
# 122 0.8889 1.0000 0.9412 8
# 123 0.0000 0.0000 0.0000 2
# 124 1.0000 0.6667 0.8000 3
# 125 1.0000 1.0000 1.0000 8
# 126 1.0000 0.8000 0.8889 10
# 127 1.0000 1.0000 1.0000 3
# 128 1.0000 1.0000 1.0000 1
# 129 1.0000 1.0000 1.0000 5
# 130 1.0000 0.8333 0.9091 12
# 131 1.0000 1.0000 1.0000 2
# 132 1.0000 1.0000 1.0000 1
# 133 1.0000 1.0000 1.0000 9
# 134 1.0000 1.0000 1.0000 6
# 136 1.0000 1.0000 1.0000 3
# 137 1.0000 1.0000 1.0000 10
# 138 1.0000 1.0000 1.0000 10
# 140 1.0000 1.0000 1.0000 4
# 141 1.0000 1.0000 1.0000 2
# 142 0.4000 0.5000 0.4444 4
# 144 0.5714 1.0000 0.7273 4
# 146 0.7500 0.7500 0.7500 4
# 147 1.0000 0.6000 0.7500 5
# 149 1.0000 1.0000 1.0000 2
# 150 0.6667 0.6667 0.6667 3
# 151 0.5000 0.5000 0.5000 2
# 152 0.5000 0.5000 0.5000 2
# 153 1.0000 0.5000 0.6667 2
# 156 1.0000 1.0000 1.0000 2
# 158 0.8889 1.0000 0.9412 8
# 160 0.8000 1.0000 0.8889 4
# 164 1.0000 1.0000 1.0000 4
#
# avg / total 0.9941 0.9941 0.9941 951993
# ```
# #### Concat
#
# ```text
# precision recall f1-score support
#
# PAD 1.0000 1.0000 1.0000 841717
# acl 0.9501 0.9110 0.9301 2965
# advcl 0.8127 0.8719 0.8413 1249
# advmod 0.9423 0.9329 0.9376 4846
# amod 0.9141 0.9104 0.9123 4208
# appos 0.9282 0.9266 0.9274 2412
# case 0.9757 0.9756 0.9756 10896
# cc 0.9613 0.9726 0.9669 3171
# ccomp 0.8115 0.7094 0.7570 437
# compound 0.9176 0.9350 0.9263 6804
# compound:plur 0.9172 0.9767 0.9460 601
# conj 0.9504 0.9493 0.9498 4119
# cop 0.9621 0.9761 0.9690 962
# csubj 0.8095 0.7083 0.7556 24
# csubj:pass 0.7500 0.6000 0.6667 10
# dep 0.8712 0.8333 0.8519 552
# det 0.9288 0.9339 0.9313 4082
# fixed 0.9229 0.8288 0.8733 549
# flat 0.9619 0.9712 0.9666 10328
# iobj 0.7273 0.8000 0.7619 10
# mark 0.9059 0.9260 0.9159 1487
# nmod 0.9159 0.9318 0.9238 4105
# nsubj 0.9284 0.9550 0.9415 6316
# nsubj:pass 0.9367 0.8999 0.9179 2007
# nummod 0.9743 0.9617 0.9680 4024
# obj 0.9428 0.9340 0.9384 5184
# obl 0.9598 0.9292 0.9442 5776
# parataxis 0.8301 0.7537 0.7900 337
# punct 0.9957 0.9984 0.9971 16529
# root 0.9654 0.9694 0.9674 5037
# xcomp 0.8955 0.8575 0.8761 1249
#
# avg / total 0.9943 0.9943 0.9943 951993
#
# precision recall f1-score support
#
# 0 1.0000 1.0000 1.0000 841717
# 1 0.9638 0.9676 0.9657 5037
# 2 0.9526 0.9295 0.9409 4367
# 3 0.9410 0.9395 0.9403 4942
# 4 0.9544 0.9516 0.9530 6440
# 5 0.9453 0.9514 0.9484 6035
# 6 0.9376 0.9633 0.9503 6024
# 7 0.9456 0.9491 0.9473 5398
# 8 0.9506 0.9438 0.9472 5482
# 9 0.9488 0.9455 0.9472 4977
# 10 0.9331 0.9578 0.9453 4430
# 11 0.9453 0.9468 0.9460 4583
# 12 0.9364 0.9420 0.9392 3673
# 13 0.9495 0.9298 0.9395 3719
# 14 0.9425 0.9343 0.9384 3316
# 15 0.9460 0.9197 0.9327 3065
# 16 0.9125 0.9443 0.9281 3071
# 17 0.9350 0.9228 0.9289 2667
# 18 0.9377 0.9198 0.9286 2469
# 19 0.9167 0.9267 0.9217 2197
# 20 0.9076 0.9286 0.9180 2031
# 21 0.9355 0.8701 0.9016 1917
# 22 0.8985 0.8980 0.8983 1834
# 23 0.9038 0.9011 0.9025 1689
# 24 0.9066 0.8968 0.9017 1667
# 25 0.8782 0.9227 0.8999 1320
# 26 0.8769 0.9204 0.8982 1169
# 27 0.9041 0.9049 0.9045 1094
# 28 0.9054 0.8825 0.8938 987
# 29 0.9352 0.8799 0.9067 1099
# 30 0.8952 0.9110 0.9031 910
# 31 0.8745 0.8951 0.8847 810
# 32 0.8978 0.8772 0.8874 741
# 33 0.8782 0.9206 0.8989 705
# 34 0.9467 0.8692 0.9063 818
# 35 0.8893 0.8745 0.8819 542
# 36 0.9258 0.8794 0.9020 539
# 37 0.8603 0.9259 0.8919 459
# 38 0.9019 0.8458 0.8729 402
# 39 0.8577 0.9035 0.8800 487
# 40 0.8374 0.9071 0.8709 420
# 41 0.9148 0.8496 0.8810 379
# 42 0.8424 0.9393 0.8882 313
# 43 0.8852 0.8415 0.8628 284
# 44 0.9130 0.8571 0.8842 245
# 45 0.8829 0.9009 0.8918 343
# 46 0.8036 0.8654 0.8333 208
# 47 0.8803 0.8834 0.8818 283
# 48 0.9158 0.7699 0.8365 226
# 49 0.9074 0.8376 0.8711 234
# 50 0.7014 0.9136 0.7936 162
# 51 0.8268 0.9080 0.8655 163
# 52 0.8539 0.8889 0.8711 171
# 53 0.9136 0.8457 0.8783 175
# 54 0.8881 0.8581 0.8729 148
# 55 0.9073 0.8354 0.8698 164
# 56 0.8456 0.9200 0.8812 125
# 57 0.9000 0.8250 0.8609 120
# 58 0.9027 0.8430 0.8718 121
# 59 0.7947 0.9231 0.8541 130
# 60 0.7705 0.7833 0.7769 60
# 61 0.9315 0.8774 0.9037 155
# 62 0.8611 0.8493 0.8552 73
# 63 0.8172 0.9048 0.8588 84
# 64 0.8571 0.7273 0.7869 66
# 65 0.9130 0.8750 0.8936 72
# 66 0.7500 0.9398 0.8342 83
# 67 0.8409 0.8315 0.8362 89
# 68 0.9545 0.7590 0.8456 83
# 69 0.8916 0.8810 0.8862 84
# 70 0.7727 0.8644 0.8160 59
# 71 0.8679 0.8846 0.8762 52
# 72 0.8876 0.8404 0.8634 94
# 73 0.9298 0.8833 0.9060 60
# 74 0.9273 0.8226 0.8718 62
# 75 0.9070 0.8298 0.8667 47
# 76 0.7885 0.8723 0.8283 47
# 77 0.8000 0.8780 0.8372 41
# 78 0.8542 1.0000 0.9213 41
# 79 0.8696 0.9091 0.8889 44
# 80 0.9375 0.8571 0.8955 70
# 81 0.8667 0.7222 0.7879 36
# 82 0.8514 0.9130 0.8811 69
# 83 0.9024 0.9250 0.9136 40
# 84 0.9444 1.0000 0.9714 34
# 85 0.9189 0.9444 0.9315 36
# 86 0.8810 0.9487 0.9136 39
# 87 0.9310 0.8710 0.9000 31
# 88 0.8857 1.0000 0.9394 31
# 89 0.9200 0.9200 0.9200 25
# 90 0.8667 0.8125 0.8387 32
# 91 0.8519 0.9200 0.8846 25
# 92 0.8913 0.9535 0.9213 43
# 93 0.8500 0.9444 0.8947 18
# 94 0.9231 0.8571 0.8889 28
# 95 0.7500 0.8571 0.8000 7
# 96 0.9375 0.7143 0.8108 21
# 97 0.9688 0.8158 0.8857 38
# 98 0.9091 0.8696 0.8889 23
# 99 0.8462 1.0000 0.9167 33
# 100 1.0000 0.7778 0.8750 9
# 101 0.9744 0.9744 0.9744 39
# 102 0.8636 0.8636 0.8636 22
# 103 0.9677 0.9677 0.9677 31
# 104 1.0000 1.0000 1.0000 7
# 105 1.0000 0.6471 0.7857 17
# 106 0.9600 1.0000 0.9796 24
# 107 0.9750 1.0000 0.9873 39
# 108 0.8947 1.0000 0.9444 17
# 109 1.0000 1.0000 1.0000 14
# 110 0.9524 1.0000 0.9756 20
# 111 0.9091 0.8333 0.8696 12
# 112 0.9259 0.9259 0.9259 27
# 113 0.8889 1.0000 0.9412 16
# 114 0.8000 0.9231 0.8571 13
# 115 0.8235 1.0000 0.9032 14
# 116 1.0000 0.8095 0.8947 21
# 117 1.0000 0.8571 0.9231 7
# 118 0.7692 0.8333 0.8000 12
# 119 1.0000 1.0000 1.0000 4
# 120 0.9500 1.0000 0.9744 19
# 121 1.0000 1.0000 1.0000 7
# 122 0.8235 0.9333 0.8750 15
# 123 1.0000 1.0000 1.0000 6
# 124 1.0000 0.3333 0.5000 3
# 125 1.0000 0.8889 0.9412 18
# 126 1.0000 0.9667 0.9831 30
# 127 0.8750 1.0000 0.9333 7
# 128 0.8333 0.8333 0.8333 6
# 129 0.9412 0.9412 0.9412 17
# 130 0.9333 1.0000 0.9655 14
# 131 1.0000 1.0000 1.0000 9
# 132 1.0000 1.0000 1.0000 3
# 133 1.0000 1.0000 1.0000 11
# 134 0.9412 1.0000 0.9697 16
# 135 1.0000 1.0000 1.0000 6
# 136 1.0000 0.8000 0.8889 10
# 137 1.0000 0.8000 0.8889 10
# 138 1.0000 1.0000 1.0000 22
# 139 0.0000 0.0000 0.0000 1
# 140 1.0000 1.0000 1.0000 2
# 141 1.0000 1.0000 1.0000 2
# 142 1.0000 1.0000 1.0000 4
# 144 1.0000 1.0000 1.0000 4
# 146 1.0000 1.0000 1.0000 3
# 147 0.8889 1.0000 0.9412 8
# 149 1.0000 1.0000 1.0000 4
# 150 0.7500 1.0000 0.8571 3
# 151 1.0000 1.0000 1.0000 2
# 152 1.0000 1.0000 1.0000 1
# 153 1.0000 1.0000 1.0000 1
# 154 1.0000 1.0000 1.0000 2
# 156 1.0000 0.8333 0.9091 6
# 157 1.0000 1.0000 1.0000 1
# 158 1.0000 1.0000 1.0000 5
# 159 1.0000 1.0000 1.0000 1
# 160 1.0000 1.0000 1.0000 2
# 162 1.0000 1.0000 1.0000 3
# 163 0.0000 0.0000 0.0000 2
# 164 1.0000 1.0000 1.0000 2
# 167 0.6667 1.0000 0.8000 4
# 174 1.0000 1.0000 1.0000 2
# 176 1.0000 0.7500 0.8571 4
# 177 1.0000 1.0000 1.0000 2
# 178 1.0000 1.0000 1.0000 1
# 179 1.0000 1.0000 1.0000 1
# 182 1.0000 1.0000 1.0000 4
# 183 1.0000 1.0000 1.0000 4
#
# avg / total 0.9921 0.9920 0.9920 951993
# ```
# #### Attention is all you need
#
# ```text
# precision recall f1-score support
#
# PAD 1.0000 1.0000 1.0000 841796
# acl 0.8768 0.8849 0.8809 3016
# advcl 0.8290 0.7943 0.8113 1196
# advmod 0.9043 0.9163 0.9102 4754
# amod 0.9121 0.8773 0.8943 4149
# appos 0.8934 0.8983 0.8958 2547
# aux 1.0000 1.0000 1.0000 6
# case 0.9593 0.9670 0.9631 10888
# cc 0.9523 0.9606 0.9564 3198
# ccomp 0.7984 0.7385 0.7673 413
# compound 0.8677 0.8956 0.8815 6679
# compound:plur 0.9073 0.9255 0.9163 550
# conj 0.8625 0.9330 0.8964 4162
# cop 0.9296 0.9679 0.9484 996
# csubj 0.9000 0.4091 0.5625 22
# csubj:pass 0.8462 0.8462 0.8462 13
# dep 0.8274 0.7377 0.7800 507
# det 0.8897 0.9196 0.9044 4094
# fixed 0.8851 0.7966 0.8385 580
# flat 0.9468 0.9198 0.9331 10333
# iobj 1.0000 0.6000 0.7500 20
# mark 0.8535 0.8447 0.8491 1359
# nmod 0.8749 0.8907 0.8827 4107
# nsubj 0.8746 0.8881 0.8813 6471
# nsubj:pass 0.8478 0.7116 0.7738 1949
# nummod 0.9568 0.9524 0.9546 3884
# obj 0.9082 0.8946 0.9013 5274
# obl 0.9203 0.8854 0.9025 5740
# parataxis 0.7980 0.7980 0.7980 391
# punct 0.9933 0.9957 0.9945 16561
# root 0.8974 0.9200 0.9085 5037
# xcomp 0.8580 0.8593 0.8587 1301
#
# avg / total 0.9906 0.9906 0.9906 951993
#
# precision recall f1-score support
#
# 0 1.0000 1.0000 1.0000 841796
# 1 0.9486 0.9277 0.9381 5037
# 2 0.9157 0.9547 0.9348 4325
# 3 0.9505 0.9137 0.9318 4856
# 4 0.9439 0.9311 0.9374 6309
# 5 0.9422 0.9396 0.9409 6540
# 6 0.9314 0.9516 0.9414 5697
# 7 0.9468 0.9461 0.9464 5414
# 8 0.9524 0.9394 0.9458 5559
# 9 0.9432 0.9421 0.9427 5028
# 10 0.9308 0.9544 0.9425 4300
# 11 0.9623 0.9323 0.9471 4358
# 12 0.9449 0.9493 0.9471 3903
# 13 0.9338 0.9442 0.9390 3497
# 14 0.9444 0.9475 0.9459 3445
# 15 0.9445 0.9487 0.9466 3177
# 16 0.9411 0.9589 0.9500 3068
# 17 0.9350 0.9589 0.9468 2774
# 18 0.9527 0.9352 0.9439 2499
# 19 0.9767 0.9207 0.9478 2319
# 20 0.9445 0.9558 0.9501 2013
# 21 0.9321 0.9374 0.9347 2124
# 22 0.9337 0.9423 0.9380 1749
# 23 0.9508 0.9175 0.9339 1685
# 24 0.9608 0.9240 0.9421 1540
# 25 0.8654 0.9661 0.9130 1358
# 26 0.9511 0.9245 0.9376 1179
# 27 0.9416 0.9367 0.9392 1154
# 28 0.8961 0.9549 0.9245 975
# 29 0.9260 0.9383 0.9321 1054
# 30 0.9342 0.9551 0.9445 1025
# 31 0.9482 0.9146 0.9311 761
# 32 0.9549 0.9126 0.9333 835
# 33 0.9235 0.9506 0.9368 749
# 34 0.9492 0.9465 0.9478 710
# 35 0.9323 0.9649 0.9483 599
# 36 0.9750 0.9458 0.9602 535
# 37 0.9363 0.9620 0.9490 474
# 38 0.9099 0.9815 0.9443 432
# 39 0.9462 0.9342 0.9401 395
# 40 0.9170 0.9535 0.9349 452
# 41 0.9446 0.9214 0.9328 407
# 42 0.9452 0.9452 0.9452 292
# 43 0.9731 0.9031 0.9368 320
# 44 0.9030 0.9767 0.9384 343
# 45 0.9343 0.9812 0.9572 319
# 46 0.9943 0.7955 0.8838 220
# 47 0.9420 0.9684 0.9550 285
# 48 0.9160 0.9745 0.9443 235
# 49 0.9113 0.9893 0.9487 187
# 50 0.9568 0.8636 0.9078 154
# 51 0.9706 0.9538 0.9621 173
# 52 0.9554 0.9934 0.9740 151
# 53 0.9116 0.9515 0.9311 206
# 54 0.9008 0.9833 0.9402 120
# 55 0.9371 0.9371 0.9371 159
# 56 0.9179 0.9535 0.9354 129
# 57 0.9091 0.8824 0.8955 102
# 58 0.9350 0.9127 0.9237 126
# 59 0.9725 0.7910 0.8724 134
# 60 0.9576 0.9826 0.9700 115
# 61 0.9200 0.9485 0.9340 97
# 62 0.9200 0.9079 0.9139 76
# 63 0.9551 0.9770 0.9659 87
# 64 0.9878 0.9310 0.9586 87
# 65 0.9103 0.9861 0.9467 72
# 66 0.9474 0.9863 0.9664 73
# 67 1.0000 0.9667 0.9831 60
# 68 0.9855 0.8831 0.9315 77
# 69 0.8889 0.9231 0.9057 52
# 70 0.9524 1.0000 0.9756 80
# 71 0.9241 0.9605 0.9419 76
# 72 0.9870 0.9870 0.9870 77
# 73 0.9531 1.0000 0.9760 61
# 74 1.0000 0.9667 0.9831 30
# 75 0.9412 1.0000 0.9697 64
# 76 1.0000 0.8571 0.9231 28
# 77 0.9487 1.0000 0.9737 37
# 78 0.9677 0.9677 0.9677 31
# 79 1.0000 1.0000 1.0000 25
# 80 1.0000 0.9348 0.9663 46
# 81 1.0000 0.9756 0.9877 41
# 82 1.0000 0.9302 0.9639 43
# 83 0.9474 1.0000 0.9730 18
# 84 0.8846 1.0000 0.9388 23
# 85 0.9583 1.0000 0.9787 23
# 86 1.0000 0.8636 0.9268 44
# 87 1.0000 1.0000 1.0000 10
# 88 0.9412 0.9412 0.9412 17
# 89 1.0000 0.8750 0.9333 8
# 90 0.9167 0.9565 0.9362 23
# 91 1.0000 1.0000 1.0000 15
# 92 1.0000 1.0000 1.0000 34
# 93 0.8571 1.0000 0.9231 6
# 94 0.9231 1.0000 0.9600 12
# 95 1.0000 1.0000 1.0000 9
# 96 1.0000 0.9333 0.9655 15
# 97 1.0000 1.0000 1.0000 30
# 98 1.0000 1.0000 1.0000 8
# 99 1.0000 0.9200 0.9583 25
# 100 0.8571 1.0000 0.9231 6
# 101 1.0000 0.9744 0.9870 39
# 102 1.0000 1.0000 1.0000 7
# 103 0.8889 1.0000 0.9412 16
# 104 1.0000 0.9500 0.9744 20
# 105 1.0000 0.9000 0.9474 10
# 106 0.9500 1.0000 0.9744 19
# 107 0.7500 1.0000 0.8571 27
# 108 1.0000 1.0000 1.0000 15
# 109 1.0000 1.0000 1.0000 3
# 110 1.0000 1.0000 1.0000 14
# 111 1.0000 1.0000 1.0000 9
# 112 0.9474 1.0000 0.9730 18
# 113 0.8571 1.0000 0.9231 6
# 114 1.0000 1.0000 1.0000 10
# 115 1.0000 1.0000 1.0000 7
# 116 1.0000 0.9375 0.9677 16
# 117 1.0000 0.5000 0.6667 2
# 118 1.0000 1.0000 1.0000 12
# 119 1.0000 1.0000 1.0000 4
# 120 1.0000 0.9231 0.9600 13
# 121 1.0000 1.0000 1.0000 6
# 122 1.0000 1.0000 1.0000 3
# 123 1.0000 0.8333 0.9091 6
# 124 1.0000 1.0000 1.0000 2
# 125 1.0000 1.0000 1.0000 2
# 126 0.8846 1.0000 0.9388 23
# 127 1.0000 1.0000 1.0000 6
# 128 1.0000 1.0000 1.0000 5
# 129 1.0000 0.8333 0.9091 6
# 130 1.0000 1.0000 1.0000 12
# 131 1.0000 0.7143 0.8333 7
# 132 1.0000 1.0000 1.0000 2
# 133 1.0000 1.0000 1.0000 4
# 134 0.9000 0.9000 0.9000 10
# 135 0.8571 1.0000 0.9231 6
# 136 1.0000 1.0000 1.0000 7
# 137 1.0000 1.0000 1.0000 8
# 138 1.0000 1.0000 1.0000 12
# 139 1.0000 1.0000 1.0000 1
# 140 1.0000 1.0000 1.0000 2
# 141 1.0000 1.0000 1.0000 2
# 142 1.0000 1.0000 1.0000 4
# 144 1.0000 1.0000 1.0000 4
# 146 1.0000 1.0000 1.0000 3
# 147 1.0000 1.0000 1.0000 7
# 149 1.0000 1.0000 1.0000 2
# 150 1.0000 1.0000 1.0000 2
# 151 1.0000 1.0000 1.0000 2
# 152 1.0000 1.0000 1.0000 1
# 153 1.0000 1.0000 1.0000 1
# 154 1.0000 1.0000 1.0000 2
# 156 1.0000 1.0000 1.0000 6
# 157 1.0000 1.0000 1.0000 1
# 158 1.0000 1.0000 1.0000 5
# 159 1.0000 1.0000 1.0000 1
# 160 1.0000 1.0000 1.0000 2
# 162 0.6667 0.6667 0.6667 3
# 163 0.6667 1.0000 0.8000 2
# 164 1.0000 1.0000 1.0000 2
# 167 1.0000 0.7500 0.8571 4
# 174 1.0000 1.0000 1.0000 2
# 176 1.0000 1.0000 1.0000 4
# 177 1.0000 1.0000 1.0000 2
# 178 1.0000 1.0000 1.0000 1
# 179 1.0000 1.0000 1.0000 1
# 182 1.0000 1.0000 1.0000 4
# 183 1.0000 1.0000 1.0000 4
#
# avg / total 0.9933 0.9932 0.9932 951993
# ```
# #### CRF
#
# ```text
# precision recall f1-score support
#
# case 0.9584 0.9687 0.9635 11014
# obl 0.8045 0.8274 0.8158 5810
# flat 0.9469 0.9551 0.9510 10648
# cc 0.9538 0.9652 0.9595 3336
# conj 0.8684 0.8482 0.8582 4560
# punct 0.9848 0.9963 0.9905 17017
# nsubj:pass 0.8336 0.7640 0.7973 2059
# root 0.7960 0.8453 0.8199 5037
# nummod 0.9334 0.9359 0.9347 4088
# mark 0.8739 0.8865 0.8802 1392
# advcl 0.7649 0.6508 0.7033 1200
# advmod 0.8932 0.8924 0.8928 4769
# nmod 0.7762 0.7355 0.7553 4215
# nsubj 0.8600 0.8835 0.8716 6388
# det 0.9020 0.8868 0.8943 4142
# compound 0.8776 0.8974 0.8874 6869
# amod 0.8677 0.8530 0.8602 4128
# obj 0.8749 0.8765 0.8757 5256
# acl 0.8375 0.8094 0.8232 3075
# xcomp 0.8082 0.8070 0.8076 1264
# parataxis 0.7636 0.6208 0.6848 385
# appos 0.8221 0.8177 0.8199 2425
# cop 0.9350 0.9498 0.9423 1015
# fixed 0.8569 0.8056 0.8305 602
# ccomp 0.7516 0.5576 0.6402 434
# compound:plur 0.9154 0.9498 0.9323 638
# dep 0.7820 0.5275 0.6300 510
# csubj 0.8750 0.8400 0.8571 25
# iobj 0.9375 0.6818 0.7895 22
# csubj:pass 1.0000 0.8000 0.8889 5
# aux 0.5000 0.2500 0.3333 4
#
# avg / total 0.8953 0.8961 0.8953 112332
#
# precision recall f1-score support
#
# 5 0.5452 0.5875 0.5656 5964
# 2 0.6193 0.7164 0.6643 4365
# 1 0.8839 0.9031 0.8934 4942
# 7 0.5181 0.5460 0.5317 5505
# 9 0.5569 0.5504 0.5536 4804
# 12 0.5421 0.5309 0.5364 3760
# 15 0.5556 0.5105 0.5321 3181
# 4 0.5195 0.6219 0.5661 6241
# 6 0.5346 0.5571 0.5456 5942
# 11 0.5350 0.5581 0.5463 4150
# 14 0.5425 0.5109 0.5262 3251
# 8 0.5463 0.5414 0.5438 5395
# 10 0.5705 0.5252 0.5469 4682
# 13 0.5506 0.5199 0.5348 3537
# 3 0.5871 0.6077 0.5972 5068
# 18 0.5613 0.5232 0.5415 2504
# 20 0.5772 0.5315 0.5534 2109
# 23 0.6065 0.5814 0.5937 1689
# 26 0.5820 0.5861 0.5841 1138
# 29 0.6089 0.5874 0.5980 1047
# 32 0.6459 0.6241 0.6348 798
# 35 0.6659 0.5931 0.6274 521
# 36 0.6312 0.6406 0.6359 537
# 40 0.6039 0.6620 0.6316 426
# 17 0.5513 0.5303 0.5406 2674
# 22 0.5889 0.5238 0.5545 1827
# 25 0.5898 0.5967 0.5932 1381
# 27 0.5802 0.5588 0.5693 1088
# 28 0.6101 0.6082 0.6092 970
# 34 0.6011 0.6029 0.6020 695
# 39 0.6711 0.5884 0.6270 430
# 37 0.6675 0.5876 0.6250 468
# 42 0.6975 0.6323 0.6633 310
# 43 0.6504 0.6584 0.6544 243
# 44 0.7205 0.6221 0.6677 344
# 47 0.6667 0.7077 0.6866 260
# 49 0.6903 0.7290 0.7091 214
# 51 0.6829 0.7368 0.7089 190
# 53 0.7483 0.6730 0.7086 159
# 55 0.7143 0.6936 0.7038 173
# 57 0.7093 0.6224 0.6630 98
# 59 0.7652 0.6779 0.7189 149
# 60 0.7253 0.7174 0.7213 92
# 61 0.7658 0.7516 0.7586 161
# 62 0.6500 0.5571 0.6000 70
# 63 0.7257 0.7736 0.7489 106
# 64 0.8730 0.7971 0.8333 69
# 65 0.8533 0.6667 0.7485 96
# 66 0.7097 0.8354 0.7674 79
# 67 0.5965 0.6415 0.6182 53
# 72 0.9362 0.6769 0.7857 65
# 70 0.9024 0.5968 0.7184 62
# 75 0.9348 0.7414 0.8269 58
# 77 0.7838 0.8286 0.8056 35
# 78 0.8750 0.8750 0.8750 16
# 80 0.7200 0.8000 0.7579 45
# 82 0.7027 0.7222 0.7123 36
# 83 0.6923 0.7200 0.7059 25
# 84 0.7407 0.5000 0.5970 40
# 85 0.6923 0.8571 0.7660 21
# 86 0.9091 0.6061 0.7273 33
# 87 0.5833 0.5000 0.5385 28
# 88 0.8333 0.4412 0.5769 34
# 89 0.7619 0.9412 0.8421 17
# 90 0.9143 0.7805 0.8421 41
# 91 0.6923 0.8182 0.7500 22
# 92 1.0000 0.8519 0.9200 27
# 93 1.0000 0.7273 0.8421 22
# 94 0.9333 0.6364 0.7568 22
# 95 1.0000 0.6250 0.7692 16
# 96 0.8000 0.6857 0.7385 35
# 97 0.9500 0.8261 0.8837 23
# 100 1.0000 0.6667 0.8000 6
# 103 1.0000 0.7857 0.8800 14
# 104 1.0000 0.6000 0.7500 15
# 101 0.9574 0.8491 0.9000 53
# 107 0.8846 0.8214 0.8519 28
# 112 0.8000 0.8000 0.8000 10
# 115 1.0000 0.5556 0.7143 9
# 120 0.6667 0.6667 0.6667 6
# 122 0.5556 0.7143 0.6250 7
# 124 1.0000 0.2857 0.4444 7
# 125 0.2857 0.4000 0.3333 5
# 126 0.5455 0.3529 0.4286 17
# 127 0.6667 1.0000 0.8000 4
# 128 1.0000 0.3500 0.5185 20
# 129 0.7000 0.7778 0.7368 9
# 130 0.8667 0.9286 0.8966 14
# 132 1.0000 0.7143 0.8333 7
# 133 0.5714 1.0000 0.7273 4
# 134 1.0000 1.0000 1.0000 2
# 138 0.9091 0.7692 0.8333 13
# 147 1.0000 0.5789 0.7333 19
# 149 0.6667 1.0000 0.8000 2
# 150 0.6667 1.0000 0.8000 2
# 21 0.5698 0.5552 0.5624 1940
# 24 0.5665 0.5503 0.5583 1501
# 30 0.6101 0.5757 0.5924 905
# 19 0.5310 0.5163 0.5236 2142
# 16 0.5312 0.5369 0.5340 2917
# 38 0.6839 0.6005 0.6395 418
# 33 0.6255 0.5951 0.6100 741
# 41 0.6913 0.6341 0.6614 399
# 0 0.4083 0.0926 0.1510 529
# 31 0.5924 0.5625 0.5771 752
# 48 0.6432 0.6010 0.6214 198
# 50 0.7320 0.6222 0.6727 180
# 52 0.6685 0.6538 0.6611 182
# 54 0.7024 0.6705 0.6860 176
# 68 0.7791 0.6262 0.6943 107
# 79 0.9020 0.8214 0.8598 56
# 46 0.8037 0.6187 0.6992 278
# 56 0.7721 0.7095 0.7394 148
# 98 0.8000 0.5926 0.6809 27
# 45 0.6513 0.6804 0.6655 291
# 73 0.8261 0.7451 0.7835 51
# 105 0.8571 0.7500 0.8000 8
# 108 0.9091 0.8333 0.8696 12
# 110 0.8462 0.7857 0.8148 14
# 114 0.7778 0.4375 0.5600 16
# 123 0.7500 0.5000 0.6000 6
# 135 1.0000 0.5625 0.7200 16
# 139 0.0000 0.0000 0.0000 1
# 142 1.0000 0.7500 0.8571 4
# 146 1.0000 1.0000 1.0000 3
# 151 1.0000 0.5000 0.6667 2
# 76 0.8400 0.7000 0.7636 30
# 58 0.6838 0.7207 0.7018 111
# 69 0.6824 0.7838 0.7296 74
# 74 0.8605 0.8043 0.8315 46
# 71 0.8077 0.7778 0.7925 81
# 109 0.8889 0.7273 0.8000 11
# 99 0.8889 0.6667 0.7619 12
# 117 1.0000 0.1429 0.2500 7
# 116 0.6000 0.6667 0.6316 9
# 113 0.5833 0.2917 0.3889 24
# 121 0.7500 0.5000 0.6000 6
# 131 0.8333 1.0000 0.9091 5
# 137 1.0000 0.7500 0.8571 4
# 81 0.9375 0.6522 0.7692 46
# 118 0.5000 0.5000 0.5000 6
# 111 0.6000 0.6000 0.6000 5
# 102 1.0000 0.7143 0.8333 7
# 106 1.0000 0.7727 0.8718 22
# 136 0.7778 0.2800 0.4118 25
# 140 1.0000 0.5000 0.6667 2
# 141 0.0000 0.0000 0.0000 5
# 144 1.0000 0.3000 0.4615 10
# 152 0.0000 0.0000 0.0000 1
# 153 0.0000 0.0000 0.0000 1
# 158 1.0000 0.6250 0.7692 8
# 156 0.9091 0.4762 0.6250 21
# 160 1.0000 1.0000 1.0000 2
# 164 1.0000 1.0000 1.0000 2
# 143 0.0000 0.0000 0.0000 0
# 155 0.0000 0.0000 0.0000 0
# 157 0.5000 0.2500 0.3333 4
# 161 0.0000 0.0000 0.0000 0
# 162 1.0000 0.2500 0.4000 12
# 166 0.0000 0.0000 0.0000 0
# 175 0.0000 0.0000 0.0000 0
# 173 0.0000 0.0000 0.0000 0
# 176 1.0000 1.0000 1.0000 16
# 177 1.0000 1.0000 1.0000 8
# 178 1.0000 1.0000 1.0000 4
# 181 0.0000 0.0000 0.0000 0
# 182 1.0000 1.0000 1.0000 16
# 119 1.0000 0.7143 0.8333 21
# 148 0.0000 0.0000 0.0000 0
# 154 1.0000 0.7500 0.8571 8
# 159 1.0000 0.2500 0.4000 4
# 163 0.0000 0.0000 0.0000 8
# 167 1.0000 1.0000 1.0000 16
# 174 1.0000 1.0000 1.0000 8
# 179 1.0000 1.0000 1.0000 4
# 183 1.0000 1.0000 1.0000 16
# 145 0.0000 0.0000 0.0000 0
#
# avg / total 0.5859 0.5847 0.5836 109699
# ```
|
accuracy/models-accuracy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Convert full output from the model into images suitable for CNN learning.
import os
import glob
import numpy as np

# Raw simulator dumps; the "*100*" pattern selects the 100x100 voltage sweeps.
data_files = glob.glob("/Users/sandesh/datadump/*100*.npy")
N_v = 100  # number of voltage points per axis (each file holds N_v * N_v records)
def proc_state(dat, N_v):
    """Map each simulator record to an integer state label and return the
    labels as an (N_v, N_v) grid.

    Encoding: ShortCircuit -> -1, QPC -> 0, Dot -> number of dots,
    anything else (e.g. an invalid NoDot state) -> -2.
    """
    def _code(out):
        state = out['state']
        if state == 'Dot':
            return int(out['num_dot'])
        return {'ShortCircuit': -1, 'QPC': 0}.get(state, -2)

    return np.array([_code(ele['output']) for ele in dat]).reshape((N_v, N_v))
# For every raw dump, build the three N_v x N_v maps the CNN trains on and
# save them together as one dict-valued .npy per input file.
for file in data_files:
    # NOTE(review): records look like dicts, so this likely needs
    # allow_pickle=True on numpy >= 1.16.3 -- confirm the numpy version used.
    dat = np.load(file)
    # Current through the device at each gate-voltage point.
    current_map = np.array([x['output']['current'] for x in dat]).reshape((N_v,N_v))
    # Integer state labels (see proc_state for the encoding).
    state_map = proc_state(dat,N_v)
    # Total charge summed over the charge_state vector at each point.
    net_charge_map = np.array([np.sum(x['output']['charge_state']) for x in dat]).reshape((N_v,N_v))
    maps = {'current_map' : current_map,'state_map' : state_map,'net_charge_map' : net_charge_map}
    # Keep the original basename so processed files can be matched to raw dumps.
    np.save(os.path.expanduser('~/quantum-ml/dataproc_cnn/' + os.path.basename(file)),maps)
# +
# Spot-check: rebuild the three maps for the first file and visualize the
# net charge map.
import numpy as np
dat1 = np.load(data_files[0])
import matplotlib.pyplot as plt
# %matplotlib inline
N_v = 100
current_map = np.array([x['output']['current'] for x in dat1]).reshape((N_v,N_v))
state_map = proc_state(dat1,N_v)
net_charge_map = np.array([np.sum(x['output']['charge_state']) for x in dat1]).reshape((N_v,N_v))
# Pseudocolor plot of total charge vs. the two gate voltages.
plt.pcolor(net_charge_map)
|
machine_learning/cnn/data_processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install confluent_kafka
from confluent_kafka.admin import AdminClient, NewTopic, NewPartitions
from confluent_kafka import KafkaException
import sys
from uuid import uuid4
bootstrap_server = "kafka:9092"  # brokers act as cluster entry points
conf = {'bootstrap.servers': bootstrap_server}
a = AdminClient(conf)
# Fetch cluster metadata (10 s timeout) and list every topic with its
# partition count; topics carrying an error get it appended.
md = a.list_topics(timeout=10)
print(" {} topics:".format(len(md.topics)))
for t in iter(md.topics.values()):
    if t.error is not None:
        errstr = ": {}".format(t.error)
    else:
        errstr = ""
    ##if not (str(t)).startswith("_"):
    print(" \"{}\" with {} partition(s){}".format(t, len(t.partitions), errstr))
# +
#fs = a.create_topics([NewTopic("test1p", num_partitions=1, replication_factor=1)])
#for topic, f in fs.items():
# try:
# f.result() # The result itself is None
# print("Topic {} created".format(topic))
# except Exception as e:
# print("Failed to create topic {}: {}".format(topic, e))
# +
from confluent_kafka import SerializingProducer
from confluent_kafka.serialization import *
import time
topic = "SmokeSensorEvent"

def delivery_report(err, msg):
    """Kafka delivery callback: log success (topic/partition/offset) or the
    delivery error. Returns nothing; invoked by producer.poll()/flush()."""
    if err is None:
        print("Produced record to topic {} partition [{}] @ offset {}"
              .format(msg.topic(), msg.partition(), msg.offset()))
        return
    print("Failed to deliver message: {}".format(err))
# +
# Producer that serializes both key and value as UTF-8 strings.
producer_conf = {
    'bootstrap.servers': bootstrap_server,
    'key.serializer': StringSerializer('utf_8'),
    'value.serializer': StringSerializer('utf_8')
}
producer = SerializingProducer(producer_conf)
# -
# ## run the following cell to demonstrate that fire is not detected
# +
import json

# Emit one "no smoke" reading for sensor S1 every 10 seconds, forever
# (interrupt the kernel to stop). poll(1) services delivery callbacks.
while True:
    key = "S1"
    value = {"sensor": "S1","smoke": False,"ts":int(time.time())}
    producer.produce(topic=topic, value=json.dumps(value), key=key, on_delivery=delivery_report)
    print(value)
    producer.poll(1)
    time.sleep(10)
# -
# ## run the following cell to demonstrate fire detection
# +
# Same loop as above but with smoke=True, so downstream consumers should
# raise the fire alarm.
while True:
    key = "S1"
    value = {"sensor": "S1","smoke": True,"ts":int(time.time())}
    producer.produce(topic=topic, value=json.dumps(value), key=key, on_delivery=delivery_report)
    print(value)
    producer.poll(1)
    time.sleep(10)
# -
|
sss_firealarm/datagen/smoke_sensor_simulator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit
# language: python
# name: python38664bit6b21dd3ac91e4180a9157b2041b747d8
# ---
# +
import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def DarkChannel(im, sz):
    """Dark channel of an image: per-pixel minimum over the color channels,
    followed by an sz x sz rectangular erosion (the local minimum filter)."""
    blue, green, red = cv2.split(im)
    channel_min = cv2.min(cv2.min(red, green), blue)
    rect = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    return cv2.erode(channel_min, rect)
def AtmLight(im, dark):
    """Estimate the atmospheric light A (dark channel prior, He et al.).

    Selects the 0.1% of pixels with the brightest dark-channel values and
    returns their mean color as a (1, 3) float array.

    Bug fixes vs. the original:
    * ``dark`` was reshaped to (imsz, 1), so ``argsort()`` (default axis=-1)
      sorted each length-1 row and returned all zeros -- the "brightest"
      indices always pointed at pixel 0. Sort the flattened vector instead.
    * The averaging loop ran ``range(1, numpx)`` but divided by ``numpx``,
      skipping one pixel (and summing nothing at all when numpx == 1).
    """
    h, w = im.shape[:2]
    imsz = h * w
    # Use the top 0.1% of pixels, but always at least one.
    numpx = int(max(math.floor(imsz / 1000), 1))
    darkvec = dark.reshape(imsz)
    imvec = im.reshape(imsz, 3)
    # Indices of the numpx brightest dark-channel pixels.
    indices = darkvec.argsort()[imsz - numpx:]
    A = imvec[indices].mean(axis=0).reshape(1, 3)
    return A
def TransmissionEstimate(im, A, sz):
    """Coarse transmission map: t = 1 - omega * DarkChannel(I/A).

    omega < 1 leaves a little haze so distant objects still look distant.
    """
    omega = 0.95  # default 0.95
    normalized = np.empty(im.shape, im.dtype)
    for channel in range(3):
        normalized[:, :, channel] = im[:, :, channel] / A[0, channel]
    return 1 - omega * DarkChannel(normalized, sz)
def Guidedfilter(im, p, r, eps):
    """Guided filter (He et al.): edge-preserving smoothing of p using im as
    the guide. r is the box-filter radius, eps the regularizer."""
    def box(src):
        return cv2.boxFilter(src, cv2.CV_64F, (r, r))

    mean_I = box(im)
    mean_p = box(p)
    # Local covariance of (guide, input) and variance of the guide.
    cov_Ip = box(im * p) - mean_I * mean_p
    var_I = box(im * im) - mean_I * mean_I
    # Per-window linear model p ~ a*I + b, then average the coefficients.
    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I
    return box(a) * im + box(b)
def TransmissionRefine(im, et):
    """Refine the coarse transmission estimate et with a guided filter,
    using the grayscale source image (scaled to [0, 1]) as the guide."""
    guide = np.float64(cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)) / 255
    radius = 60    # default value 60
    eps = 0.0001   # default value 0.0001
    return Guidedfilter(guide, et, radius, eps)
def Recover(im,t,A,tx = 0.1):
    """Invert the haze model per channel: J = (I - A) / max(t, tx) + A.

    tx floors the transmission so nearly opaque regions do not blow up.
    """
    t = cv2.max(t, tx)
    res = np.empty(im.shape, im.dtype)
    for channel in range(3):
        res[:, :, channel] = (im[:, :, channel] - A[0, channel]) / t + A[0, channel]
    return res
# Batch driver: dehaze "Scenes/scene (1..10).png" and write the Otsu-
# thresholded dark channel of each to "Testing/output (i).png".
if __name__ == '__main__':
    # import sys
    # try:
    #     fn = sys.argv[1]
    # except:
    #     fn = './image/15.png'
    # def nothing(*argv):
    #     pass
    # Image path
    path_input = "Scenes"
    path_output = "Testing"
    total_image = 10
    for idx in range(1, total_image+1):
        fn = "./{}/scene ({}).png".format(path_input, idx)
        src = cv2.imread(fn)
        I = src.astype('float64')/255
        # Full dehazing pipeline: dark channel -> atmospheric light ->
        # coarse transmission -> guided-filter refinement -> recovery.
        dark = DarkChannel(I,2)
        A = AtmLight(I,dark)
        te = TransmissionEstimate(I,A,15)
        t = TransmissionRefine(src,te)
        J = Recover(I,t,A,0.1)
        # cv2.imshow("dark",dark)
        # cv2.imshow("t",t)
        # cv2.imshow('src',src)
        # cv2.imshow('Recover',J)
        # Intermediate maps are in [0, 1]; scale to 8-bit for writing.
        # NOTE(review): these files are overwritten on every iteration --
        # only the last scene's intermediates survive; confirm intended.
        cv2.imwrite("./image/dark_channel.png",dark*255)
        cv2.imwrite("./image/transmission_refine.png",t*255)
        cv2.imwrite("./image/transmission_estimate.png", te*255)
        cv2.imwrite("./image/source_image.png", src)
        cv2.imwrite("./image/reconstructed_image.png",J*255)
        path_image = './image/dark_channel.png'
        # NOTE(review): imread(..., 0) yields uint8, so *255 wraps modulo 256
        # (x*255 == 256*x - x == -x mod 256); confirm this is intentional
        # rather than a leftover scaling step.
        img = cv2.imread(path_image, 0)*255
        # PRINT VALUE IMAGE MATRIX #
        # print(img)
        # HISTOGRAM #
        # plt.hist(img.ravel(), 256, [0,256])
        # plt.show()
        # Otsu picks the threshold automatically; BINARY_INV flips fg/bg.
        ret, thresh = cv2.threshold(img, 0 , 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        # print("Index", idx)
        # print("Return threshold", ret)
        # ret, thresh = cv2.threshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
        # ret, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_TRUNC + cv2.THRESH_OTSU)
        # thresh = cv2.adaptiveThreshold(img , 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)
        output = "./{}/output ({}).png".format(path_output, idx)
        cv2.imwrite(output, thresh)
# -
|
dehaze.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="X9AC3_xOAzzZ" colab_type="text"
# Mount the drive
# + id="qx1I_DxiCa5l" colab_type="code" outputId="149cc96b-6cf8-406f-bf95-151df5a83ad9" executionInfo={"status": "ok", "timestamp": 1566264988316, "user_tz": -330, "elapsed": 1296, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16758202139208450511"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="6-jRuSwAA9Je" colab_type="text"
#
# + [markdown] id="sRx1ifPfA__E" colab_type="text"
# Necessary imports
# + id="vq_DhN27B_2Q" colab_type="code" colab={}
import numpy as np
import cv2
import pandas as pd
import random
import os
# + id="8PvD_N9NCHHb" colab_type="code" colab={}
# !mkdir data
os.chdir('data/')  # work inside ./data so the extracted images land there
# + id="WjMfKc6-p-Xj" colab_type="code" colab={}
# ls
# + [markdown] id="09mQo4IsBELN" colab_type="text"
# change the link to the link of your fer2013.csv file link
# + id="wQaxyJg9DNpx" colab_type="code" colab={}
# Path to the FER-2013 csv on the mounted Drive; adjust to your own copy.
ferlink = '../gdrive/My Drive/privateaiproject/fer2013.csv'
# + id="DPJpy0PdCM_u" colab_type="code" outputId="d0d9b4e6-d398-4162-afb9-69998dbca17b" executionInfo={"status": "ok", "timestamp": 1566264671858, "user_tz": -330, "elapsed": 12608, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16758202139208450511"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
os.getcwd()  # sanity check: should end in /data
# + id="dKA0tREpCpsR" colab_type="code" colab={}
# os.chdir('../')
# + id="1wrOPa3RDD0v" colab_type="code" colab={}
# Root under which the per-usage / per-emotion folders are created.
curdir = os.getcwd()
# + [markdown] id="WXKjV9JSBJ3A" colab_type="text"
# A function to create a directory for each class and move the images into those folders
# + id="hGgP6TdHBe8V" colab_type="code" colab={}
# curdir = os.path.abspath(os.path.dirname(__file__))
def gen_record(csvfile, channel):
    """Split the FER-2013 csv into per-usage / per-emotion image folders.

    Creates <curdir>/<Usage>/<emotion>/ directories and writes each row's
    48x48 grayscale image there as a uniquely-named jpg.

    Parameters:
        csvfile: path to fer2013.csv (columns 'emotion', 'pixels', 'Usage').
        channel: unused; kept so existing call sites keep working.
    """
    data = pd.read_csv(csvfile, delimiter=',', dtype='a')
    # np.float was removed in numpy 1.24 -- use the builtin float instead.
    labels = np.array(data['emotion'], float)
    imagebuffer = np.array(data['pixels'])
    # np.fromstring(..., sep=' ') is deprecated; parse the space-separated
    # pixel strings explicitly.
    images = np.array([np.array(image.split(), dtype=np.uint8) for image in imagebuffer])
    del imagebuffer  # free the raw pixel strings early; the csv is large
    # Each row is a flattened square image (48*48 = 2304 for FER-2013).
    num_shape = int(np.sqrt(images.shape[-1]))
    images.shape = (images.shape[0], num_shape, num_shape)
    # One top-level directory per usage split (Training/PublicTest/...).
    class_dir = {}
    for dr in set(data['Usage']):
        dest = os.path.join(curdir, dr)
        class_dir[dr] = dest
        if not os.path.exists(dest):
            os.mkdir(dest)
    for label, img, usage in zip(labels, images, data['Usage']):
        # Emotion label becomes the subdirectory name (e.g. Training/3/).
        destdir = os.path.join(class_dir[usage], str(int(label)))
        if not os.path.exists(destdir):
            os.mkdir(destdir)
        filepath = unique_name(destdir, usage)
        print('[^_^] Write image to %s' % filepath)
        if not filepath:
            continue
        sig = cv2.imwrite(filepath, img)
        if not sig:
            print('Error')
            exit(-1)
def unique_name(pardir, prefix, suffix='jpg'):
    """Return a path under pardir named '<prefix>_<rand>.<suffix>' that does
    not already exist, retrying on (unlikely) random-name collisions.

    Bug fix: the recursive retry previously discarded its result (no
    'return'), so any collision made the function return None and the
    caller silently skipped that image.
    """
    filename = '{0}_{1}.{2}'.format(prefix, random.randint(1, 10**8), suffix)
    filepath = os.path.join(pardir, filename)
    if not os.path.exists(filepath):
        return filepath
    return unique_name(pardir, prefix, suffix)
# + [markdown] id="Ej67p7nlBSLK" colab_type="text"
# Call the function on the link
# + id="Rcbnqk7FBpgm" colab_type="code" outputId="e1db0594-1a7c-410f-c9fd-039f2d76049f" executionInfo={"status": "ok", "timestamp": 1566264692138, "user_tz": -330, "elapsed": 27522, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16758202139208450511"}} colab={"base_uri": "https://localhost:8080/", "height": 1000, "output_embedded_package_id": "1frjEnaedTG5gx0TTSXC1cDChCPqvhKqp"}
# filename = fer2013link
# filename = os.path.join(curdir,filename)
# Extract every image from the csv into folders; second arg (channel) is
# unused by gen_record.
gen_record(ferlink,1)
# + [markdown] id="IcLgr-37BVyY" colab_type="text"
# install pysyft
# + id="luL34Eu8BL4o" colab_type="code" outputId="606fce48-9c74-4d25-e379-b8b994ecee2a" executionInfo={"status": "ok", "timestamp": 1566264730301, "user_tz": -330, "elapsed": 64062, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16758202139208450511"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !pip install syft
# + [markdown] id="OYeTSpPCBZTs" colab_type="text"
# import torch libraries
# + id="-gA6ldOLAqwn" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# + [markdown] id="x_45qtcqBdMP" colab_type="text"
# import pysyft and create three workers and a crypto provider
# + id="vGHuvn3SBG1l" colab_type="code" outputId="7d03a4f1-2b5c-4b61-9545-cff3288461c1" executionInfo={"status": "ok", "timestamp": 1566264736866, "user_tz": -330, "elapsed": 55438, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "16758202139208450511"}} colab={"base_uri": "https://localhost:8080/", "height": 104}
import syft as sy

# Hook PySyft into torch, then create the virtual parties: a client, two
# compute workers (bob, alice) that will hold the secret shares, and a
# crypto provider that supplies correlated randomness for SMPC.
hook = sy.TorchHook(torch)
client = sy.VirtualWorker(hook, id="client")
bob = sy.VirtualWorker(hook, id="bob")
alice = sy.VirtualWorker(hook, id="alice")
crypto_provider = sy.VirtualWorker(hook, id="crypto_provider")
# + [markdown] id="G3StW3KwGvy6" colab_type="text"
# some basic args
# + id="Zo-CSE8xBlRX" colab_type="code" colab={}
# + id="l4ATjbZ3EZTz" colab_type="code" colab={}
class Arguments():
    """Hyper-parameter bundle for training (lightweight argparse stand-in)."""

    def __init__(self):
        defaults = {
            'batch_size': 256,        # training mini-batch size
            'test_batch_size': 200,   # evaluation mini-batch size
            'epochs': 1,
            'lr': 0.0001,             # learning rate
            'log_interval': 100,      # batches between progress prints
        }
        for name, value in defaults.items():
            setattr(self, name, value)


args = Arguments()
# + [markdown] id="yJjUZ5eaBoIY" colab_type="text"
# load the images transform them and create data loaders
# + id="gvVkbWcsEgRw" colab_type="code" colab={}
# Preprocessing: resize every image to 48x48, force single-channel
# grayscale, convert to tensor and normalize.
# NOTE(review): (0.1307, 0.3081) are the MNIST statistics -- confirm they
# are appropriate for the facial-expression images used here.
normalize = transforms.Compose([transforms.Resize((48,48)),
                                transforms.Grayscale(num_output_channels=1),
                                transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
# Folders of class-labelled images produced by gen_record earlier.
train_dataset =datasets.ImageFolder('Training', transform=normalize)
test_dataset = datasets.ImageFolder('PublicTest', transform=normalize)
train_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=args.batch_size,
    shuffle=True)
# + id="urt7ugcBFEsN" colab_type="code" colab={}
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=args.test_batch_size,
    shuffle=True)
# Convert to integers and privately share the dataset
# Each test batch is fixed-precision encoded and secret-shared between the
# alice and bob workers, with crypto_provider supplying the randomness.
private_test_loader = []
for data, target in test_loader:
    private_test_loader.append((
        data.fix_prec().share(alice, bob, crypto_provider=crypto_provider),
        target.fix_prec().share(alice, bob, crypto_provider=crypto_provider)
    ))
# + [markdown] id="jtBnXK4iB-QZ" colab_type="text"
# use a simple neural net for baseline
# + id="bxuDunl6Fgf0" colab_type="code" colab={}
class Net(nn.Module):
    """Baseline two-layer MLP: 784 -> 500 -> 10."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(784, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # flatten to (batch, 784), one hidden ReLU layer, linear output
        flat = x.view(-1, 784)
        hidden = F.relu(self.fc1(flat))
        return self.fc2(hidden)
# + id="xKaCpQRR12K1" colab_type="code" colab={}
# + [markdown] id="d1w0DGT3CCCi" colab_type="text"
# it had high bias so added more layers
# + id="z_UR7vtSpWV5" colab_type="code" colab={}
class Net(nn.Module):
    """Deeper MLP (784 -> 1000 -> 1000 -> 500 -> 10), built to reduce bias."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(784, 1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.fc3 = nn.Linear(1000, 500)
        self.fc4 = nn.Linear(500, 10)

    def forward(self, x):
        out = x.view(-1, 784)
        # three hidden layers, ReLU after each
        for layer in (self.fc1, self.fc2, self.fc3):
            out = F.relu(layer(out))
        # final linear classifier (logits)
        return self.fc4(out)
# + [markdown] id="ReT1WysmCHni" colab_type="text"
# started overfitting so added dropout
# + id="wSvvwkZc8xRh" colab_type="code" colab={}
class Net(nn.Module):
    """MLP with dropout (784 -> 1000 -> 500 -> 500 -> 10) to curb overfitting."""

    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(784, 1000)
        self.fc2 = nn.Linear(1000, 500)
        self.fc3 = nn.Linear(500, 500)
        self.fc4 = nn.Linear(500, 10)

    def forward(self, x):
        x = x.view(-1, 784)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        # BUG FIX: F.dropout defaults to training=True, so the original kept
        # dropping units (and rescaling) at evaluation time too.  Tie dropout
        # to the module's train/eval state instead.
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.fc3(x)
        x = F.relu(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.fc4(x)
        return x
# + id="TD337DLBWKjv" colab_type="code" colab={}
class Net(nn.Module):
    """Four dilated-convolution stages followed by a small classifier head.

    Each stage is conv(dilation=2) -> conv -> 2x2 max-pool; with a 1x48x48
    input the feature map shrinks to 128x1x1 before the linear layers, so the
    head is Linear(128, 64) -> ReLU -> Linear(64, 7).
    """

    def __init__(self):
        super(Net, self).__init__()
        # stage 1: 1 -> 32 channels
        self.cnn1a = nn.Conv2d(1, 32, 3, padding=2, dilation=2)
        self.cnn1b = nn.Conv2d(32, 32, 3, padding=2)
        self.cnn1_maxpool = nn.MaxPool2d(2)
        # stage 2: 32 -> 64 channels
        self.cnn2a = nn.Conv2d(32, 64, 3, padding=2, dilation=2)
        self.cnn2b = nn.Conv2d(64, 64, 3, padding=2)
        self.cnn2_maxpool = nn.MaxPool2d(2)
        # stage 3: 64 -> 96 channels
        self.cnn3a = nn.Conv2d(64, 96, 3, padding=2, dilation=2)
        self.cnn3b = nn.Conv2d(96, 96, 3)
        self.cnn3_maxpool = nn.MaxPool2d(2)
        # stage 4: 96 -> 128 channels
        self.cnn4a = nn.Conv2d(96, 128, 3, padding=2, dilation=2)
        self.cnn4b = nn.Conv2d(128, 128, 3)
        self.cnn4_maxpool = nn.MaxPool2d(2)
        # classifier head (7 expression classes)
        self.linear = nn.Linear(128, 64)
        self.output = nn.Linear(64, 7)

    def forward(self, x):
        # NOTE(review): there is no activation between the conv layers, so
        # each stage is purely linear up to the head -- confirm intentional.
        x = self.cnn1_maxpool(self.cnn1b(self.cnn1a(x)))
        x = self.cnn2_maxpool(self.cnn2b(self.cnn2a(x)))
        x = self.cnn3_maxpool(self.cnn3b(self.cnn3a(x)))
        x = self.cnn4_maxpool(self.cnn4b(self.cnn4a(x)))
        # flatten to (batch, 128) and classify
        x = x.view(x.size(0), -1)
        return self.output(F.relu(self.linear(x)))
# + id="0SCyKjxeFmcM" colab_type="code" colab={}
def train(args, model, train_loader, test_loader, optimizer, epoch):
    """Run one training epoch, then print loss/accuracy on test_loader.

    args must expose ``test_batch_size``; both loaders yield (data, target)
    batches.  Returns None; all progress is reported via print().
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        print("tr start")
        optimizer.zero_grad()
        output = model(data)
        output = F.log_softmax(output, dim=1)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
    print('Train Epoch: {} '.format(epoch))
    # evaluate on the plaintext test loader
    te_loss = []
    n_correct_priv = 0
    n_total = 0
    with torch.no_grad():
        for data, target in test_loader:
            outputt = model(data)
            outputt = F.log_softmax(outputt, dim=1)
            losste = F.nll_loss(outputt, target)
            te_loss.append(losste.numpy())
            pred = outputt.argmax(dim=1)
            n_correct_priv += pred.eq(target.view_as(pred)).sum()
            n_total += args.test_batch_size
    # BUG FIX: the original printed ``n_correct`` which was never defined
    # (its assignment was commented out), raising NameError at runtime.
    n_correct = int(n_correct_priv)
    print('Test set: Accuracy: {}/{} ({:.0f}%)'.format(
        n_correct, n_total,
        100. * n_correct / n_total))
    print('train loss ', loss)
    print('test loss ', np.average(np.array(te_loss)))
# Build the model and train it with Adam for args.epochs epochs on the
# plaintext data (the model is encrypted/shared only after training).
model = Net()
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
for epoch in range(1, args.epochs + 1):
    train(args, model, train_loader, test_loader, optimizer, epoch)
# + id="duV_tjnBFvNK" colab_type="code" colab={}
model.fix_precision().share(alice, bob, crypto_provider=crypto_provider)
# + id="TsP_83CDF82l" colab_type="code" colab={}
def test(args, model, test_loader):
    """Evaluate a privately shared model on the encrypted test loader.

    The accuracy counter is accumulated as a shared tensor and only
    decrypted once at the end; results are printed, nothing is returned.
    """
    model.eval()
    correct_enc = 0
    seen = 0
    with torch.no_grad():
        for data, target in test_loader:
            pred = model(data).argmax(dim=1)
            correct_enc += pred.eq(target.view_as(pred)).sum()
            seen += args.test_batch_size
    # decrypt the shared counter back to a plain integer
    n_correct = correct_enc.copy().get().float_precision().long().item()
    print('Test set: Accuracy: {}/{} ({:.0f}%)'.format(
        n_correct, seen,
        100. * n_correct / seen))
test(args, model, private_test_loader)
# + id="XrludEn-q2yz" colab_type="code" colab={}
# + id="mmBcsF4YGCDU" colab_type="code" colab={}
|
Mohammad Hasnain Rajan/ferdnncp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
from IPython.display import display, Math, Latex
from scipy import *
# +
arc = load('mnist.npz')
x_train = arc['arr_0']
y_train = arc['arr_1']
x_test = arc['arr_2']
y_test = arc['arr_3']
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# +
##function given by prof.
def classify(z):
    """Return the training label of the nearest neighbour of *z*.

    NOTE(review): relies on the module-level names ``dist``, ``x_train``
    and ``y_train``; ``dist`` is never defined under that name in this
    file (only d_infty/d_one/d_two are) -- confirm it is bound to one of
    them before calling.
    """
    # distance from z to every training image
    all_distances = array([dist(x, z) for x in x_train])
    # label of the closest training sample
    digit = y_train[argmin(all_distances)]
    return digit
# -
# Show image number 15, and write in the title what digit it should correspond to
N=15
imshow(x_train[N], cmap='gray_r')
_ = title('Hand written digit '+str(y_train[N]))
# ## GENERAL GUIDELINES
#
#
# 1. Time all functions you construct, and try to make them run as fast as possible by precomputing anything that can be precomputed
# 2. Extra points are gained if you reduce the complexity of the given algorithms in any possible way, for example by exploiting linearity, etc.
# 3. If something takes too long to execute, make sure you time it on a smaller set of input data, and give estimates of how long it would take to run the full thing (without actually running it). Plot only the results you manage to run on your PC.
#
#
#
# ## ASSIGNMENT 1
#
# Implement the following distance functions.
# 1. Dinfty
# 2. D_one
# 3. D_two
#
# +
def d_infty(a, b):
    """Max-abs (L-infinity) distance between two images."""
    diff = (b - a).flatten()
    return norm(diff, inf)
def d_one(a, b):
    """L1 (Manhattan) distance between two images."""
    diff = (b - a).flatten()
    return norm(diff, 1)
def d_two(a, b):
    """Euclidean (L2) distance between two images."""
    diff = (b - a).flatten()
    return norm(diff, 2)
# -
# #### COMMENT OF ASSIGNMENT 1
#
# functions that I used:
# 1. norm(x, ord): This function is able to return one of eight different matrix norms, or one of an infinite number of vector norms , depending on the value of the ord parameter (in our case ord= inf,1,2)
# 2. reshape(): Gives a new shape to an array without changing its data.
# ## Assignment 2
#
# Write a function that, given a number N, and a distance function dist, computes the distance matrix D of shape (N,N) between the first N entries of x_train:
# \begin{equation}
# D[i,j] = dist(x_train[i], x_train[j])
# \end{equation}
#
# Performing the minimum number of operations (i.e., avoid computing a distance if it has already been computed before, i.e., keep in mind that dist(a,b) = dist(b,a)).
#
# +
def distance(N, dist, data=None):
    """Symmetric (N, N) matrix of pairwise distances between samples.

    D[i, j] = dist(data[i], data[j]); each pair is computed only once and
    mirrored, exploiting dist(a, b) == dist(b, a).

    ``data`` defaults to the module-level x_train, so the original call
    signature distance(N, dist) keeps working unchanged.
    """
    if data is None:
        data = x_train
    D = zeros((N, N))
    for i in range(N):
        for j in range(i + 1, N):
            D[i][j] = D[j][i] = dist(data[i], data[j])
    return D
A= distance(4,d_infty)
print(A)
# -
# ## Assignment 3
#
# Compute and plot the three distance matrices
# 1. Dinfty
# 2. D1
# 3. D2
#
# for the first 100 images of the training set, using the function imshow applied to the three matrices
# +
# define the 3 matrices
Dinfty = distance(100,d_infty)
D1 = distance(100,d_one)
D2 = distance(100,d_two)
# -
matshow(Dinfty)
_ = title('plot of the d_infty distance matrix')
matshow(D1,cmap='Blues_r')
_ = title('plot of the D-one matrix')
matshow(D2,cmap='Blues_r')
_ = title('plot of the D-two matrix')
# ## ASSIGNMENT 4
#
# Using only a distance matrix, apply the algorithm described above and compute the efficiency of the algorithm, i.e., write a function that:
#
# Given a distance matrix with shape (N,N), constructed on the first N samples of the x_train set, count the number of failures of the leave one out strategy, i.e.,
#
# 1. set error_counter to zero
#
# 2. for every line i of the matrix:
#
# 1. find the index j (different from i) for which D[i,k] >= D[i,j] for all k different from i and j.
#
# 2. if y_train[j] is different from y_train[i], increment by one error_counter.
#
# 3. return the error: error_counter/N.
#
# 4. apply the function above to the 3 different distance matrices you computed before
# +
# Function that takes a distance matrix as input and returns a number representing the error rate.
# Assignment 4 asks to take each row of the distance matrix, find the index of the
# minimum distance, and check whether the corresponding y_train labels differ
# (i.e. whether the corresponding digits are different).
# The matrices agree on their common dimensions, so a sub-matrix can be reused.
def performance(N, dist_matrix, labels=None):
    """Leave-one-out nearest-neighbour error rate from a distance matrix.

    For every row i, find the nearest *other* sample and count a failure
    when their labels differ.  Returns error_count / N.

    ``labels`` defaults to the module-level y_train, keeping the original
    performance(N, dist_matrix) call working unchanged.
    """
    if labels is None:
        labels = y_train
    # BUG FIX: slicing a numpy array yields a *view*; the original wrote the
    # masked diagonal straight into the caller's matrix, corrupting it for
    # any later use.  Work on a copy instead.
    D = dist_matrix[0:N, 0:N].copy()
    error_counter = 0
    for i in range(N):
        # overwrite the diagonal with a value strictly larger than one
        # neighbour so argmin can never pick the sample itself
        if i != N - 1:
            D[i, i] = D[i, i + 1] + 100
        else:
            D[i, i] = D[i, i - 1] + 100
        nearest = argmin(D[i, :])
        if labels[i] != labels[nearest]:
            error_counter = error_counter + 1
    return error_counter / N
##questo algoritmo è molto dispendioso, perché ogni volta crea una matrice delle distanze
##l'algoritmo di prima invece lo riceve in input e prende la sottomatrice che gli serve
def performance2(N, dist):
    """Leave-one-out error rate, recomputing the distance matrix each call.

    Much slower than performance(): it rebuilds the full NxN distance
    matrix from the distance function instead of reusing a precomputed one.
    """
    D = distance(N, dist)
    errors = 0
    for i in range(N):
        # mask the diagonal with a value larger than a neighbour so argmin
        # can never select the sample itself
        j = i + 1 if i != N - 1 else i - 1
        D[i, i] = D[i, j] + 100
        nearest = argmin(D[i, :])
        if y_train[i] != y_train[nearest]:
            errors = errors + 1
    return errors / N
#qualcosa non va..introduco un errore? secondo me ci starebbe
# -
# ## ASSIGNMENT 5
#
# Run the algorithm implemented above for N=100,200,400,800,1600 on the three different distances, and plot the three error rate as a function of N (i.e., compute the distance matrix, and compute the efficiency associated to the distance matrix).
#
#
#
# +
from timeit import default_timer as timer
Size = [100,200,400,800,1600]
errors = [] #array con gli
start = timer()
D_I = distance(1600,d_infty)
D_1 = distance(1600,d_one)
D_2 = distance(1600,d_two)
errors = zeros((5,3))
for i in range(5):
errors[i][0] = performance(Size[i],D_I)
errors[i][1] = performance(Size[i],D_1)
errors[i][2] = performance(Size[i],D_2)
print(errors)
end = timer()
t = end -start
print("tempo trascorso per l'intero procedimento: ",t)
# +
##verifico che l'algoritmo performance2 è molto più lento del primo
err = zeros((5,3))
inizio = timer()
for i in range(5):
err[i][0] = performance2(Size[i],d_infty)
err[i][1] = performance2(Size[i],d_one)
err[i][2] = performance2(Size[i],d_two)
print(err)
fine = timer()
tt = fine - inizio
print("tempo trascorso per l'intero procedimento: ",tt)
# -
for i in range(3): # tre volte--> una per ogni tipo di distanza
plot(Size,errors.T[i],)
title("efficiency plot for our three different distance")
legend(["d_infty","d_one","d_two"])
grid()
# ## ASSIGNMENT 6
#
# In principle, it should be possible to decrease the error by using a better norm. From the table above, it is clear that the L2 distance works better than the L1 distance, which works better than the Linfty distance.
#
# However, none of these distances exploit the fact that the image is a two-dimensional object, and that there is information also in the neighboring information of the pixels.
#
# One way to exploit this, is to interpret the image as a continuous function with values between zero and one, defined on a square domain \Omega=[0,27]x[0,27].
# \begin{equation*}
# f:\Omega \longrightarrow \mathbb{R}
# \end{equation*}
#
#
#
#
#
#
# 1. Implement a function that computes an approximation of the H1 norm distance on the renormalized images. Given two images f1 e f2.
#
# 1. Compute
# \begin{equation*}
# a = \frac{f_{1}}{\int_{\Omega} f_{1} }
# \end{equation*}
#
# \begin{equation*}
# b = \frac{f_{2}}{\int_{\Omega} f_{2} }
# \end{equation*}
# 2. Define the H1 distance as
#
# \begin{equation*}
# d_{H1}(f_{1},f_{2}) = \sqrt{\int_{\Omega}| \nabla(a-b)|^{2} + (a-b)^{2}}
# \end{equation*}
#
# 3. Compute the distance matrix and the efficiency for this distance for N=100,200,400,800,1600
#
#
# +
#function which returns a and b---> normalized function!
from scipy import integrate
def normalization(f1, f2):
    """Rescale two images (arrays) so that each sums to one."""
    # divide every pixel by the image's total mass
    return f1 / f1.sum(), f2 / f2.sum()
def h1_distance(f1, f2):
    """Approximate H1-norm distance between two mass-normalized images.

    d(f1, f2) = sqrt( sum |grad(a - b)|^2 + (a - b)^2 ), where a and b are
    the inputs rescaled to unit total mass.  Assumes 2-D array inputs.
    """
    # normalize to unit mass (same computation as normalization(), inlined)
    a = f1 / f1.sum()
    b = f2 / f2.sum()
    z = a - b
    gx, gy = gradient(z)
    # BUG FIX: the original computed absolute(gradient(z))**2 + z**2, which
    # broadcast z**2 onto *both* gradient components and therefore counted
    # the z^2 term twice in the sum, deviating from the H1 formula above.
    func = gx ** 2 + gy ** 2 + z ** 2
    return sqrt(func.sum())
Size = [100,200,400,800,1600]
#creo una dict di matrici che definiscono la distanza!
print("inizio calcolo del dict")
start = timer()
matrici = {}
for i in Size:
matrici[i] = distance(i,h1_distance)
end = timer()
print("fine calcolo del dict")
print(end-start)
#creo array di efficiency
print("inizio con efficienza")
inizio = timer()
H = distance(1600,h1_distance)
efficienza = []
for i in Size:
efficienza.append(performance(i,H))
fine = timer()
print("fine con efficienza")
print(efficienza)
print("tempo necessario")
print(fine - inizio)
#compute distance matrix for N = 10
# -
# ## Assignment 7
#
# An even better improvement on the previous distance function is given by the following algorithm
#
# - Given two images $f1$ and $f2$:
# - Compute $$a = \frac{f_1}{\int_\Omega f_1}$$, $$b=\frac{f_2}{\int_\Omega f_2}$$
# - Solve
# $$
# -\Delta \phi = a - b \qquad \text{ in } \Omega
# $$
# $$
# \phi = 0 \text{ on } \partial\Omega
# $$
# - Define the *Monge Ampere* distance
# $$
# d_{MA}(f_1,f_2) = \int_\Omega (a+b)|\nabla \phi|^2
# $$
#
# - Compute the distance matrix and the efficiency for this distance for N=100,200,400,800,1600
#
#
# +
from scipy.sparse import *
from scipy.sparse.linalg import *
from scipy.sparse import csc_matrix
# +
from timeit import default_timer as timer
def laplace(N):
    """Dense 5-point Laplacian for an N x N interior grid (Dirichlet BCs).

    Returns an (N*N, N*N) matrix with 4 on the diagonal and -1 couplings to
    the four grid neighbours; the spurious wrap-around couplings produced by
    the banded construction are zeroed out.
    """
    lap = diags([-1, -1, 4, -1, -1], [-N, -1, 0, 1, N], shape=(N * N, N * N)).toarray()
    # rows r*N start a new grid row: remove the left/right coupling that
    # would otherwise connect across the row boundary
    for r in range(1, N):
        lap[r * N, r * N - 1] = 0
        lap[r * N - 1, r * N] = 0
    return lap
# voglio risolvere delta(phi) = b-a
def LU(N):
    """Pre-factorize the N^2 x N^2 grid Laplacian with a sparse LU solver."""
    dense = laplace(N)
    # csc is the format splu expects; converting avoids an efficiency warning
    sparse_lap = csc_matrix(dense)
    return splu(sparse_lap)
A = LU(len(x_train[0]) - 2 )
def resolve(immagine):
    """Solve -laplacian(phi) = immagine with zero boundary values.

    NOTE(review): relies on the module-level LU factorization ``A`` (built
    for the 26x26 interior) and hard-codes the 28x28 output shape -- only
    valid for MNIST-sized images.
    """
    # drop the boundary pixels; the solver acts on the interior only
    immagine = immagine[1:-1, 1:-1]
    lun = len(immagine)
    # flatten, solve with the prefactorized LU, restore the 2-D shape
    u = immagine.reshape(-1)
    v = A.solve(u)
    v = v.reshape(lun, lun)
    # embed the interior solution back into a full 28x28 grid of zeros
    sol = zeros((28, 28))
    sol[1:-1, 1:-1] = v
    return sol
# +
## distanza di Monge ampere
from timeit import default_timer as timer
Size = [100,200,400,800,1600]
def grad_norm(f):
    """Pointwise squared magnitude of the numerical gradient of *f*."""
    g0, g1 = gradient(f)  # components along axis 0 and axis 1
    return g0 ** 2 + g1 ** 2
def D_MA(f1, f2):
    """Monge-Ampere-style distance between two image densities."""
    # normalize both images to unit total mass
    a = f1 / f1.sum()
    b = f2 / f2.sum()
    # potential phi solving -laplacian(phi) = a - b, zero on the boundary
    phi = resolve(a - b)
    # integrate (a + b) * |grad phi|^2 over the domain
    return ((a + b) * grad_norm(phi)).sum()
print("inizio operazione")
start = timer()
ampere = distance(1600,D_MA)
end = timer()
print(end - start)
eff = []
for i in Size:
eff.append(performance(i,ampere))
end = timer()
print("fine operazione")
print(end - start)
# -
print(eff)
plot(Size,eff)
title("efficiency plot for monge-ampere distance")
grid()
|
final_project/Alberto_Presta_project.py.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started with MaterialsCoord
#
# This notebook will introduce the basic functionality of MaterialsCoord and demonstrate how to benchmark different coordination algorithms implemented in pymatgen.
#
# *Written using:*
# - MaterialsCoord==0.1.0
#
# *Authors: <NAME> (10/14/19)*
#
# ---
#
# ## The benchmark class
#
# The primary object in MaterialsCoord is the `Benchmark` class. This contains all the functionality needed for loading structures, running near neighbor algorithms on the structures and calculating the benchmark score. `Benchmark` can be loaded using:
from materialscoord.core import Benchmark
# The benchmark object can be initialized from a set of structures with the correct coordination provided as a site attribute. More information on preparing your own structures for benchmarking is given in the [benchmarking-custom-structures notebook](benchmarking-custom-structures.ipynb).
#
# To make benchmarking easy, we have provided a set of default structures that can be used to assess the performance of coordination algorithms. These structures have been split into several *structure groups*, including:
#
# - `"elemental"`: Simple elemental materials, including diamond, graphite, Ga,
# and α-As.
# - `"common_binaries"`: Simple and more complex binary structures, including
# rocksalt NaCl, rutile TiO<sub>2</sub>, and γ-brass.
# - `"ABX3"`: ABX<sub>3</sub> structured ternary materials, including perovskite
#   SrTiO<sub>3</sub> and aragonite CaCO<sub>3</sub>.
# - `"ABX4"`: ABX<sub>4</sub> structured ternary materials, including zircon,
# (ZrSiO<sub>4</sub>) and wolframite (FeWO<sub>4</sub>).
# - `"A2BX4"`: A<sub>2</sub>BX<sub>4</sub> structured ternary materials, including
# olivine Fe<sub>2</sub>SiO<sub>4</sub>.
#
# The full list of available structure groups can be seen by inspecting the `Benchmark.all_structure_groups` variable.
Benchmark.all_structure_groups
# We can load structure groups into the benchmark using the `from_structure_group` function. This function can accept one or more structure groups. For example:
# +
# load the elemental structure group into the benchmark
bm = Benchmark.from_structure_group("elemental")
# alternatively, load multiple structure groups
bm = Benchmark.from_structure_group(["elemental", "common_binaries"])
# -
# We can see which structures were included in the benchmark by inspecting the `structures` attribute.
bm.structures
# The human interpreted coordination number is stored in the `"coordination"` site property for each structure. For example, the human interpreted coordination of the `P_black_23836` structure can accessed using:
bm.structures["P_black_23836"].site_properties["coordination"]
# ## Near neighbor algorithms
#
# The `pymatgen.analysis.local_env` module contains implementations of many near neighbor classes used to calculate bonding and coordination numbers. The full information on each of these classes is given in the MaterialsCoord paper and in the pymatgen documentation. We can load the near neighbor classes using:
from pymatgen.analysis.local_env import BrunnerNN_reciprocal, EconNN, JmolNN, \
MinimumDistanceNN, MinimumOKeeffeNN, MinimumVIRENN, \
VoronoiNN, CrystalNN
# MaterialsCoord requires initialized versions of each of the classes. We therefore prepare the classes for benchmarking as follows:
nn_methods = [
MinimumDistanceNN(), MinimumOKeeffeNN(), MinimumVIRENN(), JmolNN(),
EconNN(), BrunnerNN_reciprocal(), VoronoiNN(tol=0.5), CrystalNN()
]
# ## Running the benchmark
#
# The benchmark class provides two primary functions:
#
# - `benchmark()`: Run the near neighbor methods on each of the structures to obtain the coordination numbers for each symmetry inequivalent site.
# - `score()`: Compare the results of the near neighbor algorithms to the human determined coordination numbers and calculate the benchmark score.
#
# Both functions require a list of near neighbor methods as input. We can see the output for the `benchmark()` method using:
bm.benchmark(nn_methods)
# The results are returned as a Pandas `DataFrame` object. The dataset contains a new column for each symmetry unique site and near neighbor algorithm combination. The total number of columns per algorithm is set by the structure with the largest number of sites.
#
# For example, the "U_alpha_16056" structure only has 1 unique site. For the `MinimumDistanceNN`, the result for this site is given in the "MinimumDistanceNN0" column. The other MinimumDistanceNN columns (i.e., "MinimumDistanceNN1", "MinimumDistanceNN2", and "MinimumDistanceNN3") are therefore empty.
#
# ## Calculating the overall score
#
# The score overall scores for each structure and near neighbor method can be calculated using the `score()` function. The score is calculated by taking the summation of the absolute value of the error in coordination prediction ($\mathrm{CN}^\mathrm{calc} - \mathrm{CN}^\mathrm{expected}$) multiplied by the site degeneracy divided the total number of sites. I.e.,
#
# $$
# \mathrm{score} = \frac{\sum_i^{N_\mathrm{sites}^\mathrm{unique}} | \mathrm{CN}_i^\mathrm{calc} - \mathrm{CN}_i^\mathrm{expected} | \times N_i^\mathrm{degen}}{N_\mathrm{sites}}
# $$
#
#
# Again, a list of near neighbor methods are required as input. For example:
bm.score(nn_methods)
# MaterialsCoord can further break down the scores into those for anion and cation sites. Note, the input structures must have oxidation states for this to work. All structures in the default structure groups have oxidation states. This behavior is controlled by the `site_type` variable. The options are:
#
# - `"all"` (default): The score is calculated taking into account the coordination numbers of all sites.
# - `"cation"`: The score is calculated only taking into account cation (i.e., positively charged) sites and neutral (oxidation state of 0) sites.
# - `"anion"`: The score is calculated only taking into account anion sites.
#
# For example, the following can be used to calculate the scores for just the cation sites:
bm.score(nn_methods, site_type="cation")
# ## Plotting the results
#
# Finally, MaterialsCoord implements a convenience function `plot_benchmark_scores` to plot the benchmark scores. The matplotlib and seaborn packages are used for plotting.
#
# The output of the `Benchmark.score()` function is the only required input for the plotting function:
# +
# %matplotlib inline
from materialscoord.plot import plot_benchmark_scores
scores = bm.score(nn_methods)
plt = plot_benchmark_scores(scores)
|
examples/introduction-to-MaterialsCoord.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
with open('./my_key.key', 'r') as f:
api_key = next(f)
# +
import datetime
import requests
import urllib
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
hist_base_url = 'https://api.worldweatheronline.com/free/v2/past-weather.ashx'
base_url = 'https://api.worldweatheronline.com/free/v2/weather.ashx'
url = '{base_url}?key={api_key}&q={location}&num_of_days=3&tp=3&format=json&cc=no&extra=utcDateTime'.format(
base_url=base_url,
api_key=api_key,
location=urllib.parse.quote('Köln'),
)
#dates in yyyy-MM-dd
hist_url = '{base_url}?key={api_key}&q={location}&date={start}&enddate={end}&tp=3&format=json&cc=no&extra=utcDateTime'.format(
base_url=hist_base_url,
api_key=api_key,
location='Aachen',
start='2016-02-01',
end='2016-03-01',
)
resp = requests.get(url)
data = resp.json()
list(data['data'].keys())
data['data']['request']
# +
#data['data']['current_condition']
# +
#data['data']['weather'][0]
# -
hist_resp = requests.get(hist_url)
hist_data = hist_resp.json()
list(hist_data['data']['weather'][0].keys())
# Flatten the per-day 'hourly' records of the API response into parallel
# lists of timestamps, temperatures and heat indices.
dts = []
vals = []
idxs = []
weather_descritpions = set()  # note: typo in the name kept -- it is referenced later
for element in hist_data['data']['weather']:
    for hour_element in element['hourly']:
        # UTCdate is 'yyyy-MM-dd'; UTCtime is an integer like 1300 (= 13:00)
        year, month, day = list(map(int, hour_element.get('UTCdate').split('-')))
        time = int(hour_element.get('UTCtime'))
        hour, minute = time//100, time%100
        dt = datetime.datetime(year, month, day, hour, minute, 0)
        dts.append(dt)
        # collect the set of distinct textual weather descriptions
        weather_descritpions.add(hour_element.get('weatherDesc')[0].get('value'))
        vals.append(float(hour_element.get('tempC')))
        idxs.append(float(hour_element.get('HeatIndexC')))
# +
#hist_data['data']['weather']
# -
plt.figure(figsize=(15, 8))
plt.step(dts, vals)
#plt.step(dts, idxs)
weather_descritpions
|
forecast/weather_data/testing_worldweather_online_api.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import scipy as sp
from scipy import stats
import random
import math
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
# +
import sys
module_path = '/Users/pasqualini'
if module_path not in sys.path:
sys.path.append(module_path)
import omico as om
#from omico import plot as pl
from omico import fit as ft
from omico import analysis as an
from omico import table as tb
# -
import scipy
import scipy.special as sc
# +
PROJ_ROOT = '..'
DATA_DIR = os.path.join(PROJ_ROOT,'data/')
# data
TABLE_DIR = os.path.join(PROJ_ROOT,'data/JKR2/')
# metadata
METADATA_DIR = os.path.join(PROJ_ROOT,'data/metadata/AGGREGATED')
# +
# load raw tables from the two sequence alignment
ref_raw = pd.read_csv(os.path.join(TABLE_DIR,'ref_table.csv'),index_col='taxon_name',sep='\t').fillna(0)
pfam_raw = pd.read_csv(os.path.join(TABLE_DIR,'pfam_table.csv'),index_col='taxon_name',sep='\t').fillna(0)
tot_reads=ref_raw.sum(axis=0).sort_values()
nop = ['unclassified', 'cannot be assigned to a (non-viral) species', 'Viruses']
ref_raw=ref_raw.drop(nop)
pfam_raw=pfam_raw.drop(nop)
# +
core_cut=10
# load your data; this could also come from a pd.read_csv call
core_raw = tb.core_protocol(std_t=ref_raw,core_t=pfam_raw,core_cut=core_cut)
core_raw.head()
# +
# get structured data from the raw ones
# initialization
C = tb.table(core_raw)
# choose your transform
C.built_in_transform(which=['binary','relative'])
# -
C.form['binary']
C.annotation
# observable original: senza binnare calcolo medie e varianze
X_c = C.get_observables(zipf=True,out=True)
X_c = X_c.sort_values(('zipf rank','original'))
X_c
X_c = X_c.sort_values(('zipf rank','original'))
core_raw
# +
samples=core_raw.columns
metadata = pd.read_csv(os.path.join(METADATA_DIR,'metadata_db.csv'),index_col='run')
metadata = metadata.loc[samples]
# +
C.size_partitioning(scale='log',n_bins=11)
# cambia grouping con binning
Y_c = C.get_observables(zipf=True,out=True,grouping='size')
Y_c
# -
# # barplot automatici
for c in C.components:
v=Y_c['binary mean'].loc[c].fillna(0)
plt.scatter(np.log10(v.index+1),np.log10(v.values+1))
C.partitions['size']
diagnosis = (metadata['diagnosis']).map({'CD':'U','UC':'U','IBS-C':'U','IBS-D':'U','H':'H'})
diagnosis_partition = {}
diagnosis_partition['H']=list(diagnosis[diagnosis=='H'].index)
diagnosis_partition['U']=list(diagnosis[diagnosis=='U'].index)
C.add_partition(partition=diagnosis_partition,name='diagnosis')
Z_c = C.get_observables(zipf=True,out=True,grouping='diagnosis')
Z_c
|
quick.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
# -
# # Data
wrong_filename = '../spirals/WRONG-master_file_vflag_10_smooth2-27.txt'
corrected_filename = '../spirals/Pipe3D-master_file_vflag_10_smooth2-27.txt'
wrong_data = Table.read(wrong_filename, format='ascii.ecsv')
corrected_data = Table.read(corrected_filename, format='ascii.ecsv')
# ##### Remove bad galaxies
# +
# Flag galaxies whose fitted rotation curve was rejected (marked 'non' or
# 'none') in each of the two catalogues.
bad_boolean_wrong = np.logical_or(wrong_data['curve_used'] == 'non',
                                  wrong_data['curve_used'] == 'none')
bad_boolean = np.logical_or(corrected_data['curve_used'] == 'non',
                            corrected_data['curve_used'] == 'none')
# We want to keep the same galaxies, so we want to remove any galaxy
# that is "bad" in either data file
either_bad_boolean = np.logical_or(bad_boolean_wrong, bad_boolean)
wrong_good_galaxies = wrong_data[np.logical_not(either_bad_boolean)]
good_galaxies = corrected_data[np.logical_not(either_bad_boolean)]
# -
# # $M_\text{DM}/M_*$ comparison
# +
# %matplotlib inline
plt.figure()
plt.plot(wrong_good_galaxies['Mdark_Mstar_ratio'], good_galaxies['Mdark_Mstar_ratio'], '.')
plt.xlabel('Wrong ratio')
plt.ylabel('Correct ratio')
plt.axis('square')
plt.xlim((0,200))
plt.ylim((0,200))
# -
# ##### Not all the same galaxies are kept / removed. Which ones changed?
changed_galaxies = bad_boolean_wrong != bad_boolean
corrected_data[changed_galaxies]
|
notebooks/Angular_scale_fix_comparisons.ipynb
|