code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Naive Benchmark: Time series cross validation.
# ---
#
# A naive baseline forecasting method was chosen. This was to ensure that the sophisticated methods we test in the study were only considered for the final benchmark if they provided more accurate point forecasts than the simplest of models. As emergency care demand data are seasonal we opted for the well-known Seasonal Naive method. This method works by using the most recent observation for the same day and carrying it forward. For example, if we are forecasting next Tuesday then the observation from the most recent Tuesday is used as the predicted value.
#
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from forecast_tools.baseline import SNaive
from forecast_tools.metrics import (mean_absolute_scaled_error,
root_mean_squared_error,
symmetric_mean_absolute_percentage_error)
import warnings
warnings.filterwarnings('ignore')
# -
# # Data Input
#
# The constants `TOP_LEVEL`, `STAGE`, `REGION`,`TRUST` and `METHOD` are used to control data selection and the directory for outputting results.
#
# > Output file is `f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv'`, where metric will be smape, rmse, mase, coverage_80 and coverage_95. Note: `REGION` is also used to select the correct data from the input dataframe.
# +
# Output/selection configuration for this model-selection run.
# TOP_LEVEL/STAGE/REGION/METHOD together form the results file path.
TOP_LEVEL = '../../../results/model_selection'
STAGE = 'stage1'
# REGION also selects the column of the input dataframe to forecast
REGION = 'Trust'
METHOD = 'snaive'
FILE_NAME = 'Daily_Responses_5_Years_2019_full.csv'
#split training and test data.
TEST_SPLIT_DATE = '2019-01-01'
#second subdivide: train and val
VAL_SPLIT_DATE = '2017-07-01'
#discard data after 2020 due to coronavirus
#this is the subject of a separate study.
DISCARD_DATE = '2020-01-01'
# -
#read in path
path = f'../../../data/{FILE_NAME}'
def pre_process_daily_data(path, index_col, by_col,
                           values, dayfirst=False):
    '''
    Load long-format daily data from CSV and pivot it to wide format
    so that each region has its own time series column.

    Parameters:
    --------
    path - str, path to the CSV file
    index_col - str, name of the date column (becomes the index)
    by_col - str, name of the column identifying each region
    values - str, name of the column holding the observed values
    dayfirst - bool, optional (default=False), date parsing convention

    Returns:
    --------
    pd.DataFrame - wide table indexed by date with one column per region
    '''
    raw = pd.read_csv(path, index_col=index_col, parse_dates=True,
                      dayfirst=dayfirst)
    # normalise all column and index names to lower case
    raw.columns = [name.lower() for name in raw.columns]
    raw.index.rename(str(raw.index.name).lower(), inplace=True)
    wide = pd.pivot_table(raw,
                          values=values.lower(),
                          index=[index_col.lower()],
                          columns=[by_col.lower()],
                          aggfunc=np.sum)
    # the series is daily; record the frequency explicitly on the index
    wide.index.freq = 'D'
    return wide
# pivot the long-format file into one daily column per region
clean = pre_process_daily_data(path, 'Actual_dt', 'ORA', 'Actual_Value',
                               dayfirst=False)
clean.head()
# ## Train Test Split
def ts_train_test_split(data, split_date):
    '''
    Partition a time series at a given date.

    Parameters:
    -------
    data - pd.DataFrame - time series data. Index expected as DatetimeIndex
    split_date - the date on which to split the time series

    Returns:
    --------
    tuple (len=2)
        0. pandas.DataFrame - training dataset (index before split_date)
        1. pandas.DataFrame - test dataset (index on/after split_date)
    '''
    before_split = data.index < split_date
    return data.loc[before_split], data.loc[~before_split]
# +
# first split: everything before 2019 is train, the rest is test
train, test = ts_train_test_split(clean, split_date=TEST_SPLIT_DATE)
#exclude data after 2020 due to coronavirus.
test, discard = ts_train_test_split(test, split_date=DISCARD_DATE)
#train split into train and validation
train, val = ts_train_test_split(train, split_date=VAL_SPLIT_DATE)
# -
train.shape
val.shape
# # Cross Validation
#
# `time_series_cv` implements rolling forecast origin cross validation for time series.
# It does not calculate forecast error, but instead returns the predictions, pred intervals and actuals in an array that can be passed to any forecast error function. (this is for efficiency and allows additional metrics to be calculated if needed).
def time_series_cv(model, train, val, horizons, alpha=0.2, step=1):
    '''
    Rolling forecast origin cross validation across multiple horizons
    for a single model.

    At each iteration the training data is extended up to the current
    forecast origin, the model is refitted and a single forecast over
    the maximum horizon is made; shorter horizons are obtained by
    slicing that forecast.  Only complete validation windows are used:
    the number of iterations is len(val) - max(horizons) + 1 (over step).

    Parameters:
    --------
    model - forecasting model with fit()/predict() interface
    train - np.array - vector of training data
    val - pd.Series - vector of validation data
    horizons - list of ints, forecast horizons e.g. [7, 14, 28] days
    alpha - float, optional (default=0.2)
        1 - alpha prediction interval specification
    step - int, optional (default=1)
        number of validation points added to training per iteration

    Returns:
    -------
    tuple of three lists (splits x horizons):
        point forecasts, actual observations, prediction intervals
    '''
    max_h = max(horizons)
    all_preds = []
    all_actuals = []
    all_intervals = []
    split = 0
    print('split => ', end="")
    for origin in range(0, len(val) - max_h + 1, step):
        split += 1
        print(f'{split}, ', end="")
        # extend the training data up to the current forecast origin
        model.fit(np.concatenate([train, val[:origin]], axis=0))
        # forecast once over the longest horizon ...
        preds, intervals = model.predict(
            horizon=len(val[origin:origin + max_h]),
            return_predict_int=True,
            alpha=[alpha])
        # ... then slice the single forecast for each sub-horizon
        fold_preds = [preds[:h] for h in horizons]
        fold_actuals = [val.iloc[origin:origin + h] for h in horizons]
        fold_intervals = [intervals[:h] for h in horizons]
        all_preds.append(fold_preds)
        all_actuals.append(fold_actuals)
        all_intervals.append(fold_intervals)
    print('done.\n')
    return all_preds, all_actuals, all_intervals
# +
def split_cv_error(cv_preds, cv_test, error_func):
    '''
    Forecast error for each element of one slice of the CV results.

    Parameters:
    --------
    cv_preds - sequence of prediction vectors
    cv_test - sequence of matching actual-value vectors
    error_func - callable(actual, predicted) -> float

    Returns:
    -------
    np.array of errors, one per element
    '''
    cv_errors = []
    for split in range(len(cv_preds)):
        # error_func signature is (y_true, y_pred)
        pred_error = error_func(cv_test[split], cv_preds[split])
        cv_errors.append(pred_error)
    return np.array(cv_errors)


def forecast_errors_cv(cv_preds, cv_test, error_func):
    '''
    Forecast error across all CV splits and horizons.

    NOTE: despite the local variable names, the FIRST axis of the inputs
    (as produced by time_series_cv) is the CV split and the SECOND the
    horizon, so the returned array has shape (n_splits, n_horizons) --
    which is what the downstream DataFrame (columns=horizons) expects.

    Returns:
    -------
    np.array, shape (n_splits, n_horizons)
    '''
    # BUGFIX: do not call np.array on the nested inputs here -- the
    # sub-horizon vectors have different lengths, and np.array on ragged
    # nested sequences raises a ValueError in NumPy >= 1.24.
    horizon_errors = []
    for h in range(len(cv_test)):
        split_errors = split_cv_error(cv_preds[h], cv_test[h], error_func)
        horizon_errors.append(split_errors)
    return np.array(horizon_errors)
def split_coverage(cv_test, cv_intervals):
    '''
    Empirical prediction interval coverage for one slice of CV results.

    Parameters:
    --------
    cv_test - sequence of actual-value vectors
    cv_intervals - sequence of interval arrays, each of shape (h, 2)
                   with columns [lower, upper]

    Returns:
    -------
    np.array - proportion of actuals falling inside the interval
    '''
    cv_errors = []
    for split in range(len(cv_test)):
        val = np.asarray(cv_test[split])
        lower = cv_intervals[split].T[0]
        upper = cv_intervals[split].T[1]
        # strict inequalities: points exactly on a bound count as misses
        covered = np.sum((val > lower) & (val < upper))
        cv_errors.append(covered / len(val))
    return np.array(cv_errors)


def prediction_int_coverage_cv(cv_test, cv_intervals):
    '''
    Prediction interval coverage across all CV splits and horizons.

    As with forecast_errors_cv, the first axis of the inputs is the CV
    split and the second the horizon; returns shape (n_splits, n_horizons).
    '''
    # BUGFIX: avoid np.array on ragged nested sequences (different
    # horizon lengths) -- raises ValueError in NumPy >= 1.24.
    horizon_coverage = []
    for h in range(len(cv_test)):
        split_coverages = split_coverage(cv_test[h], cv_intervals[h])
        horizon_coverage.append(split_coverages)
    return np.array(horizon_coverage)
# +
def split_cv_error_scaled(cv_preds, cv_test, y_train):
    '''
    Mean absolute scaled error (MASE) for each element of one slice of
    the CV results.  MASE scales the error by the in-sample seasonal
    naive error on y_train (weekly seasonality, period=7).
    '''
    cv_errors = []
    for split in range(len(cv_preds)):
        pred_error = mean_absolute_scaled_error(cv_test[split], cv_preds[split],
                                                y_train, period=7)
        cv_errors.append(pred_error)
    return np.array(cv_errors)


def forecast_errors_cv_scaled(cv_preds, cv_test, y_train):
    '''
    MASE across all CV splits and horizons.

    As with forecast_errors_cv, the first axis of the inputs is the CV
    split and the second the horizon; returns shape (n_splits, n_horizons).
    '''
    # BUGFIX: avoid np.array on ragged nested sequences (different
    # horizon lengths) -- raises ValueError in NumPy >= 1.24.
    horizon_errors = []
    for h in range(len(cv_test)):
        split_errors = split_cv_error_scaled(cv_preds[h], cv_test[h], y_train)
        horizon_errors.append(split_errors)
    return np.array(horizon_errors)
# -
# seasonal naive model with weekly (7 day) seasonality
model = SNaive(7)
# forecast horizons tested, in days (weekly steps up to 84, plus one year)
horizons = [7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 365]
# rolling-origin cross validation, moving the origin 7 days at a time
results = time_series_cv(model, train['Trust'], val['Trust'],
                         horizons, alpha=0.2, step=7)
cv_preds, cv_test, cv_intervals = results
#CV point predictions smape
cv_errors = forecast_errors_cv(cv_preds, cv_test,
                               symmetric_mean_absolute_percentage_error)
df = pd.DataFrame(cv_errors)
df.columns = horizons
df.describe()
#output sMAPE results to file
metric = 'smape'
print(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
df.to_csv(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
#CV point predictions rmse
cv_errors = forecast_errors_cv(cv_preds, cv_test, root_mean_squared_error)
df = pd.DataFrame(cv_errors)
df.columns = horizons
df.describe()
#output rmse
metric = 'rmse'
print(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
df.to_csv(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
#mase
# MASE is scaled by the in-sample naive error, so the training series is passed
cv_errors = forecast_errors_cv_scaled(cv_preds, cv_test, train['Trust'])
df = pd.DataFrame(cv_errors)
df.columns = horizons
df.describe()
#output mean absolute scaled error
metric = 'mase'
print(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
df.to_csv(f'{TOP_LEVEL}/{STAGE}/{REGION}-{METHOD}_{metric}.csv')
| analysis/model_selection/stage1/00a_naive_benchmark-tscv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env
# language: python
# name: env
# ---
# + tags=[]
# Convenient jupyter setup
# %load_ext autoreload
# %autoreload 2
# -
# See [here](https://planetlabs.github.io/planet-client-python/api/reference.html) for an API reference
# # 0. Imports
# + tags=[]
import shapely.geometry
import planet.api
import tqdm
import logging
from src.utils.logging import get_logger
from src.utils.os import list_content
from src.constants import PLANET_API_KEY, DATA_PATH
logger = get_logger(__name__)
logger.setLevel(logging.INFO)
# -
# # 1. Set up planet client and region of interest
# + tags=[]
# fail fast if the API key was not loaded
assert PLANET_API_KEY is not None
planet_client = planet.api.ClientV1(api_key=PLANET_API_KEY)
# + tags=[]
# Coordinates in WGS84 coordinate system
lat = -5.3494
lon = -56.0282
height = 1  # box height in degrees (latitude span)
width = 1   # box width in degrees (longitude span)
# BUGFIX: the max corner previously used height for lon and width for lat
# (lon + height/2, lat + width/2); harmless while height == width == 1,
# but wrong for any non-square box.
roi = shapely.geometry.box(lon - width/2, lat - height/2, lon + width/2, lat + height/2)
# -
# # 2. Select relevant mosaic to query
# We first start by looking at all the mosaics (aggregated data products) that are available for our user. In our case we are interested in data from 2019, which overlaps with our GEDI data. Let us therefore look for all mosaics which contain 2019 in their name.
# + tags=[]
# list every mosaic whose name mentions 2019 (overlapping our GEDI data)
# NOTE(review): name_contains is passed as the int 2019 -- presumably the
# client stringifies it; confirm against the Planet client API docs.
mosaics_result = planet_client.get_mosaics(name_contains=2019)
available_mosaics = list(mosaics_result.items_iter(limit=None))
print(f"Found {len(available_mosaics)} mosaics for 2019:")
for mosaic in available_mosaics:
    print(f"\t{mosaic['name']}")
# -
# How does one of these mosaic responses look like? Let's look at the keys of the first one:
# + tags=[]
available_mosaics[0].keys()
# -
# We can see a few interesting things here:
#
# 1. The item type is `PSScene4Band` which stands for [Planet scope](https://earth.esa.int/eogateway/missions/planetscope) (a planet satellite constellation- details in screenshot below) and specifically to the 4 band product of it (RGB + near infrared).
# 2. Level 15: The zoom level. Level 15 means we get about 4.7m resolution GSD at the equator.
# 3. Quad size and resolution: The number of pixels per quad axis (quadratic) as well as the corresponding GSD of 4.777m.
# 4. The date of the first and last acquired scenes which were combined to make the quads (which in turn make up the mosaic): from 1 Dec 2018 to 1 Jun 2019.
#
# 
# For our demo purposes, we will use the 2019_Jan_to_Jun mosaic
# + tags=[]
# use the first 2019 mosaic (Jan-to-Jun) for the demo
relevant_mosaic = available_mosaics[0]
# -
# # 3. Find out which quads of the mosaic overlap with our ROI
# Next, we check which quads (quadratic tiles) of the mosaic overlap with our region of interest.
# + tags=[]
# query the mosaic's quads intersecting the ROI bounding box
quad_result = planet_client.get_quads(relevant_mosaic, bbox=roi.bounds)
roi_quads = list(quad_result.items_iter(limit=None))
print(f"Found {len(roi_quads)} relevant quads in given mosaic `{relevant_mosaic['name']}` which overlap with ROI")
# -
# # 4. Finally, let us download the quads to our folder of choice
# + tags=[]
# output directory layout: <DATA_PATH>/Planet/<mosaic name>
planet_path = DATA_PATH / "Planet"
mosaic_path = planet_path / relevant_mosaic['name']
mosaic_path.mkdir(parents=True, exist_ok=True)
assert mosaic_path.exists()
# + tags=[]
mosaic_path
# + tags=[]
progress_bar = tqdm.tqdm(roi_quads, position=0, leave=True)
logger.info(f"Downloading to: {mosaic_path}")
for quad in progress_bar:
    progress_bar.set_description(f"Downloading quad {quad['id']}")
    # Check if quad already downloaded:
    # quad ids look like "<east>-<north>"; saved filenames zero-pad to 4 digits
    east_pos, north_pos = quad["id"].split("-")
    matching_quads = list(mosaic_path.glob(f"*{east_pos.zfill(4)}E-{north_pos.zfill(4)}N*.tif"))
    if len(matching_quads) == 1:
        logger.debug(f"Already downloaded: {matching_quads[0]}")
        continue
    elif len(matching_quads) > 1:
        # ambiguous state on disk -- stop rather than risk overwriting
        logger.error(f"Found multiple matching quads {matching_quads[0]}")
        raise RuntimeError
    # If not, download it ...
    quad_download_response = planet_client.download_quad(quad)
    body = quad_download_response.get_body()
    # ... and save it
    progress_bar.set_description(f"Saving quad {quad['id']}")
    body.write(file=mosaic_path / body.name)
# -
# # 5. Let us look at a sample quad
# + tags=[]
# list the downloaded quads and open the first one as a raster
downloaded_quads = list_content(mosaic_path)
sample_quad_path = downloaded_quads[0]
# + tags=[]
import rioxarray as rxr
sample_quad = rxr.open_rasterio(sample_quad_path)
# + tags=[]
sample_quad
# -
# We can see there are 4096 x 4096 pixels in this quad and 5 available bands (B, G, R, NIR, Alpha).
#
# Here's more information directly from the [Planet PlanetScope data specification](https://developers.planet.com/docs/data/sr-basemaps/). For more details see [here](https://assets.planet.com/products/basemap/planet-basemaps-product-specifications.pdf)
# 
# 
# + tags=[]
sample_quad
# + tags=[]
import numpy as np
import matplotlib.pyplot as plt
# + tags=[]
# Per-band pixel value histograms.
# NOTE(review): the band order stated above is (B, G, R, NIR, Alpha), so
# index 1 would be Green and index 2 Red -- the "Red"/"Green" labels below
# may be swapped; confirm against the product specification.
plt.hist(sample_quad[0,...].data.flatten(), bins=500, color="C0", alpha=0.5, label="Blue");
plt.hist(sample_quad[1,...].data.flatten(), bins=500, color="firebrick", alpha=0.5, label="Red");
plt.hist(sample_quad[2,...].data.flatten(), bins=500, color="green", alpha=0.5, label="Green");
plt.hist(sample_quad[3,...].data.flatten(), bins=500, color="orange", alpha=0.5, label="NIR");
plt.legend()
plt.title("RGB-NIR value distribution")
plt.show()
# + tags=[]
# same histograms with a log-scaled y axis
plt.hist(sample_quad[0,...].data.flatten(), bins=500, color="C0", alpha=0.5, label="Blue");
plt.hist(sample_quad[1,...].data.flatten(), bins=500, color="firebrick", alpha=0.5, label="Red");
plt.hist(sample_quad[2,...].data.flatten(), bins=500, color="green", alpha=0.5, label="Green");
plt.hist(sample_quad[3,...].data.flatten(), bins=500, color="orange", alpha=0.5, label="NIR");
plt.semilogy()
plt.legend()
plt.title("RGB-NIR value distribution (log)")
plt.show()
# + tags=[]
sample_quad[4,...].data.flatten() # alpha value
# + tags=[]
# alpha (mask) band distribution on log-log axes
plt.hist(sample_quad[4,...].data.flatten(), bins=10, color="grey", alpha=0.5, label="alpha");
plt.legend()
plt.loglog()
plt.show();
# -
# Let's display the raster RGB data
# + tags=[]
import folium
import tqdm
import folium.features
import folium.plugins
# Create map and add layers
# folium expects (lat, lon); the shapely centroid is (lon, lat), hence [::-1]
world_map = folium.Map(location=roi.centroid.coords[0][::-1],
                       control_scale=True,
                       zoom_start=8,
                       tiles="OpenStreetMap")
# Add minimap
folium.plugins.MiniMap(zoom_level_fixed=2).add_to(world_map)
# Add ROI
ROI_STYLE = {'fillColor': '#2a74ac', 'color': '#2a74ac'}
folium.GeoJson(data=roi.__geo_interface__,
               name="Region of interest",
               style_function= lambda x: ROI_STYLE).add_to(world_map)
# Add ESRI background tiles
folium.TileLayer(
    tiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',
    attr = 'Esri',
    name = 'Esri Satellite',
    overlay = False,
    control = True
).add_to(world_map)
# Add Planet background tiles
# NOTE: the API key is embedded in the tile URLs below -- avoid sharing
# rendered notebook outputs publicly.
folium.TileLayer(
    tiles = "https://tiles.planet.com/basemaps/v1/planet-tiles/planet_medres_normalized_analytic_2018-12_2019-05_mosaic/gmap/{z}/{x}/{y}.png?api_key=" + PLANET_API_KEY,
    attr = 'Planet_dec18_may19',
    name = 'Planet Basemap Dec18-May19',
    overlay = False,
    control = True
).add_to(world_map)
folium.TileLayer(
    tiles = "https://tiles.planet.com/basemaps/v1/planet-tiles/planet_medres_normalized_analytic_2019-06_2019-11_mosaic/gmap/{z}/{x}/{y}.png?api_key=" + PLANET_API_KEY,
    attr = 'Planet_jun19_nov19',
    name = 'Planet Basemap Jun19-Nov19',
    overlay = False,
    control = True
).add_to(world_map)
folium.TileLayer(
    tiles = "https://tiles.planet.com/basemaps/v1/planet-tiles/planet_medres_normalized_analytic_2019-12_2020-05_mosaic/gmap/{z}/{x}/{y}.png?api_key=" + PLANET_API_KEY,
    attr = 'Planet_dec19_may20',
    name = 'Planet Basemap Dec19-May20',
    overlay = False,
    control = True
).add_to(world_map)
# layer switcher, then display the map inline
folium.LayerControl().add_to(world_map)
world_map
# -
# # Legacy
# + tags=[]
# hard-coded demo AOI over the San Francisco bay (WGS84 lon/lat)
aoi = {
    "type": "Polygon",
    "coordinates": [
        [
            [-122.54, 37.81],
            [-122.38, 37.84],
            [-122.35, 37.71],
            [-122.53, 37.70],
            [-122.54, 37.81]
        ]
    ]
}
# + tags=[]
# search filter: scenes over the AOI with non-zero cloud cover
query = planet.api.filters.and_filter(
    planet.api.filters.geom_filter(aoi),
    planet.api.filters.range_filter('cloud_cover', gt=0),
)
# + tags=[]
query
# + tags=[]
# build a request for only PlanetScope imagery
request = planet.api.filters.build_search_request(
    query, item_types=['PSScene4Band']
)
# + tags=[]
request
# + tags=[]
result = planet_client.quick_search(request)
# + tags=[]
result
# + tags=[]
#for item in result.items_iter(limit=None):
#props = item['properties']
#print('{0},{cloud_cover},{acquired}\n'.format(item['id'], **props))
| notebooks/7-Planet data.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Javascript (Node.js)
// language: javascript
// name: javascript
// ---
// # JavaScript and NodeJS
//
// ## Table of Contents
//
// **[01- Introduction](Ch01-JavaScriptIntro.ipynb)**<br>
// **[02- Data Types and Std Input/Output](Ch02-Output-DataTypes-Variables.ipynb)**<br>
// **[03- Built-in Functions & NPM](Ch03-Built-inFunctions-NPM.ipynb)**<br>
// **[04- User-defined Functions](Ch04-UserDefinedFunctions.ipynb)**<br>
// **[05- Numbers & Math Library](Ch05-Numbers-And-Math.ipynb)**<br>
// **[06- Conditionals](Ch06-Conditionals.ipynb)**<br>
// **[07- User-defined Modules](Ch07-Modules.ipynb)**<br>
// **[08- Unit Testing](Ch08-UnitTesting.ipynb)**<br>
// **[09- Loops](Ch09-Loops.ipynb)**<br>
// **[10- Strings](Ch10-Strings.ipynb)**<br>
// **[11- Arrays](Ch11-Arrays.ipynb)**<br>
// **[12- Object-Dict](Ch12-Object-Dict.ipynb)**<br>
// **[13- Files](Ch13-Files.ipynb)**<br>
// **[14- Errors & Exceptions](Ch14-Errors-And-Exceptions.ipynb)**<br>
// **[15- JSON](Ch15-JSON.ipynb)**<br>
// **[16- Object Oriented Programming (OOP)](Ch16-OOP-Class.ipynb)**<br>
// **[17- Regular Expression](Ch17-RegularExpression.ipynb)**<br>
// **[18- React.ipynb](Ch18-React.ipynb)**<br>
// **[19- NodeJS Web Server](Ch19-Node.js-Server.ipynb)**<br>
// **[20- Express Web Framework & React](Ch20-Express-React.ipynb)**<br>
// **[21- MongoDB.ipynb](Ch21-MongoDB.ipynb)**<br>
// **[22- MERN Stack](Ch22-Mongo-Express-React-Node.ipynb)**<br>
// **[23- Next.js-React-Fullstack](Ch23-Next-React-Fullstack.ipynb)**<br>
//
// ## Demo Programs & Scripts
//
// ### JSDemo
// - JSDemo folder has all individual JavaScript and NodeJS individual scripts
//
// ### ExpressDemo
// - ExpressDemo folder has a simple express demo app
//
// ### NodeDemo
// - NodeDemo folder has a simple NodeJS server demo app
//
// ### MongoDemo
// - MongoDemo folder has simple MongoDB demo scripts
| Ch00-TableOfContents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computational & Deep Learning Frameworks Benchmark on real-world tasks
#
# Frameworks compared:
# * [Python 3](https://www.python.org/)/[Numpy](http://www.numpy.org/) with or without [Numba JIT compilation](http://numba.pydata.org/)
# * [Julia](https://julialang.org/) The Julia Programming Language
# * C++ called from Python using [pybind11](https://github.com/pybind/pybind11)
# * [TensorFlow](https://www.tensorflow.org) v1 (tf.compat.v1*) and v2 in CPU/GPU mode
# * [Theano](http://deeplearning.net/software/theano)
# * [MinPy](https://github.com/dmlc/minpy) a NumPy interface above [MXNet](http://mxnet.io) backend, in CPU/GPU mode
# * [Gluon](https://gluon.mxnet.io/) library in Apache [MXNet](http://mxnet.io), CPU/GPU mode
# * [CuPy](https://cupy.chainer.org/) an open-source matrix library accelerated with NVIDIA CUDA.
# * [R](https://www.r-project.org/) Project for Statistical Computing
#
# Single precision float numbers are used when possible. R works with double precision.
#
# *TensorFlow v1 versions are using tf.compat.v1 endpoint to run v1 code in TensorFlow 2.x, change the import statements if you are using TF 1.x.
#
# !python3 --version
# !Rscript --version
# !julia --version
# !pip list --format freeze | grep "numpy\|tensorflow\|Theano\|pygpu\|numba\|minpy\|mxnet\|cupy"
# +
import os
import subprocess
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from subprocess import Popen, PIPE
import re
import matplotlib
# %matplotlib inline
# +
# define utility functions
def get_json(x):
    '''Extract the single {...} JSON object embedded in raw process output bytes.'''
    match = re.match(r"^[^\{]*(\{[^\}]+\})[^}]*$", x.decode())
    return match.group(1)
def get_exec(file_name):
    '''Map a benchmark script's extension to its interpreter command (None if unknown).'''
    interpreters = {'.py': 'python3', '.jl': 'julia', '.R': 'Rscript'}
    for ext, interp in interpreters.items():
        if file_name.endswith(ext):
            return interp
# The first of the two original `exe` definitions was dead code: it was
# immediately shadowed by the second, identical-purpose definition below.
# Only the surviving definition is kept.
def exe(file_name, mode, cwd='', **kwargs):
    '''
    Run one benchmark script in a subprocess and parse its JSON result.

    NOTE(review): the script actually executed is the module-level
    `version` variable (set by the benchmark loops below); `file_name`
    is only used to select the interpreter. Confirm this is intentional.

    Parameters
    ----------
    file_name : str - script name; its extension picks the interpreter.
    mode : str - value passed to the script's `-mode` flag.
    cwd : str, optional - working directory for the subprocess.
    **kwargs - extra `-key value` flags appended to the command line.

    Returns
    -------
    dict or None - parsed JSON benchmark results, or None on failure.
    '''
    args = sum([[f'-{k}', f'{v}'] for k, v in kwargs.items()],
               [get_exec(file_name), version, '-mode', mode])
    p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd)
    output, err = p.communicate()
    if p.returncode==0:
        # BUGFIX: initialise json_str so the except handler below cannot
        # raise NameError when get_json itself fails.
        json_str = None
        try:
            json_str = get_json(output)
            result_json = json.loads(json_str)
            print(result_json)
            return result_json
        except Exception as e:
            print("Exception in benchmark", e, str(json_str))
    else:
        print("Benchmark failed, check program output:", err)
# -
# ## Task I: Time-series model (*Scan* operation benchmark)
# We estimate Log-Likelihood of GARCH (Generalized Autoregressive Conditional Heteroscedasticity) model, this is an econometric model used for modeling and forecasting time-dependent variance. Parameters in GARCH models are usually determined by Maximum Likelihood Estimation.
# The main part of the calculation involves scan over the time-series ɛ² using the next equation (α, β and ω are model parameters):
# $$h[i] = ω + α*ɛ²[i-1] + β*h[i-1]$$
# ### References:
# Bollerslev, T. (1986): Generalized Autoregressive Conditional Heteroscedasticity. Journal of Econometrics 31, 307–327.
# https://github.com/AndreyKolev/GARCH.jl - Julia library for GARCH modeling.
#
# ### Comments:
# **It's a very small-scale sequential task, so we are not expecting great numbers from gpu versions. A large-scale version of the task will be released later.**
# For Theano and Tensorflow frameworks we also perform the loop unrolling to evaluate efficiency of the built-in scan operations
#
#
# +
# accumulated runtimes, keyed by task then framework/mode
results = {}
results['garch'] = {}
# length of the simulated time series
n = 1000
# script -> list of execution modes to benchmark
versions = {'garch.py': ['std', 'numba', 'c++'],
            'garch-minpy.py': ['cpu', 'gpu'],
            'garch-gluon.py': ['cpu', 'gpu'],
            'garch-cupy.py': ['std'],
            'garch-theano.py': ['cpu', 'gpu'],
            'garch-tf.py': ['cpu', 'gpu'],
            'garch-tf-v1.py': ['cpu', 'gpu'],
            'garch.jl': ['std'],
            'garch.R': ['reduce']}
# NOTE: exe() reads the module-level `version` set by this loop
for version, modes in versions.items():
    for mode in modes:
        print(version, 'mode:', mode)
        result_json = exe(version, mode, './garch/', n=n)
        if result_json is not None:
            results['garch'].update(result_json)
# +
def plot_results(results, title, topn=None):
    '''
    Bar chart of benchmark runtimes, sorted fastest to slowest.

    Parameters
    ----------
    results : dict - framework/mode name -> runtime in seconds.
    title : str - chart title.
    topn : int, optional - if given, plot only the fastest `topn` entries.
    '''
    sns.set_style("whitegrid")
    sns.mpl.rcParams['figure.figsize'] = (15, 6)
    sns.mpl.rcParams['font.size'] = 14
    plt.xticks(rotation=50)
    results_sorted = sorted(results.items(), key=lambda x: x[1])
    if topn is not None:
        results_sorted = results_sorted[:topn]
    # BUGFIX: seaborn >= 0.12 removed positional x/y arguments to
    # barplot, so pass them as keywords (equivalent on older versions).
    plot = sns.barplot(x=[name for name, _ in results_sorted],
                       y=[runtime for _, runtime in results_sorted])
    plot.set_title(title)
    plot.set(xlabel='Framework / mode', ylabel='runtime, secs');
plot_results(results['garch'], 'Task I: Time-series model (Scan function benchmark)')
# -
plot_results(results['garch'], 'Task I: Time-series model (Scan function benchmark)\nTop 5 results', 5)
# ## Task II - Monte Carlo simulation
# We use MC simulation to calculate the price of the [Barrier Option](https://en.wikipedia.org/wiki/Barrier_option)
#
# Geometric Brownian motion is used to model stock prices
# +
results['mc'] = {}
# script -> list of execution modes to benchmark for the Monte Carlo task
versions = {'mc.py': ['std', 'multiprocessing', 'numba', 'c++', 'c++-parallel'],
            'mc.jl': ['std', 'parallel', 'matrix'],
            'mc-theano.py': ['cpu', 'gpu'],
            'mc-tf.py': ['cpu', 'gpu'],
            'mc-tf-v1.py': ['cpu', 'gpu'],
            'mc-minpy.py': ['cpu', 'gpu'],
            'mc-gluon.py': ['cpu', 'gpu'],
            'mc-cupy.py': ['std'],
            'mc.R': ['std', 'parallel']}
# NOTE: exe() reads the module-level `version` set by this loop
for version, modes in versions.items():
    for mode in modes:
        print(version, 'mode:', mode)
        result_json = exe(version, mode, './mc/')
        if result_json is not None:
            results['mc'].update(result_json)
# -
plot_results(results['mc'], 'Task II: Monte Carlo simulation')
# ### Task III: Logistic regression using [Hybrid Monte Carlo (HMC)](https://en.wikipedia.org/wiki/Hamiltonian_Monte_Carlo)
#
# Dataset: [UCI/Adult](https://archive.ics.uci.edu/ml/datasets/Adult), # of classes: 2, # of data: 32,561/16,281 (testing), # of features: 123
# +
results['hmc'] = {}
# script -> list of execution modes to benchmark for the HMC task
versions = {'hmc-tf.py': ['cpu', 'gpu'],
            'hmc-tf-v1.py': ['cpu', 'gpu'],
            'hmc-theano.py': ['cpu', 'gpu'],
            'hmc-cupy.py': ['std'],
            'hmc-minpy.py': ['cpu', 'gpu'],
            'hmc-gluon.py': ['cpu', 'gpu'],
            'hmc.py': ['std'],
            'hmc.jl': ['std'],
            'hmc.R': ['std']}
# NOTE: exe() reads the module-level `version` set by this loop
for version, modes in versions.items():
    for mode in modes:
        print(version, 'mode:', mode)
        result_json = exe(version, mode, './hmc/')
        if result_json is not None:
            results['hmc'].update(result_json)
# -
plot_results(results['hmc'], 'Task III: Hybrid Monte Carlo')
plot_results(results['hmc'], 'Task III: Hybrid Monte Carlo\nTop 5 results', 5)
| benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: jsl368
# language: python
# name: jsl368
# ---
import pandas as pd, boto3, re, os
# show full dataframes when inspecting the metadata interactively
pd.set_option("display.max_rows",1000)
pd.set_option("display.max_colwidth",1000)
# merge the public and licensed class metadata into one table
public_classes = pd.read_csv("docs_module/metadata/class_metadata.csv")
licensed_classes = pd.read_csv("docs_module/metadata/class_metadata_licensed.csv")
class_metadata = pd.concat([licensed_classes, public_classes], sort=False).reset_index(drop=True)
# strip list punctuation from columns stored as stringified arrays
array_to_text = ["inputs","output","tags"]
for c in array_to_text:
    # BUGFIX: the pattern is a regex character class, but pandas >= 2.0
    # defaults Series.str.replace to regex=False (literal matching), which
    # would leave the brackets/quotes in place -- pass regex=True explicitly.
    class_metadata[c] = class_metadata[c].str.replace(r"[\[\]'‘]", "", regex=True)
class_metadata.to_csv("docs_module/metadata/class_metadata_all.csv", index=False)
| python/0 Merge Class Datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.6 64-bit (''base'': conda)'
# name: python3
# ---
# # Problem formulation
#
# We want to classify between Harry Potter and Star Wars theme songs based on humming and whistles made by people to these songs. Since the data is crowd sourced, it has people singing in different pitches and tune, which is out of sync with the actual music. Also, the data was collected from a diverse group of students with different nationality, ethnicity, gender, and age. Given that the data is crowd sourced, it can also have noise, leading to quality issues between data points. These challenges make it an interesting problem to solve.
# # Machine Learning pipeline
# The input data consists of the audio file and meta data encoded in the file name of audio files.
# The ML pipeline involves three stages.
# - Data Preprocessing - Meta data from file names needs to be extracted. Output of this stage will be dataframes with each file name and song name columns.
# - Feature Engineering - Features should be extracted from the audio files which are in WAV format. These can be audio/signal processing features such as pitch, frequencies, power, etc. Noise needs to be removed before feature extraction and then the extracted features will be noramalized.
# - Classification - Models can be trained based on the extracted features and song name collected earlier. The classifier will predict the song label from features extracted from unseen audio data.
# # Transformation stage
#
# There are two stages in transformations.
# - Meta data extraction - Audio files are written in `[participant_number]_[audio type]_[audio number]_[song label]` format. Audio type can be `hum` or `whistle`. Audio number can be 1, 2, 3 or 4. Song label can be `Potter` or `StarWars`. All the audio files from part 1 and part 2 for Star Wars and Harry Potter were downloaded and merged into two folders `Potter` or `StarWars`. Meta data will be extracted from the audio file names and the folder names and will be stored as a dataframe. This will include `file_name` and `song name`.
# - Feature extraction - Features from WAV files are extracted in this stage. Noise is removed from each audio file before feature extraction using the [noisereduce](https://github.com/timsainb/noisereduce) package. They are digital signal processing features such as `power`, `mean pitch`, `std of pitch`, `natural frequency`, `tempo`, `zero crossings`, etc. Dask was used to perform this task in parallel since sequential execution was taking more than 5 mins for 100 records while trying to extract many features. Dask brought down feature extraction time for the whole data set to below 10 mins.
# +
import glob
import numpy as np
import pandas as pd
import librosa
import noisereduce as nr
from dask.distributed import Client
from dask import delayed
# Initialize the Dask client with 4 workers
client = Client(n_workers=4)
# +
def get_metadata():
    '''
    Build the label table from the audio folders.

    Scans `Potter/*.wav` and `StarWars/*.wav` and returns a dataframe
    indexed by file path with a single `song` column:
    0 = Potter, 1 = StarWars.
    '''
    frames = []
    for label, folder in enumerate(('Potter', 'StarWars')):
        wav_files = glob.glob(f'{folder}/*.wav')
        frame = pd.DataFrame(wav_files, columns=['file_name']).set_index('file_name')
        frame['song'] = label
        frames.append(frame)
    return pd.concat(frames)
def getPitch(x, fs, winLen=0.02):
    '''
    Estimate the fundamental frequency track of an audio signal using
    librosa's pYIN with an analysis window of `winLen` seconds.

    Returns the f0 track and the per-frame voiced flags.
    '''
    # round the window size up to the next power of two samples
    samples_per_window = winLen * fs
    frame_length = int(2**int(samples_per_window - 1).bit_length())
    f0, voiced_flag, _voiced_probs = librosa.pyin(y=x, fmin=80, fmax=450, sr=fs,
                                                  frame_length=frame_length,
                                                  hop_length=frame_length // 2)
    return f0, voiced_flag
def extract_features(file_name, fs=None, scale_audio=False, onlySingleDigit=False):
    '''
    Compute signal-processing features for one audio file.

    Noise is removed before extraction. Returns a list:
    [power, pitch_mean, pitch_std, voiced_fraction, zero_crossings, tempo]
    '''
    x, fs = librosa.load(file_name, sr=fs)
    x = nr.reduce_noise(y=x, sr=fs)
    if scale_audio:
        # normalise to unit peak amplitude
        x = x/np.max(np.abs(x))
    f0, voiced_flag = getPitch(x, fs, winLen=0.02)

    power = np.sum(x**2)/len(x)
    # guard against an all-NaN pitch track (fully unvoiced clip)
    has_pitch = np.mean(np.isnan(f0)) < 1
    pitch_mean = np.nanmean(f0) if has_pitch else 0
    pitch_std = np.nanstd(f0) if has_pitch else 0
    voiced_fr = np.mean(voiced_flag)
    zero_crossings = librosa.feature.zero_crossing_rate(x).sum()
    tempo = librosa.beat.tempo(x, sr=fs)[0]
    return [power, pitch_mean, pitch_std, voiced_fr, zero_crossings, tempo]
def get_features(labels_file):
    '''
    Extract features for every file in the metadata index, in parallel.

    Builds one dask-delayed extract_features task per file, stacks the
    results into a single delayed numpy array, and computes the whole
    graph at once. Returns the feature matrix as an ndarray.
    '''
    tasks = [
        delayed(extract_features)(name, fs=44100, scale_audio=True,
                                  onlySingleDigit=True)
        for name in labels_file.index
    ]
    return delayed(np.array)(tasks).compute()
# -
# # Modelling
#
# Multiple classifier models are evaluated. They will be evaluated based on the cross validation accuracy and test data accuracy
# - Logistic Regression
# - Naive Bayes model
# - Support Vector Classifier
# - K Nearest Neighbour
# - Decision Tree
# - Random Forest Ensemble model
# +
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
# Candidate classifiers, keyed by the short names used throughout the notebook.
algorithms = {
    'logistic': LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000),
    'NB': GaussianNB(),
    'SVM': svm.SVC(C=1, gamma=2),  # NOTE(review): gamma=2 is unusually large — presumably found by experiment; confirm
    'KNN': KNeighborsClassifier(n_neighbors=3),
    'DecisionTree': DecisionTreeClassifier(),
    'RandomForest': RandomForestClassifier()
}
def get_pipeline(algorithm):
    '''
    Build a scaler + classifier pipeline for the named algorithm.

    The name must be a key of the module-level `algorithms` dict; the
    classifier is preceded by a StandardScaler so every model sees
    standardised features.
    '''
    return make_pipeline(StandardScaler(), algorithms[algorithm])
# -
# # Methodology
# Train and Test data will be created using a random split with 80:20 ratio. Multiple models will be trained and validated with K-fold cross validation and finally tested against the test data. Models will be evaluated using cross validation accuracy since the data is balanced. Apart from accuracy, Precision, Recall and F1-score will also be analysed. A model with high accuracy on test data will be considered the best model since that model is generalising better.
# # Dataset
#
# The data set consists of 6 features **power, pitch_mean, pitch_std, frequency, tempo, zero_crossings** and an output variable **song**, which is encoded as 0 and 1 indicating Potter and StarWars respectively. They were obtained after preprocessing and extracting features from the raw audio files. Each of those stages is explained in the Transformations section above.
#
# The data was split into training and testing set with a random 80:20 split.
# Build the file index and eyeball the class balance.
metadata = get_metadata()
print(f'Length of data: {len(metadata)}')
print(metadata.head())
print('\ncount of each song:')
print(metadata.song.value_counts())
# +
# Extract the feature matrix (in parallel via dask) and the label vector.
X = get_features(labels_file=metadata)
y = metadata['song'].values
print('\nThe shape of X is', X.shape)
print('\nThe shape of y is', y.shape)
# +
import matplotlib.pyplot as plt
import seaborn as sns
columns = ['power', 'pitch_mean', 'pitch_std', 'voiced_fr', 'zero_crossings', 'tempo']
df = pd.DataFrame(X, columns=columns)
df['song'] = y
# Pairwise scatter plots coloured by class, to see which features separate the songs.
sns.pairplot(df, hue='song')
# +
# select only the features that are useful for classification
req_cols = [columns.index(x) for x in ('pitch_mean', 'pitch_std', 'tempo')]
X_req = X[:, req_cols]
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_req, y, test_size=0.2, random_state=10)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# -
# # Results
#
# Multiple models were evaluated for different set of features. I used [MLFlow](https://mlflow.org/) to track the experiments since I was trying out multiple experiments.
# +
from sklearn.model_selection import cross_val_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import classification_report
from mlflow import start_run, log_param, log_params, log_metric
# Evaluate every candidate classifier: 5-fold CV on the training split,
# then a single fit/score on the held-out test split. Each model gets
# its own MLflow run so parameters and metrics are tracked per algorithm.
for algorithm in ('logistic', 'SVM', 'KNN', 'DecisionTree', 'NB', 'RandomForest'):
    with start_run(run_name=algorithm) as run:
        print('Model:', algorithm)
        log_param('noise_reduction', True)
        log_param('Training samples', X_train.shape)
        pipeline = get_pipeline(algorithm)
        log_params(pipeline.get_params())
        # Cross-validated accuracy on the training split only.
        scores = cross_val_score(pipeline, X_train, y_train, cv=5)
        print('%0.2f accuracy with a standard deviation of %0.2f' % (scores.mean(), scores.std()))
        log_metric('CV accuracy', scores.mean())
        log_metric('CV accuracy std', scores.std())
        # Refit on the whole training split and score on the test split.
        model = pipeline.fit(X_train, y_train)
        score = model.score(X_test, y_test)
        print('Test accuracy %0.2f' % score)
        log_metric('Test accuracy', score)
        predicted = model.predict(X_test)
        print(classification_report(y_test, predicted))
        # Weighted-average precision/recall/F1 across both classes.
        prf = precision_recall_fscore_support(y_test, predicted, average='weighted')
        log_metric('Precision', prf[0])
        log_metric('Recall', prf[1])
        log_metric('F1', prf[2])
        print(prf)
        print('-'*100)
# -
# Normalise the data
scaler = StandardScaler()
scaler.fit(X_train)  # fit on the training split only, then apply to both splits
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
from sklearn.model_selection import GridSearchCV
# Exhaustive hyper-parameter search for the random forest (5-fold CV).
rfc = RandomForestClassifier()
param_grid = {
    'n_estimators': [200, 500],
    'max_features': ['auto', 'sqrt', 'log2'],  # NOTE(review): 'auto' was removed in newer scikit-learn — confirm version
    'max_depth' : [4,5,6,7,8],
    'criterion' :['gini', 'entropy']
}
grid = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5)
grid.fit(X_train_scaled, y_train)
grid.best_params_
# Retrain a forest with the best parameters found above and evaluate it.
rf = RandomForestClassifier(criterion='gini', max_depth=6, max_features='log2', n_estimators= 500)
scores = cross_val_score(rf, X_train_scaled, y_train, cv=5)
print('%0.2f accuracy with a standard deviation of %0.2f' % (scores.mean(), scores.std()))
rf.fit(X_train_scaled, y_train)
rf.score(X_test_scaled, y_test)
# **Observations**
# - SVM is performing very well with minimal features.
# - Random forest is performing slightly better than SVM with more features. However, Random Forest takes more time to train.
# - Main features distinguishing between Potter and Starwars are tempo and mean pitch. SVM is performing really well with just these 2 features.
#
# Experiments summary from MLFlow dashboard is attached below.
#
# 
# +
## Saving the model
# Persist the best-performing configuration (scaler + SVC with
# probability outputs) as a single pipeline so inference needs no
# separate scaler object.
import joblib
pipeline = make_pipeline(StandardScaler(), svm.SVC(C=1, gamma=2, probability=True))
pipeline.fit(X_train, y_train)
score = pipeline.score(X_test, y_test)
print('Test accuracy %0.2f' % score)
joblib.dump(pipeline, 'model.pkl')
# -
# # Conclusions
#
# - Tempo and Pitch are the most important features to distinguish between Harry Potter and Star Wars
# - SVM is performing really well with minimal features for classifications.
# - Ensemble models outperform standalone models like SVM with enough features.
# - Both Random Forest and SVM models are giving decent accuracy on test data (~80%). However, this could be improved by exploring better feature extraction, boosting algorithms, and advanced algorithms like Neural Nets.
# - Feature extraction was the key to improve accuracy of model. I have tried various features that librosa supports such as zero crossing rate, mfcc, spectral_centroid, beat_track, etc. However, they weren't much helpful.
#
# **Possible Improvements**
# - Other ensemble models like Light GBM and XGBOOST might improve the accuracy. This is an improvement that can be considered.
# - Reducing noise in audio improved the accuracy. Other methods for noise reduction can be evaluated to improve classifier.
# - Noise trimming can be applied to remove the beginning and end of audio that contain no voice.
# - Explore the accuracy by training only on Hummings from the dataset.
# - Other features can also be explored to improve accuracy.
| model/training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="-8-trVo__vRE" colab_type="text"
# _Lambda School Data Science_
#
# # Make explanatory visualizations
#
#
#
#
# Today we will reproduce this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/)
# + id="ya_w5WORGs-n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 355} outputId="01efec1a-f9e8-4057-990e-339750750110"
from IPython.display import display, Image
url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png'
example = Image(url=url, width=400)
display(example)
# + [markdown] id="HP4DALiRG3sC" colab_type="text"
# Using this data: https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel
# + [markdown] id="HioPkYtUG03B" colab_type="text"
# Objectives
# - add emphasis and annotations to transform visualizations from exploratory to explanatory
# - remove clutter from visualizations
#
# Links
# - [Strong Titles Are The Biggest Bang for Your Buck](http://stephanieevergreen.com/strong-titles/)
# - [Remove to improve (the data-ink ratio)](https://www.darkhorseanalytics.com/blog/data-looks-better-naked)
# - [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/)
# + [markdown] id="0w_iMnQ6-VoQ" colab_type="text"
# ## Make prototypes
#
# This helps us understand the problem
# + id="5uz0eEaEN-GO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="1bb0350e-6560-46d2-80d8-61f9f8ea2144"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.style.use('fivethirtyeight')
# Fake data shaped like the target chart: heavy 1-star and 10-star spikes.
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
                 index=range(1,11)) # index will start from 0 if not for this
fake.plot.bar(color='C1', width=0.9);
# + id="KZ0VLOV8OyRr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 285} outputId="3b3eb257-dd00-496b-e2a9-769eb08458af"
# Same prototype built from raw "votes"; value_counts() recovers the bar heights.
fake2 = pd.Series(
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
     2, 2, 2,
     3, 3, 3,
     4, 4,
     5, 5, 5,
     6, 6, 6, 6,
     7, 7, 7, 7, 7,
     8, 8, 8, 8,
     9, 9, 9, 9,
     10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9);
# + [markdown] id="mZb3UZWO-q05" colab_type="text"
# ## Annotate with text
# + id="Wqcama3C87xL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 355} outputId="9c5b6373-b7e7-4848-b0e1-adb747d41802"
display(example)
# + id="f6U1vswr_uWp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="ffb376d1-0145-482b-ad71-327b2580b1a0"
plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
                 index=range(1,11)) # index will start from 0 if not for this
fake.plot.bar(color='C1', width=0.9);
# + id="aMyWl8lg9g5x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="06bd63d4-2ee6-4592-c19c-b76ab7de4b31"
# rotate x axis numbers
plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
                 index=range(1,11)) # index will start from 0 if not for this
ax = fake.plot.bar(color='C1', width=0.9)
ax.tick_params(labelrotation=0) #to unrotate or remove the rotation
ax.set(title="'An Incovenient Sequel: Truth to Power' is divisive");
#or '\'An Incovenient Sequel: Truth to Power\' is divisive'
# + id="J20ZDe4CFIim" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 359} outputId="6823580f-ec52-4ab6-a0f6-0b5752b88e6b"
plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
                 index=range(1,11)) # index will start from 0 if not for this
ax = fake.plot.bar(color='C1', width=0.9)
ax.tick_params(labelrotation=0)
# Title and subtitle are drawn with ax.text so they can sit left of the axes.
ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive",
        fontsize=16, fontweight='bold')
ax.text(x=-2,y=45, s='IMDb ratings for the film as of Aug. 29',
        fontsize=12)
ax.set(xlabel='Rating',
       ylabel='Percent of total votes',
       yticks=range(0,50,10));
#(start pt., end pt., increment)
# + [markdown] id="x8jRZkpB_MJ6" colab_type="text"
# ## Reproduce with real data
# + id="3SOHJckDUPI8" colab_type="code" colab={}
df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv')
# + id="dXfg4M3hOI-i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4eef2e73-01bc-4a48-e467-429671a6e2c2"
df.shape
# + id="cDltXxhC_yG-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 338} outputId="7b219a6b-75d7-4678-954d-08053e7f5954"
df.head()
# + id="xh2DBRS6PWKS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="67cc0f8d-a8e5-4ef3-e6ef-d3b258c1f7a3"
# Total number of cells (rows x columns) in the frame.
width,height = df.shape
width*height
# + id="kD7VRlI9QJfn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 309} outputId="38cdd573-a46e-45b0-f484-b3d75fe132f3"
# Show every column instead of pandas' truncated default view.
pd.options.display.max_columns = 500
df.head()
# + id="QRJWmIJKQLHp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 886} outputId="aecf72ce-10dc-4939-96d8-0225a25a0e8a"
# One random row, transposed, to eyeball every field at once.
df.sample(1).T
# + id="XzrpRQOtSk6X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="38d65668-0a2d-4b7d-8654-b7770954ce5b"
df.timestamp.describe()
# + id="h6xeQLH9Sw6g" colab_type="code" colab={}
# convert timestamp to date time
df.timestamp = pd.to_datetime(df.timestamp)
# + id="aCneFtJwTIHn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="0567ae28-668e-40f5-c7eb-e1c78137bc94"
df.timestamp.describe()
# + id="1w6CgcXkTVDn" colab_type="code" colab={}
# Making datetime index of your df
df = df.set_index('timestamp')
# + id="CY00lpulTh_8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} outputId="72d11edd-a438-4a40-c833-6836ed01dd79"
df.head()
# + id="F_NcFZawTpLj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3278} outputId="f4416aa8-4cfd-4cb1-cc78-12292c8b237d"
# With a DatetimeIndex, indexing by a date string selects that whole day.
df['2017-08-09']
# everything from this date
# + id="69uxFO1JUQac" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="e3bc3a63-0539-4242-8bd1-9358a2d7037e"
df.category.value_counts()
# + [markdown] id="aq36jLfOUypD" colab_type="text"
# ####only interested in IMDb users
# + id="RDLmQ6ISUxZl" colab_type="code" colab={}
df.category == 'IMDb users'
# + id="nwUSNdKYU-is" colab_type="code" colab={}
# As a filter to select certain rows
df[df.category == 'IMDb users']
# + id="hrN3TeHSVUVr" colab_type="code" colab={}
# All snapshots from the final day present in the data.
lastday = df['2017-08-09']
# + id="bBT0Sl30Wh3X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 148} outputId="3986cfb9-ccf2-401b-ca92-124b57a361bc"
lastday.head(1)
# + id="_ShCQrj6WUbn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} outputId="7f4db8ef-4b2b-44de-a87b-0d1215103766"
lastday[lastday.category =='IMDb users'].tail()
# + id="m8yz6zcSXN61" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="b39f7b4e-7d81-4585-c817-ecafca0f3b12"
# Respondent count over the day, IMDb users only.
lastday[lastday.category =='IMDb users'].respondents.plot();
# + id="hWb7wyETX59C" colab_type="code" colab={}
# The most recent snapshot of the ratings.
final = df.tail(1)
# + id="gCczGiMCX956" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 111} outputId="2bd038d8-1954-488c-d85e-db3b56c0ecf9"
#columns = ['1_pct','2_pct','3_pct','4_pct','5_pct','6_pct','7_pct','8_pct','9_pct','10_pct']
#OR
columns = [str(i) + '_pct' for i in range(1,11)]
final[columns]
#OR
#data.index.str.replace('_pct', '')
# + id="q4lfFcv3Zm-9" colab_type="code" colab={}
# Transpose so ratings 1..10 become the index (rows) for plotting.
data = final[columns].T
# + id="1a-QtJK8a30J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 359} outputId="39ca0262-92f0-4640-c4e8-58b7922caeca"
data
# + id="M7UmcykgZzDb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 321} outputId="67dee877-68a8-4551-f662-9814645e7c36"
data.plot.bar()
# + id="7IEA3wedaAsW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="420c3023-8d1c-4e7e-9da3-a1bc8b5821d8"
plt.style.use('fivethirtyeight')
ax = data.plot.bar(color='C1', width=0.9)
ax.tick_params(labelrotation=0)
ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive",
        fontsize=16, fontweight='bold')
ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29',
        fontsize=12)
ax.set(xlabel='Rating',
       ylabel='Percent of total votes',
       yticks=range(0,50,10));
#(start pt., end pt., increment)
# + id="qhiSpGt6ai9X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="7496779e-7325-4e62-af76-37622f3fdbb9"
# to remove the timestamp texts in the center
# to change the x axis texts
plt.style.use('fivethirtyeight')
ax = data.plot.bar(color='C1', width=0.9, legend=False)
ax.tick_params(labelrotation=0)
ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive",
        fontsize=16, fontweight='bold')
ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29',
        fontsize=12)
ax.set(xlabel='Rating',
       ylabel='Percent of total votes',
       yticks=range(0,50,10));
# + id="7fD2GcwEbSDF" colab_type="code" colab={}
# Relabel the index 1..10 so tick labels read as ratings, not column names.
data.index = range(1,11)
data
# + id="vq45KwP2bb5g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="0982a4f4-86ae-45fe-cd8d-92da1b655576"
plt.style.use('fivethirtyeight')
ax = data.plot.bar(color='C1', width=0.9, legend=False)
ax.tick_params(labelrotation=0)
ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive",
        fontsize=16, fontweight='bold')
ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29',
        fontsize=12)
ax.set(xlabel='Rating',
       ylabel='Percent of total votes',
       yticks=range(0,50,10))
plt.xlabel('Rating', fontsize=14);
# + [markdown] id="NMEswXWh9mqw" colab_type="text"
# # ASSIGNMENT
#
# Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit).
#
# # STRETCH OPTIONS
#
# #### Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/).
#
# For example:
# - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) (try the [`altair`](https://altair-viz.github.io/gallery/index.html#maps) library)
# - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) (try the [`statsmodels`](https://www.statsmodels.org/stable/index.html) library)
# - or another example of your choice!
#
# #### Make more charts!
#
# Choose a chart you want to make, from [FT's Visual Vocabulary poster](http://ft.com/vocabulary).
#
# Find the chart in an example gallery of a Python data visualization library:
# - [Seaborn](http://seaborn.pydata.org/examples/index.html)
# - [Altair](https://altair-viz.github.io/gallery/index.html)
# - [Matplotlib](https://matplotlib.org/gallery.html)
# - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html)
#
# Reproduce the chart. [Optionally, try the "<NAME>."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes.
#
# Take notes. Consider sharing your work with your cohort!
#
#
#
#
#
#
#
#
| module3-make-explanatory-visualizations/LS_DS_223_Make_explanatory_visualizations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is a simple iPython notebook which uses the "editdistance" package to find the best match for a string within the dictionary.
# (NJ, Aug 2018)
import editdistance
# We'll load the entire dictionary into a list. This doesn't actually use much memory.
# Load the entire dictionary into a list (one word per line). Using a
# context manager guarantees the handle is closed even if the read fails;
# the original used a manual open/close pair.
with open("./dict.txt") as f:
    my_dict = f.readlines()
# Now we compare each dictionary entry with a string; we'll keep track of the best entry we've seen so far:
# For each misspelled word, scan the whole dictionary and report the entry
# with the smallest Levenshtein distance. The original never closed this
# file handle; the `with` block fixes that resource leak.
with open('wiki_misspell.txt', 'r') as f:
    for line in f:
        string = line.strip()
        bestv = 10000000  # sentinel larger than any plausible edit distance
        bests = ""
        for entry in my_dict:
            thisv = editdistance.eval(string, entry.strip())
            if (thisv < bestv):
                # Note that this script only updates the best entry when it is better than all of the previous ones
                # It turns out that this is actually a really bad strategy
                bests = entry.strip()
                bestv = thisv
        print(string, bests, bestv)
| other/ged-lev/P1-ged-lev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="J0i5MRP0SV8D"
# This notebook uses [GPTNeo](https://github.com/EleutherAI/GPTNeo) by [EleutherAI](https://www.eleuther.ai) to fine-tune the model and predict a batch of instances.
# + [markdown] id="zoGedGqhbble"
# # Product Description Generation
#
# If a new batch is being generated:
#
# 1. Make sure you have prepared the dataset with the "prepare" notebook
#
# 2. Make sure the fine tuned model is uploaded to the bucket
#
# + [markdown] id="-5HYR_tf3MIG"
# Choose the following options:
# 1. re-initialize this configuration [1]
# 2. the google account with the cloud storage [1]
# 3. gpt project [10]
# 4. No [n]
# + id="2VNod-J73IKL"
# Authenticate this Colab session against Google Cloud so gcloud/gsutil
# can reach the storage bucket.
from google.colab import auth
auth.authenticate_user()
# #!gcloud auth login
# !gcloud init
# + [markdown] id="_Jmdi1Ol30Ee"
# Mount the drive with the excel files where also the generated descriptions will be stored.
#
# + id="vthqcBXs3dYG"
# Mount drive
from google.colab import drive
drive.mount('/content/drive')
# + id="K-53qkZV6Lv9"
import os
# %tensorflow_version 2.x
# !git clone https://github.com/EleutherAI/gpt-neo
# %cd gpt-neo
# !pip3 install -q -r requirements.txt
# Placeholders; both are set further down once the config is known.
pretrained_model = None
dataset = None
# + id="wid6pXwlYuIN"
# !pip install -U tensorflow-gcs-config==2.1.3
# !pip install -q t5 tensorflow-text==2.3
# + id="Cr_c6A2NBK5i"
# Cloud Storage location holding the datasets and model checkpoints.
path_to_cloud_bucket = 'gs://test-gpt-j/'
# + [markdown] id="zUY6IWIXPU7E"
# # Configs
# dataset configs
# + id="MCsZP48vavCP"
# %%writefile configs/dataset_configs/prod_desc_gpt_j.json
{
"path": "gs://test-gpt-j/datasets/prod_desc_gpt_j_*.tfrecords",
"eval_path": "",
"n_vocab": 50256,
"tokenizer_is_pretrained": true,
"tokenizer_path": "gpt2",
"eos_id": 50256,
"padding_id": 50257
}
# + [markdown] id="FK4Sfh9GPdAk"
# Model configs
# + id="L9hUDdokiWj6"
# %%writefile configs/GPT3_XL.json
{
"n_head": 16,
"n_vocab": 50257,
"embed_dropout": 0,
"lr": 0.0002,
"lr_decay": "cosine",
"warmup_steps": 3000,
"beta1": 0.9,
"beta2": 0.95,
"epsilon": 1e-8,
"opt_name": "adam",
"weight_decay": 0,
"train_batch_size": 256,
"attn_dropout": 0,
"train_steps": 600000,
"eval_steps": 0,
"predict_steps": 1,
"res_dropout": 0,
"eval_batch_size": 4,
"predict_batch_size": 1,
"iterations": 100,
"n_embd": 2048,
"datasets": [["prod_desc_gpt_j", null, null, null]],
"model": "GPT",
"model_path": "gs://test-gpt-j/",
"n_ctx": 2048,
"n_layer": 24,
"scale_by_depth": true,
"scale_by_in": false,
"attention_types" : [[["global", "local"],12]],
"mesh_shape": "x:4,y:2",
"layout": "intermediate_expanded:x,heads:x,vocab:n_vocab,memory_length:y,embd:y",
"activation_function": "gelu",
"recompute_grad": true,
"gradient_clipping": 1.0,
"tokens_per_mb_per_replica": 2048,
"precision": "bfloat16"
}
# + [markdown] id="koKQHA5ikCvD"
# #Fine tuned model
# + id="GU3BDNJN_ZXE"
# Derive the bucket root (gs://<bucket-name>) from the full cloud path.
bucket_base = "gs://" + path_to_cloud_bucket.replace('gs://', '').split('/')[0]
pretrained_model = 'GPT3_XL'
# !mkdir pretrained
# Pull only the model config locally; the weights stay in the bucket.
# !gsutil -m cp gs://test-gpt-j/GPT3_XL/config.json pretrained
path_to_local_weights = f"/content/gpt-neo/pretrained/"
# + id="Laf0slBMDCUj"
import json
from pprint import pprint
# Fine-tuning settings for this run.
path_to_model = ""
batch_size = 8
dset = "prod_desc_gpt_j"
mesh_shape = "x:4,y:2"
train_steps = 1000
steps_per_checkpoint = 500
# Pretrained checkpoints resume from a different global step per model size.
start_step = 400000 if pretrained_model == "GPT3_2-7B" else 362000
if path_to_model == "":
    path_to_model = f'{bucket_base.strip("/")}/{pretrained_model}'
print(f'MODEL PATH: {path_to_model}\n')
# Fall back to the globally selected dataset (or "pile") when none is set here.
if dset == "" and dataset != "Sampling_Only":
    dset = dataset
elif dataset is None and dset == "":
    dset = "pile"
def pad_to_multiple_of(n, mult):
    """
    Round n up to the nearest multiple of mult.

    Returns n unchanged when it is already an exact multiple.
    """
    remainder = n % mult
    return n if remainder == 0 else n + (mult - remainder)
# Load the local copy of the model config, overlay the fine-tuning
# settings, and write the merged config back for the training run.
with open(f'{path_to_local_weights}config.json', 'r') as f:
    data = json.load(f)
pprint(data)
dset_val = [[dset, None, None, None]] if dset != "" else data["datasets"]
mods = {
    "mesh_shape": mesh_shape,
    "layout": "intermediate_expanded:x,heads:x,memory_length:y,embd:y",
    "model_path": path_to_model,
    "datasets": dset_val,
    "train_steps": start_step + train_steps,
    "eval_steps": 0,
    "train_batch_size": batch_size,
    "predict_batch_size": batch_size
}
data.update(mods)
print('\n--->\n')
pprint(data)
with open(f'configs/{pretrained_model}.json', 'w') as outfile:
    json.dump(data, outfile, indent=2)
# + [markdown] id="I_HxtEmBGTGT"
# ### Sample from your model
#
# Once the pretrained model (fine tuned) is in the bucket, sample from it.
# + id="OLPyuWz_j1q9"
# %cd ..
# !mkdir drive/MyDrive/dataset/gen/
# %cd gpt-neo
# + [markdown] id="krwBdj-sQXBz"
# Copy the test set to gpt-neo/test/
# + id="EC5uFTMbCSJR"
from data.encoders import encode
from functools import partial
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu import tpu_config, tpu_estimator
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from utils import save_config, expand_attention_types_params, yes_or_no, remove_gs_or_filepath, setup_logging, \
check_dataset
from inputs import sequential_input, mlm_sample_text, generic_text
from export import export_model
from model_fns import model_fn
from data.encoders import fetch_encoder
from configs import fetch_model_params
from tasks import task_descriptors
import argparse
import json
import numpy as np
import gc
import sys
# + id="HE4AmPd8Hdyo"
def pred_input(params, enc=None,
               path_to_prompt=""):
    """
    Build the prediction input dataset for the estimator.

    Reads a text prompt (a default "unicorns" passage when no path is
    given), tokenises it, truncates to the last n_ctx tokens or right-pads
    with the padding id up to n_ctx, then broadcasts the single prompt
    across the batch. Returns a tf.data.Dataset yielding one batch.
    """
    unicorns = "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " \
               "previously unexplored valley, in the Andes Mountains. Even more surprising to the " \
               "researchers was the fact that the unicorns spoke perfect English."

    text = unicorns if path_to_prompt == "" else open(path_to_prompt, "r").read()
    tokens = encode(enc, text)

    # Keep only the most recent n_ctx tokens so the prompt fits the context window.
    if len(tokens) > params["n_ctx"]:
        tokens = tokens[len(tokens) - params["n_ctx"]:]
    # Right-pad short prompts to exactly n_ctx with the padding id.
    if len(tokens) < params["n_ctx"]:
        tokens = tf.pad(tokens, [[0, params["n_ctx"] - len(tokens)]], constant_values=params["padding_id"])
    t = tf.broadcast_to(tokens, [params["batch_size"], params["n_ctx"]])
    dataset = tf.data.Dataset.from_tensors(t)

    # NOTE(review): the original defined a `_dummy_labels` helper here but
    # never applied it to the dataset; it has been removed as dead code.
    del t
    del tokens
    gc.collect()
    return dataset
# + id="pj3CFswIHCSf"
def handle_pred_output(predictions, enc, params, out_name="test"):
    """
    Decode the first prediction and write its text to out_name.

    Truncates the token ids at the first eos id, then at the first
    padding id, decodes what remains and writes it out. Only the first
    prediction in the iterator is consumed.
    """
    with tf.gfile.Open(out_name, "w") as f:
        for prediction in predictions:
            ids = prediction["outputs"]
            # Drop everything from the first eos token, then from the first padding token.
            for stop_id in (params['eos_id'], params['padding_id']):
                cut = np.argmax(ids == stop_id)
                if cut > 0:
                    ids = ids[:cut]
            f.write(enc.decode(ids))
            # Only the first prediction is used.
            break
    return
# + id="7Bz745eU6fRu"
def infer(path, name):
    """
    Sample the fine-tuned model for a single prompt file.

    path -- full path of the prompt text file
    name -- output file name (written under /content/drive/MyDrive/dataset/gen/)

    Builds a TPU estimator from the pretrained model's config, generates a
    prediction for the prompt, writes the decoded text to the drive, then
    tears the graph down so the next call starts clean.
    """
    tf.disable_v2_behavior()
    model = pretrained_model
    steps_per_checkpoint = 500

    # Read params of the model from its config.
    params = fetch_model_params(model)

    # Fetch appropriate input functions.
    input_fn = params.get("input_fn", "sequential_input")
    if input_fn == "sequential_input":
        input_fn = sequential_input
    elif input_fn == "generic_text":
        input_fn = generic_text
    pred_input_fn = pred_input
    handle_pred_output_fn = handle_pred_output

    # Get current training step from the checkpoint directory.
    current_step = int(estimator_lib._load_global_step_from_checkpoint_dir(params["model_path"]))

    if params["mlm_training"]:
        mlm_sample_text_fn = partial(mlm_sample_text, params)
        input_fn = partial(generic_text, sample_text_fn=mlm_sample_text_fn)

    # NOTE(review): the original guarded `check_dataset(input_fn, params)` with
    # `if args.check_dataset:`, but no `args` object exists in this notebook,
    # so every call raised NameError. The dataset check has been removed.

    # Fetch encoder per params and bind the prompt file to the input function.
    encoder = fetch_encoder(params)
    pred_input_fn = partial(pred_input_fn, path_to_prompt=path, enc=encoder)

    # Save config to logdir for experiment management.
    save_config(params, params["model_path"])

    # Add to params: auto_layout, auto_layout_and_mesh_shape, use_tpu, num_cores
    mesh_shape = mtf.convert_to_shape(params["mesh_shape"])
    params["num_cores"] = mesh_shape.size
    params["auto_layout"] = True
    params["auto_layout_and_mesh_shape"] = True
    params["use_tpu"] = True
    params["gpu_ids"] = None
    params["steps_per_checkpoint"] = steps_per_checkpoint

    # Expand attention types param
    params["attention_types"] = expand_attention_types_params(params["attention_types"])
    assert len(params["attention_types"]) == params["n_layer"]  # Assert that the length of expanded list = num layers

    params["predict_batch_size"] = params.get("predict_batch_size", 1)  # Default to 1
    params["predict"] = True
    params['model'] = params.get("model", "GPT")  # Default model selection to GPT since it's the only option for now
    params["export"] = False
    # Set sampling parameters
    params["sampling_use_entmax"] = False

    # Sample quality of MoE models suffers when using the faster sampling method, so default to slow_sampling if
    # moe layers are present
    params["slow_sampling"] = True if params["moe_layers"] is not None else False

    # Get eval tasks from params
    eval_tasks = params.get("eval_tasks", [])
    has_predict_or_eval_steps_or_eval_tasks = params["predict_steps"] > 0 or params["eval_steps"] > 0 or len(
        eval_tasks) > 0

    for t in eval_tasks:
        assert t in task_descriptors, f"Eval task '{t}' is not known"
        task_descriptors[t]["init_fn"](params)

    # Set up TPUs and Estimator
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver() if params["use_tpu"] else None
    config = tpu_config.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=params["model_path"],
        save_checkpoints_steps=None,  # Disable the default saver
        save_checkpoints_secs=None,  # Disable the default saver
        log_step_count_steps=params["iterations"],
        save_summary_steps=params["iterations"],
        tpu_config=tpu_config.TPUConfig(
            num_shards=mesh_shape.size,
            iterations_per_loop=params["iterations"],
            num_cores_per_replica=1,
            per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST))

    estimator = tpu_estimator.TPUEstimator(
        use_tpu=params["use_tpu"],
        model_fn=model_fn,
        config=config,
        train_batch_size=params["train_batch_size"],
        eval_batch_size=params["train_batch_size"],
        predict_batch_size=params["predict_batch_size"],
        params=params)

    # NOTE(review): the original also defined a `_make_task_estimator` helper
    # here that was never called; it has been removed as dead code.

    predictions = estimator.predict(input_fn=pred_input_fn)

    # Decode and write the first prediction to the drive.
    enc = fetch_encoder(params)
    out = "/content/drive/MyDrive/dataset/gen/" + name
    handle_pred_output(predictions, enc, params, out_name=out)

    # Free everything so repeated calls in a loop don't accumulate graph state.
    del predictions
    del estimator
    del enc
    del current_step
    del mesh_shape
    gc.collect()
    tf.keras.backend.clear_session()
    tf.reset_default_graph()
    return
# + id="G8jffnKBdXoU"
def infer_all(dir):
    """
    Run inference for every prompt file in `dir` not yet processed.

    A checkpoint file lists the names already generated; each newly
    processed file is appended to it so interrupted runs can resume
    without duplicating work. The original crashed with
    FileNotFoundError on a first run with no checkpoint file; that case
    now starts from an empty list.
    """
    checkpoint_path = "/content/drive/MyDrive/dataset/checkpoint.txt"
    try:
        with open(checkpoint_path, "r") as f:
            generated = f.read().split('\n')
    except FileNotFoundError:
        # First run: no checkpoint yet, nothing has been generated.
        generated = []

    # Collect prompt files that are not recorded in the checkpoint.
    to_be_gen = []
    for path in os.listdir(dir):
        full_path = os.path.join(dir, path)
        if os.path.isfile(full_path) and path not in generated:
            to_be_gen.append(path)

    for path in to_be_gen:
        infer(dir + path, path)
        # Record completion immediately so a crash mid-batch loses at most one file.
        with open(checkpoint_path, "a") as f:
            f.write(f"{path}\n")
    return
# + id="8jlnMUPlisjT"
import time
# Generate descriptions for every pending prompt file and report wall time.
start = time.time()
infer_all("/content/drive/MyDrive/dataset/test/")
print(f"All done in {time.time()-start}s")
# + [markdown] id="D0l0dr_721x2"
# ##Warning
# The results will be deleted from the drive upon running the model on another dataset.
| generate-neo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
# Load the Dry Bean dataset from its Excel distribution.
df = pd.read_excel('DryBeanDataset/Dry_Bean_Dataset.xlsx')
df.head()
# Features are all columns except the last; the label is the 'Class' column.
X = df.iloc[:, :-1].values
y_str = df['Class'].values
# +
import numpy as np
np.unique(y_str)
# +
from sklearn.preprocessing import LabelEncoder
# Encode the string class names as integer labels 0..n_classes-1.
le = LabelEncoder()
y = le.fit_transform(y_str)
# +
from sklearn.model_selection import train_test_split
# Stratified split keeps the class proportions equal in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42, stratify=y)
# -
# Persist the splits as plain CSV for downstream use.
np.savetxt("X_train.csv", X_train, delimiter=",")
np.savetxt("y_train.csv", y_train, delimiter=",")
np.savetxt("X_test.csv", X_test, delimiter=",")
np.savetxt("y_test.csv", y_test, delimiter=",")
| hw02-starter/dataset/make-splits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/suneel87/Deep-Fake-Image-Detection/blob/main/DeepFake_Image.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="26_qT2-7YPNR" outputId="9d3e8c21-2855-46ff-d44c-48388571cb7b"
from google.colab import drive
drive.mount('/content/drive')
# + id="6Tx2r8YQ_QS5" colab={"base_uri": "https://localhost:8080/"} outputId="0e441661-d401-43da-e1ca-07fd6d0c8c27"
pip install streamlit
# + [markdown] id="m5naTQ8yy5UU"
# ### **Import Necessary Modules**
# + id="njb7fTuzYmc0"
from itertools import permutations, product
from random import sample, choice, shuffle
from glob import glob
import time
from datetime import timedelta
from tqdm import tqdm
# %matplotlib inline
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
import streamlit as st
# + [markdown] id="FPTCS58ezAzC"
# ### **Functions Used**
# + id="-32B_u_pZrbs"
'''
This functions computes the Contrastive loss for a CFFN
Input: Label(y)(0 or 1), Feature Vectors for a pair of images(fx1,fx2)
Output: The value of Contrastive loss
Return Type: PyTorch Tensor
'''
def ContrastiveLoss(y, fx1, fx2):
    """Contrastive loss for a batch of embedding pairs.

    Parameters
    ----------
    y : torch.Tensor
        Pair labels of shape (batch,): 1 for a same-class pair, 0 otherwise.
    fx1, fx2 : torch.Tensor
        Embeddings of shape (batch, 128) produced by the twin network.

    Returns
    -------
    torch.Tensor
        Scalar loss.

    Bug fix: the original implementation round-tripped through NumPy with
    ``.detach().cpu().numpy()`` and rebuilt the result via
    ``torch.tensor(..., requires_grad=True)``, which detaches the loss from
    the autograd graph — ``loss.backward()`` never propagated gradients to
    the network.  This version computes the same value entirely in torch so
    gradients flow.
    """
    m = 2  # margin
    E = torch.pow(fx1 - fx2, 2)  # element-wise squared difference
    # (m - E)^2 clamped at zero — kept for parity with the original formula
    # (the square is already non-negative, so the clamp is a no-op).
    mat_max = torch.clamp(torch.pow(m - E, 2), min=0.0)
    # Same-class pairs (y == 1) contribute the pull term, others the push term.
    loss = 0.5 * (y * torch.pow(E, 2).T) + (1 - y) * mat_max.T
    return loss.sum()
# + id="vSsXcRYazZoM"
'''
This functions plots multiple images in one grid
Input: A grid of images made using torch.utils
Output: The plot (matplotlib) for these images in one grid
'''
def imshow(img, text=None, should_save=False):
    """Display a channel-first image tensor with matplotlib.

    An optional caption *text* is drawn in a white box over the image.
    (*should_save* is accepted for API compatibility but unused.)
    """
    pixels = img.numpy()
    plt.axis("off")
    if text:
        plt.text(75, 8, text, style='italic', fontweight='bold',
                 bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10})
    # Tensors are (C, H, W); matplotlib expects (H, W, C).
    plt.imshow(np.transpose(pixels, (1, 2, 0)))
    plt.show()
# + id="8IAHXgkezbpy"
'''
This functions plots the loss for that particular iteration in an epoch
Input: Iteration Number (iteration) and the Loss (loss)
Output: The matplotlib plot for the loss
'''
def show_plot(iteration,loss):
    # Simple line plot of loss values against iteration numbers, shown immediately.
    plt.plot(iteration,loss)
    plt.show()
# + id="jifCutAg1mxw"
'''
This functions predicts the state of a given image
Input: The image (image)
Output: The value of the state of the image (0 for fake and 1 for real)
Return Type: Integer
'''
def predict_image(image):
    """Classify a single image file as fake (0) or real (1).

    Loads the saved CFFN ("net.pth") and classifier ("net2.pth") weights,
    runs the image through both networks, and returns the argmax class index.
    """
    cffn = SiameseNetwork(models.resnet50(pretrained=True),1000).cuda()
    classifier = NeuralNet().cuda()
    cffn.load_state_dict(torch.load("net.pth"))
    classifier.load_state_dict(torch.load("net2.pth"))
    cffn.eval()
    classifier.eval()
    preprocess = transforms.Compose([transforms.Resize((100,100)),transforms.ToTensor()])
    img = Image.open(image)
    # Resize, convert to a float tensor and add the batch dimension.
    tensor = preprocess(img).float().unsqueeze_(0)
    batch = Variable(tensor).cuda()
    embedding = cffn([batch])
    scores = classifier(embedding[0])
    print(scores.data.cpu().numpy())
    return scores.data.cpu().numpy().argmax()
# + [markdown] id="VICbCScXzFuK"
# ### **Reading the images and pairing them**
# + id="atyPSYVkYvMc"
fake = glob("/content/drive/My Drive/faces/fake/*")
real = glob("/content/drive/My Drive/faces/real/*")
# + id="Dxzc1BpdZepn"
fake_ = [x.split("/")[-1] for x in fake]
real_ = [x.split("/")[-1] for x in real]
fake_ = ["fake_" + x for x in fake_]
# + colab={"base_uri": "https://localhost:8080/"} id="td6l60uaZvvU" outputId="06548051-cfe2-40bb-8769-db58ca45f61e"
start = time.time()
pairs_distinct = list(product(real_,fake_))
pairs_real = list(product(real_,real_))
pairs_fake = list(product(fake_,fake_))
end = time.time()
print(f'Time Taken: {timedelta(seconds=(end - start))}')
# + colab={"base_uri": "https://localhost:8080/"} id="rhbK4S-0CRs5" outputId="9610cea8-f902-410c-f73e-ca2949f40f37"
len(pairs_distinct) + len(pairs_fake) + len(pairs_real)
# + id="RadXNKYraD0V"
pairs = pairs_distinct + pairs_real + pairs_fake
pairs = sample(pairs, len(pairs))
pairs = list(set(pairs))
# + colab={"base_uri": "https://localhost:8080/"} id="K5PJGEAfaG_S" outputId="ae441c84-df07-44e9-bbb2-55d4d381b6f0"
ord('f'), ord('r')
# + colab={"base_uri": "https://localhost:8080/"} id="6GGJ9WFHaIi5" outputId="2c41cf6e-1cd5-4674-cb3a-0fda24f117d1"
start = time.time()
label_dict = {}
# Label each pair by the first letter of its two member names:
# ord('f') = 102, ord('r') = 114, so the possible sums are
# 204 (fake+fake), 216 (fake+real) and 228 (real+real).
for idx, pair in enumerate(pairs):
    l1, l2 = pair[0].split("_")[0][0], pair[1].split("_")[0][0]
    l_sum = ord(l1) + ord(l2)
    if l_sum == 204:
        label = 1 # both fake: a genuine (same-class) pair
    elif l_sum == 216:
        label = 0 # mixed real/fake: an impostor pair
    elif l_sum == 228:
        label = 1 # both real: a genuine (same-class) pair
    label_dict[idx] = label
end = time.time()
print(f'Time Taken: {timedelta(seconds=(end - start))}')
# + [markdown] id="7his_jz1zgIi"
# ### **Forming a Dataset**
# + id="vEzLD_VFaPUn"
BATCH_SIZE=64
NUMBER_EPOCHS=30
IMG_SIZE=100
# + id="UWnu-SRu0GQr"
import sklearn.model_selection
import pandas as pd
# + id="HzVY_HU4x2wj"
train_per = .70
test_per = .15
val_per = .15
total_len = len(pairs)
a, b = sklearn.model_selection.train_test_split(pairs, train_size=train_per, test_size=test_per + val_per)
b, c = sklearn.model_selection.train_test_split(b, train_size=test_per, test_size=val_per)
# + colab={"base_uri": "https://localhost:8080/"} id="7kPSvT7PkJXQ" outputId="0449080b-af8b-49b0-b0fd-48dbfd75d77c"
len(a), len(b), len(c)
# + id="ekbS8d16aNEc"
class DFDataset(Dataset):
    """Pairwise deepfake dataset yielding (img1, img2, pair_label, class_labels).

    Each item is a pair of face images drawn from the real/fake folders.
    ``pair_label`` (looked up in ``lab_dict``) marks whether the two images
    belong to the same class; ``class_labels`` holds the per-image
    real(1)/fake(0) tags derived from the file-name prefix.
    """
    def __init__(self, root_dir, real_dir, fake_dir, pairs, lab_dict, transform=None):
        # root_dir: base folder containing the real/fake sub-folders.
        self.root_dir = root_dir
        self.real_dir = real_dir
        self.fake_dir = fake_dir
        self.transform = transform
        self.pairs = pairs
        self.lab_dict = lab_dict
    def __len__(self):
        return len(self.pairs)
    def __getitem__(self, idx):
        pair = self.pairs[idx]
        class_labels = []
        # Names are prefixed "real_..." / "fake_..."; fake files live under
        # fake_dir with the "fake_" prefix stripped from the stored name.
        if pair[0].split("_")[0] == "real":
            img1 = Image.open(f'{self.root_dir}/{self.real_dir}/{pair[0]}')
            class_labels.append(1)
        elif pair[0].split("_")[0] == "fake":
            path_ = pair[0].replace('fake_', '')
            img1 = Image.open(f'{self.root_dir}/{self.fake_dir}/{path_}')
            class_labels.append(0)
        if pair[1].split("_")[0] == "real":
            img2 = Image.open(f'{self.root_dir}/{self.real_dir}/{pair[1]}')
            class_labels.append(1)
        elif pair[1].split("_")[0] == "fake":
            path_ = pair[1].replace('fake_', '')
            img2 = Image.open(f'{self.root_dir}/{self.fake_dir}/{path_}')
            class_labels.append(0)
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
        # NOTE(review): idx indexes into *this dataset's* pair list, but
        # lab_dict was built over the full original `pairs` list. When a
        # sampled subset is passed in (as done for trainset/valset), the
        # looked-up label may not correspond to this pair — verify against
        # how the train/val splits are constructed.
        label = self.lab_dict[idx]
        return img1, img2, label, class_labels
# + id="CAAmK6JZaQ-D"
trainset = DFDataset('/content/drive/My Drive/faces', 'real', 'fake', sample(list(a), 10000), label_dict,
transform=transforms.Compose([transforms.Resize((100,100)),
transforms.ToTensor()
]))
trainloader = DataLoader(trainset,
shuffle=True,#whether randomly shuffle data in each epoch, but cannot let data in one batch in order.
batch_size=BATCH_SIZE)
valset = DFDataset('/content/drive/My Drive/faces', 'real', 'fake', sample(list(c), 10000), label_dict,
transform=transforms.Compose([transforms.Resize((100,100)),
transforms.ToTensor()
]))
valloader = DataLoader(valset,
shuffle=True,#whether randomly shuffle data in each epoch, but cannot let data in one batch in order.
batch_size=BATCH_SIZE)
# + colab={"base_uri": "https://localhost:8080/", "height": 132} id="l03d7g2Zahd5" outputId="70dfd379-f1b2-413b-e97f-137bb711554c"
vis_dataloader = DataLoader(trainset,
shuffle=True,
batch_size=8)
dataiter = iter(vis_dataloader)
example_batch = next(dataiter)
concatenated = torch.cat((example_batch[0],example_batch[1]),0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())
# + [markdown] id="B9N5nl5E0RvT"
# ### **Building a CFFN**
# + id="I9-7De2VbCp7"
class SiameseNetwork(nn.Module):
    """Siamese (twin) network: a shared CNN backbone followed by three
    fully-connected layers projecting features to a 128-d embedding."""
    def __init__(self, model, n):
        # model: backbone module; n: size of the backbone's flattened output.
        super(SiameseNetwork, self).__init__()
        self.cnn1 = model
        self.fc1 = nn.Linear(n, 500)
        self.fc2 = nn.Linear(500, 500)
        self.fc3 = nn.Linear(500, 128)
    def _embed(self, x):
        # Run a single input through the shared backbone and projection head.
        features = self.cnn1(x)
        features = features.view(features.size()[0], -1)
        features = F.relu(self.fc1(features))
        features = F.relu(self.fc2(features))
        return self.fc3(features)
    def forward(self, inputs):
        # Every element of *inputs* is embedded with the same shared weights.
        return [self._embed(x) for x in inputs]
# + colab={"base_uri": "https://localhost:8080/"} id="z-jnl2-wbErT" outputId="f160b9b4-5ec4-45d6-a0e2-a1cbf22091b5"
# custom_cnn = nn.Sequential(
# nn.ReflectionPad2d(1),
# nn.Conv2d(3, BATCH_SIZE, kernel_size=3),
# nn.ReLU(inplace=True),
# nn.BatchNorm2d(BATCH_SIZE),
# nn.Dropout2d(p=.2),
# nn.ReflectionPad2d(1),
# nn.Conv2d(BATCH_SIZE, BATCH_SIZE, kernel_size=3),
# nn.ReLU(inplace=True),
# nn.BatchNorm2d(BATCH_SIZE),
# nn.Dropout2d(p=.2),
# nn.ReflectionPad2d(1),
# nn.Conv2d(BATCH_SIZE, 32, kernel_size=3),
# nn.ReLU(inplace=True),
# nn.BatchNorm2d(32),
# nn.Dropout2d(p=.2),
# )
# custom_cnn_n = 32*100*100
#models.resnet50(pretrained=True)
#models.alexnet(pretrained=True)
#models.densenet(pretrained=True)
#models.vgg16(pretrained=True)
#models.resnet50(pretrained=True)
#models.googlenet(pretrained=True)
net = SiameseNetwork(models.resnet50(pretrained=True),1000).cuda()
# NOTE(review): criterion is defined but never used below — the custom
# ContrastiveLoss drives the CFFN training instead.
criterion = nn.CrossEntropyLoss() # use a Classification Cross-Entropy loss
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
counter = []
loss_history = []
iteration_number= 0
for epoch in range(0,NUMBER_EPOCHS):
    print("Epoch:", epoch, " start.")
    for i, data in enumerate(trainloader,0):
        img0, img1 , labels, _ = data
        img0, img1 , labels = img0.cuda(), img1.cuda() , labels.cuda()#move to GPU
        #print("epoch:", epoch, "No." , i, "th inputs", img0.data.size(), "labels", labels.data.size())
        optimizer.zero_grad()#clear the calculated grad in previous batch
        outputs = net([img0,img1])
        # Contrastive loss on the pair of 128-d embeddings.
        loss = ContrastiveLoss(labels, outputs[0], outputs[1])
        loss.backward()
        optimizer.step()
        print("Iteration number {}\n Current loss {}\n".format(i,loss.item()))
# + [markdown] id="LS6HAxUA0YqG"
# ### **Building a Classification Network**
# + id="JsH4H72rr8-x"
class NeuralNet(nn.Module):
    """Small MLP head mapping a 128-d embedding to 2-way class probabilities.

    NOTE(review): the ``in_features``/``out_features`` constructor arguments
    are accepted but ignored (layer sizes are hard-coded), and the forward
    pass applies ReLU *and* Softmax to the final layer even though training
    pairs it with CrossEntropyLoss, which expects raw logits — worth
    confirming upstream. Behaviour is preserved here as-is.
    """
    def __init__(self, in_features=4, out_features=3):
        super().__init__()
        self.fc1 = nn.Linear(in_features=128, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=2)
        self.activation = nn.Softmax(dim=1)
    def forward(self, X):
        hidden = F.relu(self.fc1(X))
        hidden = F.relu(self.fc2(hidden))
        final = F.relu(self.fc3(hidden))
        return self.activation(final)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="YsDUg3tVM1Qo" outputId="cb3cb974-edf6-4b8c-fabf-daeb2acd0564"
net2 = NeuralNet().cuda()
criterion = nn.CrossEntropyLoss() # use a Classification Cross-Entropy loss
# NOTE(review): the optimizer is built over net.parameters(), not
# net2.parameters(), so this loop never updates the classifier net2 —
# confirm whether optim.SGD(net2.parameters(), ...) was intended.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
counter = []
loss_history = []
iteration_number= 0
for epoch in range(0,NUMBER_EPOCHS):
    print("Epoch:", epoch, " start.")
    for i, data in enumerate(trainloader,0):
        img0, img1 , labels, class_labels = data
        img0, img1 , labels = img0.cuda(), img1.cuda() , labels.cuda()#move to GPU
        optimizer.zero_grad()#clear the calculated grad in previous batch
        outputs = net([img0,img1])
        out2 = net2(outputs[0])
        # class_labels[0] is the real/fake tag of each pair's first image.
        loss = criterion(out2, class_labels[0].cuda())
        loss.backward()
        optimizer.step()
        print("Iteration number {}\n Current loss {}\n".format(i,loss.item()))
        if i %10 == 0 :#show changes of loss value after each 10 batches
            iteration_number +=10
            counter.append(iteration_number)
            loss_history.append(loss.item())
            torch.save(net2.state_dict(),"/content/drive/My Drive/saved_models/resnet-clf.pth")
    #test the network after finish each epoch, to have a brief training result.
    correct_val = 0
    total_val = 0
    with torch.no_grad():#essential for testing
        for data in valloader:
            img0, img1 , labels, class_labels = data
            img0, img1 , labels = img0.cuda(), img1.cuda() , labels.cuda()
            outputs = net([img0,img1])
            out = net2(outputs[0])
            _, predicted = torch.max(out.data, 1)
            total_val += class_labels[0].cuda().size(0)
            correct_val += (predicted == class_labels[0].cuda()).sum().item()
        print('Accuracy of the network on the', total_val,': %d %%' % (100 * correct_val / total_val))
show_plot(counter,loss_history)
# + id="PRbgGOBa0iTs"
torch.save(net.state_dict(),"/content/drive/My Drive/saved_models/resnet-cffn.pth")
# + [markdown] id="Fv4aOpOe19kW"
# ### **Streamlit GUI**
# + id="IYaTnxv628FL" colab={"base_uri": "https://localhost:8080/"} outputId="914c9c7d-b7ca-4931-c879-107e6ec28d57"
#Writing all necessary functions in one cell
# %%writefile DeepFakeGUI.py
import streamlit as st
from itertools import permutations, product
from random import sample, choice, shuffle
from glob import glob
import time
from datetime import timedelta
from tqdm import tqdm
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torch.utils.data import DataLoader,Dataset
import matplotlib.pyplot as plt
import torchvision.utils
import numpy as np
import random
from PIL import Image
import torch
from torch.autograd import Variable
import PIL.ImageOps
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
class SiameseNetwork(nn.Module):
    """Siamese network used by the GUI: a shared ResNet50 backbone followed
    by three fully-connected layers that output a 128-d embedding."""
    def __init__(self, model, n):
        # model: backbone module; n: size of the backbone's flattened output.
        super(SiameseNetwork, self).__init__()
        self.cnn1 = model
        self.fc1 = nn.Linear(n, 500)
        self.fc2 = nn.Linear(500, 500)
        self.fc3 = nn.Linear(500, 128)
    def _embed(self, x):
        # One pass through the shared backbone and projection head.
        feat = self.cnn1(x)
        feat = feat.view(feat.size()[0], -1)
        feat = F.relu(self.fc1(feat))
        feat = F.relu(self.fc2(feat))
        return self.fc3(feat)
    def forward(self, inputs):
        # Each input is embedded with the same shared weights.
        return [self._embed(x) for x in inputs]
class NeuralNet(nn.Module):
    """Classifier head used by the GUI: 128-d embedding -> 2 class probabilities.

    NOTE(review): in_features/out_features are accepted but unused (sizes are
    hard-coded), and ReLU + Softmax are applied on the output layer; kept
    as-is to stay compatible with the saved weights.
    """
    def __init__(self, in_features=4, out_features=3):
        super().__init__()
        self.fc1 = nn.Linear(in_features=128, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=2)
        self.activation = nn.Softmax(dim=1)
    def forward(self, X):
        hidden = F.relu(self.fc1(X))
        hidden = F.relu(self.fc2(hidden))
        final = F.relu(self.fc3(hidden))
        return self.activation(final)
st.title("Deepfake Detection App")
st.subheader("This app takes in an image uploaded by the user and upon clicking the 'Verdict' button, displays if the uploaded image is fake or real" )
uploaded_file = st.file_uploader("Choose an image from your file directory", type=["jpg","png","jpeg"])
if uploaded_file is not None:
image = Image.open(uploaded_file)
net = SiameseNetwork(models.resnet50(pretrained=True),1000).cuda()
net2 = NeuralNet().cuda()
net.load_state_dict(torch.load("net.pth"))
net2.load_state_dict(torch.load("net2.pth"))
net.eval()
net2.eval()
st.image(image, caption='Uploaded Image.', use_column_width= True)
st.write("")
test_transforms = transforms.Compose([transforms.Resize((100,100)),transforms.ToTensor()])
image_tensor = test_transforms(image).float()
image_tensor = image_tensor.unsqueeze_(0)
input = Variable(image_tensor)
input = input.cuda()
out1=net([input])
output = net2(out1[0])
print(output.data.cpu().numpy())
prediction = output.data.cpu().numpy().argmax()
if(st.button('Verdict')):
if prediction == 0:
st.error("The image uploaded is fake")
elif prediction == 1:
st.success("The image uploaded is real")
# + id="3zUnCLSM3HKl" colab={"base_uri": "https://localhost:8080/"} outputId="4e6603be-05f6-4669-8162-54af5cbd9512"
#Command to run the GUI
# !streamlit run DeepFakeGUI.py & npx localtunnel --port 8501
| DeepFake_Image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Generating and implementing many-body equations
#
# ## Preliminaries
#
# Once more, let's start by importing wick&d and defining a Slater determinant reference
# +
import wicked as w
from IPython.display import display, Math, Latex
def latex(expr):
    """Render any object that exposes a latex() member function as display math."""
    rendered = expr.latex()
    display(Math(rendered))
w.reset_space()
w.add_space("o", "fermion", "occupied", ['i','j','k','l','m','n'])
w.add_space("v", "fermion", "unoccupied", ['a','b','c','d','e','f'])
wt = w.WickTheorem()
# -
# ## Generating equations for fully contracted terms
# In the previous notebook, we computed the coupled cluster energy expression
# \begin{equation}
# E = \langle \Phi | e^{-\hat{T}} \hat{H} e^{\hat{T}} | \Phi \rangle
# = E_0 + \sum_{i}^\mathbb{O} \sum_{a}^\mathbb{V} f^{a}_{i} t^{i}_{a} +
# \frac{1}{4} \sum_{ij}^\mathbb{O} \sum_{ab}^\mathbb{V}
# (t^{i j}_{a b} + 2 t^{i}_{a} t^{j}_{b}) v^{a b}_{i j}
# \end{equation}
# with the following code
E0 = w.op("E_0",[""])
F = w.utils.gen_op('f',1,'ov','ov')
V = w.utils.gen_op('v',2,'ov','ov')
H = E0 + F + V
T = w.op("t",["v+ o", "v+ v+ o o"])
Hbar = w.bch_series(H,T,2)
expr = wt.contract(Hbar,0,0)
expr
# First we convert the expression derived into a set of equations. You get back a dictionary that shows all the components to the equations. The vertical bar (`|`) in the key separates the lower (left) and upper (right) indices in the resulting expression
mbeq = expr.to_manybody_equations('r')
mbeq
# ## Converting equations to code
#
# From the equations generated above, you can get tensor contractions by calling the `compile` function on each individual term in the equations. Here we generate python code that uses numpy's `einsum` function to evaluate contractions. To use this code you will need to import `einsum`
# ```python
# from numpy import einsum
# ```
# and you will need to define a dictionary of tensors (`f["vo"],v["vvoo"],t["ov"],...`) of appropriate dimensions:
for eq in mbeq['|']:
print(eq.compile('einsum'))
# ## Many-body equations
# Suppose we want to compute the contributions to the coupled cluster residual equations
# \begin{equation}
# r^{i}_{a} = \langle \Phi| \{ \hat{a}^\dagger_{i} \hat{a}_a \} [\hat{F},\hat{T}_1] | \Phi \rangle
# \end{equation}
# Wick&d can compute this quantity using the corresponding **many-body representation** of the operator $[\hat{F},\hat{T}_1]$.
# If you expand the operator $[\hat{F},\hat{T}_1]$ into its second quantized operator components we can identify a particle-hole excitation term:
# \begin{equation}
# [\hat{F},\hat{T}_1] = g^{j}_{b} \{ \hat{a}^\dagger_{b} \hat{a}_j \} + \cdots
# \end{equation}
# From this expression we see that the residual $r_{a}^{i}$ is precisely the quantity we need to evaluate since
# \begin{equation}
# r^{i}_{a} = \langle \Phi| \{ \hat{a}^\dagger_{i} \hat{a}_a \} [\hat{F},\hat{T}_1] | \Phi \rangle
# = g^{j}_{b} \langle \Phi| \{ \hat{a}^\dagger_{i} \hat{a}_a \} \{ \hat{a}^\dagger_{b} \hat{a}_j \} | \Phi \rangle = g^{i}_{a}
# \end{equation}
# where in the last step we applied Wick's theorem to evaluate the expectation value.
#
# Let's start by computing $[\hat{F},\hat{T}_1]$ with Wick's theorem:
F = w.utils.gen_op('f',1,'ov','ov')
T1 = w.op("t",["v+ o"])
expr = wt.contract(w.commutator(F,T1),2,2)
latex(expr)
# Next, we call `to_manybody_equations` to generate many-body equations
mbeq = expr.to_manybody_equations('g')
print(mbeq)
# Out of all the terms, we select the terms that multiply the excitation operator $\{ \hat{a}^\dagger_{a} \hat{a}_i \}$ (`"o|v"`)
# +
mbeq_ov = mbeq["o|v"]
for eq in mbeq_ov:
latex(eq)
# -
# Lastly, we can compile these equations into code
for eq in mbeq_ov:
print(eq.compile('einsum'))
# ## Antisymmetrization of uncontracted operator indices
#
# To gain efficiency, Wick&d treats contractions involving inequivalent lines in a special way. Consider the following term contributing to the CCSD doubles amplitude equations that arises from $[\hat{V}_\mathrm{ovov},\hat{T}_2]$ (see the sixth term in Eq. (153) of Crawford and Schaefer, https://doi.org/10.1002/9780470125915.ch2)
# \begin{equation}
# r^{ij}_{ab} \leftarrow \langle \Phi| \{ \hat{a}^\dagger_{i}\hat{a}^\dagger_{j} \hat{a}_b \hat{a}_a \} [\hat{V}_\mathrm{ovov},\hat{T}_2] | \Phi \rangle = - P(ij)P(ab) \sum_{kc} \langle kb \| jc \rangle t^{ik}_{ac}
# \end{equation}
# where $P(pq)$ is an antisymmetric permutation operator [$P(pq)f(p,q) = f(p,q) - f(q,p)$].
# This expression corresponds to a **single diagram**, but algebraically it consists of **four terms** obtained by index permutations $i \leftrightarrow j$ and $a \leftrightarrow b$, so that the residual is antisymmetric with respect to separate permutations of upper and lower indices.
#
# Let's first take a look at what happens when we apply Wick's theorem with wick&d to the quantity $[\hat{V}_\mathrm{ovov},\hat{T}_2]$
T2 = w.op("t", ["v+ v+ o o"])
Vovov = w.op("v", ["o+ v+ v o"])
expr = wt.contract(w.commutator(Vovov, T2), 4, 4)
latex(expr)
# In wick&d the two-body part of $[\hat{V}_\mathrm{ovov},\hat{T}_2]$ gives us only a single term
# \begin{equation}
# [\hat{V}_\mathrm{ovov},\hat{T}_2]_\text{2-body} = - \sum_{abcijk} \langle kb \| jc \rangle t^{ik}_{ac} \{ \hat{a}^{ab}_{ij} \} = \sum_{abij} g^{ij}_{ab} \{ \hat{a}^{ab}_{ij} \}
# \end{equation}
# where the tensor $g^{ij}_{ab}$ is defined as
# \begin{equation}
# g^{ij}_{ab} = -\sum_{kc} \langle kb \| jc \rangle t^{ik}_{ac}
# \end{equation}
# **Note that contrary to $r^{ij}_{ab}$, the tensor** $g^{ij}_{ab}$ **does not have any specific index symmetry**. In other words, **you need to enforce the antisymmetry**.
# <!-- In particular, the many-body tensors generated by wick&d are not guaranteed to be antisymmetric, i -->
#
# This quantity is related to the CCSD residual contribution reported above in the following way
# \begin{equation}
# r^{ij}_{ab} \leftarrow \langle \Phi| \{ \hat{a}^\dagger_{i}\hat{a}^\dagger_{j} \hat{a}_b \hat{a}_a \} [\hat{V}_\mathrm{ovov},\hat{T}_2] | \Phi \rangle = g^{ij}_{ab} - g^{ji}_{ab} - g^{ij}_{ba} + g^{ji}_{ba} = P(ij)P(ab) g^{ij}_{ab}
# \end{equation}
#
# Therefore, this example shows an important distinction between the traditional projective equation (which yields $P(ij)P(ab) g^{ij}_{ab}$) vs. the many-body approach (which yields $g^{ij}_{ab}$).
#
# How is the difference between these two approaches reconciled in practice? When you solve the many-body equations, you must enforce the antisymmetry of the equations, which means that the residual contribution should be written as
# \begin{equation}
# \sum_{abij} g^{ij}_{ab} \{ \hat{a}^{ab}_{ij} \}
# = \frac{1}{4} \sum_{abij} (P(ij)P(ab) g^{ij}_{ab}) \{ \hat{a}^{ab}_{ij} \}
# \end{equation}
# The factor $\frac{1}{4}$ now brings this term in a form consistent with the prefactor we associate with the operator $ \{ \hat{a}^{ab}_{ij} \}$.
#
# When you ask wick&d to compile the many-body equation we again get a single term
for eq in expr.to_manybody_equations('g')['oo|vv']:
print(eq.compile('einsum'))
# This is done for efficiency, since the correct term [$P(ij)P(ab) g^{ij}_{ab}$] can be recovered by antisymmetrizing the residual **after adding all the contributions**, for example, in this way
# ```python
# def antisymmetrize_residual_2(Roovv):
# # antisymmetrize the oovv residual
# Roovv_anti = np.zeros((nocc,nocc,nvir,nvir))
# Roovv_anti += np.einsum("ijab->ijab",Roovv)
# Roovv_anti -= np.einsum("ijab->jiab",Roovv)
# Roovv_anti -= np.einsum("ijab->ijba",Roovv)
# Roovv_anti += np.einsum("ijab->jiba",Roovv)
# return Roovv_anti
# ```
| tutorials/04-GeneratingCode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: EDC 0.24.5 (Python3)
# language: python
# name: edc
# ---
# + papermill={"duration": 0.212974, "end_time": "2021-12-14T12:25:50.552310", "exception": false, "start_time": "2021-12-14T12:25:50.339336", "status": "completed"} tags=[]
from edc import print_info
print_info("geodb-manage")
# + papermill={"duration": 0.087366, "end_time": "2021-12-14T12:25:50.715324", "exception": false, "start_time": "2021-12-14T12:25:50.627958", "status": "completed"} tags=[]
from edc import check_compatibility
check_compatibility("user-2021.12", dependencies=["GEODB"])
# + [markdown] papermill={"duration": 0.079128, "end_time": "2021-12-14T12:25:50.879961", "exception": false, "start_time": "2021-12-14T12:25:50.800833", "status": "completed"} tags=[]
# ## Manage Collections in your GeoDB
#
#
# + [markdown] papermill={"duration": 0.07811, "end_time": "2021-12-14T12:25:51.041239", "exception": false, "start_time": "2021-12-14T12:25:50.963129", "status": "completed"} tags=[]
# ### Connecting to the GeoDB
# + papermill={"duration": 0.819361, "end_time": "2021-12-14T12:25:51.940741", "exception": false, "start_time": "2021-12-14T12:25:51.121380", "status": "completed"} tags=[]
from xcube_geodb.core.geodb import GeoDBClient
# + papermill={"duration": 0.090135, "end_time": "2021-12-14T12:25:52.111068", "exception": false, "start_time": "2021-12-14T12:25:52.020933", "status": "completed"} tags=[]
geodb = GeoDBClient()
# + papermill={"duration": 1.148277, "end_time": "2021-12-14T12:25:53.341396", "exception": false, "start_time": "2021-12-14T12:25:52.193119", "status": "completed"} tags=[]
# If you are logged in, this will tell you what account the system currently uses
geodb.whoami
# + papermill={"duration": 0.846636, "end_time": "2021-12-14T12:25:54.267888", "exception": false, "start_time": "2021-12-14T12:25:53.421252", "status": "completed"} tags=[]
# Lets get already existing collections
ds = geodb.get_my_collections()
ds
# + [markdown] papermill={"duration": 0.080472, "end_time": "2021-12-14T12:25:54.428772", "exception": false, "start_time": "2021-12-14T12:25:54.348300", "status": "completed"} tags=[]
# ### Creating collections
#
# Once the connection has been established you will be able to create a collection. The collection will contain standard properties (fields) plus custom properties
# which you can add at your discretion. Please use [PostGreSQL type definitions](https://www.postgresql.org/docs/11/datatype.html). We recommend staying simple with
# your data types as we have not tested every single type.
# + papermill={"duration": 1.855679, "end_time": "2021-12-14T12:25:56.363759", "exception": false, "start_time": "2021-12-14T12:25:54.508080", "status": "completed"} tags=[]
ds = geodb.get_my_collections()
if ds[(ds.database == geodb.whoami) & (ds.collection == 'land_use')].collection.count() == 0:
# Have a look at fiona feature schema
collections = {
"land_use":
{
"crs": 3794,
"properties":
{
"RABA_PID": "float",
"RABA_ID": "float",
"D_OD": "date"
}
}
}
geodb.create_collections(collections)
import geopandas
import os
gdf = geopandas.read_file(os.path.expanduser("~/.shared/notebooks/eurodatacube/notebooks/curated/data/sample/land_use.shp"))
geodb.insert_into_collection('land_use', gdf)
# + papermill={"duration": 1.612247, "end_time": "2021-12-14T12:25:58.057325", "exception": false, "start_time": "2021-12-14T12:25:56.445078", "status": "completed"} tags=[]
ds = geodb.get_my_collections(database=geodb.whoami)
ds
# + [markdown] papermill={"duration": 0.087264, "end_time": "2021-12-14T12:25:58.226683", "exception": false, "start_time": "2021-12-14T12:25:58.139419", "status": "completed"} tags=[]
# ### Loading data into a dataset
#
# Once the table has been created, you can load data into the dataset. The example below loads a shapefile. The attributes of the shapefile correspond to the dataset's properties.
#
# + papermill={"duration": 1.525797, "end_time": "2021-12-14T12:25:59.835935", "exception": false, "start_time": "2021-12-14T12:25:58.310138", "status": "completed"} tags=[]
import geopandas
import os
gdf = geopandas.read_file(os.path.expanduser("~/.shared/notebooks/eurodatacube/notebooks/curated/data/sample/land_use.shp"))
gdf
# + papermill={"duration": 10.717742, "end_time": "2021-12-14T12:26:10.638255", "exception": false, "start_time": "2021-12-14T12:25:59.920513", "status": "completed"} tags=[]
geodb.insert_into_collection('land_use', gdf)
# + papermill={"duration": 3.770818, "end_time": "2021-12-14T12:26:14.557504", "exception": false, "start_time": "2021-12-14T12:26:10.786686", "status": "completed"} tags=[]
geodb.get_collection('land_use', query="raba_id=eq.7000")
# + [markdown] papermill={"duration": 0.095116, "end_time": "2021-12-14T12:26:14.749915", "exception": false, "start_time": "2021-12-14T12:26:14.654799", "status": "completed"} tags=[]
# ### Delete from a Collection
# + papermill={"duration": 1.994597, "end_time": "2021-12-14T12:26:16.838196", "exception": false, "start_time": "2021-12-14T12:26:14.843599", "status": "completed"} tags=[]
geodb.delete_from_collection('land_use', query="raba_id=eq.7000")
# + papermill={"duration": 3.473795, "end_time": "2021-12-14T12:26:20.408203", "exception": false, "start_time": "2021-12-14T12:26:16.934408", "status": "completed"} tags=[]
geodb.get_collection('land_use', query="raba_id=eq.7000")
# + [markdown] papermill={"duration": 0.096835, "end_time": "2021-12-14T12:26:20.607418", "exception": false, "start_time": "2021-12-14T12:26:20.510583", "status": "completed"} tags=[]
# ### Updating a Collection
# + papermill={"duration": 3.824186, "end_time": "2021-12-14T12:26:24.526337", "exception": false, "start_time": "2021-12-14T12:26:20.702151", "status": "completed"} tags=[]
geodb.get_collection('land_use', query="raba_id=eq.1300")
# + papermill={"duration": 3.751145, "end_time": "2021-12-14T12:26:28.375291", "exception": false, "start_time": "2021-12-14T12:26:24.624146", "status": "completed"} tags=[]
geodb.update_collection('land_use', query="raba_id=eq.1300", values={'d_od': '2000-01-01'})
# + papermill={"duration": 4.034741, "end_time": "2021-12-14T12:26:32.531352", "exception": false, "start_time": "2021-12-14T12:26:28.496611", "status": "completed"} tags=[]
geodb.get_collection('land_use', query="raba_id=eq.1300")
# + [markdown] papermill={"duration": 0.102994, "end_time": "2021-12-14T12:26:32.736008", "exception": false, "start_time": "2021-12-14T12:26:32.633014", "status": "completed"} tags=[]
# ### Managing Properties of a Collection
# + papermill={"duration": 0.674813, "end_time": "2021-12-14T12:26:33.511085", "exception": false, "start_time": "2021-12-14T12:26:32.836272", "status": "completed"} tags=[]
geodb.get_my_collections()
# + papermill={"duration": 1.554997, "end_time": "2021-12-14T12:26:35.172211", "exception": false, "start_time": "2021-12-14T12:26:33.617214", "status": "completed"} tags=[]
geodb.get_properties('land_use')
# + papermill={"duration": 1.606413, "end_time": "2021-12-14T12:26:36.883037", "exception": false, "start_time": "2021-12-14T12:26:35.276624", "status": "completed"} tags=[]
geodb.add_property('land_use', "test_prop", 'integer')
# + papermill={"duration": 1.53222, "end_time": "2021-12-14T12:26:38.523965", "exception": false, "start_time": "2021-12-14T12:26:36.991745", "status": "completed"} tags=[]
geodb.get_properties('land_use')
# + papermill={"duration": 7.145428, "end_time": "2021-12-14T12:26:45.776636", "exception": false, "start_time": "2021-12-14T12:26:38.631208", "status": "completed"} tags=[]
geodb.drop_property('land_use', 'test_prop')
# + papermill={"duration": 1.452704, "end_time": "2021-12-14T12:26:47.348040", "exception": false, "start_time": "2021-12-14T12:26:45.895336", "status": "completed"} tags=[]
geodb.get_properties('land_use')
# + papermill={"duration": 1.623618, "end_time": "2021-12-14T12:26:49.083067", "exception": false, "start_time": "2021-12-14T12:26:47.459449", "status": "completed"} tags=[]
geodb.add_properties('land_use', properties={'test1': 'integer', 'test2': 'date'})
# + papermill={"duration": 1.507613, "end_time": "2021-12-14T12:26:50.698384", "exception": false, "start_time": "2021-12-14T12:26:49.190771", "status": "completed"} tags=[]
geodb.get_properties('land_use')
# + papermill={"duration": 5.249575, "end_time": "2021-12-14T12:26:56.061212", "exception": false, "start_time": "2021-12-14T12:26:50.811637", "status": "completed"} tags=[]
geodb.drop_properties('land_use', properties=['test1', 'test2'])
# + papermill={"duration": 1.748808, "end_time": "2021-12-14T12:26:57.924862", "exception": false, "start_time": "2021-12-14T12:26:56.176054", "status": "completed"} tags=[]
geodb.get_properties('land_use')
# + papermill={"duration": 1.568013, "end_time": "2021-12-14T12:26:59.606114", "exception": false, "start_time": "2021-12-14T12:26:58.038101", "status": "completed"} tags=[]
geodb.drop_collection('land_use')
# + papermill={"duration": 0.111196, "end_time": "2021-12-14T12:26:59.829658", "exception": false, "start_time": "2021-12-14T12:26:59.718462", "status": "completed"} tags=[]
| notebooks/curated/EDC_GeoDB_1_manage-datasets_v11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2
# language: python
# name: tf2
# ---
# +
# # %load /home/sjkim/.jupyter/head.py
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
from importlib import reload
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
# seaborn
#import seaborn as sns
#sns.set( style = 'white', font_scale = 1.7)
#sns.set_style('ticks')
#plt.rcParams['savefig.dpi'] = 200
# font for matplotlib
#import matplotlib
#import matplotlib.font_manager as fm
#fm.get_fontconfig_fonts()
#font_location = '/usr/share/fonts/truetype/nanum/NanumGothicBold.ttf'
#font_name = fm.FontProperties(fname=font_location).get_name()
#matplotlib.rc('font', family=font_name)
# -
# ## Keras in Tensorflow 2.0
# +
##############################################
# Modeling
##############################################
from tensorflow.keras import layers, models
class ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
super().__init__()
self.hidden = layers.Dense(Nh)
self.last = layers.Dense(Nout)
def call(self, x):
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
#x = layers.Input(shape=(Nin,))
h = relu(self.hidden(x))
y = softmax(self.last(h))
return y
##############################################
# Data
##############################################
import numpy as np
from tensorflow.keras import datasets # mnist
from tensorflow.keras import utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = utils.to_categorical(y_train)
Y_test = utils.to_categorical(y_test)
L, W, H = X_train.shape
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
##############################################
# Plotting
##############################################
import matplotlib.pyplot as plt
def plot_acc(history, title=None):
# summarize history for accuracy
if not isinstance(history, dict):
history = history.history
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
if title is not None:
plt.title(title)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
def plot_loss(history, title=None):
# summarize history for loss
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
##############################################
# Main
##############################################
Nin = 784
Nh = 100
number_of_class = 10
Nout = number_of_class
model = ANN_models_class(Nin, Nh, Nout)
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
(X_train, Y_train), (X_test, Y_test) = Data_func()
##############################################
# Training
##############################################
history = model.fit(X_train, Y_train, epochs=5, batch_size=100, validation_split=0.2)
performace_test = model.evaluate(X_test, Y_test, batch_size=100, verbose=0)
print('Test Loss and Accuracy ->', performace_test)
plot_loss(history)
plt.show()
plot_acc(history)
plt.show()
# -
# ## Tensorflow 2.0 with Keras IO
# - Reference: https://www.tensorflow.org/tutorials/quickstart/advanced
# +
##############################################
# Modeling
##############################################
import tensorflow as tf2
from tensorflow.keras import layers, models
class ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
super().__init__()
self.hidden = layers.Dense(Nh)
self.last = layers.Dense(Nout)
def call(self, x):
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
#x = layers.Input(shape=(Nin,))
h = relu(self.hidden(x))
y = softmax(self.last(h))
return y
##############################################
# Data
##############################################
import numpy as np
from tensorflow.keras import datasets # mnist
from tensorflow.keras import utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = utils.to_categorical(y_train)
Y_test = utils.to_categorical(y_test)
L, W, H = X_train.shape
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
##############################################
# Plotting
##############################################
import matplotlib.pyplot as plt
def plot_acc(history, title=None):
# summarize history for accuracy
if not isinstance(history, dict):
history = history.history
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
if title is not None:
plt.title(title)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
def plot_loss(history, title=None):
# summarize history for loss
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
##############################################
# Main
##############################################
Nin = 784
Nh = 100
number_of_class = 10
Nout = number_of_class
model = ANN_models_class(Nin, Nh, Nout)
# model.build(input_shape=[None,Nin])
(X_train, Y_train), (X_test, Y_test) = Data_func()
train_ds = tf2.data.Dataset.from_tensor_slices(
(X_train, Y_train)).shuffle(10000).batch(100)
test_ds = tf2.data.Dataset.from_tensor_slices(
(X_test, Y_test)).batch(100)
train_loss = tf2.keras.metrics.Mean(name='train_loss')
train_accuracy = tf2.keras.metrics.CategoricalAccuracy(name='train_accuracy')
test_loss = tf2.keras.metrics.Mean(name='test_loss')
test_accuracy = tf2.keras.metrics.CategoricalAccuracy(name='test_accuracy')
class History:
def __init__(self):
self.history = {'accuracy': [], 'loss': [], 'val_accuracy': [], 'val_loss': []}
history = History()
Optimizer = tf2.keras.optimizers.Adam(learning_rate = 0.01)
Loss_object = tf2.keras.losses.CategoricalCrossentropy()
for epoch in range(5):
for images, labels in train_ds:
with tf2.GradientTape() as tape:
predictions = model(images)
loss = Loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
for images, labels in test_ds:
predictions = model(images)
t_loss = Loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
history.history['accuracy'].append(train_accuracy.result())
history.history['loss'].append(train_loss.result())
history.history['val_accuracy'].append(test_accuracy.result())
history.history['val_loss'].append(test_loss.result())
template = 'Epoch {}, Loss: {:.2f}, Accuracy: {:.2f}, Test Loss: {:.2f}, Test Accuracy: {:.2f}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
############################################
# Training
##############################################
plot_loss(history)
plt.show()
plot_acc(history)
plt.show()
# +
##############################################
# Modeling
##############################################
import tensorflow as tf2
from tensorflow.keras import layers, models
class _ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
hidden = layers.Dense(Nh)
output = layers.Dense(Nout)
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
x = layers.Input(shape=(Nin,))
h = relu(hidden(x))
y = softmax(output(h))
super().__init__(x, y)
self.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
class ANN_models_class(models.Model):
def __init__(self, Nin, Nh, Nout):
# Prepare network layers and activate functions
super().__init__()
self.hidden = layers.Dense(Nh)
self.last = layers.Dense(Nout)
def call(self, x):
relu = layers.Activation('relu')
softmax = layers.Activation('softmax')
# Connect network elements
#x = layers.Input(shape=(Nin,))
h = relu(self.hidden(x))
y = softmax(self.last(h))
return y
##############################################
# Data
##############################################
import numpy as np
from tensorflow.keras import datasets # mnist
from tensorflow.keras import utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = utils.to_categorical(y_train)
Y_test = utils.to_categorical(y_test)
L, W, H = X_train.shape
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
##############################################
# Plotting
##############################################
import matplotlib.pyplot as plt
def plot_acc(history, title=None):
# summarize history for accuracy
if not isinstance(history, dict):
history = history.history
plt.plot(history['accuracy'])
plt.plot(history['val_accuracy'])
if title is not None:
plt.title(title)
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
def plot_loss(history, title=None):
# summarize history for loss
if not isinstance(history, dict):
history = history.history
plt.plot(history['loss'])
plt.plot(history['val_loss'])
if title is not None:
plt.title(title)
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Verification'], loc=0)
# plt.show()
##############################################
# Main
##############################################
Nin = 784
Nh = 100
number_of_class = 10
Nout = number_of_class
model = ANN_models_class(Nin, Nh, Nout)
# model.build(input_shape=[None,Nin])
(X_train, Y_train), (X_test, Y_test) = Data_func()
train_ds = tf2.data.Dataset.from_tensor_slices(
(X_train, Y_train)).shuffle(10000).batch(100)
test_ds = tf2.data.Dataset.from_tensor_slices(
(X_test, Y_test)).batch(100)
train_loss = tf2.keras.metrics.Mean(name='train_loss')
train_accuracy = tf2.keras.metrics.CategoricalAccuracy(name='train_accuracy')
test_loss = tf2.keras.metrics.Mean(name='test_loss')
test_accuracy = tf2.keras.metrics.CategoricalAccuracy(name='test_accuracy')
class History:
def __init__(self):
self.history = {'accuracy': [], 'loss': [], 'val_accuracy': [], 'val_loss': []}
history = History()
@tf2.function
def ep_train(xx, yy):
with tf2.GradientTape() as tape:
yp = model(xx)
loss = Loss_object(yy, yp)
gradients = tape.gradient(loss, model.trainable_variables)
Optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(yy, yp)
@tf2.function
def ep_test(xx, yy):
yp = model(xx)
t_loss = Loss_object(yy, yp)
test_loss(t_loss)
test_accuracy(yy, yp)
Optimizer = tf2.keras.optimizers.Adam(learning_rate = 0.01)
Loss_object = tf2.keras.losses.CategoricalCrossentropy()
for epoch in range(5):
for xx, yy in train_ds:
ep_train(xx, yy)
for images, labels in test_ds:
ep_test(xx, yy)
history.history['accuracy'].append(train_accuracy.result())
history.history['loss'].append(train_loss.result())
history.history['val_accuracy'].append(test_accuracy.result())
history.history['val_loss'].append(test_loss.result())
template = 'Epoch {}, Loss: {:.2f}, Accuracy: {:.2f}, Test Loss: {:.2f}, Test Accuracy: {:.2f}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states()
############################################
# Training
##############################################
plot_loss(history)
plt.show()
plot_acc(history)
plt.show()
# -
| tf2/ex2_1/tf2_full_nb_ex2_1_ann_mnist_cl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building Footprints as Vectors
#
# This notebook demonstrates converting the building footprint raster that
# is the output of the Analytics feed into a vector dataset.
#
# It demonstrates the following techniques for converting to vector:
# 1. GDAL CLI
# 2. Rasterio (no processing)
# 3. Rasterio (with simplification)
# 4. Rasterio (arbitrary function, filtering and simplification as example)
# +
import os
from pprint import pprint
import fiona
import matplotlib.pyplot as plt
from planet import api
from planet.api.utils import write_to_file
import rasterio
from rasterio import features as rfeatures
from rasterio.enums import Resampling
from rasterio.plot import show
import shapely
from shapely.geometry import shape as sshape
# +
# if your Planet API Key is not set as an environment variable, you can paste it below
API_KEY = os.environ.get('PL_API_KEY', 'PASTE_YOUR_KEY_HERE')
analytics_client = api.ClientV1(api_key=API_KEY)
# -
# ## Obtain Analytics Raster
#
# ### Identify building footprint feed feature for download
#
# We want to download the most recent feature from the building footprint detection feed for Sazgin, Turkey.
# +
# # uncomment to get feed ids
# feeds = analytics_client.list_analytic_feeds({}).get()
# for d in feeds['data']:
# print('{} ({}):\n\r{}\n\r'.format(d['id'], d['created'], d['description']))
# +
# # uncomment to get subscription ids
# FEED_ID = 'b442c53b-fc72-4bee-bab4-0b7aa318ccd9'
# subscriptions = analytics_client.list_analytic_subscriptions(FEED_ID).get()
# for d in subscriptions['data']:
# print('{} ({}):\n\r{}\n\r'.format(d['id'], d['created'], d['title']))
# -
# building footprints in Sazgin, Turkey
SUBSCRIPTION_ID = '02c4f912-090f-45aa-a18b-ac4a55e4b9ba'
# +
# Get subscription details
# subscription_info = analytics_client.get_subscription_info(SUBSCRIPTION_ID).get()
# pprint(subscription_info)
# -
results = analytics_client.list_collection_features(SUBSCRIPTION_ID).get()
features = results['features']
print('{} features in collection'.format(len(features)))
# sort features by acquisition date and take latest feature
features.sort(key=lambda k: k['properties']['first_acquired'])
feature = features[-1]
print(feature['properties']['first_acquired'])
# ### Download Quad Raster
RESOURCE_TYPE = 'target-quad'
# +
def create_save_dir(root_dir='data'):
save_dir = root_dir
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
return save_dir
dest = 'data/footprints'
create_save_dir(dest)
# +
def download_feature(feature, subscription_id, resource_type, dest=dest):
# making a long name shorter
get_resource = analytics_client.get_associated_resource_for_analytic_feature
resource = get_resource(subscription_id, feature['id'], resource_type)
filename = download_resource(resource, dest)
return filename
def download_resource(resource, dest, overwrite=False):
writer = write_to_file(dest, overwrite=overwrite)
writer(resource)
filename = os.path.join(dest, resource.name)
print('file saved to: {}'.format(filename))
return filename
filename = download_feature(feature, SUBSCRIPTION_ID, RESOURCE_TYPE)
# -
# ### Visualize Footprints Image
#
# The output of the analytics building footprint detection is a boolean image where building pixels are given a value of True and non-building pixels are given a value of False.
# +
def _open(filename, factor=1):
with rasterio.open(filename) as dataset:
height = int(dataset.height / factor)
width = int(dataset.width / factor)
data = dataset.read(
out_shape=(dataset.count, height, width)
)
return data
def open_bool(filename, factor=1):
data = _open(filename, factor=factor)
return data[0,:,:]
def get_figsize(factor):
return tuple(2 * [int(25/factor)])
factor = 1
figsize = (15, 15)
roads = open_bool(filename, factor=factor)
fig = plt.figure(figsize=figsize)
# show(roads, title="footprints", cmap="binary")
show(roads[2500:3000, 0:500], title="footprints", cmap="binary")
# -
# ## Convert Buildings to Vector Features
#
# ### GDAL Command-Line Interface (CLI)
#
# GDAL provides a python script that can be run via the CLI. It is quite easy to run and fast.
# +
def get_layer_name(filename):
# get the default layer output layer name based on the
# output filename. I wish there was a way to specify
# the output layer name but attempts have failed thus far.
return filename.split('/')[-1].split('.')[0]
gdal_tmp_output_filename = os.path.join(dest, 'test_gdal_all.shp')
gdal_tmp_output_layer_name = get_layer_name(gdal_tmp_output_filename)
gdal_output_filename = os.path.join(dest, 'test_gdal.shp')
gdal_output_layer_name = get_layer_name(gdal_output_filename)
# -
# convert the binary image into polygons
# creates polygons for building footprints as well as regions between
# and around building footprints
# !gdal_polygonize.py $filename $gdal_tmp_output_filename
# get number of features, this includes inside and outside building footprints
# !ogrinfo -so $gdal_tmp_output_filename $gdal_tmp_output_layer_name | grep 'Feature Count'
# get number of building footprint features
# building footprints are associated with image value (DN) of 255
# !ogrinfo -so $gdal_tmp_output_filename -sql "SELECT * FROM $gdal_tmp_output_layer_name WHERE DN=255" \
# | grep 'Feature Count'
# create a new shapefile with only building footprints
# !ogr2ogr -sql "SELECT * FROM $gdal_tmp_output_layer_name WHERE DN=255" \
# $gdal_output_filename $gdal_tmp_output_filename
# confirm the number of building footprint features
# !ogrinfo -so $gdal_output_filename -sql "SELECT * FROM $gdal_output_layer_name WHERE DN=255" \
# | grep 'Feature Count'
# ### Rasterio
#
# In this section we use rasterio to convert the binary buildings raster into a vector dataset. The vectors are written to disk as a shapefile. The shapefile can be imported into geospatial programs such as QGIS or ArcGIS for visualization and further processing.
#
# This is basic conversion to vector shapes. No smoothing to remove pixel edges, or conversion to the road centerlines is performed here.
# +
def buildings_as_vectors(filename):
with rasterio.open(filename) as dataset:
buildings = dataset.read(1)
building_mask = buildings == 255 # mask non-building pixels
# transforms roads features to image crs
building_shapes = rfeatures.shapes(buildings, mask=building_mask, transform=dataset.transform)
building_geometries = (s for s, _ in building_shapes)
crs = dataset.crs
return (building_geometries, crs)
def save_as_shapefile(output_filename, geometries, crs):
driver='ESRI Shapefile'
schema = {'geometry': 'Polygon', 'properties': []}
with fiona.open(output_filename, mode='w', driver=driver, schema=schema, crs=crs) as c:
count = 0
for g in geometries:
count += 1;
c.write({'geometry': g, 'properties': {}})
print('wrote {} geometries to {}'.format(count, output_filename))
building_geometries, crs = buildings_as_vectors(filename)
output_filename = os.path.join(dest, 'test_rasterio.shp')
save_as_shapefile(output_filename, building_geometries, crs)
# -
# ### Rasterio - Simplifying
#
# In this section, we use `shapely` to simplify the building footprints so we don't have a million pixel edges.
# +
def buildings_as_vectors_with_simplification(filename):
with rasterio.open(filename) as dataset:
buildings = dataset.read(1)
building_mask = roads == 255 # mask non-building pixels
# we skip transform on vectorization so we can perform filtering in pixel space
building_shapes = rfeatures.shapes(buildings, mask=building_mask)
building_geometries = (s for s, _ in building_shapes)
geo_shapes = (sshape(g) for g in building_geometries)
# simplify so we don't have a million pixel edge points
# value of 1 (in units of pixels) determined by visual comparison to non-simplified
tolerance = 1
geo_shapes = (g.simplify(tolerance, preserve_topology=False)
for g in geo_shapes)
# apply image transform
# rasterio transform: (a, b, c, d, e, f, 0, 0, 1), c and f are offsets
# shapely: a b d e c/xoff f/yoff
d = dataset.transform
shapely_transform = [d[0], d[1], d[3], d[4], d[2], d[5]]
proj_shapes = (shapely.affinity.affine_transform(g, shapely_transform)
for g in geo_shapes)
building_geometries = (shapely.geometry.mapping(s) for s in proj_shapes)
crs = dataset.crs
return (building_geometries, crs)
building_geometries_simp, crs = buildings_as_vectors_with_simplification(filename)
output_filename = os.path.join(dest, 'test_rasterio_simp.shp')
save_as_shapefile(output_filename, building_geometries_simp, crs)
# -
# ### Rasterio - Arbitrary Calculation
#
# In this section we get a little bit fancy and set up the rasterio vectorization function so that it can take any calculation function, as long as that function has a generator of `rasterio.shape` as input and a generator of `rasterio.shape` as output. We will use this to filter and simplify building footprint shapes.
# +
def buildings_as_vectors_proc(filename, proc_fcn):
with rasterio.open(filename) as dataset:
buildings = dataset.read(1)
building_mask = roads == 255 # mask non-building pixels
# we skip transform on vectorization so we can perform filtering in pixel space
building_shapes = rfeatures.shapes(buildings, mask=building_mask)
building_geometries = (s for s, _ in building_shapes)
geo_shapes = (sshape(g) for g in building_geometries)
# apply arbitrary processing function
geo_shapes = proc_fcn(geo_shapes)
# apply image transform
# rasterio transform: (a, b, c, d, e, f, 0, 0, 1), c and f are offsets
# shapely: a b d e c/xoff f/yoff
d = dataset.transform
shapely_transform = [d[0], d[1], d[3], d[4], d[2], d[5]]
proj_shapes = (shapely.affinity.affine_transform(g, shapely_transform)
for g in geo_shapes)
building_geometries = (shapely.geometry.mapping(s) for s in proj_shapes)
crs = dataset.crs
return (building_geometries, crs)
def filter_and_simplify_footprints(footprints):
# filter to shapes consisting of 6 or more pixels
min_pixel_size = 6
geo_shapes = (s for s in footprints if s.area >= min_pixel_size)
# simplify so we don't have a million pixel edge points
# value of 1 (in units of pixels) determined by visual comparison to non-simplified
tolerance = 1
geo_shapes = (s.simplify(tolerance, preserve_topology=False)
for s in geo_shapes)
return geo_shapes
building_geometries_simp, crs = buildings_as_vectors_proc(filename, filter_and_simplify_footprints)
output_filename = os.path.join(dest, 'test_rasterio_proc.shp')
save_as_shapefile(output_filename, building_geometries_simp, crs)
| jupyter-notebooks/analytics-snippets/building_footprints_as_vector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Introductory applied machine learning (INFR10069)
# # Lab 0: Introduction
# To complete this lab you should:
#
# * Set up your IAML environment ready for the course
# * __Read the text__ and run all the cells in this notebook and have a play with all the objects created (Don't worry about messing up this notebook - you can always download another copy!)
# * __Attempt all the Exercises at the bottom of the notebook__
# # Setting up
# Instructions for setting up can be found in the GitHub project [README](https://github.com/JamesOwers/iaml2017). Please follow the instructions to the letter, then start a jupyter server and open this file to get started:
#
# ```{bash}
# source activate iaml
# jupyter notebook
# # Your browser should open, navigate to this file and open it
# ```
# # Introducing main packages
# In the following sections we introduce the main **packages** we will be using in this course. If you think that you are already familiar with them, please feel free to skip to the exercises at the end of this notebook.
#
# The packages that we will use are the following:
# * [numpy](http://www.numpy.org/): scientific computing by using array objects
#
#
# * [pandas](http://pandas.pydata.org/): data structures and data analysis tools
#
#
# * [matplotlib](http://matplotlib.org/): plotting library (similar to MATLAB's plot interface)
#
#
# * [seaborn](https://seaborn.github.io/index.html): data visualisation library which works on top of matplotlib
#
# Throughout the course, we will also make heavy use of [scikit-learn](http://scikit-learn.org/stable/) which is a machine learning library implementing many learning algorithms and useful tools. This is introduced in Lab 2.
# ## IPython / Jupyter environment
# Basic knowledge of `python` is assumed for this course. If you haven't used python before, you are strongly advised to familiarise yourselves with basic python syntax and working in the Jupyter environment. There are many excellent tutorials available on the web and you can choose the ones you like the most. If you are not sure which ones to choose, these are good starting points:
#
# [Introduction to Python for scientific computing](http://bebi103.caltech.edu/2015/tutorials/t1a_intro_to_python.html)
#
# [Introduction to Jupyter notebooks](http://bebi103.caltech.edu/2015/tutorials/t0b_intro_to_jupyter_notebooks.html)
#
# [Python/Numpy tutorial](http://cs231n.github.io/python-numpy-tutorial/#python)
# (A large part of Lab 1 is based on this tutorial)
# **IMPORTANT**
#
# All labs and assignments will be done in Jupyter Notebooks, so do ask questions if you're struggling.
# ### Basic operation and shortcuts
# There are two modes of selection when inside a Jupyter Notebook:
# 1. Command Mode - When you hit up/down arrows you select different cells. Hit enter to enter edit mode.
# 1. Edit Mode - You can edit the cell. Hit Esc to enter Command Mode again.
# In Command Mode (cell highlighted blue):
# ```
# h - bring up help window (contains full list of shortcuts!)
# <enter> - Enter Edit Mode
# a - create new cell above selected
# b - create cell below selected
# d, d - delete selected cell
# ```
#
# In Edit Mode (cell highlighted green):
# ```
# <esc> - Enter Command Mode
# <shift> + <enter> - Run cell and move to cell below in Command Mode
# <ctrl> + <enter> - Run cell in place
#
# ```
# ### Printing and cell output
# A Jupyter notebook is a collection of code and text cells. Each code cell can be run and the output is given below the cell. A number appears at the side of the cell to indicate the order in which the cells were run.
#
# All objects created by running cells are stored in the kernel running in the background. You can restart the kernel by using the Kernel menu at the top of the notebook.
#
# You'll notice that the notebook will try to display the last thing in the cell, even if you don't use a print statement. However, if you want to print multiple things from one cell, you need to use multiple print statements (or multiple cells).
a = 1
b = 2
a
a
b
print(a)
print(b)
# ### Connecting to the Kernel
# Sometimes we might want to connect to the same notebook kernel from multiple frontends. This is useful for excecuting quick calculations, or checking objects currently stored in the kernel, without having to create a new cell in the notebook.
#
# The `%connect_info` line magic displays the information necessary to connect another frontend to the Notebook kernel.
# %connect_info
# For example, if this is the only kernel you have started, go to your terminal and execute:
#
# * `jupyter console --existing`
#
# This will bring up a console that is connected to the same kernel as this notebook. This can be handy if you want to do some exploration of objects without creating new cells in the workbook.
# #### **WARNING**
# If you connect to a kernel via the console, be careful you do not kill the kernel when quitting the console. You must use:
#
# * `<ctrl> + d`
# * or `exit(keep_kernel=True)`
# ### Built-in magic commands
# There are [many built-in magic commands](http://ipython.readthedocs.io/en/stable/interactive/magics.html) (like `%connect_info`) which allow you to do other fun things with notebooks. Check them out.
# ### Tab completion
# Tab completion is a powerful method for viewing object attributes and available methods.
#
# Let's see an example of this by using a Python [list](http://www.tutorialspoint.com/python/python_lists.htm). We will create a list and then you can see what methods are available by typing the list name followed by `.` and then hitting the <tab> key. Then you can access any method's help documentation by hitting the method's name followed by `?`; this opens a 'pager' at the bottom of the screen, you can hit <esc> to exit it.
l = [1, 4.2, 'hello']
l
# +
# type l. then hit <tab>
# +
# l.append?
# -
# ## Import packages
# It's generally good practice to import all your packages at the top of a file. We will do so in future tutorials.
# Before we start, we need to import the packages that we will be using later. If you are having trouble importing any of these packages make sure that these are properly installed. If you still encounter issues, refer to Installing instructions.
from __future__ import division, print_function # Makes division and printing work like python 3 (we're using 2)
import os
import sys
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
# ## ======= Numpy introduction =======
# Numpy is a powerful scientific computing library. The fundamental object is the (n-dimensional) numpy array and the library provides a collection of tools for working with such arrays. If you are already familiar with MATLAB you might find this [tutorial](https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html) useful.
#
# The following Numpy introduction is largely based on this [tutorial](http://cs231n.github.io/python-numpy-tutorial/#numpy).
# ### Arrays
# A numpy array is a grid of values, all of the same type, and is indexed by a [tuple](http://www.tutorialspoint.com/python/python_tuples.htm) of nonnegative integers. The number of dimensions is the rank of the array; the shape of an array is a tuple of integers giving the size of the array along each dimension. *N.B. this use of the word 'rank' is not the same as the meaning in linear algebra.*
#
# We can initialize numpy arrays from nested Python [lists](http://www.tutorialspoint.com/python/python_lists.htm), and access elements using square brackets:
import numpy as np
a = np.array([1, 2, 3]) # Creates a rank 1 array (i.e. vector)
a
type(a) # Prints the type of object a (numpy.ndarray)
a.shape # Tuple with the number of elements along each dimension: (3,)
# Indexing is 0-based; negative indices count backwards from the end of the array.
print(a[0], a[1], a[2], a[-1], a[-2], a[-3]) # Select array elements by index (starts at 0)
# EAFP-style demo: indexing past the end raises IndexError, which is caught and printed.
try:
    a[3] # Will error
except IndexError as e:
    print('{}'.format(e))
except:
    # Anything unexpected is reported and re-raised so it is not silently swallowed.
    print("Unexpected error:", sys.exc_info()[0])
    raise
a[0] = 5 # Change an element of the array (arrays are mutable)
a
b = np.array([[1,2,3],[4,5,6]]) # Create a rank 2 array (a 2 x 3 matrix)
b
b.shape
print(b[0, 0], b[0, 1], b[1, 0]) # Element access uses a [row, column] pair
# N.B. Python follows [0-based indexing](https://en.wikipedia.org/wiki/Zero-based_numbering) (as opposed to MATLAB which follows 1-based indexing).
# ### Creating arrays
# We have many options here:
# #### Set matrix types
# Generally the first argument is simply the shape of the resulting array
np.zeros((2, 2))
np.ones((1, 2))
np.full((2, 2), 7)
np.eye(2)
np.random.random((2, 2))
mu = 2
sigma = .2
np.random.normal(mu, sigma, (4,1)), np.random.normal(mu, sigma, 10)
# #### From a list
some_list = [1, 4, 6, 8]
e = np.array(some_list)
e
some_list = [[1, 4, 6, 8], [2, 2, 4, 4]]
f = np.array(some_list, dtype=float)
f
# #### Appending an existing array
# Grow an array one element at a time with np.append.
# NOTE: np.append copies the whole array on every call, so this loop is
# quadratic — it is shown here purely to demonstrate the function.
g = np.array([])
for value in range(10):
    g = np.append(g, value)
g
# Be careful with types though, numpy will do some inference on your behalf...it may not be what you want/intended.
# Mixing in a string upcasts the *result* to a Unicode string dtype,
# converting every element (the original g is unchanged — np.append copies).
np.append(g, 'hello')
e.dtype
# Appending a float to an integer array upcasts the result to float64.
np.append(e, 2.0)
np.append(e, 2.0).dtype
# ### Array indexing
# Slicing is the most common way to index arrays. This works in a similar way to indexing python lists.
#
# There are also other options, such as integer and boolean array indexing.
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # A 3 x 4 matrix
a
b = a[:2, 1:3] # Rows 0-1, columns 1-2 -> a 2 x 2 slice (a *view*, not a copy)
b
a[0, 1]
# A slice of an array is a view into the same data, so modifying it will modify the original array. For example: b[0, 0] is the same piece of data as a[0, 1]. Modifying b will modify a.
b[0, 0] = 77
a[0, 1] # Now 77: the write through the view b changed a as well
# ### Array datatypes
# Every numpy array is a grid of elements of the same type. Numpy provides a large set of numeric datatypes that you can use to construct arrays. Numpy tries to guess a datatype when you create an array, but functions that construct arrays usually also include an optional argument to explicitly specify the datatype. Here is an example:
x = np.array([1, 2]) # Let numpy choose the datatype
x.dtype
x = np.array([1.0, 2.0]) # Let numpy choose the datatype
x.dtype
x = np.array([1, 2], dtype=np.int64) # Force a particular datatype
x.dtype
# ### Array math
# Basic mathematical functions operate elementwise on arrays, and are available both as operator overloads and as functions in the numpy module:
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
x
y = np.array([[5, 6], [7, 8]], dtype=np.float64)
y
# #### Elementwise sum, equivalent expressions:
x + y
np.add(x, y)
# #### Elementwise difference, equivalent expressions:
x - y
np.subtract(x, y)
# #### Elementwise product, equivalent expressions:
x * y
np.multiply(x, y)
# #### Elementwise division, equivalent expressions:
x / y
np.divide(x, y)
# #### Elementwise square root
np.sqrt(x)
# #### Dot product and matrix multiplication
# Note that unlike MATLAB, * is elementwise multiplication, not matrix multiplication. We instead use the `np.dot` function or `.dot` method to compute inner products of vectors, to multiply a vector by a matrix, and to multiply matrices. `dot` is available both as a function in the numpy module and as an instance method of array objects:
x = np.array([[1, 2], [3, 4]])
y = np.array([[5, 6], [7, 8]])
v = np.array([9, 10])
w = np.array([11, 12])
# ##### Inner product of vectors
np.dot(v, w)
# ##### Matrix vector product
x.dot(v) # using x's method
np.dot(x, v) # using the numpy function
# ##### Matrix matrix product
x.dot(y) # using x's method
np.dot(x, y) # using the numpy function
# ### Mathematical functions
# Numpy provides many useful functions for performing computations on arrays; one of the most useful is `sum`:
x = np.array([[1, 2], [3, 4]])
x
np.sum(x) # Compute sum of all elements
np.sum(x, axis=0) # Compute sum of each column - sum *over rows* i.e. dimension 0
np.sum(x, axis=1) # Compute sum of each row - sum *over columns* i.e. dimension 1
# You can find the full list of mathematical functions provided by numpy in the [documentation](http://docs.scipy.org/doc/numpy/reference/routines.math.html).
#
# Apart from computing mathematical functions using arrays, we frequently need to reshape or otherwise manipulate data in arrays. The simplest example of this type of operation is transposing a matrix; to transpose a matrix, simply use the `T` attribute of an array object:
x = np.arange(4).reshape((2, 2))
x
x.T
np.transpose(x) # Equivalent expression
# Note that taking the transpose of a rank 1 array (a vector) does nothing:
v = np.array([1, 2, 3])
v
v.T
x.reshape((4, 1))
x.reshape((4,))
y = np.arange(27).reshape((3, 3, 3))
y
y.shape
y.reshape((3, -1))
y.reshape((3, -1)).shape
# ### Broadcasting
# Broadcasting is a powerful mechanism that allows numpy to work with arrays of different shapes when performing arithmetic operations. Frequently we have a smaller array and a larger array, and we want to use the smaller array multiple times to perform some operation on the larger array.
#
# For example, suppose that we want to add a constant vector to each row of a matrix.
x = np.arange(12).reshape((4, 3))
x
v = np.array([1, 0, 1])
v
x + v # Add v to each row of x using broadcasting
# `x + v` works even though `x` has shape `(4, 3)` and `v` has shape `(3,)` due to broadcasting; this line works as if v actually had shape `(4, 3)`, where each row was a copy of `v`, and the sum was performed elementwise.
#
# Broadcasting two arrays together follows these rules:
#
# * If the arrays do not have the same rank, prepend the shape of the lower rank array with 1s until both shapes have the same length.
# * The two arrays are said to be compatible in a dimension if they have the same size in the dimension, or if one of the arrays has size 1 in that dimension.
# * The arrays can be broadcast together if they are compatible in all dimensions.
# * After broadcasting, each array behaves as if it had shape equal to the elementwise maximum of shapes of the two input arrays.
# * In any dimension where one array had size 1 and the other array had size greater than 1, the first array behaves as if it were copied along that dimension.
# So be careful with shapes...
y = x.T # y has shape (3, 4): the transpose of x, which is (4, 3)
y
# y has shape (3, 4) and v has shape (3,): the trailing dimensions (4 vs 3)
# are incompatible, so broadcasting fails with a ValueError.
try:
    y + v # Add v to each column of y using broadcasting...?
except ValueError as e:
    print(e)
except:
    # Report and re-raise anything unexpected rather than hiding it.
    print("Unexpected error:", sys.exc_info()[0])
    raise
# And especially careful with vectors!
try:
y + v.T # Add v to each column of y using broadcasting...?
except ValueError as e:
print(e)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
y + v.reshape((3, 1)) # Add v to each column of y using broadcasting!
print('x shape:', x.shape)
print('v shape:', v.shape)
print('y shape:', y.shape)
# ### Numpy documentation
# This brief overview has touched on many of the important things that you need to know about numpy, but is far from complete. Check out the [numpy reference](https://docs.scipy.org/doc/numpy-1.13.0/reference/) to find out much more about numpy.
# ## ======= Pandas introduction =======
# Pandas is a library for data manipulation and analysis. There are two fundamental data structures in pandas: the **Series** and **DataFrame** structures which are built on top of NumPy arrays.
#
# The following introduction is largely based on this [tutorial](http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/). Another useful reference is the [Pandas introduction to data structures](http://pandas.pydata.org/pandas-docs/stable/dsintro.html). Pandas is well documented and you will find good information about all methods and structures in the [API reference](http://pandas.pydata.org/pandas-docs/stable/api.html)
# ### Series
# A **Series** a one-dimensional object (similar to a vector). Each element has a corresponding *index*. By default the indices range from 0 to N, where N is the length of the Series.
# Let's create a Series by passing in a list without specifying the indices.
s = pd.Series([1, 4.2, 'Hello'])
s
# Now, let's specify the indices explicitly
s = pd.Series([1, 4.2, 'Hello'], index=['A', 'B', 'C'])
s
# Indexing the Series
s['B']
# We can also index by using boolean logic
s[s > 2]
# ### DataFrame
# A DataFrame is a tabular data structure comprised of rows and columns. You can also think of the DataFrame as a collection of Series objects that share an index.
# #### Creating DataFrame structures
# We can create an empty DataFrame by specifying the column names. Then we can insert data row by row.
df = pd.DataFrame(columns=['Gender', 'Age', 'Height', 'Weight'])
df
# Now let's add an observation
df.loc[0] = ['Male', 23, 180, 73] # Note how we used .loc to specify the index
df.loc['A'] = ['Female', 27, 167, 59]
df
# You can populate using a dictionary too which allows you to do things in a nonstandard order...
df.loc['i'] = dict(Weight='3kgs', Age=10, Gender='Blue', Height=-12)
df
# #### Creating DataFrame from other structures
# You can also create a dataframe from:
# * Dict of 1D ndarrays, lists, dicts, or Series
# * 2-D numpy.ndarray
# * Structured or record ndarray
# * A Series
# * Another DataFrame
#
# For example:
# Create a DataFrame from a list
some_list = [['Male', 23, 180, 73], ['Female', 27, 167, 59]]
df = pd.DataFrame(some_list, index=[0, 'A'], columns=['Gender', 'Age', 'Height', 'Weight'])
df
# Create a DataFrame from a dictionary where keys are column values
column_key_dict = {
'Gender': ['Male', 'Female'],
'Age': [23, 27],
'Height': [180, 167],
'Weight': [73, 59]
}
df = pd.DataFrame.from_dict(column_key_dict, orient='columns')
df.index = [0, 'A']
df
# Create a DataFrame from a dictionary where keys are index values
index_key_dict = {0:['Male', 23, 180, 73], 'A':['Female', 27, 167, 59]}
df = pd.DataFrame.from_dict(index_key_dict, orient='index')
df.columns = ['Gender', 'Age', 'Height', 'Weight']
df
# Using the DataFrame call, keys are assumed to be column headers
df = pd.DataFrame({0:['Male', 23, 180, 73], 'A':['Female', 27, 167, 59]},
index=['Gender', 'Age', 'Height', 'Weight'])
df
# +
# ...we can transpose using the `.T` method
# -
df = df.T
df
# #### Loading a CSV into a DataFrame
# Most commonly we create DataFrame structures by reading csv files. To run the following piece of code you need to download the datasets associated with the course and place them in a subdirectory called "datasets" under the same directory that your notebooks are located. Alternatively, you can specify the full path of the .csv file.
cpu_loc = os.path.join(os.getcwd(), 'datasets', 'cpu.csv')
cpu_loc
cpu = pd.read_csv(cpu_loc)
cpu.head() # Head shows the first few elements (unless specified otherwise) of the DataFrame
# You should see that each observation in our dataset comprises 8 measurements (attributes).
# #### Basic methods for DataFrame objects
# * `head(N)`: displays the first N elements of the DataFrame
# * `tail(N)`: displays the last N elements of the DataFrame
# * `info()`: displays basic information about the variables
# * `describe()`: displays summary statistics of the data
# Execute the following cells and observe the outputs.
cpu.tail(5)
cpu.info()
cpu.describe()
# #### Column Selection
# You can think of a DataFrame as a group of Series that share an index (in this case the column headers). This makes it easy to select specific **columns**.
cpu['MMAX'].head(5)
type(cpu['MMAX'])
# To select multiple columns we simply need to pass a list of column names. The resulting object is another DataFrame.
cpu[['MMIN', 'MMAX']].head(7)
type(cpu[['MMIN', 'MMAX']].head(7)) # This is a DataFrame
# #### Row selection
# To select specific **observations (i.e. rows)** we need to pass in the corresponding indices. This operation is called *slicing*. The resulting structure is again a DataFrame.
cpu[0:3]
# This is equivalent to using .iloc
cpu.iloc[0:3]
# #### Filtering
# Now suppose that you want to select all the observations which have an MMAX value which is higher than 35000. It is easy to do that:
cpu[cpu['MMAX'] > 35000]
# Or equivalently:
cpu[cpu.MMAX > 35000]
# You can also filter the data by using multiple attributes:
cpu[(cpu.MMAX > 35000) & (cpu.MMIN > 16000)]
# We saw before how we can select rows by passing the index numbers. This most of the time works but very often our indices are not in linear ascending order.
#
# There are two basic methods of indexing DataFrame structures:
# * `loc`: works on labels in the index
# * `iloc`: works on the position in the index (so it only takes integers)
#
# The following example should clarify the difference between label-based indexing (`loc`) and positional indexing (`iloc`)
#
# First let's create a new dataframe
cpu_new = cpu[cpu['MMAX'] > 35000]
cpu_new
cpu_new.loc[8:10] # Looks for the rows which are labelled 8 and 9
cpu_new.iloc[0:2] # Look for the first and second rows (this yields the same result as before)
# If we try the following we will get an empty DataFrame because there are no rows with labels 0 and 1.
cpu_new.loc[0:2]
# The result is another DataFrame
type(cpu[0:2])
# A very common scenario will be the following. We want to select specific observations and columns of a DataFrame and convert to a NumPy array so that we can use it for feature extraction, classification etc. This can be achieved by using the `values` method.
# Select the first 10 observations and the "MMIN" and "MMAX" columns only and convert to numpy array.
cpu[:10][['MMIN', 'MMAX']].values
# You can confirm that by using the `values` method the resulting object is a NumPy array.
# #### Indexing - selecting rows and columns
# *WARNING* - indexing is probably the most difficult part of pandas to get used to. If you get stuck [refer to the documentation on indexing](https://pandas.pydata.org/pandas-docs/stable/indexing.html).
# Summary of DataFrame methods for indexing:
# * iloc - ignore index labels, index like numpy with integer positions
# * loc - use index labels
# To illustrate, observe what happens when we reorder the rows of our dataframe.
cpu.sort_values('ERP', inplace=True) # Reorder rows by ERP; index labels travel with their rows
cpu.iloc[:10] # Positional: the first ten rows of the *sorted* frame
cpu.loc[:10] # Label-based: rows up to and including the one labelled 10
# Observe what happens if we change the label of one of the now first index
cpu = cpu.rename(index={cpu.index[0]: 'A'}) # Give the first row a string label
cpu.iloc[:10] # Positional indexing is unaffected by the relabelling
# With a mixed-type (int and str) index, label slicing can no longer be
# ordered, so .loc raises a TypeError, demonstrated here.
try:
    cpu.loc[:10]
except TypeError as e:
    print(e)
# For more, check out [Advanced Indexing](https://pandas.pydata.org/pandas-docs/stable/advanced.html#advanced)
# ## Matplotlib introduction
# Matplotlib is a 2D python plotting library with a similar interface to MATLAB's plot engine. The library is fully compatible with NumPy which means that we can -and most of the times will- pass numpy arrays as input arguments to the various plotting functions.
#
# There are many [resources](http://matplotlib.org/resources/index.html) for learning how to use Matplotlib. The following examples demonstrate only some basic plotting functions. When you are looking for a particular feature which you don't know yet how to implement a web search can prove very useful.
# +
# The first example creates a sine and a cosine and plots them.
X = np.linspace(-np.pi, np.pi, 256,endpoint=True) # Evenly spaced numbers over the specified interval
C, S = np.cos(X), np.sin(X) # Create a sine (S) and a cosine (C)
plt.figure(figsize=(8,5)) # Instantiates figure with a specified size
plt.plot(X,C, label='Cosine')
plt.plot(X,S, label='Sine')
plt.legend() # Displays legend
plt.show()
# +
# Create 4 subplots and plot cosines with different colours, linestyles and linewidths.
X = np.linspace(-np.pi, np.pi, 256,endpoint=True) # Evenly spaced numbers over the specified interval
C1, C2, C3, C4 = np.cos(X), np.cos(X+0.5), np.cos(X+1.), np.cos(X+1.5) # Four phase-shifted cosines
fig, ax = plt.subplots(figsize=(7,7))
# plt.subplot(rows, cols, index) selects one pane of a 2 x 2 grid (1-based index).
plt.subplot(2,2,1)
plt.plot(X, C1)
plt.title('C1')
plt.xticks([]) # Removes xticks
plt.subplot(2,2,2)
plt.plot(X, C2, color='green', linestyle='-.')
plt.title('C2')
plt.xticks([]) # Removes xticks
plt.yticks([]) # Removes yticks
plt.subplot(2,2,3)
plt.plot(X, C3, color='k', linestyle='--')
plt.title('C3')
plt.subplot(2,2,4)
plt.plot(X, C4, color='m', linewidth = 5.)
plt.title('C4')
plt.yticks([]) # Removes yticks
plt.show()
# -
# Another way to reference subplots is using the Axes objects. This code achieves the same thing as above but, instead of using the package api functions, it uses the Axes object methods:
fig, ax = plt.subplots(2, 2, figsize=(7,7), sharex=True, sharey=True)
# ax is a 2 x 2 numpy array at this point containing the Axes objects
ax = ax.flatten() # this simply flattens the array such that we can reference by a single index
ax[0].plot(X, C1)
ax[0].set_title('C1')
ax[1].plot(X, C2, color='green', linestyle='-.')
ax[1].set_title('C2')
ax[2].plot(X, C3, color='k', linestyle='--')
ax[2].set_title('C3')
ax[3].plot(X, C4, color='m', linewidth = 5.)
ax[3].set_title('C4')
plt.show()
# **N.B. Because we created the subplots up front and stated the axes should be the same, matplotlib intelligently labels the axes for us**
# +
# Plot a histogram and add some text
mu, sigma = 100, 15
x = mu + sigma * np.random.randn(10000) # Generate data from a normal distribution with mean mu and standard deviation sigma
# the histogram of the data
# `density=True` replaces the `normed` argument, which was removed in matplotlib 3.1
n, bins, patches = plt.hist(x, 50, density=True, facecolor='g', alpha=0.75)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title('Histogram of IQ')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$') # Adds some text by using TeX
plt.axis([40, 160, 0, 0.03]) # Sets the axes limits
plt.grid(True) # Enables grid
plt.show()
# -
# ## Seaborn
# [Seaborn](https://seaborn.github.io/index.html) is a visualisation library built on top of matplotlib which offers some aesthetic enhancement and, more importantly, provides some high-level functions for "exploring and understanding data". Seaborn is also tightly integrated with pandas and provides support for both numpy and pandas data structures.
# ### Aesthetics
# Firstly note that when you import and use seaborn, it may change the aesthetics of all matplotlib plots you make subsequently. If you only ever want to use matplotlib defaults, you can import seaborn like this: `import seaborn.apionly as sns`.
#
# To reset to matplotlib defaults, use: `sns.reset_defaults()`
# There's some really useful stuff you can do with seaborn, especially with respect to setting default sizings with respect to the context in which you're producing plots. With one function, seaborn can make plots appropriate for posters, papers, notebooks, or talks. For further information see the [seaborn documentation on aesthetics](https://seaborn.pydata.org/tutorial/aesthetics.html)
# ### Seaborn implemented functions [Optional]
# The Seaborn website has many great examples. Below we show a few taken directly from the website. These are just to give you a flavour of the kinds of things that Seaborn can do. You can use it as reference later; by no means should you learn this code by heart! You should definitely not worry if some of the following commands do not make sense just yet.
# +
# Load the example titanic dataset
df = sns.load_dataset("titanic")
# Make a custom palette with gendered colors
pal = dict(male="#6495ED", female="#F08080")
# Render the same plot once per plotting context to compare default sizings.
with sns.axes_style(style="darkgrid"):
    for context in ['notebook', 'paper', 'poster', 'talk']:
        with sns.plotting_context(context):
            # Show the survival probability as a function of age and sex
            g = sns.lmplot(x="age", y="survived", col="sex", hue="sex", data=df,
                           palette=pal, y_jitter=.02, logistic=True)
            g.set(xlim=(0, 80), ylim=(-.05, 1.05))
            plt.show()
# -
# using a with statement means style defaults are not overridden
# NOTE(review): sns.distplot was deprecated in seaborn 0.11 and removed in
# later releases; on current seaborn use histplot/kdeplot/displot instead —
# confirm the installed seaborn version before running this cell.
with sns.axes_style(style="white", rc=dict(palette="muted", color_codes=True)):
    rs = np.random.RandomState(10) # Fixed seed so the figures are reproducible
    # Set up the matplotlib figure
    f, axes = plt.subplots(2, 2, figsize=(7, 7), sharex=True)
    sns.despine(left=True)
    # Generate a random univariate dataset
    d = rs.normal(size=100)
    # Plot a simple histogram with binsize determined automatically
    sns.distplot(d, kde=False, color="b", ax=axes[0, 0])
    # Plot a kernel density estimate and rug plot
    sns.distplot(d, hist=False, rug=True, color="r", ax=axes[0, 1])
    # Plot a filled kernel density estimate
    sns.distplot(d, hist=False, color="g", kde_kws={"shade": True}, ax=axes[1, 0])
    # Plot a histogram and kernel density estimate
    sns.distplot(d, color="m", ax=axes[1, 1])
    plt.setp(axes, yticks=[])
    plt.tight_layout()
    plt.show()
with sns.axes_style(style="white"):
    # Generate a random correlated bivariate dataset
    rs = np.random.RandomState(5)
    mean = [0, 0]
    cov = [(1, .5), (.5, 1)]
    x1, x2 = rs.multivariate_normal(mean, cov, 500).T
    x1 = pd.Series(x1, name="$X_1$")
    x2 = pd.Series(x2, name="$X_2$")
    # Show the joint distribution using kernel density estimation.
    # Data is passed by keyword and `height` replaces the old `size`
    # parameter (renamed in seaborn 0.9, later removed; positional x/y
    # were deprecated in 0.12).
    g = sns.jointplot(x=x1, y=x2, kind="kde", height=7, space=0)
    plt.show()
with sns.axes_style(style="whitegrid", rc=dict(palette="pastel", color_codes=True)):
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested violinplot and split the violins for easier comparison
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips, split=True,
inner="quart", palette={"Male": "b", "Female": "y"})
sns.despine(left=True)
plt.show()
# +
sns.set()
# Load the example flights dataset and convert to long-form
flights_long = sns.load_dataset("flights")
# pandas 2.0 removed positional arguments to DataFrame.pivot — pass them by keyword.
flights = flights_long.pivot(index="month", columns="year", values="passengers")
# Draw a heatmap with the numeric values in each cell
sns.heatmap(flights, annot=True, fmt="d", linewidths=.5)
plt.show()
# +
sns.set(style="darkgrid")
tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
bins = np.linspace(0, 60, 13)
g.map(plt.hist, "total_bill", color="steelblue", bins=bins, lw=0)
plt.show()
# +
sns.set(style="ticks")
# Create a dataset with many short random walks
rs = np.random.RandomState(4)
pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)
pos -= pos[:, 0, np.newaxis]
step = np.tile(range(5), 20)
walk = np.repeat(range(20), 5)
df = pd.DataFrame(np.c_[pos.flat, step, walk],
                  columns=["position", "step", "walk"])
# Initialize a grid of plots with an Axes for each walk
# (`height` replaces the `size` parameter, renamed in seaborn 0.9 and later removed)
grid = sns.FacetGrid(df, col="walk", hue="walk", col_wrap=5, height=1.5)
# Draw a horizontal line to show the starting point
grid.map(plt.axhline, y=0, ls=":", c=".5")
# Draw a line plot to show the trajectory of each random walk
grid.map(plt.plot, "step", "position", marker="o", ms=4)
# Adjust the tick positions and labels
grid.set(xticks=np.arange(5), yticks=[-3, 3],
         xlim=(-.5, 4.5), ylim=(-3.5, 3.5))
# Adjust the arrangement of the plots
grid.fig.tight_layout(w_pad=1)
plt.show()
# -
# # ======= Exercises =======
# ## Numpy exercises
# The following short exercises test your understanding of simple numpy functions and objects. Make sure you can complete them and feel free to reference the official [documentation](http://docs.scipy.org/doc/) should you need it.
#
# **You may need to google some solutions**
# #### ========== Question 1 ==========
# Print your numpy version.
# +
# Your code goes here
# -
# #### ========== Question 2 ==========
# Create a zero vector of size 5.
# +
# Your code goes here
# -
# #### ========== Question 3 ==========
# Create a zero vector of size 5 of type integer. Set the third element to 1.
# +
# Your code goes here
# -
# #### ========== Question 4 ==========
# Create a vector ranging from 0 to 9.
# +
# Your code goes here
# -
# #### ========== Question 5 ==========
# Create a vector ranging from 10 to 29.
# +
# Your code goes here
# -
# #### ========== Question 6 ==========
# Create a vector ranging from 0 to 9 and reverse it.
# +
# Your code goes here
# -
# #### ========== Question 7 ==========
# Create a 5 x 3 zero matrix.
# +
# Your code goes here
# -
# #### ========== Question 8 ==========
# Create this matrix...without copy pasting it ;)
# ```
# array([[0, 3, 6],
# [1, 4, 7],
# [2, 5, 8]])
# ```
# +
# Your code goes here
# -
# #### ========== Question 9 ==========
# Create a 3 X 3 identity matrix.
# +
# Your code goes here
# -
# #### ========== Question 10 ==========
# Create a 2 X 2 X 2 array with random values (drawn from a normal distribution).
# +
# Your code goes here
# -
# #### ========== Question 11a ==========
# Create a 5 x 4 array with random values and find the minimum and maximum values.
# +
# Your code goes here
# -
# #### ========== Question 11b ==========
# Return the *index* (i.e. the location within the matrix) of the max or min values
# +
# Your code goes here
# -
# #### ========== Question 12 ==========
# Find the mean value of the array in 11.
# +
# Your code goes here
# -
# #### ========== Question 13 ==========
# Find the row means of the array in 11.
# +
# Your code goes here
# -
# #### ========== Question 14 ==========
# Find the column means of the array in 11.
# +
# Your code goes here
# -
# #### ========== Question 15 ==========
# Create a list with elements 2.2, 3.5, 0, 4, 0. and convert into numpy array. Find the indices of non-zero elements.
# +
# Your code goes here
# -
# #### ========== Question 16 ==========
# Create two normally distributed random matrices of shape (5, 4) and (4, 2). Print their matrix product.
# +
# Your code goes here
# -
# #### ========== Question 17 ==========
# Create a random matrix of shape (5, 3) and a random vector of size 3. Use broadcasting to add the two arrays.
# +
# Your code goes here
# -
# ## Pandas exercises
# #### ========== Question 18 ==========
# Load the `credit` dataset and display the dataset basic information.
# +
# Your code goes here
# -
# #### ========== Question 19 ==========
# Display the summary statistics of the attributes of the dataset.
# +
# Your code goes here
# -
# #### ========== Question 20 ==========
# Display the last 6 instances of the dataset.
# +
# Your code goes here
# -
# #### ========== Question 21 ==========
# Print the 5th observation
# +
# Your code goes here
# -
# #### ========== Question 22 ==========
# Print the standard deviation of the attribute `CreditAmount` by using the numpy method `std`. You can verify you get the correct result by inspecting the statistics table from Question 19.
# +
# Your code goes here
# -
# ## Matplotlib exercises
# #### ========== Question 23 ==========
# Initialise an empty figure of size (8,6). Create a scatter plot of the observations in the `credit` dataset for the attributes `Duration` and `Age` (age should be shown on the xaxis). Use black coloured markers. Label the axes accordingly by using red colour for the xaxis and green for the yaxis. Finally, add a title of your choice by using a font size of 20.
# +
# Your code goes here
# -
# #### ========== Question 24 ==========
# You should note above that there is an outlier (i.e. an observation that is very distant to the other observations). Now you think that there must be something wrong with that observation (why?) and as a next step you should remove it from both the age and duration variables and a make a scatter plot of the new data.
# +
# Your code goes here
| 01_Lab_0_Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
import numpy as np
import pymc3 as pm
import theano.tensor as tt
# ```stan
# data {
# int <lower=0> N;
# int <lower=0> D;
# matrix [N,D] x ;
# vector [N] y;
# }
# parameters {
# vector [D] b;
# real <lower=0> sigma;
# }
# model {
# y ~ normal(x * b, sigma);
# b~normal(0,1);
# sigma~gamma(0.5,0.5);
# }
# generated quantities{
# real log_density;
# log_density=normal_lpdf(y |x * b, sigma)+normal_lpdf(b|0,1)+gamma_lpdf(sigma|0.5,0.5)+log(sigma);
# }
# ```
# a linear regression with 10^4 (N = 10,000) data points and 100 variables
# Simulate a regression problem: N = 10,000 observations, D = 100 predictors,
# true coefficients drawn from N(0, 1), Gaussian noise with standard deviation 2.
N = 10000
D = 100
beta = np.random.randn(D)
x = np.random.randn(N, D)
y = np.dot(x, beta) + np.random.randn(N)*2
# PyMC3 translation of the Stan model above: b ~ Normal(0, 1), sigma ~ Gamma(.5, .5),
# y ~ Normal(x @ b, sigma).
with pm.Model() as md:
    b = pm.Normal('b', 0., 1., shape=D)
    sigma = pm.Gamma('sd', .5, .5)
    pm.Normal('y', tt.dot(x, b), sigma, observed=y)
# Fit the model with ADVI (automatic differentiation variational inference)
# for 100,000 iterations.
with md:
    approx = pm.ADVI()
    approx.fit(100000)
| WIP/Evaluating Variational Inference/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Resource-Efficiency-Collective/coding-tutorials/blob/main/sankey_recipes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="VjlUmPJRwsDQ"
# # Sankey recipes
# Here are some recipes for typical problems visualised as a Sankey diagram.
#
# Please run the first two cells to setup the notebook.
# + id="aaB-_89wwNy2"
# %%capture
"""Installation and downloads"""
# Install floweaver and display widget packages
# %pip install floweaver ipysankeywidget
# Import packages
import pandas as pd
from io import StringIO
from floweaver import *
from ipywidgets import HBox
import gdown, os
# Import and unzip example data -> You can then view them in the left files panel
folder, zip_path = 'example_data', 'example_data.zip'
if not os.path.exists(folder):
gdown.download('https://drive.google.com/uc?id=1qriY29v7eKJIs07UxAw5RlJirfwuLnyP', zip_path ,quiet=True)
# ! unzip $zip_path -d 'example_data'
# ! rm $zip_path
# Set the default size to fit the documentation better.
size = dict(width=570, height=300)
# + id="P2B3Xs-uxDS4"
"""Display setup"""
# Enable widget display for Sankeys in Colab
from google.colab import output
output.enable_custom_widget_manager()
# + [markdown] id="6TzVlY4-xGlw"
# ## 1 - Imports & exports
#
# This recipe demonstrates how to show import and export flows to/from a simple process chain.
#
# For demonstration, the CSV data is written directly in the cell below -- in practice you would want to load data from a file.
# + id="TsW0nNF3xI4n"
flows = pd.read_csv(StringIO("""
source,target,type,value
a,b,main,3
b,c,main,4
imports,b,import/export,2
b,exports,import/export,1
"""))
flows
# + [markdown] id="9ghl1jVbxRTI"
# Here is the basic structure of the Sankey diagram: a chain of processes `a -- b -- c`.
# + id="hYjtlXgTxSgY"
# Each ProcessGroup wraps one underlying process id from the flows table.
nodes = {
    'a': ProcessGroup(['a']),
    'b': ProcessGroup(['b']),
    'c': ProcessGroup(['c']),
}
# Bundles declare which flows are drawn between the process groups.
bundles = [
    Bundle('a', 'b'),
    Bundle('b', 'c'),
]
# Ordering: one vertical layer per inner list, left to right.
ordering = [
    ['a'],
    ['b'],
    ['c'],
]
sdd = SankeyDefinition(nodes, bundles, ordering)
weave(sdd, flows).to_widget(**size)
# + [markdown] id="odrW1HMxxX6Q"
# To get more control over the appearance of the import/export flows, they can be controlled using Waypoints:
# + id="C1_pPX4kxYNQ"
# Waypoints give explicit positions for flows arriving from / leaving to "Elsewhere".
nodes = {
    'a': ProcessGroup(['a']),
    'b': ProcessGroup(['b']),
    'c': ProcessGroup(['c']),
    'imports': Waypoint(),
    'exports': Waypoint(),
}
bundles = [
    Bundle('a', 'b'),
    Bundle('b', 'c'),
    # Elsewhere bundles route the import/export flows through the waypoints.
    Bundle(Elsewhere, 'b', waypoints=['imports']),
    Bundle('b', Elsewhere, waypoints=['exports']),
]
# Each layer is now a list of bands (top to bottom) of node ids.
ordering = [
    [['a'], ['imports']],
    [['b']],
    [['c'], ['exports']],
]
sdd = SankeyDefinition(nodes, bundles, ordering)
weave(sdd, flows).to_widget(**size)
# + [markdown] id="FX_KxB2cxbIt"
# To get different colours for imports/exports, we need to modify the SDD to use the `type` column to distinguish different types of flow:
# + id="cptBHE40xbbZ"
sdd = SankeyDefinition(nodes, bundles, ordering, flow_partition=Partition.Simple('type', ['main', 'import/export']))
weave(sdd, flows).to_widget(**size)
# + [markdown] id="Jnv3h7kzxeYT"
# Finally, you can customise the colour scheme:
# + id="-FQGKzXKxe74"
weave(sdd, flows, palette={'main': 'steelblue', 'import/export': 'lightblue'}).to_widget(**size)
# + [markdown] id="5Z9qyoPMxiNQ"
# ### Alternative style
#
# An alternative style for showing imports & exports like this isn't currently supported:
#
# 
#
# But it should be possible to support with minor changes to the Sankey diagram definition. For example, the difference between this style and the style shown above could be requested by changing:
#
# ```python
# Bundle(Elsewhere, 'b', waypoints=['imports'])
# ```
# to
# ```python
# Bundle(Elsewhere, 'b', waypoints=[])
# ```
#
# The lack of a waypoint would indicate that the flow should be shown as a short "stub".
# + [markdown] id="elsoqJyHx186"
# ## 2 - Forwards & backwards flows
#
# This recipe demonstrates how forwards and backwards flows work.
#
# For demonstration, the CSV data is written directly in the cell below -- in practice you would want to load data from a file.
# + id="IOJRXT9Vxikg"
flows = pd.read_csv(StringIO("""
source,target,type,value
a,b,main,2
a,c,main,1
c,d,main,3
b,c,back,2
"""))
flows
# + [markdown] id="UEwaOeyDx7dg"
# Here is one structure, with nodes `b` and `c` both in the same vertical slice:
# + id="saOHl1_Yx50A"
nodes = {
    'a': ProcessGroup(['a']),
    'b': ProcessGroup(['b']),
    'c': ProcessGroup(['c']),
    'd': ProcessGroup(['d']),
    # direction='L' marks this waypoint as carrying a right-to-left (return) flow.
    'back': Waypoint(direction='L'),
}
bundles = [
    Bundle('a', 'b'),
    Bundle('a', 'c'),
    # The b -> c flow is routed backwards through the 'back' waypoint.
    Bundle('b', 'c', waypoints=['back']),
    Bundle('c', 'd'),
    Bundle('c', 'b'),
]
ordering = [
    [['a'], []],
    [['b', 'c'], ['back']],
    [['d'], []],
]
sdd = SankeyDefinition(nodes, bundles, ordering)
weave(sdd, flows).to_widget(**size)
# + [markdown] id="B53YWdz2yCWc"
# Alternatively, if `b` is moved to the right, extra hidden waypoints are automatically added to get the `b--c` flow back to the left of `c`:
# + id="iIjI64lByCn4"
bundles = [
    Bundle('a', 'b'),
    Bundle('a', 'c'),
    Bundle('b', 'c'),
    Bundle('c', 'd'),
    Bundle('c', 'b'),
]
ordering = [
    [['a'], []],
    [['c'], ['back']],
    [['b', 'd'], []],
]
sdd = SankeyDefinition(nodes, bundles, ordering)
weave(sdd, flows).to_widget(**size)
# + [markdown] id="CWPvO_ogycqi"
# ## 3 - "Fruit" example (from Hybrid Sankey diagrams paper)
#
# This notebook gives a fairly complicated example of building a Sankey diagram from the sample "fruit" database used in the paper [Hybrid Sankey diagrams: Visual analysis of multidimensional data for understanding resource use](https://doi.org/10.1016/j.resconrec.2017.05.002).
#
# For more explanation of the steps and concepts, see the [tutorials](../tutorials/index.ipynb).
# + id="MPnThre4yEA6"
# Load dataset
dataset = Dataset.from_csv('example_data/fruit_flows.csv', 'example_data/fruit_processes.csv')
# + [markdown] id="JS7sMUd_yn83"
# This made-up dataset describes flows from farms to consumers:
# + id="evkd5WOzykxJ"
dataset._flows.head()
# + [markdown] id="lRZD2W3Rys0H"
# Additional information is available in the process dimension table:
# + id="gFS7Krkeyq7_"
dataset._dim_process.head()
# + [markdown] id="FmIHvh96yw5o"
# We'll also define some partitions that will be useful:
# + id="vQspZXTiyus4"
# Partition helpers used below: the first five farms shown individually,
# the remaining ten grouped together as 'Other farms'.
farm_ids = ['farm{}'.format(i) for i in range(1, 16)]
farm_partition_5 = Partition.Simple('process', [('Other farms', farm_ids[5:])] + farm_ids[:5])
partition_fruit = Partition.Simple('material', ['bananas', 'apples', 'oranges'])
partition_sector = Partition.Simple('process.sector', ['government', 'industry', 'domestic'])
# + [markdown] id="GGhQbg9my0WD"
# Now define the Sankey diagram definition.
#
# - Process groups represent sets of processes in the underlying database. The underlying processes can be specified as a list of ids (e.g. `['inputs']`) or as a Pandas query expression (e.g. `'function == "landfill"'`).
# - Waypoints allow extra control over the partitioning and placement of flows.
# + id="WEFNURnIyycI"
nodes = {
    'inputs': ProcessGroup(['inputs'], title='Inputs'),
    'compost': ProcessGroup('function == "composting stock"', title='Compost'),
    'farms': ProcessGroup('function in ["allotment", "large farm", "small farm"]', farm_partition_5),
    'eat': ProcessGroup('function == "consumers" and location != "London"', partition_sector,
                        title='consumers by sector'),
    'landfill': ProcessGroup('function == "landfill" and location != "London"', title='Landfill'),
    'composting': ProcessGroup('function == "composting process" and location != "London"', title='Composting'),
    'fruit': Waypoint(partition_fruit, title='fruit type'),
    'w1': Waypoint(direction='L', title=''),
    'w2': Waypoint(direction='L', title=''),
    'export fruit': Waypoint(Partition.Simple('material', ['apples', 'bananas', 'oranges'])),
    'exports': Waypoint(title='Exports'),
}
# + [markdown] id="0wpAHuRvy52Y"
# The ordering defines how the process groups and waypoints are arranged in the final diagram. It is structured as a list of vertical *layers* (from left to right), each containing a list of horizontal *bands* (from top to bottom), each containing a list of process group and waypoint ids (from top to bottom).
# + id="roFjXSeSy2Ix"
ordering = [
    [[], ['inputs', 'compost'], []],
    [[], ['farms'], ['w2']],
    [['exports'], ['fruit'], []],
    [[], ['eat'], []],
    [['export fruit'], ['landfill', 'composting'], ['w1']],
]
# + [markdown] id="sh48BxfUy-QJ"
# Bundles represent flows in the underlying database:
# + id="fllR5sE2y7i4"
bundles = [
    Bundle('inputs', 'farms'),
    Bundle('compost', 'farms'),
    Bundle('farms', 'eat', waypoints=['fruit']),
    Bundle('farms', 'compost', waypoints=['w2']),
    Bundle('eat', 'landfill'),
    Bundle('eat', 'composting'),
    # Return flow back to the compost stock, routed right-to-left via w1 then w2.
    Bundle('composting', 'compost', waypoints=['w1', 'w2']),
    Bundle('farms', Elsewhere, waypoints=['exports', 'export fruit']),
]
# + [markdown] id="EVcN4NVvzBjA"
# Finally, the process groups, waypoints, bundles and ordering are combined into a Sankey diagram definition (SDD). When applied to the dataset, the result is a Sankey diagram!
# + id="OmatrWx1y_4A"
sdd = SankeyDefinition(nodes, bundles, ordering,
                       flow_partition=dataset.partition('material'))
weave(sdd, dataset) \
    .to_widget(width=570, height=550, margins=dict(left=70, right=90))
# + [markdown] id="VUu1a_HfzJg4"
# # Setting the scale
#
# This recipe demonstrates how the scale of the Sankey diagram is set.
#
# By default the scale is calculated for each diagram to achieve a certain whitespace-to-flow ratio within the height that is given. But in some cases, you may want to set the scale explicitly.
#
# For demonstration, the CSV data is written directly in the cell below -- in practice you would want to load data from a file.
# + id="zKnVxArmzLk4"
flows = pd.read_csv(StringIO("""
year,source,target,value
2020,A,B,10
2025,A,B,20
"""))
flows
# + id="jX3yjDuczNf5"
nodes = {
'A': ProcessGroup(['A']),
'B': ProcessGroup(['B']),
}
bundles = [
Bundle('A', 'B'),
]
ordering = [['A'], ['B']]
sdd = SankeyDefinition(nodes, bundles, ordering)
# + [markdown] id="5QeSCNklzSWg"
# If we draw the flow for the year 2020 and the year 2025 separately, they appear the same:
# + id="6TqXDBx5zPzA"
w1 = weave(sdd, flows.query('year == 2020')).to_widget(**size)
w1
# + id="B1JBke5mzUpo"
w2 = weave(sdd, flows.query('year == 2025')).to_widget(**size)
w2
# + [markdown] id="VyZW7PDjzXSZ"
# But in fact they have different scales:
# + id="rwKVAuJ6zV2i"
w1.scale, w2.scale
# + [markdown] id="ii7lvMKxzbMg"
# The units of the scale are `units-of-value` per pixel.
#
# If we draw the Sankeys again while setting the scale, we can see that the flow indeed has changed between years:
# + id="5K5IOpDtzZH6"
SCALE = 2.0
w1 = weave(sdd, flows.query('year == 2020')).to_widget(**size)
w2 = weave(sdd, flows.query('year == 2025')).to_widget(**size)
w1.scale = w2.scale = SCALE
HBox([w1, w2])
| sankey_recipes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Boiler plate imports
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib import image as image
import easygui
# Loading in the image
war_img = image.imread("data/wartime.jpg")
# +
# Subplot setup: six axes on a 3x3 grid (the third column stays empty)
fig = plt.figure(figsize=(20, 20))
grid_positions = [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
ax1, ax2, ax3, ax4, ax5, ax6 = [plt.subplot2grid((3, 3), pos) for pos in grid_positions]
# Row 1: original image and its raw pixel-value histogram
ax1.imshow(war_img)
ax2.hist(war_img.ravel(), bins=256, range=[0, 256])
# Row 2: histogram-equalised greyscale version and its histogram
grey_img = cv2.cvtColor(war_img, cv2.COLOR_BGR2GRAY)
equalised = cv2.equalizeHist(grey_img)
ax3.imshow(cv2.cvtColor(equalised, cv2.COLOR_GRAY2BGR))
ax4.hist(equalised.ravel(), bins=256, range=[0, 256])
# Row 3: min-max intensity stretch of the greyscale image and its histogram
stretched = 255 * ((grey_img - grey_img.min()) / (grey_img.max() - grey_img.min()))
ax5.imshow(stretched, cmap='gray')
ax6.hist(stretched.ravel(), bins=256, range=[0, 256])
plt.show()
# -
| Lecture Code Work/Task 3 - Histograms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="-90UUILKZSuW"
# # Modelos de Machine Learning
# Após a falha nos testes de modelos de ML com redução da dimensionalidade na etapa anterior, foi realizado o treinamento dos modelos de machine learning com o dataset completo. Para tanto, foi utilizado o Google Colab devido a limitações de hardware da máquina local.
# + executionInfo={"elapsed": 928, "status": "ok", "timestamp": 1640265997823, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="o3tBDHpaQlIx"
import numpy as np
import sklearn
from scipy import sparse
from google.colab import drive
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 17462, "status": "ok", "timestamp": 1640266015280, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="CIQxR8pqQ0Xd" outputId="742b9185-c4d5-4705-afab-9a069262ab8f"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="fPhsSZ_QbPMi"
# ## Carregamento dos datasets de treino e teste
# + executionInfo={"elapsed": 2553, "status": "ok", "timestamp": 1640266017829, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="7zqsJiWgZ9ee"
X_train = sparse.load_npz('drive/MyDrive/''Colab Notebooks''/X_train_blc_sparse.npz')
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1640266017830, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="Hnv_KXKVaUh8"
y_train = np.loadtxt('drive/MyDrive/''Colab Notebooks''/y_train_balanced.csv', delimiter=',')
# + executionInfo={"elapsed": 714, "status": "ok", "timestamp": 1640266018538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="k5JnNfZTaUlI"
X_test = sparse.load_npz('drive/MyDrive/''Colab Notebooks''/X_test_sparse.npz')
# + executionInfo={"elapsed": 421, "status": "ok", "timestamp": 1640266018956, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="WIKSB10jZSue"
y_test = np.loadtxt('drive/MyDrive/''Colab Notebooks''/y_test.csv', delimiter=',')
# + [markdown] id="S4DVyYG2ZSuh"
# ## Escolha do modelo de machine learning
# Será seguido o padrão de escolha de modelos do scikit-learn, disponível em https://scikit-learn.org/stable/tutorial/machine_learning_map/index.html. Como trata-se de um problema de classificação, com dados rotulados e menos de 100 mil amostras, o primeiro modelo a ser testado será o Linear SVC. Caso não apresente resultado satisfatório será testado o algoritmo Naive Bayes.
# + [markdown] id="fBk278TvZSui"
# <img src='drive/MyDrive/''Colab Notebooks''/ml_map.png' />
# + [markdown] id="xuM06t1jZSuj"
# ## Modelo LinearSVC
# + executionInfo={"elapsed": 7, "status": "ok", "timestamp": 1640266018957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="xsB76RD4ZSuk"
from sklearn.svm import LinearSVC
# + executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1640266018957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="amn03s71ZSuk"
mod_lin_svc_1 = LinearSVC()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 21659, "status": "ok", "timestamp": 1640266040612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="yYZOyxhqZSul" outputId="4f3a6945-91fb-43eb-85d7-cc527c89284a"
# %%time
mod_lin_svc_1.fit(X_train, y_train)
# + [markdown] id="qoMGtmh_ZSul"
# ## Teste do modelo 1:
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1640266040612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="UNttgD8oZSun" outputId="07510018-b176-48bb-8440-33364b2f122d"
mod_lin_svc_1.score(X_test, y_test)
# + executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1640266040613, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="UikakWJ9ZSun"
y_pred = mod_lin_svc_1.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1640266040613, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="fALm5nkGZSun" outputId="9762fe88-618f-42ec-efd4-bda82211e93e"
y_pred.shape
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1640266040614, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="G2CQ10pxZSuo"
from sklearn.metrics import plot_confusion_matrix
# + colab={"base_uri": "https://localhost:8080/", "height": 350} executionInfo={"elapsed": 703, "status": "ok", "timestamp": 1640266041310, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="ssfaJ5G1ZSuo" outputId="6e7aaae5-5201-4523-8ba7-10bf6483f38e"
plot_confusion_matrix(mod_lin_svc_1, X_test, y_test)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 342, "status": "ok", "timestamp": 1640266041645, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="K6rMUyrcZSuo" outputId="7bc2c391-a219-4bd1-c173-5b0e42939887"
from collections import Counter
Counter(y_test)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1640266041645, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="16DdlIkCZSup" outputId="a9f8c941-cff7-43c1-ca02-3dd69ef7152f"
Counter(y_train)
# + [markdown] id="0oF1Fag3ZSuq"
# Apresentando uma acurácia de aproximadamente 55%, o primeiro modelo criado com o algoritmo LinearSVC já apresentou resultado bem melhor do que os modelos anteriores treinados com dados com dimensionalidade reduzida.
# + [markdown] id="Ej3pVDdjZSuq"
# ## Grid search para encontrar melhores parâmetros para o LinearSVC:
# + executionInfo={"elapsed": 341, "status": "ok", "timestamp": 1640266127645, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="poiHpfj-ZSuq"
from sklearn.model_selection import GridSearchCV
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 294, "status": "ok", "timestamp": 1640266567140, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="ekg9AcqPZSuq" outputId="22a8ec1c-7ffd-49e2-c1c8-ba090812556e"
parameters = {'multi_class': ['ovr', 'crammer_singer'], 'tol':[1e-4, 1e-5]}
parameters
# + executionInfo={"elapsed": 253, "status": "ok", "timestamp": 1640266577337, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="lByIDm-gZSur"
LinSVC = LinearSVC()
# + executionInfo={"elapsed": 278, "status": "ok", "timestamp": 1640266588437, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "04933481191968908438"}, "user_tz": 180} id="RkB6iSNsZSur"
clf = GridSearchCV(LinSVC, parameters, scoring='accuracy')
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="D3zLj7FrZSur"
clf.fit(X_train, y_train)
# + id="dR9yI54QZSur"
clf.classes_
# + id="tbrjLrQvZSur"
import pandas as pd
# + id="tapsYw6tZSus"
resultados = pd.DataFrame.from_dict(clf.cv_results_)
resultados
# + id="sQliBNNvZSus"
clf.best_score_
# + id="bGOKh-N3ZSus"
clf.best_estimator_
# + id="iw6xXkQnZSus"
clf.best_params_
# + id="WYWZsYAZZSut"
y_pred_2 = clf.predict(X_test)
# + id="Dkr2ZFQMZSut"
y_pred_2.shape
# + id="9QenGVQgZSuu"
Counter(y_pred_2)
# + id="dynicuX0ZSuu"
plot_confusion_matrix(clf.best_estimator_, X_test, y_test)
# + [markdown] id="SLKonxrFZSuv"
# ## Modelo Naive-Bayes
# + id="N7Wls_Z6ZSuv"
from sklearn.naive_bayes import ComplementNB
# + id="dYhZx59dZSuv"
mod_NB = ComplementNB()
# + id="WO06DJSXZSuw"
# O Naive-Bayes supõe que os valores de entrada são não-negativos. Para contornar esta exigência:
#from sklearn.preprocessing import MinMaxScaler
#scaler = MinMaxScaler()
#X_train = scaler.fit_transform(X_train_pca)
#X_test = scaler.fit_transform(X_test_pca)
# + id="wBj6LZCkZSuw"
mod_NB.fit(X_train, y_train)
# + id="p3msRPkbZSuw"
y_pred_3 = mod_NB.predict(X_test)
# + id="r7lu8Sp7ZSuw"
Counter(y_pred_3)
# + id="_VwhbQDvZSux"
plot_confusion_matrix(mod_NB, X_test, y_test)
# + id="0AjX3IiwIDzE"
mod_NB.score(X_test, y_test)
# + [markdown] id="wc5VcbOZZSux"
# O resultado do modelo Naive-Bayes mostrou-se com desempenho pior do que o modelo LinearSVC. O grid search não foi concluído com sucesso devido ao tempo de execução. Serão testados mais modelos na sequência.
# + id="Lz5P1HsmJCew"
| 06-Treinamento_modelos_ML-3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
# Load the SQuAD v2 dev set and collect every paragraph's context passage.
# Bug fix: use a context manager instead of leaking the open file handle.
with open("dev-v2.0.json") as f:
    d = json.load(f)
contexts = [paragraph["context"]
            for article in d["data"]
            for paragraph in article["paragraphs"]]
len(contexts)
print(contexts[0])
# Stop-word list: common English function words (plus the empty string);
# synonym_replacement below never substitutes these.
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our',
              'ours', 'ourselves', 'you', 'your', 'yours',
              'yourself', 'yourselves', 'he', 'him', 'his',
              'himself', 'she', 'her', 'hers', 'herself',
              'it', 'its', 'itself', 'they', 'them', 'their',
              'theirs', 'themselves', 'what', 'which', 'who',
              'whom', 'this', 'that', 'these', 'those', 'am',
              'is', 'are', 'was', 'were', 'be', 'been', 'being',
              'have', 'has', 'had', 'having', 'do', 'does', 'did',
              'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or',
              'because', 'as', 'until', 'while', 'of', 'at',
              'by', 'for', 'with', 'about', 'against', 'between',
              'into', 'through', 'during', 'before', 'after',
              'above', 'below', 'to', 'from', 'up', 'down', 'in',
              'out', 'on', 'off', 'over', 'under', 'again',
              'further', 'then', 'once', 'here', 'there', 'when',
              'where', 'why', 'how', 'all', 'any', 'both', 'each',
              'few', 'more', 'most', 'other', 'some', 'such', 'no',
              'nor', 'not', 'only', 'own', 'same', 'so', 'than', 'too',
              'very', 's', 't', 'can', 'will', 'just', 'don',
              'should', 'now', '']
import nltk
from nltk.corpus import wordnet
# One-time download of the WordNet corpus used by get_synonyms.
nltk.download('wordnet')
def synonym_replacement(words, n):
    """Replace up to *n* randomly chosen non-stop-words with a WordNet synonym.

    Parameters
    ----------
    words : list[str]
        Tokenised sentence; the input list is not modified.
    n : int
        Maximum number of distinct words to replace.

    Returns
    -------
    list[str]
        A new token list. Multi-word synonyms are re-split into separate
        tokens, so the result may be longer than the input.

    Relies on the module-level ``stop_words`` list and ``get_synonyms``.
    """
    # Bug fix: the module-level `import random` only happens later in this
    # notebook, so import locally to make the function self-sufficient.
    import random

    new_words = words.copy()
    # Candidate words: unique non-stop-words, visited in random order.
    candidates = list(set(word for word in words if word not in stop_words))
    random.shuffle(candidates)
    num_replaced = 0
    for candidate in candidates:
        synonyms = get_synonyms(candidate)
        if len(synonyms) >= 1:
            synonym = random.choice(list(synonyms))
            # Replace every occurrence of the chosen word.
            new_words = [synonym if word == candidate else word for word in new_words]
            num_replaced += 1
        if num_replaced >= n:  # only replace up to n words
            break
    # Re-join and re-split so multi-word synonyms become separate tokens.
    return ' '.join(new_words).split(' ')
def get_synonyms(word):
    """Return WordNet synonyms of *word*, lower-cased with only ascii letters/spaces.

    Underscores and hyphens in lemma names become spaces; *word* itself is
    excluded from the result.
    """
    allowed_chars = ' qwertyuiopasdfghjklzxcvbnm'
    found = set()
    for synset in wordnet.synsets(word):
        for lemma in synset.lemmas():
            cleaned = lemma.name().replace("_", " ").replace("-", " ").lower()
            found.add("".join(ch for ch in cleaned if ch in allowed_chars))
    found.discard(word)
    return list(found)
get_synonyms("descended")
import re
def get_only_chars(line):
    """Normalise *line* to lower-case ascii letters separated by single spaces.

    Apostrophes are deleted; hyphens, tabs and newlines become spaces; every
    other non-letter character is replaced by a space; runs of spaces are
    collapsed and a leading space is removed.

    Bug fix: the original indexed ``clean_line[0]`` and raised IndexError when
    the cleaned string was empty. ``lstrip(' ')`` is safe and equivalent,
    since at most one leading space can survive the collapsing step.
    """
    line = line.replace("’", "")
    line = line.replace("'", "")
    line = line.replace("-", " ")  # replace hyphens with spaces
    line = line.replace("\t", " ")
    line = line.replace("\n", " ")
    line = line.lower()
    clean_line = "".join(ch if ch in 'qwertyuiopasdfghjklzxcvbnm ' else ' '
                         for ch in line)
    clean_line = re.sub(' +', ' ', clean_line)  # delete extra spaces
    return clean_line.lstrip(' ')
get_only_chars(contexts[0])
contexts[0]
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from random import randint
import nltk.data
# Scratch exploration: tokenise the first context and POS-tag it.
# (`text` is an example sentence that is not used further below.)
text = "Pete ate a large cake. Sam has a big mouth."
words = word_tokenize(contexts[0])
words
tagged = nltk.pos_tag(words)
tagged
words[4]
# +
import string
# Check which tokens start with punctuation (these will be kept verbatim later).
for word in words:
    if(word[0] in string.punctuation):
        print(word)
    else:
        print("non :- ", word)
# -
get_synonyms("agreed")
words
' '.join(words)
# The Treebank detokenizer reverses word_tokenize more faithfully than ' '.join.
from nltk.tokenize.treebank import TreebankWordDetokenizer
TreebankWordDetokenizer().detokenize(words)
# for word in words:
import random
# random.choice over a list of five 0s and five 1s == a fair coin flip.
print(random.choice([0,0,0,0,0,1,1,1,1,1]))
# +
# Prototype of the synonym-swap loop, applied to the first context's tokens.
new_words = []
for i, word in enumerate(words):
    # Never replace proper nouns (NNP) or determiners (DT).
    if(tagged[i][1] == 'NNP' or tagged[i][1] == 'DT'):
        new_words.append(word)
        continue;
    # Leave punctuation tokens untouched.
    if(word[0] in string.punctuation):
        new_words.append(word)
        continue
    # Fair coin flip decides whether this token is considered for replacement.
    if(random.choice([0,0,0,0,0,1,1,1,1,1]) == 0):
        ## do not replace
        new_words.append(word)
    else:
        print(word)
        syns = get_synonyms(word)
        # NOTE(review): when no synonym is found the token is dropped
        # entirely — presumably unintended; confirm before reusing this loop.
        if(len(syns)>=1):
            new_words.append(random.choice(syns))
        ## replace word with synonymn
# -
print(new_words)
contexts[0]
TreebankWordDetokenizer().detokenize(new_words)
contexts[1]
# Second prototype: the same replacement loop applied to contexts[1].
# (Duplicated logic; wrapped up as get_augmented_replaced further below.)
tokenized_words = word_tokenize(contexts[1])
tagged = nltk.pos_tag(tokenized_words)
# +
new_words = []
for i, word in enumerate(tokenized_words):
    # Keep proper nouns (NNP) and determiners (DT) unchanged.
    if(tagged[i][1] == 'NNP' or tagged[i][1] == 'DT'):
        new_words.append(word)
        continue;
    # Keep punctuation tokens unchanged.
    if(word[0] in string.punctuation):
        new_words.append(word)
        continue
    # Fair coin flip: half the eligible tokens are left as-is.
    if(random.choice([0,0,0,0,0,1,1,1,1,1]) == 0):
        ## do not replace
        new_words.append(word)
    else:
        print(word)
        syns = get_synonyms(word)
        if(len(syns)>=1):
            new_words.append(random.choice(syns))
# -
TreebankWordDetokenizer().detokenize(new_words)
def get_augmented_replaced(sentence):
    """Return *sentence* with roughly half of its eligible tokens swapped for a
    random WordNet synonym.

    Proper nouns (NNP), determiners (DT) and punctuation are never touched.
    NOTE: a token selected for replacement that has no synonym is dropped
    from the output (original behaviour, preserved here).
    """
    tokens = word_tokenize(sentence)
    tags = nltk.pos_tag(tokens)
    result = []
    for idx, token in enumerate(tokens):
        pos = tags[idx][1]
        if pos == 'NNP' or pos == 'DT':
            result.append(token)
            continue
        if token[0] in string.punctuation:
            result.append(token)
            continue
        # Fair coin flip: half the eligible tokens are kept as-is.
        if random.choice([0,0,0,0,0,1,1,1,1,1]) == 0:
            result.append(token)
            continue
        synonyms = get_synonyms(token)
        if len(synonyms) >= 1:
            result.append(random.choice(synonyms))
    return TreebankWordDetokenizer().detokenize(result)
get_augmented_replaced(contexts[2])
contexts[2]
# Augment the whole dataset in place: every paragraph context is rewritten
# with synonym substitutions, then the modified SQuAD dict is saved to disk.
for x in d["data"]:
    for y in x["paragraphs"]:
        y["context"] = get_augmented_replaced(y["context"])
d["data"][0]["paragraphs"][0]["context"]
with open('result.json', 'w') as fp:
    json.dump(d, fp)
| Optimizing QA models/Data_Augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Getting started with Jupyter notebooks
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Interactively
# - [CSC notebook environment](https://notebooks.csc.fi/) (*recommended*)
# - [JuliaBox](https://www.juliabox.com) (Amazon web server reserved for Julia teaching)
# - [notebooks.csc.fi](https://notebooks.csc.fi/) with HAKA-account.
#
# ## Locally
# - `IJulia` [repository](https://github.com/JuliaLang/IJulia.jl)
# - via `Pkg` as `using Pkg` and `Pkg.add("IJulia")`
# - `using IJulia` and opening a new notebook in your browser `notebook()`
# + [markdown] slideshow={"slide_type": "slide"}
# ## Running a cell
#
# To execute code within a cell, select that cell and either (1) hit `Shift` and `Enter` or (2) hit the run button (the right pointing arrow) above.
# + slideshow={"slide_type": "fragment"}
1 + 1
2 + 2
# + [markdown] slideshow={"slide_type": "fragment"}
# Note that only the last line of a cell prints by default. It is possible to suppress this output with a semicolon.
# + slideshow={"slide_type": "fragment"}
1 + 1
2 + 2;
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to get docs for Julia functions
#
# To get docs for a function, precede it with a question mark. This works in the REPL too.
# + slideshow={"slide_type": "fragment"}
?println
# + [markdown] slideshow={"slide_type": "slide"}
# ## How to use shell commands
#
# Type `;` and then you can use (UNIX) shell commands.
# + slideshow={"slide_type": "fragment"}
;ls
# + slideshow={"slide_type": "fragment"}
;pwd
# -
| notebooks/00_notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import json
import numpy as np
import seaborn as sns
# %matplotlib inline
beautified = json.load(open("../Data/bostonResultsAll/EverythingUnlockedBoston.json",'rb'))
len(beautified.keys())
def shannonEntropy(vals):
    """Shannon entropy (natural log) of a probability distribution given as an iterable."""
    probs = np.asarray(vals)
    return -np.sum(probs * np.log(probs))
beautified[beautified.keys()[1]]['']
bostonScores = json.load(open("bostonScores.json",'rb'))
bostonScores[bostonScores.keys()[1]]
# +
# Gather the SegNet 'Pavement' fraction for every image that reports one.
pavementValues = [bostonScores[key]['segnet']['Pavement']
                  for key in bostonScores
                  if 'Pavement' in bostonScores[key]['segnet']]
# -
sns.distplot(pavementValues)
# Derive a 1-5 walkability score from each image's SegNet pavement fraction
# and fold it into the taxonomy 'Walkable' score (taking the larger of the two).
for k in bostonScores:
    segnet = bostonScores[k]['segnet']
    pavement_score = 0
    if 'Pavement' in segnet:
        frac = segnet['Pavement']
        if frac <= 0.03:
            pavement_score = 1
        elif frac <= 0.06:
            pavement_score = 2
        elif frac <= 0.09:
            pavement_score = 3
        elif frac <= 0.12:
            pavement_score = 4
        else:
            pavement_score = 5
    tnomy = bostonScores[k]['TnomyScores']
    tnomy['Walkable'] = max(tnomy['Walkable'], pavement_score)
# Score visual complexity 1-5 by bucketing the Shannon entropy of each
# image's SegNet class distribution; keep all entropies for plotting below.
entropyList = []
for k in bostonScores:
    entropy = shannonEntropy(bostonScores[k]['segnet'].values())
    entropyList.append(entropy)
    complexity = 0  # stays 0 if entropy is NaN (no bucket matches)
    if entropy <= 1.25:
        complexity = 1
    elif entropy <= 1.5:
        complexity = 2
    elif entropy <= 1.65:
        complexity = 3
    elif entropy <= 1.80:
        complexity = 4
    elif entropy > 1.80:
        complexity = 5
    bostonScores[k]['VisualComplexity'] = complexity
sns.distplot(entropyList)
bostonScores.keys()[10]
bostonScores[bostonScores.keys()[10]]
bostonImages = json.load(open("bostonImages.json",'rb'))
bostonImages[bostonImages.keys()[1]]
len(bostonImages.keys())
# Collate location, TrueSkill rating, metric scores and beautification info
# into a single record per image id.
finalDict = {}
for k in bostonImages:
    img = bostonImages[k]
    entry = {}
    entry['location'] = {'lat': img['lat'], 'long': img['long']}
    entry['trueSkill'] = img['rating']
    # Metrics only exist for images that were scored; otherwise empty.
    entry['metrics'] = bostonScores[k] if k in bostonScores else {}
    if k in beautified:
        entry['beautified'] = {'flag': True,
                               'XformedKeys': beautified[k]['Top5Keys']}
    else:
        entry['beautified'] = {'flag': False,
                               'XformedKeys': []}
    finalDict[k] = entry
finalDict[finalDict.keys()[15]]
len(finalDict.keys())
# +
# json.dump(finalDict,open("BostonCollatedV4.json",'wb'))
# -
| notebooks/CollateData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# NBVAL_IGNORE_OUTPUT
import random
import io
import csv
import numpy as np
import matplotlib.pyplot as plt
from clkhash.field_formats import *
from clkhash.schema import Schema
from clkhash.comparators import NgramComparison, ExactComparison, NumericComparison
from clkhash.clk import generate_clk_from_csv
# -
# # Explanation of the different comparison techniques
#
# The clkhash library is based on the concept of a CLK. This is a special type of Bloom filter, and a Bloom filter is a probabilistic data structure that allow space-efficient testing of set membership.
# By first tokenising a record and then inserting those tokens into a CLK, the comparison of CLKs approximates the comparisons of the sets of tokens of the CLKs.
#
# The challenge lies in finding good tokenisation strategies, as they define what is considered similar and what is not. We call these tokenisation strategies *comparison techniques*.
#
# With Schema v3, we currently support three different comparison techniques:
#
# - ngram comparison
# - exact comparison
# - numeric comparison
#
# In this notebook we describe how these techniques can be used and what type of data they are best suited.
# ## n-gram Comparison
# *n-grams* are a popular technique for [approximate string matching](https://en.wikipedia.org/wiki/N-gram#n-grams_for_approximate_matching).
#
# An *n-gram* is a n-tuple of characters which follow one another in a given string.
# For example, the 2-grams of the string ‘clkhash’ are ‘ c’, ‘cl’, ‘lk’, ‘kh’, ‘ha’, ‘as’, ‘sh’, ‘h ‘. Note the white- space in the first and last token. They serve the purpose to a) indicate the beginning and end of a word, and b) gives every character in the input text a representation in two tokens.
#
# The number of *n-grams* in common defines a similiarity measure for comparing strings.
# The strings ‘clkhash’ and ‘clkhush’ have 6 out of 8 2-grams in common, whereas 'clkhash' and 'anonlink' have none out of 9 in common.
#
# A positional n-gram also encodes the position of the n-gram within the word. The positional 2-grams of ‘clkhash’ are ‘1 c’, ‘2 cl’, ‘3 lk’, ‘4 kh’, ‘5 ha’, ‘6 as’, ‘7 sh’, ‘8 h ‘. Positional n-grams can be useful for comparing words where the position of the characters are important, e.g., postcodes or phone numbers.
#
# *n-gram* comparison of strings is tolerant to spelling mistakes, as one wrong character will only affect *n* *n-grams*. Thus, the larger you choose ‘n’, the more the error propagates.
# ## Exact Comparison
#
# The exact comparison technique creates high similarity scores if inputs are identical, and low otherwise. This can be useful when comparing data like credit card numbers or email addresses. It is a good choice whenever data is either an exact match or has no similarity at all.
# The main advantage of the *Exact Comparison* technique is that it better separates the similarity scores of the matches from the non-matches (but cannot account for errors).
#
# We will show this with the following experiment. First, we create a dataset consisting of random 6-digit numbers. Then we compare the dataset with itself, once encoded with the *Exact Comparison*, and twice encoded with the *Ngram Comparison* (uni- and bi-grams) technique.
# Build 1000 records of (row id, unique random number below 1,000,000) and
# serialise them to an in-memory CSV.  random.sample guarantees uniqueness.
data = [[i, x] for i, x in enumerate(random.sample(range(1000000), k=1000))]
a_csv = io.StringIO()
csv.writer(a_csv).writerows(data)
# We define three different schemas, one for each comparison technique.
# +
# Three linkage schemas over the same two-column data, differing only in the
# comparison technique for the 'random' integer field.  Each field receives
# 300 bits within a 512-bit CLK.
unigram_fields = [
    Ignore('rec_id'),
    IntegerSpec('random', FieldHashingProperties(comparator=NgramComparison(1, True), strategy=BitsPerFeatureStrategy(300))),
]
unigram_schema = Schema(unigram_fields, 512)

bigram_fields = [
    Ignore('rec_id'),
    IntegerSpec('random', FieldHashingProperties(comparator=NgramComparison(2, True), strategy=BitsPerFeatureStrategy(300))),
]
bigram_schema = Schema(bigram_fields, 512)

exact_fields = [
    Ignore('rec_id'),
    IntegerSpec('random', FieldHashingProperties(comparator=ExactComparison(), strategy=BitsPerFeatureStrategy(300))),
]
exact_schema = Schema(exact_fields, 512)

# NOTE(review): a hard-coded secret is fine for a tutorial; real linkage
# deployments must use a securely shared secret.
secret_key = 'password1234'
# +
from bitarray import bitarray
import base64
import anonlink
def grouped_sim_scores_from_clks(clks_a, clks_b, threshold):
    """Return the pairwise similarity scores for the provided CLKs, split
    into (matches, non_matches) by whether the two record ids coincide.
    """
    candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
        [clks_a, clks_b],
        anonlink.similarities.dice_coefficient,
        threshold
    )
    sims, _dataset_indices, (ids_left, ids_right) = candidate_pairs
    matches = [s for s, a, b in zip(sims, ids_left, ids_right) if a == b]
    non_matches = [s for s, a, b in zip(sims, ids_left, ids_right) if a != b]
    return matches, non_matches
# -
# generate the CLKs according to the three different schemas.
# Encode the dataset once per schema.  The StringIO cursor must be rewound
# before each pass because generate_clk_from_csv consumes the stream.
a_csv.seek(0)
clks_a_unigram = generate_clk_from_csv(a_csv, secret_key, unigram_schema, header=False)
a_csv.seek(0)
clks_a_bigram = generate_clk_from_csv(a_csv, secret_key, bigram_schema, header=False)
a_csv.seek(0)
clks_a_exact = generate_clk_from_csv(a_csv, secret_key, exact_schema, header=False)
# We do an exhaustive pairwise comparison for the CLKs and group the similarity scores into 'matches' - the similarity scores for the correct linkage - and non-matches.
# Self-comparison with threshold 0.0 yields every pairwise similarity score.
sims_matches_unigram, sims_non_matches_unigram = grouped_sim_scores_from_clks(clks_a_unigram, clks_a_unigram, 0.0)
sims_matches_bigram, sims_non_matches_bigram = grouped_sim_scores_from_clks(clks_a_bigram, clks_a_bigram, 0.0)
sims_matches_exact, sims_non_matches_exact = grouped_sim_scores_from_clks(clks_a_exact, clks_a_exact, 0.0)
# We will plot the similarity scores as histograms. Note the log scale of the y-axis.
# +
# NBVAL_IGNORE_OUTPUT
import matplotlib.pyplot as plt
# Histogram the similarity scores, matches vs non-matches, per technique.
# NOTE(review): the 'seaborn-deep' style alias was renamed in matplotlib 3.6
# ('seaborn-v0_8-deep'); kept as-is for older environments.
plt.style.use('seaborn-deep')
plt.hist([sims_matches_unigram, sims_non_matches_unigram], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
# 'nonposy' was renamed to 'nonpositive' in matplotlib 3.3 and later removed.
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('uni-gram comparison')
plt.show()
plt.hist([sims_matches_bigram, sims_non_matches_bigram], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('bi-gram comparison')
plt.show()
plt.hist([sims_matches_exact, sims_non_matches_exact], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('exact comparison')
plt.show()
# -
# The true matches all lie on the vertical line above the 1.0. We can see that the *Exact Comparison* technique significantly widens the gap between matches and non-matches.
# Thus increases the range of available solving thresholds (only similarity scores above are considered a potential match) which provide the correct linkage result.
# ## Numeric Comparison
# This technique enables numerical comparisons of integers and floating point numbers.
#
# Comparing numbers creates an interesting challenge. The comparison of 1000 with 1001 should lead to the same result as the comparison of 1000 and 999. They are both exactly 1 apart. However, string-based techniques like n-gram comparison will produce very different results, as the first pair has three digits in common, compared to none in the last pair.
#
# We have implemented a technique, where the numerical distance between two numbers relates to the similarity of the produced tokens.
#
# We generate a dataset with one column of random 6-digit integers, and a second dataset where we alter the integers of the first dataset by +/- 100.
# Dataset A: 1000 random integers below 1,000,000 (duplicates possible).
data_A = [[i, random.randrange(1000000)] for i in range(1000)]
# Dataset B: the same records, each perturbed by a uniform offset in [-100, 100].
data_B = [[i, x + random.randint(-100,100)] for i,x in data_A]
a_csv = io.StringIO()
b_csv = io.StringIO()
csv.writer(a_csv).writerows(data_A)
csv.writer(b_csv).writerows(data_B)
# We define two linkage schemas, one for positional uni-gram comparison and one for numeric comparison.
#
# The parameter *resolution* controls how many different tokens are generated. Clkhash will produce *2 * resolution + 1* tokens (*resolution* tokens on either side of the input value plus the input value itself).\
# And *threshold_distance* controls the sensitivity of the comparison. Only numbers that are not more than *threshold_distance* apart will produce overlapping tokens.
# +
# Schemas for the numeric-comparison experiment: positional uni-gram,
# positional bi-gram, and numeric comparison of the 'random' field.
unigram_fields = [
    Ignore('rec_id'),
    IntegerSpec('random',
                FieldHashingProperties(comparator=NgramComparison(1, True),
                                       strategy=BitsPerFeatureStrategy(301))),
]
unigram_schema = Schema(unigram_fields, 512)

bigram_fields = [
    Ignore('rec_id'),
    IntegerSpec('random',
                FieldHashingProperties(comparator=NgramComparison(2, True),
                                       strategy=BitsPerFeatureStrategy(301))),
]
# BUG FIX: this schema was previously built from `unigram_fields`, so every
# "bi-gram" result below actually re-ran the uni-gram comparison.
bigram_schema = Schema(bigram_fields, 512)

numeric_fields = [
    Ignore('rec_id'),
    IntegerSpec('random',
                FieldHashingProperties(comparator=NumericComparison(threshold_distance=500, resolution=150),
                                       strategy=BitsPerFeatureStrategy(301))),
]
numeric_schema = Schema(numeric_fields, 512)

secret_key = 'password1234'
# -
# Encode both datasets under each schema, rewinding each CSV buffer first
# because generate_clk_from_csv consumes the stream.
a_csv.seek(0)
clks_a_unigram = generate_clk_from_csv(a_csv, secret_key, unigram_schema, header=False)
b_csv.seek(0)
clks_b_unigram = generate_clk_from_csv(b_csv, secret_key, unigram_schema, header=False)
a_csv.seek(0)
clks_a_bigram = generate_clk_from_csv(a_csv, secret_key, bigram_schema, header=False)
b_csv.seek(0)
clks_b_bigram = generate_clk_from_csv(b_csv, secret_key, bigram_schema, header=False)
a_csv.seek(0)
clks_a_numeric = generate_clk_from_csv(a_csv, secret_key, numeric_schema, header=False)
b_csv.seek(0)
clks_b_numeric = generate_clk_from_csv(b_csv, secret_key, numeric_schema, header=False)
# First, we will look at the similarity score distributions. We will group the similiarity scores into *matches* - the similarity scores for the correct linkage - and *non-matches*.
sims_matches_unigram, sims_non_matches_unigram = grouped_sim_scores_from_clks(clks_a_unigram, clks_b_unigram, 0.0)
sims_matches_bigram, sims_non_matches_bigram = grouped_sim_scores_from_clks(clks_a_bigram, clks_b_bigram, 0.0)
sims_matches_numeric, sims_non_matches_numeric = grouped_sim_scores_from_clks(clks_a_numeric, clks_b_numeric, 0.0)
# +
# NBVAL_IGNORE_OUTPUT
# Histogram the similarity scores, matches vs non-matches, per technique.
plt.style.use('seaborn-deep')
plt.hist([sims_matches_unigram, sims_non_matches_unigram], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
# 'nonposy' was renamed to 'nonpositive' in matplotlib 3.3 and later removed.
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('uni-gram comparison')
plt.show()
plt.hist([sims_matches_bigram, sims_non_matches_bigram], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('bi-gram comparison')
plt.show()
plt.hist([sims_matches_numeric, sims_non_matches_numeric], bins=50, label=['matches', 'non-matches'])
plt.legend(loc='upper right')
plt.yscale('log', nonpositive='clip')
plt.xlabel('similarity score')
plt.title('numeric comparison')
plt.show()
# -
# The distribution for the numeric comparison is very different to the uni/bi-gram one. The similarity scores of the matches (the correct linkage) in the n-gram case are mixed-in with the scores of the non-matches, making it challenging for a solver to decide if a similarity score denotes a match or a non-match.
#
# The numeric comparison produces similarity scores for matches that mirrors the distribution of the numeric distances. More importanty, there is a good separation between the scores for the matches and the ones for the non-matches. The former are all above 0.8, whereas the latter are almost all (note the log scale) below 0.6.
#
# In the next step, we will see how well the solver can find a linkage solution for the different CLKs.
# +
def mapping_from_clks(clks_a, clks_b, threshold):
    """Link clks_a against clks_b with anonlink's greedy solver.

    Returns a set of (index_a, index_b) tuples describing the one-to-one
    matching among candidate pairs scoring at least `threshold`.
    """
    candidate_pairs = anonlink.candidate_generation.find_candidate_pairs(
        [clks_a, clks_b],
        anonlink.similarities.dice_coefficient,
        threshold
    )
    matched_groups = anonlink.solving.greedy_solve(candidate_pairs)
    return {(left, right) for ((_, left), (_, right)) in matched_groups}
# Ground-truth linkage: record i in A matches record i in B.
true_matches = {(i, i) for i in range(1000)}

def describe_matching_quality(found_matches):
    """Compute and print precision and recall of `found_matches` against the
    module-level ground truth `true_matches`.
    """
    n_hit = len(found_matches & true_matches)
    n_spurious = len(found_matches - true_matches)
    n_missed = len(true_matches - found_matches)
    precision = n_hit / (n_hit + n_spurious)
    recall = n_hit / (n_hit + n_missed)
    print('Precision: {:.3f}, Recall: {:.3f}'.format(precision, recall))
# -
# Precision/recall of the greedy linkage at several thresholds.  Numeric
# comparison is accurate and stable across thresholds; the unigram results
# vary with the threshold choice.
print('results for numeric comparisons')
print('threshold 0.6:')
describe_matching_quality(mapping_from_clks(clks_a_numeric, clks_b_numeric, 0.6))
print('threshold 0.7:')
describe_matching_quality(mapping_from_clks(clks_a_numeric, clks_b_numeric, 0.7))
print('threshold 0.8:')
describe_matching_quality(mapping_from_clks(clks_a_numeric, clks_b_numeric, 0.8))
print('results for unigram comparisons')
print('threshold 0.6:')
describe_matching_quality(mapping_from_clks(clks_a_unigram, clks_b_unigram, 0.6))
print('threshold 0.7:')
describe_matching_quality(mapping_from_clks(clks_a_unigram, clks_b_unigram, 0.7))
print('threshold 0.8:')
describe_matching_quality(mapping_from_clks(clks_a_unigram, clks_b_unigram, 0.8))
# As expected, we can see that the solver does a lot better when given the CLKs generated with the numeric comparison technique.
#
# The other thing that stands out is that the results in with the numeric comparison are stable over a wider range of thresholds, in contrast to the unigram comparison, where different thresholds produce different results, thus making it more challenging to find a good threshold.
#
# ### Conclusions
#
# The overall quality of the linkage result is heavily influenced by the right choice of comparison technique for each individual feature.
# In summary:
# - *n-gram comparison* is best suited for fuzzy string matching. It can account for localised errors like spelling mistakes.
# - *exact comparison* produces high similarity only for exact matches, low otherwise. This can be useful if the data is noise-free and partial similarities are not relevant. For instance credit card numbers: even if they differ in only one digit, they describe different accounts and are thus just as different as numbers which don't have any digits in common.
# - *numeric comparison* provides a measure of similiarity that relates to the numerical distance of two numbers. Example use-cases are measurements like height or weight, continuous variables like salary.
# + pycharm={"name": "#%%\n"}
| docs/tutorial_comparisons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mandelbrot set visualization
#
# A vanilla python/numpy implementation of a Mandelbrot set visualizer.
#
# The code has been liberally inspired by [this implementation from Jean-François Puget](https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift?lang=en).
#
# Follow the [above link](https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift?lang=en) also for more information on the Mandelbrot set and the colorization procedure.
# +
import warnings
import numpy as np
from matplotlib import colors, patches, pyplot as plot
# %matplotlib inline
# -
# Let's define a procedure to sample a region of the complex plane:
# +
pois = { # points of interest
'full': (-2.0, 0.5, -1.25, 1.25),
'valley': (-0.8, -0.7, 0.0, 0.1),
'sea_horses': (-0.755, -0.745, 0.06, 0.07),
'sea_horse': (-0.75, -0.747, 0.063, 0.066),
'sea_horse_tail': (-0.749, -0.748, 0.065, 0.066),
'black_dot': (-0.74877,-0.74872,0.06505,0.06510)}
def complex_grid(xmin, xmax, ymin, ymax, res=1024):
    """Sample the rectangle [xmin, xmax] x [ymin, ymax] of the complex plane
    on a res-by-res grid and return it as a complex array.
    """
    real_axis = np.linspace(xmin, xmax, num=res)
    imag_axis = np.linspace(ymin, ymax, num=res)
    real, imag = np.meshgrid(real_axis, imag_axis)
    return real + 1j * imag
# -
# Here's the most crude, synthetic and straightforward implementation:
# +
def mandelbrot_boolean(coords, max_iterations=256):
    """Return a float mask over `coords`: 1.0 where z <- z**2 + c stays
    bounded (|z| < 2) after `max_iterations` iterations, 0.0 otherwise.

    Diverged points overflow harmlessly to inf/nan along the way, which is
    why the caller suppresses warnings around this function.
    """
    values = coords.copy()
    for _ in range(max_iterations):
        values = values ** 2 + coords
    # `np.float` was removed in NumPy 1.24; the builtin float is identical.
    return (abs(values) < 2).astype(float)
def show(mandelbrot_function, name):
    """Render the region of interest `pois[name]` with the given
    mandelbrot function, labelling the axes with plane coordinates.
    """
    plot.figure(figsize=(14, 14))
    xmin, xmax, ymin, ymax = pois[name]
    image = mandelbrot_function(complex_grid(xmin, xmax, ymin, ymax))
    plot.imshow(image, cmap='gnuplot2', norm=colors.PowerNorm(0.3))
    plot.xticks(np.linspace(0, image.shape[0], num=9),
                np.round(np.linspace(xmin, xmax, num=9), 4))
    plot.yticks(np.linspace(0, image.shape[1], num=9),
                np.round(np.linspace(ymin, ymax, num=9), 4))
    plot.show()
warnings.filterwarnings('ignore') # ignore warnings about overflows
show(mandelbrot_boolean, 'full')
warnings.filterwarnings('default')
# -
# Let's keep track and visualize the number of iterations required to diverge:
# +
def mandelbrot_discrete(coords, max_iterations=256):
    """Return, for each point of `coords`, the iteration index at which
    |z| first exceeded 2 under z <- z**2 + c (0.0 if it never diverged).

    Note: 0 doubles as the "unassigned" sentinel, so a point whose modulus
    exceeds 2 on iteration 0 is iterated once more and recorded at 1.
    """
    values = coords.copy()
    # `np.float` was removed in NumPy 1.24; the builtin float is identical.
    iteration_count = np.zeros_like(values, float)
    for iteration in range(max_iterations):
        nya = iteration_count == 0  # not yet assigned
        values[nya] = values[nya] ** 2 + coords[nya]
        abs_values = np.abs(values)
        jd = np.logical_and(nya, abs_values > 2)  # just diverged
        iteration_count[jd] = iteration
    return iteration_count
show(mandelbrot_discrete, 'full')
# -
# To avoid banding effects, we take into account not just the iteration number but the value as well.
#
# See [this implementation by <NAME>](https://www.ibm.com/developerworks/community/blogs/jfp/entry/My_Christmas_Gift?lang=en) for an explanation of the technique.
# +
def mandelbrot_continuous(coords, max_iterations=256):
    """Smooth (continuous) escape time for each point of `coords`.

    Uses a large escape radius (`horizon`) plus a log-log correction so
    that the returned counts vary continuously, avoiding the banding of
    the discrete version.  Points that never diverge keep 0.0.
    """
    horizon = 2.0 ** 40
    log_horizon = np.log(np.log(horizon)) / np.log(2)
    values = coords.copy()
    # `np.float` was removed in NumPy 1.24; the builtin float is identical.
    iteration_count = np.zeros_like(values, float)
    for iteration in range(max_iterations):
        nya = iteration_count == 0  # not yet assigned
        values[nya] = values[nya] ** 2 + coords[nya]
        abs_values = np.abs(values)
        jd = np.logical_and(nya, abs_values > horizon)  # just diverged
        iteration_count[jd] = iteration - np.log(np.log(abs_values[jd])) / np.log(2) + log_horizon
    return iteration_count
show(mandelbrot_continuous, 'full')
# -
# More pretty pictures:
# +
# Zoom sequence: six panels, each drawing a green rectangle around the
# region shown in the next panel.
fig = plot.figure(figsize=(14, 9))
names = ('full', 'valley', 'sea_horses', 'sea_horse', 'sea_horse_tail', 'black_dot')
for index, (name, next_name) in enumerate(zip(names, names[1:] + (None, ))):
    ax = plot.subplot(231 + index)
    image = mandelbrot_continuous(complex_grid(*pois[name], res=512), 2048)
    ax.imshow(image, cmap='gnuplot2', norm=colors.PowerNorm(0.3))
    plot.xticks(np.linspace(0, image.shape[0], num=5), np.round(np.linspace(*pois[name][:2], num=5), 3))
    plot.yticks(np.linspace(0, image.shape[1], num=5), np.round(np.linspace(*pois[name][2:], num=5), 3))
    plot.title(name.replace('_', ' ').capitalize(), fontsize=15)
    if next_name is not None:
        # Map the next region's plane bounds into this panel's pixel space.
        ox, xmax, oy, ymax = pois[name]
        w, h = xmax - ox, ymax - oy
        xmin, xmax, ymin, ymax = pois[next_name]
        x0, y0 = (xmin - ox) / w * image.shape[0], (ymin - oy) / h * image.shape[1]
        x1, y1 = (xmax - ox) / w * image.shape[0], (ymax - oy) / h * image.shape[1]
        ax.add_patch(patches.Rectangle((x0, y0), x1 - x0, y1 - y0, edgecolor='g', linewidth=3, facecolor='none'))
plot.tight_layout()
plot.show()
| Mandelbrot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# %matplotlib inline
import math
from scipy import stats
from sympy import *
init_printing()
# ### Probabilities and Expectations ###
# A function $f$ on the plane is called a *joint density* if:
# - $f(x, y) \ge 0$ for all $x$, $y$
# - $\int_x \int_y f(x, y)dydx = 1$
#
# If you think of $f$ as a surface, then the first condition says that the surface is on or above the plane. The second condition says that the total volume under the surface is 1.
#
# Think of probabilities as volumes under the surface, and define $f$ to be the *joint density of random variables $X$ and $Y$* if
#
# $$
# P((X, Y) \in A) ~ = ~ \mathop{\int \int}_A f(x,y)dydx ~~~~~ \text{for all } A
# $$
#
# That is, the chance that the random point $(X, Y)$ falls in the region $A$ is the volume under the joint density surface over the region $A$.
#
# This is a two-dimensional analog of the fact that in probabilities involving a single random variable can be thought of as areas under the density curve.
# ### Infinitesimals ###
# Also analogous is the interpretation of the joint density as an element in the calculation of the probability of an infinitesimal region.
# 
# The infinitesimal region is a tiny rectangle in the plane just around the point $(x, y)$. Its width is $dx$ and its length is $dy$. The corresponding volume is that of a rectangular box whose base is the tiny rectangle and whose height is $f(x, y)$.
#
# Thus for all $x$ and $y$,
# $$
# P(X \in dx, Y \in dy) ~ \sim ~ f(x, y)dxdy
# $$
#
# and the joint density measures *probability per unit area*:
# $$
# f(x, y) ~ \sim ~ \frac{P(X \in dx, Y \in dy)}{dxdy}
# $$
# An example will help us visualize all this. Let $f$ be defined as follows:
#
# $$
# f(x, y) ~ = ~
# \begin{cases}
# 120x(y-x)(1-y), ~~~ 0 < x < y < 1 \\
# 0 ~~~~~~~~ \text{otherwise}
# \end{cases}
# $$
#
# For now, just assume that this is a joint density, that is, it integrates to 1. Let's first take a look at what the surface looks like.
# ### Plotting the Surface ###
# To do this, we will use a 3-dimensional plotting routine. First, we define the joint density function. For use in our plotting routine, this function must take $x$ and $y$ as its inputs and return the value $f(x, y)$ as defined above.
def joint(x, y):
    """Joint density f(x, y) = 120x(y-x)(1-y) on 0 < x < y < 1, 0 elsewhere."""
    return 0 if y < x else 120 * x * (y - x) * (1 - y)
# Then we call `Plot_3d` to plot the surface. The arguments are the limits on the $x$ and $y$ axes, the name of the function to be plotted, and two optional arguments `rstride` and `cstride` that determine how many grid lines to use (larger numbers correspond to less frequent grid lines).
Plot_3d(x_limits=(0,1), y_limits=(0,1), f=joint, cstride=4, rstride=4)
# You can see that the surface has level 0 in the lower right hand triangle. In fact, the possible values of $(X, Y)$ are as shown below. For calculations, we will frequently draw just the possible values and not the surface.
# HIDDEN
# Shade the triangle of possible (X, Y) values: 0 < x < y < 1.
plt.plot([0, 0], [0, 1], color='k', lw=2)
plt.plot([0, 1], [0, 1], color='k', lw=2)
plt.plot([0, 1], [1, 1], color='k', lw=2)
xx = np.arange(0, 1.11, 0.1)
yy = np.ones(len(xx))
plt.fill_between(xx, xx, yy, alpha=0.3)
plt.xlim(-0.05, 1)
plt.ylim(0, 1.05)
# plt.axes() with no arguments creates a NEW axes in matplotlib >= 3.6,
# leaving the plotted axes untouched; use the current axes instead.
plt.gca().set_aspect('equal')
plt.xticks(np.arange(0, 1.1, 0.25))
plt.yticks(np.arange(0, 1.1, 0.25))
plt.xlabel('$x$')
plt.ylabel('$y$', rotation=0)
plt.title('Possible Values of $(X, Y)$');
# ### The Total Volume Under the Surface ###
# First, it's a good idea to check that the total probability under the surface is equal to 1.
# The function $f$ looks like a bit of a mess but it is easy to see that it is non-negative. Let's use `SymPy` to see that it integrates to 1. Done by hand, the integration is routine but tedious.
#
# We will first declare the two variables to have values in the unit interval, and assign the function to the name `f`. This specification doesn't say that $x < y$, but we will enforce that condition when we integrate.
declare('x', interval=(0, 1))
declare('y', interval=(0, 1))
f = 120*x*(y-x)*(1-y)
# To set up the double integral over the entire region of possible values, notice that $x$ goes from 0 to 1, and for each fixed value of $x$, the value of $y$ goes from $x$ to 1.
#
# We will fix $x$ and first integrate with respect to $y$. Then we will integrate $x$. The double integral requires a call to `Integral` that specifies the inner integral first and then the outer. The call says:
# - The function being integrated is $f$.
# - The inner integral is over the variable $y$ which goes from $x$ to 1.
# - The outer integral is over the variable $x$ which goes from 0 to 1.
Integral(f, (y, x, 1), (x, 0, 1))
# To evaluate the integral, use `doit()`:
Integral(f, (y, x, 1), (x, 0, 1)).doit()
# ### Probabilities as Volumes ###
# Probabilities are volumes under the joint density surface; in other words, they are double integrals of the function $f$. For each probability, we have to first identify the region of integration, which we will do by geometry and by inspecting the event. Once we have set up the integral, we have to calculate its value, which we will do by `SymPy`.
# #### Example 1. ####
# Suppose you want to find $P(Y > 4X)$. The event is the blue region in the graph below.
# HIDDEN
# Shade the event {Y > 4X} inside the triangle of possible values.
plt.plot([0, 0], [0, 1], color='k', lw=2)
plt.plot([0, 1], [0, 1], color='k', lw=2)
plt.plot([0, 1], [1, 1], color='k', lw=2)
xx = np.arange(0, 0.251, 0.05)
yy = np.ones(len(xx))
plt.fill_between(xx, 4*xx, yy, alpha=0.3)
plt.xlim(-0.05, 1)
plt.ylim(0, 1.05)
# plt.axes() with no arguments creates a NEW axes in matplotlib >= 3.6;
# use the current axes instead.
plt.gca().set_aspect('equal')
plt.xticks(np.arange(0, 1.1, 0.25))
plt.yticks(np.arange(0, 1.1, 0.25))
plt.xlabel('$x$')
plt.ylabel('$y$', rotation=0)
plt.title('$Y > 4X$');
# The volume under the density surface over this region is given by an integral specified analogously to the previous one: first the inner integral and then the outer.
Integral(f, (y, 4*x, 1), (x, 0, 0.25))
Integral(f, (y, 4*x, 1), (x, 0, 0.25)).doit()
# #### Example 2. ####
# Suppose you want to find $P(X > 0.25, Y > 0.5)$. The event is the colored region below.
# HIDDEN
# Shade the event {X > 0.25, Y > 0.5} inside the triangle of possible values.
plt.plot([0, 0], [0, 1], color='k', lw=2)
plt.plot([0, 1], [0, 1], color='k', lw=2)
plt.plot([0, 1], [1, 1], color='k', lw=2)
xx = np.arange(0.25, .52, 0.05)
yy1 = 0.5*np.ones(len(xx))
yy2 = np.ones(len(xx))
plt.fill_between(xx, yy1, yy2, alpha=0.3)
xx = np.arange(0.5, 1.1, 0.1)
yy1 = 0.5*np.ones(len(xx))
yy2 = np.ones(len(xx))
plt.fill_between(xx, xx, yy2, alpha=0.3)
plt.xlim(-0.05, 1)
plt.ylim(0, 1.05)
# plt.axes() with no arguments creates a NEW axes in matplotlib >= 3.6;
# use the current axes instead.
plt.gca().set_aspect('equal')
plt.xticks(np.arange(0, 1.1, 0.25))
plt.yticks(np.arange(0, 1.1, 0.25))
plt.xlabel('$x$')
plt.ylabel('$y$', rotation=0)
plt.title('$X > 0.25, Y > 0.5$');
# Now $P(X > 0.25, Y > 0.5)$ is the integral of the joint density function over this region. Notice that for each fixed value of $y > 0.5$, the value of $x$ in this event goes from $0.25$ to $y$. So let's integrate $x$ first and then $y$.
Integral(f, (x, 0.25, y), (y, 0.5, 1))
Integral(f, (x, 0.25, y), (y, 0.5, 1)).doit()
# ### Expectation ###
# Let $g$ be a function on the plane. Then
# $$
# E(g(X, Y)) ~ = ~ \int_y \int_x g(x, y)f(x, y)dxdy
# $$
# provided the integral exists, in which case it can be carried out in either order ($x$ first, then $y$, or the other way around).
#
# This is the non-linear function rule for expectation, applied to two random variables with a joint density.
#
# As an example, let's find $E(\frac{Y}{X})$ for $X$ and $Y$ with the joint density $f$ given in the examples above.
#
# Here $g(x, y) = \frac{y}{x}$, and
#
# \begin{align*}
# E\big{(}\frac{Y}{X}\big{)} &= \int_y \int_x g(x, y)f(x, y)dxdy \\ \\
# &= \int_0^1 \int_x^1 \frac{y}{x} 120x(y-x)(1-y)dy dx \\ \\
# &= \int_0^1 \int_x^1 120y(y-x)(1-y)dy dx
# \end{align*}
#
# Now let's use `SymPy`. Remember that `x` and `y` have already been defined as symbolic variables with values in the unit interval.
ev_y_over_x = Integral(120*y*(y-x)*(1-y), (y, x, 1), (x, 0, 1))
ev_y_over_x
ev_y_over_x.doit()
# So for this pair of random variables $X$ and $Y$, we have
# $$
# E\big{(}\frac{Y}{X}\big{)} = 3
# $$
| miscellaneous_notebooks/Joint_Densities/Probabilities_and_Expectations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dynamic Programming
#
# In this notebook, we will develop a dynamic programming solution to a grid world problem. By this I mean a function, called a policy:
#
# $$ \pi : S \times A \to [0,1] $$
#
# that gives for each state the probability of choosing an action. A policy is optimal if by following this policy(i.e. choosing action $a$ in state $s$ with probability $ \pi(s,a)$) we maximize the expected reward over an episode(here we can think of an episode starting at a random position and ending whenever we reach the terminal state).
#
# For this notebook I will consider a deterministic setting; all actions always have the same effect in any state. We will make free use of the information about the world to construct a model of the world's actions, which is required for dynamic programming. In this sense this not a reinforcement learning approach, as we will not have an agent interacting with the world to discover its behavior.
#
# Under these assumptions, the problem is pretty straightforward, and we will get an optimal solution by using an generalized policy iteration algorithm(GPI).
# +
import sys
sys.path.append('../..')
import pandas as pd
import numpy as np
from grid_world.grid_world import GridWorld
from grid_world.visualization.format_objects import get_police_rec_str, get_police_eval_str, get_world_str
from grid_world.utils.police import get_police_rec
from grid_world.action import Action
np.set_printoptions(precision=2)
# -
# # Our World
gworld = GridWorld(
grid_shape=(5,6),
terminal_states=((0,5),),
walls=((0,1), (1,1), (2,3), (3,3)),
traps=((1,3),)
)
print(get_world_str(gworld))
# This is the world we will be considering. Our goal is to reach the terminal state as fast as possible, avoiding the trap. If this looks strange to you, please refer to the readme file for more details.
# # World Modeling
# In order to solve this problem with dynamic programming we will need a model of the world and a reward function. Mathematicaly these are the functions we need:
#
# $$ M_w: S \times A \to \mathbb{P}(S) $$
# $$ R_w: S \times A \to \mathbb{R} $$
#
# where $M_w$ gives for each pair state action $(s,a)$ a probability distribution over the states $S$, these indicate the probabilitie of moving to this new state when taking action $a$ in state $s$. This means that $M_w(s,a): S \to [0, 1]$ is also a function and $M_w(s,a)(s_0)$ is the probability of getting to $s_0$ when taking action $a$ in state $s$. Since we are in a determinitisct setting these values will be either 0 or 1.
#
# On the hand, $R_w(s,a)$ is the reward we get for taking action $a$ in state $s$. This is something we need to choose by ourselves. Since we want to reach the terminal state as soon as possible we will add a negative reward whenever we take an action outside it, for flavor we will also a big negative reward for being inside the trap. There is no reward for being in the terminal state since this should indicate the end of an episode(this is necessary for our implementation).
# +
# lets make some restrictions on the available actions
actions = [Action.up, Action.down, Action.left, Action.right]
def r(effect):
    """Map a transition effect code to a reward.

    NOTE(review): effect codes presumably come from GridWorld.take_action;
    -1 is heavily penalised (trap), 1 costs nothing (terminal), and every
    other effect costs one step -- confirm against the GridWorld API.
    """
    reward_by_effect = {-1: -100, 1: 0}
    return reward_by_effect.get(effect, -1)
rewards_dict = {(s, a): r(gworld.take_action(s, a)[1]) for s in gworld.states for a in actions}
rewards = lambda x, y: rewards_dict[(x, y)]
rewards((0,0), Action.up)
# +
def world_model(s, a, world = gworld):
    """Deterministic world model M_w(s, a).

    Returns a function mapping each candidate next state s0 to the
    probability (0 or 1) of landing in s0 after taking action `a` in
    state `s`.
    """
    destination = world.take_action(s, a)[0]
    return (lambda s0: 1 if s0 == destination else 0)
# -
# # Policy evaluation
# Alright, the first step to implementing a dynamic programming solution is doing policy evaluation, this means that given a policy $\pi$ we want to calculate a function, called the value function of $\pi$:
#
# $$ V_{\pi}: S \to \mathbb{R} $$
#
# that gives for each state $s$ the expected reward we will get until we reach a terminal state, when following our policy(usualy discounted by a $\gamma$ factor which we will set to 1).
#
# There are many ways to calculate this, we will use an iterative approach that can be generalized to the reinforcement learning methods we want to explore. The idea is to start with a random value function $V$ and improve each state estimate like this:
#
# $$ V(s) \leftarrow \sum_{a \in A}\pi(s,a)\sum_{s_0 \in S}M_w(s,a)(s_0)(R(s,a) + \gamma V(s_0))$$
#
# This is essentialy bootstrapping, for each state we of observe the reward for taking action $a$ and the estimated value of our new state, adding these gives an estimate of the value of this action in this state, so we average everything with the respective probability.
#
# Notice that, since $V(s_0)$ is expected to be wrong this new estimate can also be wrong, however now it incorporates information about the actual rewards, and by iterating this method $V$ actually converges to $V_\pi$.
#
# When implementing this we will make $V$ a hashmap(a dictionary) since it is much more practical to update hash values then it is to change functions.
# +
def _acc_V(s, V, pi, world_model, reward_function, states, gamma):
    """One Bellman-expectation backup for state *s* under policy *pi*.

    NOTE(review): iterates the module-level ``actions`` list rather than a
    parameter — confirm this coupling is intentional.
    """
    total = 0.0
    for a in actions:
        expected = np.sum([
            world_model(s, a)(s0) * (reward_function(s, a) + gamma * V[s0])
            for s0 in states
        ])
        total += pi(s, a) * expected
    return total
def _iterate_policy_step(pi, world_model, reward_function, actions, states, V_0, gamma):
    """Sweep once over all states, updating V_0 in place from a frozen snapshot.

    Returns the largest absolute per-state change (used as the stopping delta).
    """
    snapshot = V_0.copy()
    for s in states:
        V_0[s] = _acc_V(s, snapshot, pi, world_model, reward_function, states, gamma)
    changes = [V_0[key] - snapshot[key] for key in V_0]
    return np.amax(np.abs(changes))
def iterative_policy_evalution(
    pi, world_model, reward_function, actions, states, V_0, epsilon = 0.01, gamma = 1
):
    """Iterate Bellman backups on V_0 (in place) until the largest per-state
    change is no greater than *epsilon*; returns the converged V_0."""
    while True:
        delta = _iterate_policy_step(pi, world_model, reward_function, actions, states, V_0, gamma)
        if delta <= epsilon:
            return V_0
# -
# Lets test this on a random policy.
# +
def pi(s, a):
    # Uniform random policy: each of the four actions has probability 0.25.
    return 0.25
# Start from an all-zero value function and evaluate the random policy.
V_0 = {x: 0 for x in gworld.states}
V_pi = iterative_policy_evalution(pi, world_model, rewards, actions, gworld.states, V_0)
print(get_police_eval_str(V_pi))
# -
# It's pretty hard to tell whether this is right or not, but it will get clearer later.
# # Policy improvement
# The other cornerstone of GPI is improving the policy. This is pretty obvious: once we have values for each state we simply make a policy that sends us to the state with the best value; this is called a greedy policy with respect to $V$.
# +
def q(s, a, V, world_model, reward_function, states):
    """Action-value estimate: immediate reward plus the expected successor
    value under the (deterministic) model, with an implicit gamma of 1."""
    expected_value = np.sum([world_model(s, a)(s0) * V[s0] for s0 in states])
    return reward_function(s, a) + expected_value
# Greedy action selection over the current action-value estimates.
def _argmax_q(s, V, world_model, reward_function, actions, states):
    """Return the action maximising q(s, a, ...).

    Ties go to the earliest action in *actions*, matching the previous
    hand-rolled scan (which also evaluated q for actions[0] twice — the
    redundancy this rewrite removes, resolving the old TODO).
    """
    return max(actions, key=lambda a: q(s, a, V, world_model, reward_function, states))
def get_greedy_policy(V, world_model, reward_function, actions, states):
    """Build a deterministic policy greedy with respect to *V*.

    Returns pi(s, a) giving probability 1 to the argmax action of each state
    and 0 to every other action.
    """
    best = {
        state: _argmax_q(state, V, world_model, reward_function, actions, states)
        for state in states
    }
    def greedy_pi(s, a):
        return 1 if a == best[s] else 0
    return greedy_pi
# -
# Greedify against the random policy's value function and display the result.
pi_1 = get_greedy_policy(V_pi, world_model, rewards, actions, gworld.states)
pr = get_police_rec(pi_1, gworld, actions)
print(get_police_rec_str(pr, gworld))
# # GPI - Iterating till convergence
#
# Notice that since we change the policy to say $\pi'$, $V$ may not be a good estimate of $V_{\pi'}$, so $\pi'$ is not necessarily greedy with respect to $V_{\pi'}$. So the idea is to repeat the process until we get a policy that is greedy with respect to its value function; it is not hard to see that this is an optimal policy(check Sutton and Barto if you need).
#
# I will use changes in the value function as the stopping criterion, since it is a little easier to check. This also guarantees that the policy is optimal.
# +
# Safety cap on the number of policy-improvement rounds.
max_epochs = 20
def float_dict_compare(d0, d1):
    """True when every value in d0 is numerically close to its counterpart in d1."""
    return np.all([np.isclose(d0[key], d1[key]) for key in d0])
def dpi_step(V_pi, world_model, reward_function, actions, states):
    """One round of generalized policy iteration: greedify, then re-evaluate."""
    improved_pi = get_greedy_policy(V_pi, world_model, reward_function, actions, states)
    improved_V = iterative_policy_evalution(
        improved_pi, world_model, reward_function, actions, states, V_pi
    )
    return improved_pi, improved_V
def pi(s, a):
    """Initial uniform random policy over the four actions."""
    return 0.25
# Evaluate the random policy first, then alternate improvement and evaluation
# until the value function stops changing (which implies an optimal policy).
V_0 = {x: 0 for x in gworld.states}
V_pi = iterative_policy_evalution(pi, world_model, rewards, actions, gworld.states, V_0)
for i in range(max_epochs):
    V_pi_0 = V_pi.copy()
    pi, V_pi = dpi_step(V_pi, world_model, rewards, actions, gworld.states)
    if float_dict_compare(V_pi, V_pi_0):
        # BUG FIX: message previously read "police convergerd".
        print('policy converged')
        break
print(f'epoch: {i}')
pr = get_police_rec(pi, gworld, actions)
print(get_police_rec_str(pr, gworld))
# -
# Notice how the policy starts by avoiding the shorter path, which passes near the trap. This happens because the initial random policy has a chance of walking into the trap, and as a consequence the values of states near the trap start much lower than their values under the optimal policy. As we iterate, these values get adjusted to the improved policies, and the improvements gradually propagate back to the other states.
| notebooks/dynamic_programing/determinitisc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5 with Spark
# language: python3
# name: python3
# ---
# +
### TODO Please provide your Cloudant credentials in this cell
#Please don't modify this function
def readDataFrameFromCloudant(database):
    # Load the named Cloudant database as a Spark DataFrame via the
    # cloudant-spark connector configured on the SparkSession below.
    cloudantdata=spark.read.load(database, "com.cloudant.spark")
    # Register a temp view and echo the rows so the load can be eyeballed.
    cloudantdata.createOrReplaceTempView("washing")
    spark.sql("SELECT * from washing").show()
    return cloudantdata
# Build (or reuse) the SparkSession wired up for the Cloudant connector.
# NOTE(review): host/username are 'XXXXX' placeholders and the password is a
# redacted placeholder — they must be filled in before this cell can run.
spark = SparkSession\
    .builder\
    .appName("Cloudant Spark SQL Example in Python using temp tables")\
    .config("cloudant.host",'XXXXX')\
    .config("cloudant.username", 'XXXXX')\
    .config("cloudant.password",'<PASSWORD>')\
    .config("jsonstore.rdd.partitions", 1)\
    .getOrCreate()
# -
# Pull the training data from Cloudant into a DataFrame.
df=readDataFrameFromCloudant('training')
# Enable SQL on the data frame
df.createOrReplaceTempView('df')
# Split the rows by label so each class can be inspected separately.
df_class_0 = spark.sql('select time, temp, humidity, class from df where class = 0')
df_class_1 = spark.sql('select time, temp, humidity, class from df where class = 1')
df_class_0.createOrReplaceTempView('df_class_0')
df_class_1.createOrReplaceTempView('df_class_1')
# Eyeball the distinct (temp, humidity) combinations per class.
df_class_1.select('temp', 'humidity').distinct().show()
df_class_0.select('temp', 'humidity').distinct().show()
df.printSchema()
# Class balance check.
spark.sql('select class, count(class) from df group by class').show()
import pixiedust
# + pixiedust={"displayParams": {"handlerId": "tableView"}}
display(df_class_0)
# -
# Imports for modelling
from pyspark.ml.feature import StringIndexer, OneHotEncoder
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import Normalizer
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# create binary classifier model
# Assemble the two raw sensor readings into the single "features" vector
# column expected by Spark ML estimators.
vectorAssembler = VectorAssembler(inputCols=["humidity","temp"],
                                  outputCol="features")
lr = LogisticRegression(maxIter=1000).setLabelCol("class")
pipeline = Pipeline(stages=[vectorAssembler, lr ])
# Fit on the full DataFrame and score the same data (in-sample predictions).
model = pipeline.fit(df)
result = model.transform(df)
# Inspect the learned decision boundary (stage 1 is the LogisticRegression).
model.stages[1].coefficients
model.stages[1].intercept
# + pixiedust={"displayParams": {"handlerId": "tableView"}}
#evaluate classification accuracy (1.0 = 100% accurate)
binEval = MulticlassClassificationEvaluator().setMetricName("accuracy").setPredictionCol("prediction").setLabelCol("class")
binEval.evaluate(result)
# -
# test the model
#re-read data from cloudant
# NOTE(review): this re-reads the same 'training' database, so it is not a
# held-out test set — confirm whether a separate database was intended.
new_df = readDataFrameFromCloudant('training')
result = model.transform(new_df)
result.createOrReplaceTempView('result')
spark.sql("select humidity, temp, class, prediction from result").show(50)
| en/part4/notebooks/IoT Sensor Analytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Meta-FaSTrack
# Meta-planning + Fast and Safe Tracking (FaSTrack): effectively blending fast planning methods with slower, reachability-based safety guarantees for online safe trajectory planning. Please refer to our ICRA 2018 [paper](https://arxiv.org/abs/1710.04731) for technical details.
#
# **NOTE:** This repository is going under a major refactor to ensure efficiency and continuing reliability. In the meantime, please check out a much more stable version of the original FaSTrack idea [here](https://github.com/HJReachability/fastrack/).
#
# ## Repository organization
# All code in this repository is written in the Robot Operating System (ROS) framework, and as such is broken up into atomic packages that implement specific functionality. The `ros/` directory is the root workspace, and individual packages live inside the `ros/src/` directory.
#
# ## Usage
# First, make sure you have ROS installed on your system. The project was developed in Jade, but it should be compatible with anything past Hydro. Please let us know if you have any compatibility issues.
#
# `Meta-FaSTrack` currently depends upon the [crazyflie_clean](https://github.com/dfridovi/crazyflie_clean) repository, which contains drivers and utilities for the HSL's Crazyflie 2.0 testbed. We intend to remove this build dependency in the future so that `Tracking` can be used more easily in other contexts. This will be part of a larger code reorganization/refactor.
#
# Other dependencies:
# * [Gtest](https://github.com/google/googletest) -- Google's C++ unit testing library
# * [Eigen](https://eigen.tuxfamily.org) -- a header-only linear algebra library for C++
# * [OMPL](http://ompl.kavrakilab.org) -- an open C++ library for motion planning (recommend v1.2.1 to avoid g++5 dependency)
# * [MATIO](https://github.com/tbeu/matio) -- an open C library for MATLAB MAT file I/O
# * [FLANN](http://www.cs.ubc.ca/research/flann/) -- an open source library for fast (approximate) nearest neighbors
#
# You must begin by building and sourcing the `crazyflie_clean` repository. Instructions may be found in that project's README. To build `Meta-FaSTrack`, open a terminal window and navigate to the `ros/` directory. Then run:
catkin_make
# Every time you open a new terminal, you'll have to tell ROS how to find this package. Do this by running the following command from the `ros/` directory:
source devel/setup.bash
# `Tracking` includes two demos, one software and one hardware. To run the hardware demo, you will need physical hardware access. For instructions on how to set that up, please contact us. The software demo may be launched as follows. Note that these commands must be run in different terminal windows.
roslaunch meta_planner rviz.launch
roslaunch meta_planner software_demo.launch
# To run unit tests, type:
catkin_make run_tests
# ## C++ reference materials
# We attempt to adhere to the philosophy put forward in the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). Our code is written _for the reader, not the writer_. We write comments liberally and use inheritance whenever it makes sense.
#
# A few tips, tricks, and customs that you'll find throughout our code:
# * Lines of code are no longer than 80 characters.
# * The names of all member variables of a class end with an underscore, e.g. `foo_`.
# * When iterating through a vector, we name the index something like `ii` instead of just `i`. This makes it super easy to find and replace the iterator later.
# * We use the `const` specifier whenever possible.
# * We try to include optional guard statements with meaningful debug messages wherever possible. These may be toggled on/off with the `ENABLE_DEBUG_MESSAGES` cmake option.
# * Whenever it makes sense, we write unit tests for self-contained functionality and integration tests for dependent functions and classes. These are stored in the `test/` directory.
| README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="i8hWaEWxsbjf"
# ### NMT (Neural Machine Translation)
#
# In this series of notebooks we are going to create a bidirectional NMT model for our application. We are going to use the following notebooks as references for this notebook.
#
# 1. [17_Custom_Dataset_and_Translation.ipynb](https://github.com/CrispenGari/pytorch-python/blob/main/09_NLP/03_Sequence_To_Sequence/17_Custom_Dataset_and_Translation.ipynb)
# 2. [16_Data_Preparation_Translation_Dataset.ipynb](https://github.com/CrispenGari/pytorch-python/blob/main/09_NLP/03_Sequence_To_Sequence/16_Data_Preparation_Translation_Dataset.ipynb)
# 3. [07_Attention_is_all_you_need](https://github.com/CrispenGari/pytorch-python/blob/main/09_NLP/03_Sequence_To_Sequence/07_Attention_is_all_you_need.ipynb)
#
# I will be loading the data from my google drive.
# + colab={"base_uri": "https://localhost:8080/"} id="wEY0_2fdsbLZ" outputId="4b158574-7ab1-47a9-f1a1-fa39815f1cdb"
# Mount Google Drive so the corpus can be read and artifacts saved/downloaded.
from google.colab import drive
from google.colab import files
drive.mount('/content/drive')
# + [markdown] id="P61k3sTWuGYt"
# ### Imports
# + id="-lNfuXwhsbFx"
import torch
from torch import nn
from torch.nn import functional as F
import spacy, math, random
import numpy as np
from torchtext.legacy import datasets, data
import time, os, json
from prettytable import PrettyTable
from matplotlib import pyplot as plt
# + id="-3nOk8n7sbCx"
SEED = 42
# Seed every RNG source used in this notebook so runs are reproducible.
np.random.seed(SEED)
torch.manual_seed(SEED)
random.seed(SEED)
torch.cuda.manual_seed(SEED)
# BUG FIX: the attribute is "deterministic" — the original assigned to a
# nonexistent "deteministic" attribute, so cuDNN silently stayed
# nondeterministic.
torch.backends.cudnn.deterministic = True
# + id="sjqtcpj8uZsW" colab={"base_uri": "https://localhost:8080/"} outputId="ca81ab59-9194-401a-94f0-dea69f121ede"
# Prefer the GPU when one is visible to PyTorch; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# + id="IKiHZEUKuR2f" colab={"base_uri": "https://localhost:8080/"} outputId="13a98f5e-1a8a-4a13-a9bf-82a95ac4efa4"
# Location of the Swedish-English parallel corpus on the mounted Drive.
base_path = '/content/drive/My Drive/NLP Data/seq2seq/manythings'
path_to_files = os.path.join(base_path, "Swedish - English")
os.listdir(path_to_files)
# + [markdown] id="ga_vPyt3u5IE"
# ### File extensions
# + id="0XV9PLt7uRyI"
exts = (".sw", ".en")
# + [markdown] id="BMXQVQAZvB60"
# ### Tokenizer models
#
# All the tokenization models that we are going to use are going to be found [here](https://spacy.io/usage/models) but to those languages that doesn't have tokenization models we are going to create our own tokenizers.
# + id="drvBIe75uRwR"
# English has a spaCy model; Swedish does not here, so it gets a plain
# whitespace tokenizer (defined below).
import spacy
spacy_en = spacy.load('en_core_web_sm')
# + id="sC2PFvU6uRsm"
def tokenize_sw(sent):
    """Tokenize a Swedish sentence by splitting on single spaces."""
    return sent.split(" ")
def tokenize_en(sent):
    """Tokenize an English sentence with the spaCy English tokenizer."""
    return [token.text for token in spacy_en.tokenizer(sent)]
# + [markdown] id="SmCTTn4Hwiom"
# ### Fields
# + id="Pa8oJesouRqO"
# Source field: whitespace tokenizer, lowercased, wrapped in <sos>/<eos>;
# include_lengths=True makes batches yield (tensor, lengths) for packing.
SRC = data.Field(
    tokenize = tokenize_sw,
    lower= True,
    init_token = "<sos>",
    eos_token = "<eos>",
    include_lengths =True
)
# Target field: spaCy tokenizer; no lengths needed on the decoder side.
TRG = data.Field(
    tokenize = tokenize_en,
    lower= True,
    init_token = "<sos>",
    eos_token = "<eos>"
)
# + [markdown] id="eWi2bUtHw4_e"
# ### Creating dataset
# + id="natPw3mhuRnQ"
# Build train/valid/test translation datasets from the parallel files
# (train.sw/train.en, valid.sw/valid.en, test.sw/test.en under path_to_files).
train_data, valid_data, test_data = datasets.TranslationDataset.splits(
    exts= exts,
    path=path_to_files,
    train='train', validation='valid', test='test',
    fields = (SRC, TRG)
)
# + colab={"base_uri": "https://localhost:8080/"} id="d-04eSK8uRkp" outputId="adf5827d-7386-4a6d-ea52-e18e89d9b2b7"
# Sanity-check one example from each split (dicts with 'src'/'trg' token lists).
print(vars(train_data.examples[0]))
# + colab={"base_uri": "https://localhost:8080/"} id="wEHHOMmDxKlO" outputId="b0a88891-ed22-4784-ffcf-af697f6ce028"
print(vars(valid_data.examples[0]))
# + colab={"base_uri": "https://localhost:8080/"} id="VxM9sZGfuRhx" outputId="1d3bc24d-cc4c-4414-cd1e-9b8b34c33e81"
print(vars(test_data.examples[0]))
# + [markdown] id="hAV3a9mPxOss"
# ### Counting examples
# + colab={"base_uri": "https://localhost:8080/"} id="NyEk_Y01uRen" outputId="2d44b891-a629-41c4-daaa-9ad7cea36476"
from prettytable import PrettyTable
def tabulate(column_names, data):
    """Render dataset-size rows as a titled ASCII table.

    The first column is left-aligned, the second right-aligned.
    """
    table = PrettyTable(column_names)
    table.title = "VISUALIZING SETS EXAMPLES"
    table.align[column_names[0]] = 'l'
    table.align[column_names[1]] = 'r'
    for entry in data:
        table.add_row(entry)
    print(table)
# Report how many sentence pairs landed in each split.
column_names = ["SUBSET", "EXAMPLE(s)"]
row_data = [
    ["training", len(train_data)],
    ['validation', len(valid_data)],
    ['test', len(test_data)]
]
tabulate(column_names, row_data)
# + [markdown] id="ljHsxO81xVdn"
# Our dataset is very small, so we keep `min_freq` low (set to 2 here) during the building of the vocabulary.
# + id="jmv9XL9EuRcN"
# Keep only tokens seen at least twice in the training split; rarer tokens
# map to <unk>. Vocabularies are built from training data only.
SRC.build_vocab(train_data, min_freq=2)
TRG.build_vocab(train_data, min_freq=2)
# + [markdown] id="t7oilFldx-X9"
# Saving the dictionary maping of our SRC and TRG to a json file.
# + colab={"base_uri": "https://localhost:8080/"} id="TA_gPfD2uRZb" outputId="6227fea0-79d7-4384-fbce-9c74d99cbb66"
len(SRC.vocab.stoi), len(TRG.vocab.stoi)
# + id="ceitelKvuRWX" colab={"base_uri": "https://localhost:8080/"} outputId="c16520ae-4ca8-4bd9-aa60-b63a7984ee0c"
# src = dict(SRC.vocab.stoi)
# trg = dict(TRG.vocab.stoi)
# src_vocab_path = "src_vocab.json"
# trg_vocab_path = "trg_vocab.json"
# with open(src_vocab_path, "w") as f:
# json.dump(src, f, indent=2)
# with open(trg_vocab_path, "w") as f:
# json.dump(trg, f, indent=2)
# print("Done")
# + id="gd0l5cL4uRTO" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="53ad3764-f627-4a40-e251-973ca3c133db"
# files.download(src_vocab_path)
# files.download(trg_vocab_path)
# + [markdown] id="EX6pZxDwzQAT"
# ### Iterators
# + id="yTE-gGpHuQRH"
BATCH_SIZE = 128 # 128 for languages with good vocab corpus
# Bucket by source length so each batch needs minimal padding; within-batch
# sorting (descending) is required by pack_padded_sequence in the encoder.
sort_key = lambda x: len(x.src)
# NOTE(review): no `device=` argument here, so batches come back on the CPU;
# the train/eval loops move the tensors to `device` themselves.
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size = BATCH_SIZE,
    sort_key= sort_key,
    sort_within_batch = True
)
# + [markdown] id="BY4p8X1Bzrrv"
# ### Encoder
# + id="TstLLxavsbAQ"
class Encoder(nn.Module):
    """Bidirectional single-layer GRU encoder.

    Embeds the source tokens, runs a bi-GRU over the packed sequence, and
    projects the concatenated final forward/backward hidden states down to
    the decoder's hidden size with a tanh-activated linear layer.
    """
    def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
        super(Encoder, self).__init__()
        # Layer creation order fixes RNG consumption during initialisation.
        self.embedding = nn.Embedding(input_dim, embedding_dim=emb_dim)
        self.gru = nn.GRU(emb_dim, enc_hid_dim, bidirectional=True)
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, src, src_len):
        # src: [src len, batch]; src_len must live on the CPU for packing.
        embedded = self.dropout(self.embedding(src))
        packed = nn.utils.rnn.pack_padded_sequence(embedded, src_len.to('cpu'))
        packed_outputs, hidden = self.gru(packed)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs)
        # hidden[-2] is the final forward state, hidden[-1] the final backward.
        combined = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
        hidden = torch.tanh(self.fc(combined))
        # outputs: [src len, batch, enc_hid*2]; hidden: [batch, dec_hid]
        return outputs, hidden
# + [markdown] id="MDKNRHQS04CG"
# ### Attention layer
# + id="iDr-sIsW06HO"
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over bidirectional encoder states."""
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super(Attention, self).__init__()
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
        self.v = nn.Linear(dec_hid_dim, 1, bias=False)
    def forward(self, hidden, encoder_outputs, mask):
        src_len = encoder_outputs.shape[0]
        # Tile the decoder state so it can be scored against every position.
        tiled_hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)
        enc_batch_first = encoder_outputs.permute(1, 0, 2)
        energy = torch.tanh(self.attn(torch.cat((tiled_hidden, enc_batch_first), dim=2)))
        scores = self.v(energy).squeeze(2)  # [batch, src len]
        # Padding positions get -1e10 so softmax assigns them ~zero weight.
        scores = scores.masked_fill(mask == 0, -1e10)
        return F.softmax(scores, dim=1)
# + [markdown] id="q7NlD1sz1BSi"
# ### Decoder
# + id="bHiBnQaF1AtW"
class Decoder(nn.Module):
    """Single-step GRU decoder that attends over the encoder outputs."""
    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
        super(Decoder, self).__init__()
        self.output_dim = output_dim
        self.attention = attention
        self.embedding = nn.Embedding(output_dim, emb_dim)
        self.gru = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        self.fc = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
    def forward(self, input, hidden, encoder_outputs, mask):
        # input: [batch] token ids for one decoding step.
        step_input = input.unsqueeze(0)                        # [1, batch]
        embedded = self.dropout(self.embedding(step_input))    # [1, batch, emb]
        attn_weights = self.attention(hidden, encoder_outputs, mask)  # [batch, src len]
        attn_weights = attn_weights.unsqueeze(1)               # [batch, 1, src len]
        enc_batch_first = encoder_outputs.permute(1, 0, 2)     # [batch, src len, enc*2]
        context = torch.bmm(attn_weights, enc_batch_first)     # [batch, 1, enc*2]
        context = context.permute(1, 0, 2)                     # [1, batch, enc*2]
        rnn_input = torch.cat((embedded, context), dim=2)
        output, hidden = self.gru(rnn_input, hidden.unsqueeze(0))
        # One step of a single-layer GRU: output must equal the new hidden state.
        assert (output == hidden).all()
        prediction = self.fc(torch.cat(
            (output.squeeze(0), context.squeeze(0), embedded.squeeze(0)), dim=1))
        # prediction: [batch, output dim]
        return prediction, hidden.squeeze(0), attn_weights.squeeze(1)
# + [markdown] id="uRi_d5xy1LPM"
# ### Seq2Seq
# + id="LMyUUMnr1K0I"
class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper that decodes step by step with optional teacher forcing."""
    def __init__(self, encoder, decoder, src_pad_idx, device):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.device = device
        # Needed to build the attention mask over (non-pad) source positions.
        self.src_pad_idx = src_pad_idx
    def create_mask(self, src):
        # 1 for real tokens, 0 for <pad>; transposed to [batch size, src len].
        mask = (src != self.src_pad_idx).permute(1, 0)
        return mask
    def forward(self, src, src_len, trg, teacher_forcing_ratio = 0.5):
        """
        src = [src len, batch size]
        src_len = [batch size]
        trg = [trg len, batch size]
        teacher_forcing_ratio is probability to use teacher forcing
        e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
        """
        trg_len, batch_size = trg.shape
        trg_vocab_size = self.decoder.output_dim
        # tensor to store decoder outputs
        # (row 0, corresponding to the <sos> step, intentionally stays zeros)
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        """
        encoder_outputs is all hidden states of the input sequence, back and forwards
        hidden is the final forward and backward hidden states, passed through a linear layer
        """
        encoder_outputs, hidden = self.encoder(src, src_len)
        # first input to the decoder is the <sos> tokens
        input = trg[0,:]
        mask = self.create_mask(src) # mask = [batch size, src len]
        for t in range(1, trg_len):
            # insert input token embedding, previous hidden state and all encoder hidden states and mask
            # receive output tensor (predictions) and new hidden state
            output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
            # place predictions in a tensor holding predictions for each token
            outputs[t] = output
            # decide if we are going to use teacher forcing or not
            teacher_force = random.random() < teacher_forcing_ratio
            # get the highest predicted token from our predictions
            top1 = output.argmax(1)
            # if teacher forcing, use actual next token as next input
            # if not, use predicted token
            input = trg[t] if teacher_force else top1
        return outputs
# + [markdown] id="B6RraZNi1tS0"
# ### Seq2Seq model instance
# + colab={"base_uri": "https://localhost:8080/"} id="JUEfOF60sa6s" outputId="6ea44b11-7fd3-4890-d1aa-da8f667df955"
# Model hyperparameters; vocabulary sizes come from the built torchtext fields.
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = DEC_EMB_DIM = 256
ENC_HID_DIM = DEC_HID_DIM = 128
ENC_DROPOUT = DEC_DROPOUT = 0.5
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
# Assemble attention -> encoder/decoder -> seq2seq, and move it to the device.
attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)
model = Seq2Seq(enc, dec, SRC_PAD_IDX, device).to(device)
model
# + [markdown] id="N44tQz5V2CIK"
# ### Model parameters
# + colab={"base_uri": "https://localhost:8080/"} id="gdNL1peTsa0s" outputId="738c7412-c094-40d8-fd2d-1141182cf682"
def count_trainable_params(model):
    """Return (total, trainable) parameter counts for *model*."""
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    return total, trainable
# Report parameter counts for the assembled seq2seq model.
# NOTE(review): the printed message misspells "parameters"/"trainable";
# left as-is here since it is a runtime string.
n_params, trainable_params = count_trainable_params(model)
print(f"Total number of paramaters: {n_params:,}\nTotal tainable parameters: {trainable_params:,}")
# + [markdown] id="yld0SPhi2HUm"
# Initialize model weights
# + colab={"base_uri": "https://localhost:8080/"} id="9JDmgtkGsav_" outputId="cb0da570-0ac9-42ed-9975-fd8653525bf2"
def init_weights(m):
    """Initialise weights ~ N(0, 0.01^2); every other parameter (biases) to zero."""
    for name, param in m.named_parameters():
        is_weight = 'weight' in name
        if is_weight:
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)
model.apply(init_weights)
# + [markdown] id="F1OnvuMKBIg_"
# ### Optimizer and Criterion
# + id="7PowuX5bsasj"
# Adam with default hyperparameters; cross-entropy ignores <pad> positions
# so padding never contributes to the loss.
optimizer = torch.optim.Adam(model.parameters())
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX).to(device)
# + [markdown] id="IpNeWJEJ2pcN"
# ### Train and evaluation functions
# + id="UXVjp9XOsamS"
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch; returns the mean per-batch cross-entropy loss.

    Moves each batch to the module-level ``device`` before the forward pass
    and clips gradient norms to *clip* before every optimiser step.
    """
    model.train()
    running_loss = 0
    for batch in iterator:
        src, src_len = batch.src
        src = src.to(device)
        src_len = src_len.to(device)
        trg = batch.trg.to(device)
        optimizer.zero_grad()
        output = model(src, src_len, trg)
        # Drop the <sos> step and flatten for the loss:
        #   trg    -> [(trg len - 1) * batch size]
        #   output -> [(trg len - 1) * batch size, output dim]
        output_dim = output.shape[-1]
        flat_output = output[1:].view(-1, output_dim)
        flat_trg = trg[1:].view(-1)
        loss = criterion(flat_output, flat_trg)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(iterator)
def evaluate(model, iterator, criterion):
    """Compute the mean per-batch loss on *iterator*.

    Runs in eval mode under torch.no_grad() with teacher forcing disabled,
    so the model decodes from its own predictions. Uses the module-level
    ``device`` to place the batches.
    """
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for batch in iterator:
            src, src_len = batch.src
            src = src.to(device)
            src_len = src_len.to(device)
            trg = batch.trg.to(device)
            # BUG FIX: the original called optimizer.zero_grad() here via a
            # global optimizer; gradient state is irrelevant during no-grad
            # evaluation, so the call is removed.
            output = model(src, src_len, trg, 0)  # teacher forcing ratio = 0
            # trg: [trg len, batch]; output: [trg len, batch, output dim]
            output_dim = output.shape[-1]
            output = output[1:].view(-1, output_dim)
            trg = trg[1:].view(-1)
            # trg -> [(trg len - 1) * batch]; output -> [..., output dim]
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# + [markdown] id="MQG1RgUi2ujL"
# ### Training the model
# + id="0Z8_TXq8sadI"
def hms_string(sec_elapsed):
    """Format elapsed seconds as H:MM:SS.ss (hours unpadded, seconds to 2 dp)."""
    hours, remainder = divmod(sec_elapsed, 3600)
    minutes = int(remainder // 60)
    seconds = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(int(hours), minutes, seconds)
def tabulate_training(column_names, data, title):
    """Print a titled PrettyTable of training rows.

    The first column is left-aligned; the next three are right-aligned
    (callers always pass the four columns SET/LOSS/PPL/ETA).
    """
    table = PrettyTable(column_names)
    table.title = title
    table.align[column_names[0]] = 'l'
    table.align[column_names[1]] = 'r'
    table.align[column_names[2]] = 'r'
    table.align[column_names[3]] = 'r'
    for entry in data:
        table.add_row(entry)
    print(table)
# + [markdown] id="XhkS-JDd35qx"
# ### Model Name
# + id="OOCDVLXV39iK"
MODEL_NAME = "sw-eng.pt"
# + colab={"base_uri": "https://localhost:8080/"} id="shJMTRS7saRC" outputId="3d010fa5-b73e-4589-b900-b60fd8d6945d"
N_EPOCHS = 15 # 10
CLIP = 1
best_valid_loss = float('inf')
column_names = ["SET", "LOSS", "PPL", "ETA"]
print("TRAINING START....")
# Train/validate each epoch; checkpoint whenever validation loss improves.
for epoch in range(N_EPOCHS):
    start = time.time()
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    end = time.time()
    title = f"EPOCH: {epoch+1:02}/{N_EPOCHS:02} | {'saving model...' if valid_loss < best_valid_loss else 'not saving...'}"
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), MODEL_NAME)
    rows_data =[
        ["train", f"{train_loss:.3f}", f"{math.exp(train_loss):7.3f}", hms_string(end - start) ],
        # BUG FIX: the validation perplexity previously reused train_loss.
        ["val", f"{valid_loss:.3f}", f"{math.exp(valid_loss):7.3f}", '' ]
    ]
    tabulate_training(column_names, rows_data, title)
print("TRAINING ENDS....")
# + id="k1_A2Nz5sKJu" colab={"base_uri": "https://localhost:8080/"} outputId="8e7d8a2d-67a6-468b-836d-217ae43f405b"
# Restore the best checkpoint and report held-out loss and perplexity.
model.load_state_dict(torch.load(MODEL_NAME))
test_loss = evaluate(model, test_iterator, criterion)
title = "Model Evaluation Summary"
data_rows = [["Test", f'{test_loss:.3f}', f'{math.exp(test_loss):7.3f}', ""]]
tabulate_training(["SET", "LOSS", "PPL", "ETA"], data_rows, title)
# + [markdown] id="3J9H6zur4WU0"
# ### Model inference
# + id="duN_U05sBlkx"
# import en_core_web_sm
# nlp = en_core_web_sm.load()
# + id="OPwMVJZ14Sxr"
def translate_sentence(sent, src_field, trg_field, model, device, max_len=50):
    """Greedy-decode *sent* with *model*.

    Returns (predicted target tokens without <sos>, attention weights of shape
    [generated len, 1, src len]).

    BUG FIX: the model parameter was previously misspelled ``mdoel`` and the
    body silently used the global ``model`` instead; the function now uses the
    model it is given. All existing call sites pass the model positionally, so
    behaviour there is unchanged.
    """
    model.eval()
    # Accept either a raw string (whitespace-tokenised) or a pre-tokenised list.
    if isinstance(sent, str):
        tokens = [token.lower() for token in sent.split(" ")]
    else:
        tokens = [token.lower() for token in sent]
    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
    src_len = torch.LongTensor([len(src_indexes)])
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor, src_len)
    mask = model.create_mask(src_tensor)
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]
    attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
    for i in range(max_len):
        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
        with torch.no_grad():
            output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs, mask)
        attentions[i] = attention
        pred_token = output.argmax(1).item()
        trg_indexes.append(pred_token)
        # Stop as soon as the model emits <eos>.
        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    # Drop the leading <sos>; trim attentions to the generated length.
    return trg_tokens[1:], attentions[:len(trg_tokens)-1]
# + id="dqGmZM3q4uRG"
# + id="lyPjW60N4ve_" colab={"base_uri": "https://localhost:8080/"} outputId="de78fe0f-db4c-4bac-b81d-913e11e9b8ec"
# Spot-check a test example: source, gold target, and the model's translation.
example_idx = 6
src = vars(test_data.examples[example_idx])['src']
trg = vars(test_data.examples[example_idx])['trg']
translation, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'src = {src}')
print(f'trg = {trg}')
print(f'predicted trg = {translation}')
# + id="SnwUFJB4B4Aw" colab={"base_uri": "https://localhost:8080/"} outputId="9ff06d16-15f5-4ef1-954c-5a6dbe73e9e8"
# Same check on a training example (should look better: seen during training).
example_idx = 0
src = vars(train_data.examples[example_idx])['src']
trg = vars(train_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
tokens, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'pred = {tokens}')
# + id="5IbwtKGXQRQv" colab={"base_uri": "https://localhost:8080/"} outputId="9dea35d0-40a1-4106-8e71-708c79eca1fc"
# And on the first test example.
example_idx = 0
src = vars(test_data.examples[example_idx])['src']
trg = vars(test_data.examples[example_idx])['trg']
print(f'src = {src}')
print(f'trg = {trg}')
tokens, attention = translate_sentence(src, SRC, TRG, model, device)
print(f'pred = {tokens}')
# + [markdown] id="xndxi8HR5QH_"
# Downloading the model name
# + id="47bvlKVO43k6" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="9863e219-8d44-43f2-ccb7-65cc3f335481"
files.download(MODEL_NAME)
# + [markdown] id="u_0KD0l1OM34"
# ### BLEU SCORE
# + id="1qF2GsD85SM-" colab={"base_uri": "https://localhost:8080/"} outputId="b5e94680-3ca1-486b-e8b7-3c7d0a991df7"
from torchtext.data.metrics import bleu_score
def calculate_bleu(data, src_field, trg_field, model, device, max_len = 50):
    """Corpus BLEU of greedy translations of *data*.

    Each reference is wrapped in a single-item list, as bleu_score expects a
    list of reference lists per candidate.
    """
    trgs = []
    pred_trgs = []
    for datum in data:
        src = vars(datum)['src']
        trg = vars(datum)['trg']
        pred_trg, _ = translate_sentence(src, src_field, trg_field, model, device, max_len)
        # cut off the trailing <eos> token
        pred_trg = pred_trg[:-1]
        pred_trgs.append(pred_trg)
        trgs.append([trg])
    return bleu_score(pred_trgs, trgs)
# BUG FIX: store the result under a distinct name so the imported bleu_score
# function is not shadowed (the original rebound the name after one call).
test_bleu = calculate_bleu(test_data, SRC, TRG, model, device)
print(f'BLEU score = {test_bleu*100:.2f}')
| 07_NMT_Project/bi-sw-en/01__Swedish_English.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ET_7-K8v6ZaG"
# # Ejercicio02: Dyslectionary
#
# El disleccionario es como un diccionario ordinario, excepto que organiza las palabras en función de su final en lugar de cómo empiezan.
#
# **Input:**
#
# apple
#
# banana
#
# grape
#
# kiwi
#
# pear
#
# **Output:**
#
# banana
#
# apple
#
# grape
#
# kiwi
#
# pear
#
# # Explicación de solución
#
# De un conjunto de grupos de palabras se almacena en un diccionario de palabras de acuerdo a la palabra y su clave que vendría a ser el último caracter, los cuales son ordenados, luego se formatea para que imprima todas las palabras alineadas al lado derecho ordenadamente.
# # Código
# + id="ZGfrDX636UVz"
from itertools import count
def printOrdered(longtext, words):
    """Print *words* sorted by their reversed spelling ("dyslectionary" order),
    each right-aligned to a field of width *longtext*."""
    for word in sorted(words, key=lambda w: w[::-1]):
        print(word.rjust(longtext))
def main():
    """Read words from stdin; a blank line ends a group, EOF ends input.

    After each group, the collected words are printed ordered by their
    endings and right-aligned to the longest word, followed by a blank
    separator line (no separator after the final, EOF-terminated group).
    """
    widest = -1
    group = []
    while True:
        try:
            line = input()
        except EOFError:
            printOrdered(widest, group)
            break
        if line:
            widest = max(widest, len(line))
            group.append(line)
        else:
            printOrdered(widest, group)
            print()
            widest = -1
            group = []
if __name__ == "__main__":
    main()
# + [markdown] id="NdInHKnM6m5E"
# # Prueba de juez
# 
# 
#
| Aula12_21_12_2021/Lab12Ejercicio02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Calculate power(x, n)
from nose.tools import assert_equal
def power(x, n):
    """Calculate x**n for a non-negative integer exponent n.

    Uses recursive squaring, so only O(log n) multiplications are made.
    Fixes the original base case, which returned x for n == 0 instead of
    1, and uses floor division so the recursion also works on Python 3
    (where `n / 2` yields a float).
    """
    if n == 0:
        return 1
    if n == 1:
        return x
    half = power(x, n // 2)
    if n % 2 == 0:
        return half * half
    return x * half * half
class TestProblems(object):
def test_power(self, solution):
assert_equal(solution(0, 2), 0)
assert_equal(solution(1, 12312), 1)
assert_equal(solution(2, 10), 1024)
assert_equal(solution(10, 4), 10000)
print 'Success: test_power'
def main():
    """Entry point: run the power() unit checks."""
    suite = TestProblems()
    suite.test_power(power)
if __name__ == '__main__':
    main()
# -
| Algorithms/Recursion and Divide & Conquer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] toc=true
# ## Gradio-games
# -
# Create directory and upload urls file into your server with
# ```
# scp -r <local/directory> <EMAIL>:<path/to/server/images/dir>
# ```
import os
import requests
from bs4 import BeautifulSoup
from fastai.vision.all import *
from fastbook import *
from torchsummary import summary
from torchvision.models import inception_v3
torch.cuda.set_device(2)
default_device()
# +
# for working around bot protection
google_image = "https://www.google.com/search?site=&tbm=isch&source=hp&biw=1873&bih=990&"
user_agent = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)" +
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
}
def download_images():
    """Scrape Google Images for each query and save the results to disk.

    For every (query, folder) pair, collect up to `num` thumbnail links
    (the `data-src` attribute of result <img> tags) and write each image
    to ./NfsGtaMinecraft/<folder>/<i>.jpg.

    Fixes vs. original: the target directory is created if missing
    (open() in 'wb' mode does not create parent dirs), and non-200
    responses are skipped instead of being saved as broken .jpg files.
    """
    data = [('gta san andreas', 'gta'), ('nfs', 'nfs'), ('minecraft', 'minecraft')]
    num = 300
    for query, folder in data:
        search_url = google_image + 'q=' + query
        response = requests.get(search_url, headers=user_agent)
        html = response.text
        soup = BeautifulSoup(html, 'html.parser')
        results = soup.findAll('img', {'class': 'rg_i Q4LuWd'})
        links = []
        for result in results:
            try:
                links.append(result['data-src'])
            except KeyError:
                continue
            if len(links) >= num:
                break
        # make sure the output directory exists before writing into it
        target_dir = './NfsGtaMinecraft/' + folder
        os.makedirs(target_dir, exist_ok=True)
        print(f"Downloading {len(links)} images...")
        for i, link in enumerate(links):
            response = requests.get(link)
            if response.status_code != 200:
                # skip dead links instead of saving an error page as a .jpg
                continue
            image_name = target_dir + '/' + str(i+1) + '.jpg'
            with open(image_name, 'wb') as fh:
                fh.write(response.content)
# -
# download and save the images
download_images()
# define path
path = Path('NfsGtaMinecraft/')
path.ls()
# not sure what we're doing here but ok
dls = ImageDataLoaders.from_folder(path, valid_pct=0.2, item_tfms=Resize(224), num_workers=16)
dls.show_batch()
# split into training and validation datasets
print("Training dataset size: \t\t", len(dls.train_ds))
print("Validation dataset size: \t", len(dls.valid_ds))
# train model
learn = vision_learner(dls, resnet18, metrics=accuracy)
# evaluate model
summary(learn.model)
# fine tuning
learn.fine_tune(4)
# show prediction results
learn.show_results()
# what's this button do? No, seriously!
interp = ClassificationInterpretation.from_learner(learn)
# plot confusion matrix
interp.plot_confusion_matrix(figsize=(3, 3))
# export model
learn.export("./resnet18-1.pkl")
# !mv NfsGtaMinecraft/resnet18-1.pkl .
| gradio-app/w13-gradio_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TensorFlow: Logistic Regression
# + deletable=true editable=true
import tensorflow as tf
import numpy as np
import pandas as pd
from pandas import DataFrame as DF, Series
# -
# **Data Source:** https://www.kaggle.com/c/titanic/data
#
#
# **Variable Definition Key**
#
# **survival** Survival 0 = No, 1 = Yes<br>
# **pclass** Ticket class 1 = 1st, 2 = 2nd, 3 = 3rd<br>
# **sex** Sex <br>
# **Age** Age in years <br>
# **sibsp** # of siblings / spouses aboard the Titanic <br>
# **parch** # of parents / children aboard the Titanic <br>
# **ticket** Ticket number <br>
# **fare** Passenger fare <br>
# **cabin** Cabin number <br>
# **embarked** Port of Embarkation C = Cherbourg, Q = Queenstown, S = Southampton
#
# **Variable Notes**
#
# pclass: A proxy for socio-economic status (SES)<br>
# 1st = Upper<br>
# 2nd = Middle<br>
# 3rd = Lower
#
# age: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5
#
# sibsp: The dataset defines family relations in this way...<br>
# Sibling = brother, sister, stepbrother, stepsister<br>
# Spouse = husband, wife (mistresses and fiancés were ignored)
#
# parch: The dataset defines family relations in this way...<br>
# Parent = mother, father<br>
# Child = daughter, son, stepdaughter, stepson<br>
# Some children travelled only with a nanny, therefore parch=0 for them.<br>
# + deletable=true editable=true
import requests
data_url = 'https://storage.googleapis.com/kaggle-competitions-data/kaggle/3136/train.csv?GoogleAccessId=<EMAIL>&Expires=1511997025&Signature=PEIvkHcr9xiKriHwS%2Fk2TzHDxBnlnDOLGP2sap%2FKeRObtkx8CRZPM45vEoVPxnT4q4faBGp4CHLsyS6zU309K%2F4RFq0e41HoqAOpj8vdSi0Uh6GqQGFgMfhvfEoxtTOhOKjcIi9Z51%2FswGttevmyUcjS6t2oePguBFRd5W7bn27u1dWBvMIB9GiGMmY0W0iopb7sPLAvur308QrP%2F97nl6i05NKljB1Myb02dGi3t14wvEPPew%2FD3mPjLsJsi8XEO209R8%2Fg1oWE3dyj5F9mB0DH7e8NKu%2F8EhW2mXijADPtMCn2mSoJLH%2F%2By7gc%2FtlMzN0KolYpg8plUvEI6EV8rg%3D%3D'
# get string of comma separated values
r = requests.get(data_url)
# create dataframe using read_csv on IO object
from io import StringIO
data = pd.read_csv(StringIO(r.content.decode('utf-8')))
# + deletable=true editable=true
data.head()
# + deletable=true editable=true
data.drop(['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
# + deletable=true editable=true
# save both to csv
data.to_csv('data.csv', index=False)
# + deletable=true editable=true
del data
import gc
gc.collect()
# + deletable=true editable=true
# read data
data = pd.read_csv('data.csv')
data.head()
# -
data.shape
# + deletable=true editable=true
data.fillna({'Age': -1,
'Cabin': 'Unk',
'Embarked': 'Unk',
'Fare': -1},
inplace=True);
# + [markdown] deletable=true editable=true
# ### Very Basic - Hand Coded Logistic Regression
# + deletable=true editable=true
# convert sex binary
data.loc[:, 'Sex'] = (data.Sex == 'female').astype(int)
# train/test split
Xtr = data.loc[:, ['Pclass','Sex','Age','SibSp','Parch','Fare']].sample(frac=0.75)
Xts = data[~data.index.isin(Xtr.index)].loc[:, ['Pclass','Sex','Age','SibSp','Parch','Fare']]
# one-hot-encode Ytr and Yts (quick method)
Ytr = pd.get_dummies(data[data.index.isin(Xtr.index)].Survived).values
Yts = pd.get_dummies(data[~data.index.isin(Xtr.index)].Survived).values
# + deletable=true editable=true
import tensorflow as tf
# data format is as usual:
# Xtr and test_X have shape (num_instances, num_features)
# Ytr and test_Y have shape (num_instances, num_classes)
num_features = Xtr.shape[1]
num_classes = 2
# shape=[None, num_features] tells the model to accept different numbers of datapoints
X = tf.placeholder('float', [None, num_features])
Y = tf.placeholder('float', [None, num_classes])
# W - weights array
W = tf.Variable(tf.zeros([num_features, num_classes]))
# B - bias array
B = tf.Variable(tf.zeros([num_classes]))
# define the logistic model
# y=wx+b as argument of softmax
yhat = tf.nn.softmax(tf.matmul(X, W) + B)
# define a loss function
# NOTE(review): softmax_cross_entropy_with_logits expects RAW logits, but
# `yhat` has already been passed through tf.nn.softmax above, so softmax is
# applied twice here. Training still proceeds, but the loss is not a true
# cross-entropy; consider passing `tf.matmul(X, W) + B` as the logits.
loss_fn = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yhat, labels=Y))
# define optimizer and minimize on loss_fn
opt = tf.train.AdamOptimizer(0.01).minimize(loss_fn)
# create session
sess = tf.Session()
# init vars
init = tf.initialize_all_variables()
sess.run(init)
num_epochs = 10
# loop over num_epochs and run optimization step on
# full data each time
for i in range(num_epochs):
sess.run(opt, feed_dict={X: Xtr, Y: Ytr})
# accuracy function
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(yhat, 1), tf.argmax(Y, 1)), 'float'))
# get the test accuracy
accuracy_value = sess.run(accuracy, feed_dict={X: Xts, Y: Yts})
# + deletable=true editable=true
accuracy_value
# + [markdown] deletable=true editable=true
# ## Logistic Regression With Batching
# + [markdown] deletable=true editable=true
# ### Input Function
# + deletable=true editable=true
# read data
data = pd.read_csv('data.csv')
data.head()
# + deletable=true editable=true
# define columns and default values
_csv_column_defaults = [[0],[-1],['Unk'],[-1.],[0],[0],[-1.],['Unk'],['Unk']]
_csv_columns = data.columns.tolist()
# define input function
def input_fn(csv_file, feature_names, batch_size=16, n_epochs=10, shuffle=False):
    """Build a batched (features_dict, labels) input pipeline from a CSV file.

    Reads `csv_file` line by line (skipping the header), parses each row
    against `_csv_column_defaults`, pops the 'Survived' column out as the
    label, optionally shuffles, then batches and repeats for `n_epochs`.
    """
    def decode_csv(line):
        # Parse one CSV line into a {column_name: tensor} dict plus label.
        parsed_line = tf.decode_csv(line, _csv_column_defaults)
        features_dict = dict(zip(feature_names, parsed_line))
        labels = features_dict.pop('Survived')  # removes this from dict
        return features_dict, labels
    dataset = (tf.data.TextLineDataset(csv_file)          # Read text file
               .skip(1)                                   # Skip header row
               .map(decode_csv, num_parallel_calls=3))    # Transform each elem by applying decode_csv fn
    # BUG FIX: the original applied .shuffle() BEFORE `dataset` was defined,
    # which raised UnboundLocalError whenever shuffle=True.
    if shuffle:
        dataset = dataset.shuffle(buffer_size=100*1024)   # buffer 100KB
    dataset = dataset.batch(batch_size)  # create a batch of size `batch_size`
    dataset = dataset.repeat(n_epochs)
    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels
# + [markdown] deletable=true editable=true
# ### Handling Categorical Features
#
# Using `tf.feature_column` is a way to map data to a model, as opposed to using feed dictionaries. It can be efficient and help with certain preprocessing tasks.
# + [markdown] deletable=true editable=true
# #### Base Categorical Features
# + deletable=true editable=true
# pclass = tf.feature_column.categorical_column_with_identity(
# 'Pclass', num_buckets=3)
sex = tf.feature_column.categorical_column_with_vocabulary_list(
'Sex', vocabulary_list=['female','male','Unk'])
embarked = tf.feature_column.categorical_column_with_vocabulary_list(
'Embarked', vocabulary_list=['S','C','Q','Unk'])
# + [markdown] deletable=true editable=true
# #### Base Continuous Features
# + deletable=true editable=true
age = tf.feature_column.numeric_column('Age')
# age_buckets = tf.feature_column.bucketized_column(
# age, boundaries=[5.,10,18,25,35,45,55,65])
sib = tf.feature_column.numeric_column('SibSp')
parch = tf.feature_column.numeric_column('Parch')
fare = tf.feature_column.numeric_column('Fare')
# + [markdown] deletable=true editable=true
# ### Define Model
# + deletable=true editable=true
columns = [age, sib, parch, fare, sex, embarked]
model_dir = 'lr_model'
model = tf.estimator.LinearClassifier(model_dir=model_dir,
feature_columns=columns,
optimizer=tf.train.AdamOptimizer())
# + [markdown] deletable=true editable=true
# ### Train Model
# + deletable=true editable=true
model.train(input_fn=lambda: input_fn('data.csv', _csv_columns))
# + deletable=true editable=true
results = model.evaluate(input_fn=lambda: input_fn('data.csv', _csv_columns, n_epochs=1))
# + deletable=true editable=true
results
| Logistic Regression & NN Basics/logistic_regression_tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
#
# # 머신러닝(소프트웨어융합전공) 강의 소개
# + [markdown] slideshow={"slide_type": "slide"}
# ## 소개
#
# 최근 몇 년 사이에 머신러닝/딥러닝 기술이 획기적으로 발전하면서
# 데이터분석 및 인공지능 관련 연구의
# 중요성이 사회, 경제, 산업의 거의 모든 분야에 지대한 영향을 미치고 있으며,
# 앞으로 한 동안 그런 경향이 더욱 강화될 것으로 기대된다.
# 이에 본 강의에서는 머신러닝의 기본 아이디어와 다양한 활용법을 실전 예제와
# 함께 전달하여 학습자의 문제해결능력을 향상시키고자 한다.
# 머신러닝의 다양한 개념과 기법에 대한 이해는 이어지는 딥러닝 강좌
# 수강을 위한 필수조건이기도 하다.
#
# 실습에 사용되는 언어는 파이썬(Python)이며, 현재 머신러닝 및 데이터과학 분야에서
# 가장 많이 사용되는 언어이다.
# 학습에 필요한 파이썬 프로그래밍 지식은 학기 시작 이전에 학습해두어야 한다.
# 미리 공부해야 할 파이썬 기초 내용은 아래 __예비학습 안내__에서 소개한다.
#
# 머신러닝 알고리즘의 작동원리를 제대로 이해하려면 선형대수, 확률과통계,
# 미적분학에 대한 기본 지식이 있어야 한다.
# 하지만 이론은 최소로 필요한 정도만 다룰 것이기에 따로 예비학습이 요구되지는 않는다.
#
# __참고:__ 인공지능, 데이터과학, 머신러닝, 딥러닝은 아래의 관계를 갖는다.
#
# * 인공지능: 사고나 학습 등 인간이 가진 지적능력을 컴퓨터를 통해 구현하는 기술 또는 해당 연구 분야
# * 데이터과학: 수학과 통계 지식을 활용하여 데이터로부터 특정 정보를 추출하는 과학
# * 머신러닝: 컴퓨터가 데이터로부터 스스로 정보를 추출하는 기법. 데이터과학, 인공지능 등의 분야에서 가장 중요한 문제해결 기법 또는 해당 연구 분야
# * 딥러닝: 인공신경망 이론을 기반으로 복잡한 비선형 문제를 해결하는 머신러닝 기법 또는 해당 연구 분야
#
# 
#
# 그림 출처: [교보문고: 에이지 오브 머신러닝](https://bit.ly/2YmwB7U)
#
# ## 참여 전제조건
#
# * 선형대수, 확률과통계, 미적분학 과목 이수
#
# * 파이썬 프로그래밍 기초 지식
# * 기본 자료구조, 함수, 파일, 모듈, 클래스, 객체, 상속
# * numpy 및 pandas 패키지 기본 활용법
# * 그래프와 데이터 시각화
#
# ```
#
# ```
# + [markdown] slideshow={"slide_type": "slide"}
# ```
#
#
# ```
#
# ## 주요 학습내용
#
# * 한 눈에 보는 머신러닝
# * 머신러닝 프로젝트 처음부터 끝까지
# * 분류
# * 모델훈련
# * 서포트벡터머신
# * 결정트리
# * 앙상블학습과 랜덤포레스트
# * 차원축소
# * 비지도 학습
#
# ## 교재
#
# * 핸즈온 머신러닝(2판), 오렐리앙 제롱, 한빛미디어
#
# ## 테스트 안내
#
# 학기 둘쨋주에 예비학습 내용을 대상으로 테스트를 실시하여 결과를 성적에 반영할 예정이다.
# 따라서 예비학습 내용을 반드시 이수해야 한다.
#
# ```
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# ```
# -
# ```
#
#
#
# ```
#
# ## 머신러닝(응용수학전공) 강의 예비학습 안내
#
# ### 예비학습 주요 내용과 목적
#
# 학기가 시작하기 전에 아래 내용을 미리 공부해야 하며,
# 이유와 목적은 다음과 같다.
#
# * 파이썬 프로그래밍 왕기초 학습
# * 학기 중에 학습할 내용 일부 익혀두기
# * 학기 초에 아래 내용을 반복하겠지만 보다 많은 내용을 보다 빠른 속도로 학습 예정
# * 파이썬 기초지식이 부족하면 학습에 많은 어려움 발생
#
# ### 예비학습 1
#
# * 구글 코랩(Google Colab) 활용법 익혀두기
# * 구글 코랩만 이용하여 강의 진행 예정
# * 참조: [구글 코랩 기초 사용법 안내 동영상](https://bit.ly/2L1NAcE)
#
# ### 예비학습 2
#
# * __파이썬 데이터과학 입문__ 강좌에서 소개하는 파이썬 프로그래밍 왕기초 학습
# * 참조: [파이썬 데이터과학 입문](https://formal.hknu.ac.kr/Gongsu-DataSci/)
# * 실습환경: [구글 코랩(Google Colab)](https://colab.research.google.com/)
#
# ### 예비학습 3
#
# * 예비학습2 과정을 마친 후 아래 동영상 시리즈 시청 추천
# * 참조: [Data analysis in Python with Pandas](http://bit.ly/3ov40rn)
# * 내용: 다양한 판다스 실전 활용법 소개
#
# * 자동번역 한글자막 수준이 매우 우수함.
# * 설정 => 자막 => 자동 번역 => 한국어.
#
# * 참조: [강의노트](http://bit.ly/3osVCJ7)
#
# ### 예비학습 4
#
# * 파이썬 객체지향 프로그래밍 지식이 약한 경우 해당
# * 클래스, 객체, 상속 등에 대한 개념을 잘 다져야 함.
# * 참조: [파이썬 프로그래밍 기초](https://formal.hknu.ac.kr/ProgInPython/) (8장에서 10장 내용은 필수)
# * [유튜브 파이썬 프로그래밍 기초](http://bit.ly/2YmDMwP)에서 9장까지 강의 동영상 제공함.
# * pip08-1 동영상부터 8장에 해당함.
# * 실습환경: [레플릿](https://repl.it) 또는 [구글 코랩(Google Colab)](https://colab.research.google.com/)
#
# ### 학습법
#
# * 학습법: 강의노트에서 소개하고 설명하는 내용을 구글 코랩에서 직접 타이핑 하면서 따라해볼 것.
#
# * __주의사항:__ 주어진 구글 코랩 강의노트를 단순히 실행만 하면 제대로 학습되지 않을 것임.
| ref_intro_lectures/ML-SC-Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # AutoComplete
# ### Aim:
# To suggest top k words given a substring
# ### Solution:
# - Split each word into n-grams
# - Sort words based on distance between given word and each candidate. Implemented distances are
# - Jaccard distance : Intersection of ngrams over Union of ngrams
# - Intersection of ngrams : Number of ngrams common between word and candidate
# - Return top k words
# - Additionally, given a corpus such as a user's chat history, the k words could further be sorted based on frequency in corpus
# **Importing words from corpus**
# <br>Omitting words of 4 letters or fewer (only len > 4 is kept), so that at least two 4-grams are extracted for every word.
from nltk import jaccard_distance,ngrams
from nltk.corpus import words
word_list = words.words()
reduced_wlist = [w.lower() for w in word_list if len(w)>4]
print "Number of words in corpus :",len(reduced_wlist)
print "Top 10 words (alphabetically):\n",reduced_wlist[:10]
# **Reducing sample space for each word by filtering words that start with same substring as the word**
# <br>`compare_len` : Number of characters of each word that will be matched
#
#
#
# Note : Though this step reduces the sample space, it also assumes that the first few characters of the word have been spelled right.
# Hence it will not be able to handle typos.
# For example, if the user types *cake* instead of *bake*, the function cannot suggest *bakery*
def get_candidates(wlist,word,compare_len=1):
    """Return the words in `wlist` whose first `compare_len` characters
    match those of `word` (a prefix filter that shrinks the search space)."""
    prefix = word[:compare_len]
    matches = []
    for candidate in wlist:
        if candidate[:compare_len] == prefix:
            matches.append(candidate)
    return matches
# **Get distance between word and candidate. **
# Implemented distances are
# - Jaccard distance : Intersection of ngrams over Union of ngrams
# - Intersection of ngrams : Number of ngrams common between word and candidate
#
# <br>Note : Since Jaccard Distance will penalise longer candidates and favour shorter candidates with more ngrams common and fewer uncommon ngrams.
def distance(candidate,word,jacc = True):
    """Distance between `word` and `candidate` over their 4-gram sets.

    jacc=True  -> Jaccard distance of the two 4-gram sets.
    jacc=False -> negated size of the 4-gram intersection, so that an
                  ascending sort still puts the best match first.
    """
    word_grams = set(ngrams(word, n=4))
    cand_grams = set(ngrams(candidate, n=4))
    if jacc:
        return jaccard_distance(word_grams, cand_grams)
    return -len(word_grams & cand_grams)
# **Suggest words**
# <br>Get candidates and sort them by distance. Return top k candidates
def predict(word,k=5):
    """Return the top-`k` autocomplete suggestions for `word`.

    Candidates share the word's leading characters (get_candidates) and
    are ranked by number of common 4-grams (distance with jacc=False).
    """
    query = word.lower()
    pool = get_candidates(reduced_wlist, query)
    ranked = sorted(pool, key=lambda cand: distance(cand, query, False))
    return ranked[:k]
for w in ['Exac','Amaz','Bril']:
print w,":",predict(w,5)
# Todo
# - Use a corpus such as chat history to get frequency of each word and use that as well to sort candidates
# - Train a sequential model on a corpus (after embedding words) to also rank suggestions based on the words that occured before it (CBOW)
# - Since one of the most common uses of AutoComplete is to fill longer words rather than shorter ones, consider ranking words by descending order of word length.
| .ipynb_checkpoints/Primary-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + Collapsed="false"
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score
import random
import numpy as np
import pandas as pd
import os
os.chdir("..")
# %load_ext autoreload
# %autoreload 2
# + [markdown] Collapsed="false"
# # Utility Functions
# + Collapsed="false"
def make_mixed_classification(n_samples, n_features, n_categories):
    """Create a synthetic binary-classification frame with mixed feature types.

    Generates `n_samples` x `n_features` numeric data with
    sklearn.make_classification, then discretizes `n_categories` distinct
    columns into quartile bin codes so they act as categorical features.

    Returns (data, cat_col_names, num_col_names) where `data` holds all
    features plus a 'target' column.
    """
    X, y = make_classification(n_samples=n_samples, n_features=n_features,
                               random_state=42, n_informative=5)
    # BUG FIX: random.choices samples WITH replacement, so duplicate picks
    # could produce fewer than n_categories categorical columns;
    # random.sample guarantees distinct column indices.
    cat_cols = random.sample(range(X.shape[-1]), k=n_categories)
    num_cols = [i for i in range(X.shape[-1]) if i not in cat_cols]
    for col in cat_cols:
        # quartile-bin the column and keep the integer bin codes
        X[:, col] = pd.qcut(X[:, col], q=4).codes.astype(int)
    col_names = []
    num_col_names = []
    cat_col_names = []
    for i in range(X.shape[-1]):
        if i in cat_cols:
            col_names.append(f"cat_col_{i}")
            cat_col_names.append(f"cat_col_{i}")
        if i in num_cols:
            col_names.append(f"num_col_{i}")
            num_col_names.append(f"num_col_{i}")
    X = pd.DataFrame(X, columns=col_names)
    y = pd.Series(y, name="target")
    data = X.join(y)
    return data, cat_col_names, num_col_names
def print_metrics(y_true, y_pred, tag):
    """Print accuracy and F1 for the given true/predicted labels.

    Accepts pandas objects or numpy arrays; pandas inputs are converted
    via .values and any multi-dimensional array is flattened first.
    """
    def _as_flat_array(arr):
        # normalize pandas containers to raw arrays, then flatten
        if isinstance(arr, (pd.DataFrame, pd.Series)):
            arr = arr.values
        return arr.ravel() if arr.ndim > 1 else arr

    y_true = _as_flat_array(y_true)
    y_pred = _as_flat_array(y_pred)
    val_acc = accuracy_score(y_true, y_pred)
    val_f1 = f1_score(y_true, y_pred)
    print(f"{tag} Acc: {val_acc} | {tag} F1: {val_f1}")
# + [markdown] Collapsed="false"
# # Generate Synthetic Data
#
# First of all, let's create a synthetic data which is a mix of numerical and categorical features
# + Collapsed="false"
data, cat_col_names, num_col_names = make_mixed_classification(n_samples=10000, n_features=20, n_categories=4)
train, test = train_test_split(data, random_state=42)
train, val = train_test_split(train, random_state=42)
# + [markdown] Collapsed="false"
# # Importing the Library
# + Collapsed="false"
from pytorch_tabular import TabularModel
from pytorch_tabular.models import CategoryEmbeddingModelConfig, NodeConfig, TabNetModelConfig
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig
# + [markdown] Collapsed="false"
# ## Category Embedding Model
# + Collapsed="false"
data_config = DataConfig(
target=['target'], #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
)
trainer_config = TrainerConfig(
auto_lr_find=True, # Runs the LRFinder to automatically derive a learning rate
batch_size=1024,
max_epochs=100,
gpus=1, #index of the GPU to use. 0, means CPU
)
optimizer_config = OptimizerConfig()
model_config = CategoryEmbeddingModelConfig(
task="classification",
layers="1024-512-512", # Number of nodes in each layer
activation="LeakyReLU", # Activation between each layers
learning_rate = 1e-3
)
experiment_config = ExperimentConfig(project_name="PyTorch Tabular Example", run_name="synthetic_classification_cat_embedding", exp_watch="gradients", log_target="wandb", log_logits=True)
tabular_model = TabularModel(
data_config=data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
experiment_config=experiment_config
)
# + Collapsed="false" tags=[]
tabular_model.fit(train=train, validation=val)
# + Collapsed="false"
result = tabular_model.evaluate(test)
# + [markdown] Collapsed="false"
# ## Node Model
# + Collapsed="false"
data_config = DataConfig(
target=['target'], #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
)
trainer_config = TrainerConfig(
auto_lr_find=False, # Runs the LRFinder to automatically derive a learning rate
batch_size=64,
accumulate_grad_batches=16,
max_epochs=100,
gpus=1, #index of the GPU to use. 0, means CPU
)
optimizer_config = OptimizerConfig()
model_config = NodeConfig(
task="classification",
learning_rate = 1e-3
)
experiment_config = ExperimentConfig(project_name="PyTorch Tabular Example",
run_name="synthetic_classification_node",
exp_watch="gradients",
log_target="wandb",
log_logits=True)
tabular_model = TabularModel(
data_config=data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
experiment_config=experiment_config
)
# + Collapsed="false" tags=[]
tabular_model.fit(train=train, validation=val)
# + Collapsed="false"
result = tabular_model.evaluate(test)
# + [markdown] Collapsed="false"
# ## Node Model with Category Embeddings
# + Collapsed="false"
data_config = DataConfig(
target=['target'], #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
)
trainer_config = TrainerConfig(
auto_lr_find=False, # Runs the LRFinder to automatically derive a learning rate
batch_size=64,
# accumulate_grad_batches=16,
max_epochs=100,
min_epochs=10,
gpus=1, #index of the GPU to use. 0, means CPU
)
optimizer_config = OptimizerConfig()
model_config = NodeConfig(
task="classification",
learning_rate = 1e-3,
embed_categorical=True
)
experiment_config = ExperimentConfig(project_name="PyTorch Tabular Example",
run_name="synthetic_classification_node_cat_embed",
exp_watch="gradients",
log_target="wandb",
log_logits=True)
tabular_model = TabularModel(
data_config=data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
experiment_config=experiment_config
)
# + Collapsed="false" tags=[]
tabular_model.fit(train=train, validation=val)
# + Collapsed="false"
result = tabular_model.evaluate(test)
# + [markdown] Collapsed="false"
# ## TabNet Model
# + Collapsed="false"
data_config = DataConfig(
target=['target'], #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
continuous_cols=num_col_names,
categorical_cols=cat_col_names,
)
trainer_config = TrainerConfig(
auto_lr_find=True, # Runs the LRFinder to automatically derive a learning rate
batch_size=2048,
# accumulate_grad_batches=16,
max_epochs=50,
min_epochs=10,
early_stopping=None,
gpus=1, #index of the GPU to use. 0, means CPU
)
optimizer_config = OptimizerConfig()
model_config = TabNetModelConfig(
task="classification",
learning_rate = 1e-5,
n_d = 16, n_a=16, n_steps=4,
)
experiment_config = ExperimentConfig(project_name="PyTorch Tabular Example",
run_name="synthetic_classification_tabnet",
exp_watch="gradients",
log_target="wandb",
log_logits=True)
tabular_model = TabularModel(
data_config=data_config,
model_config=model_config,
optimizer_config=optimizer_config,
trainer_config=trainer_config,
experiment_config=experiment_config
)
# + Collapsed="false" tags=[]
tabular_model.fit(train=train, validation=val)
# + Collapsed="false"
result = tabular_model.evaluate(test)
# + Collapsed="false"
| docs/tutorials/05-Experiment_Tracking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# In[1]:
__author__ = 'kazem_safari'
import os
import time
import tensorflow as tf
from mnist_helper_functions import mnist_inference, param_counter
# Load MNIST dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# In[2]:
#import matplotlib.pyplot as plt
# #%matplotlib inline
#sample_image = mnist.train.next_batch(1)[0]
#print(sample_image.shape)
#sample_image = sample_image.reshape([28, 28])
#plt.imshow(sample_image, cmap='Greys')
def main():
    """Build, train, evaluate and checkpoint a small MNIST network.

    Uses the TF1 graph/session API: defines placeholders, delegates the
    model/loss/accuracy ops to mnist_inference(), trains with Adam for
    `max_step` batches of 50 (logging accuracy every 200 steps), saves a
    checkpoint to `model_path`, and reports accuracy on the first 1000
    test images.  Also prints parameter count and wall-clock time.
    """
    # Specify training parameters
    model_path = './mnist_trad/mnist_trad.ckpt' # path where the training model is saved
    max_step = 600 # the maximum iterations. After max_step iterations, the training will stop no matter what
    start_time = time.time() # start timing
    ###############################
    # BUILDING THE NETWORK
    ################################
    # start from an empty graph so repeated runs don't accumulate ops
    tf.reset_default_graph()
    # placeholders for input data and input labeles
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
    # reshape the input image (flat 784 -> 28x28x1 for conv layers)
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    # layer widths and class count passed to mnist_inference
    nh1 = 32
    nh2 = 64
    nh3 = 1024
    num_classes = 10
    #inference function
    (y_conv, cross_entropy, correct_prediction, accuracy) =\
        mnist_inference(x_image, y_, keep_prob, nh1, nh2, nh3, num_classes)
    #define an optimizer function to setup training
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # start a session
    with tf.Session() as sess:
        # Add the variable initializer Op.
        init = tf.global_variables_initializer()
        # Run the Op to initialize the variables.
        sess.run(init)
        # start training
        for i in range(max_step+1):
            batch = mnist.train.next_batch(50) # make the data batch, which is used in the training iteration.
            # the batch size is 50
            if i%200 == 0:
                # output the training accuracy every 200 iterations
                # (dropout disabled: keep_prob = 1.0 for evaluation)
                train_accuracy = sess.run(accuracy, feed_dict={
                    x:batch[0], y_:batch[1], keep_prob: 1.0})
                print("step %d, training accuracy %g"%(i, train_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) # run one train_step
        #save the "mnist_trad" model after training is finished
        save_path = saver.save(sess, model_path)
        print("Model saved in file: %s" % save_path)
        # print test error
        print("test accuracy %g"%sess.run(accuracy, feed_dict={
            x: mnist.test.images[0:1000], y_: mnist.test.labels[0:1000], keep_prob: 1.0}))
        param_counter()
    # calculate and print the time it took to finish the training
    stop_time = time.time()
    print('The training takes %f second to finish'%(stop_time - start_time))
if __name__ == "__main__":
    main()
# -
| mnist_trad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Based on
# - https://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation
# -
from sklearn import metrics
from sklearn import datasets
# Load data
x, y = datasets.load_iris(return_X_y=True)
print (x.shape, y.shape)
from sklearn.cluster import KMeans
# Clusters
model = KMeans(n_clusters=3, random_state=1)
clusters = model.fit(x)
# The cluster assignments
labels = clusters.labels_
# ## True clusters are unknown
# Silhoutte score - higher is a better cluster
metrics.silhouette_score(x, labels, metric='euclidean')
# Davies-Bouldin score - lower is better cluster
metrics.davies_bouldin_score(x, labels)
# ## When true and predicted clusters are known
from sklearn import metrics
labels_true = [0, 0, 0, 1, 1, 1]
labels_pred = [0, 0, 1, 1, 2, 2]
metrics.homogeneity_score(labels_true, labels_pred)
metrics.completeness_score(labels_true, labels_pred)
metrics.v_measure_score(labels_true, labels_pred)
# Three quantities in one call
metrics.homogeneity_completeness_v_measure(labels_true, labels_pred)
labels_true = [0, 0, 0, 1, 1, 1]
labels_pred = [0, 0, 0, 2, 2, 2]
metrics.homogeneity_completeness_v_measure(labels_true, labels_pred)
| sample-code/l10-11-unsupervised-ml/clustering-quality-measures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Zadanie:
#
# Przeprowadziliśmy ankietę mającą zbadać cechy psychologiczne polskich internautów. Na wynik badania składają się dwa pliki:
#
# 1. users.csv - dane demograficzne ankietowanych oraz przeglądarka z jakiej korzystają.
#
# 2. personality.csv - profil psychologiczny ankietowanych, opisany przez 5 cech: A-E.
#
#
# Opis cech demograficznych:
#
# · D01 Płeć
#
# · D02 Rok urodzenia
#
# · D03 Wykształcenie - podstawowe, zawodowe, średnie, wyższe
#
# · D04 Status zawodowy
#
# · D05 Wielkość miejscowości - wieś, do 20k, do 100k, do 500k, powyżej
#
# · D06 Sytuacja finansowa
#
# · D07 Rozmiar gospodarstwa domowego
#
#
# Szukamy odpowiedzi na następujące pytania:
#
# 1. Czy istnieje związek pomiędzy posiadanymi przez nas informacjami o ankietowanych, a ich profilem psychologicznym?
#
# 2. Czy możemy podzielić ankietowanych na grupy osób o podobnym profilu psychologicznym? Jakie to grupy, co wyróżnia każdą z nich, jaka jest ich charakterystyka demograficzna?
#
#
# Przeprowadź odpowiednią analizę danych. Przygotuj krótkie, wysokopoziomowe podsumowanie managementu oraz paczkę z kodem pozwalającym na odtworzenie najważniejszych wyników oraz dalszy rozwój rozwiązań
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# ## Exploratory Data Analysis
# %reload_ext autoreload
# %autoreload 2
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import eda  # project-local helper module (provides categ_summary used below)
# -
# Load the two survey tables: demographics + browser info, and the
# five A-E psychological profile scores. One row per respondent per table.
users_df = pd.read_csv('data/users.csv')
personality_df = pd.read_csv('data/personality.csv')
users_df.head()
personality_df.head()
# ### first let's get rid of duplicated entries
# +
# Report how many fully-duplicated rows each table contains, then drop them.
deduped_users = users_df.drop_duplicates()
deduped_personality = personality_df.drop_duplicates()
n_dup_users = users_df.shape[0] - deduped_users.shape[0]
n_dup_personality = personality_df.shape[0] - deduped_personality.shape[0]
print(f"there were {n_dup_users} duplicated user entries" )
print(f"there were {n_dup_personality} duplicated personality entries" )
users_df = deduped_users
personality_df = deduped_personality
# -
# # Data cleaning
# ### 1. Split 'UserBrowser' into many columns: 'Browser', 'Version', 'Device'
# Split the composite 'UserBrowser' string into Browser / Version / Device.
# Assumes the format is exactly "Browser Version (Device)" — three
# space-separated tokens; TODO confirm against the raw data.
users_df[['Browser', 'Version', 'Device']] = users_df.UserBrowser.apply(lambda row: pd.Series(row.split(" ")))
# Strip the surrounding parentheses from the device token, e.g. "(desktop)".
users_df.Device = users_df.Device.apply(lambda row: row.strip('()'))
users_df.drop('UserBrowser', axis=1, inplace=True)
users_df.head()
# now, we have to convert the new columns into numerical ones
# Frequency summaries of each categorical column (eda is a project helper).
eda.categ_summary(users_df['Browser'])
# most use chrome
eda.categ_summary(users_df['Device'])
# tablets are very rare
eda.categ_summary(users_df['Version'])
# There are too many versions, and they don't really make an impact
users_df = users_df.drop(['Version'], axis=1)
# finally we convert categorical values into numerical by using one hot encoding
users_df = pd.get_dummies(users_df, columns=['Browser', 'Device'])
# ### 2 . sex into binary
# Encode sex as binary: M -> 0, K -> 1 (presumably "kobieta"/female — confirm
# with the survey codebook).
users_df['Sex'] = users_df['D01'].map({"M":0, "K": 1})
# ### 3. Change dtypes from 'float64' to 'int16'
# (Columns D05-D07 have 'nan' values, hence they are left as float)
users_df.dtypes
# D02 (birth year), D03 (education), D04 (professional status) have no NaNs,
# so they can safely be downcast to int16.
users_df = users_df.astype(dtype={'D02':np.int16, 'D03':np.int16, 'D04':np.int16})
# #### check for those nan values
# Count NaNs per column and show only the columns that have any.
col_nan = users_df.isna().sum()
print("Column | %")
col_nan[col_nan > 0.]
# Inspect the offending row(s) via one of the NaN-bearing columns.
users_df[users_df['D05'].isna()]
# only one record with unknown information; for simplicity we discard it. However, if data were scarce, missing values should instead be imputed, e.g. with the averages of other columns where applicable
users_df = users_df.dropna()
# ### 4. Assuming that this data comes from the Polish market, we can divide the year of birth column into generations
# source : https://natemat.pl/235903,do-jakiego-pokolenia-naleze-generacja-z-to-najliczniejsza-grupa-w-polsce
#
# 
users_df['D02'].hist()
# Map birth year to a Polish generation label. The ranges are half-open
# (range() excludes the stop year) and jointly cover every year from the
# minimum in the data up to 2019; the default 0 only triggers for years
# outside that span.
year_of_birth_mapper = {"pokolenie Z": range(1995, 2020),
                        "pokolenie Y": range(1980, 1995),
                        "pokolenie X": range(1964, 1980),
                        "pokolenie BB": range(1946, 1964),
                        "other": range(users_df['D02'].min(), 1946)}
users_df['Generation'] = users_df['D02'].apply(lambda x: next((k for k, v in year_of_birth_mapper.items() if x in v), 0))
users_df['Generation'].hist()
# this was just for show as we need to convert these into numerical form
year_of_birth_mapper_to_numerical = {"pokolenie Z": 5,
                                     "pokolenie Y": 4,
                                     "pokolenie X": 3,
                                     "pokolenie BB": 2,
                                     "other": 1}
# Direct dictionary lookup with the same 0 default. The original scanned for
# a substring match (`x in k`), which is fragile — e.g. a label "pokolenie B"
# would match the key "pokolenie BB" — and raises TypeError for the non-string
# default label 0. .get() gives identical results for all labels produced
# above and is robust to both cases.
users_df['Generation'] = users_df['Generation'].apply(lambda x: year_of_birth_mapper_to_numerical.get(x, 0))
# Give the coded demographic columns readable names.
users_df = users_df.rename(columns = {"D03": "Education",
"D05": "City size",
"D04": "Professional status",
"D06": "Financial_situation",
"D07": "Size of Household"})
# D01 (sex) and D02 (birth year) are now redundant: they were re-encoded
# above as 'Sex' and 'Generation'.
users_df = users_df.drop(["D01", "D02"], axis=1)
# #### We can assume that the higher the number, the better the financial situation. Also this follows a normal distribution implying the wealth distribution is fairly representative of the population.
users_df['Financial_situation'].hist()
users_df['Size of Household'].hist()
users_df['Professional status'].hist()
users_df.head()
# #### end of column preprocessing
# # pre-statistical analysis:
# ### let's see if we have any duplicates in the form of the same user but with different variable values
# +
# Bug fix: these were bare f-string expressions in the middle of a cell, so
# they were evaluated and silently discarded — only a cell's final expression
# is displayed. Wrap them in print() so both counts actually show.
print(f"there are {len(users_df['UserIdentifier'].unique())} unique identifies in the users csv")
print(f"there are {len(personality_df['UserIdentifier'].unique())} unique identifies in the personality csv")
# Count occurrences of each UserIdentifier and keep only ids seen more than
# once (column 0 = id, column 1 = count after the transpose).
user_counts = pd.DataFrame(np.unique(users_df['UserIdentifier'], return_counts=True, return_index=False, return_inverse=False)).T
user_counts = user_counts.sort_values(by=1, ascending=False)
user_counts = user_counts[user_counts[1]>1]
user_counts.columns = ['id', 'users']
personality_counts = pd.DataFrame(np.unique(personality_df['UserIdentifier'], return_counts=True, return_index=False, return_inverse=False)).T
personality_counts = personality_counts.sort_values(by=1, ascending=False)
personality_counts = personality_counts[personality_counts[1]>1]
personality_counts.columns = ['id', 'personality']
# -
user_counts
personality_counts
# #### let's consider only the users that are present in both tables, since we cannot evaluate anything useful in this task from information in only one table
# Ids that are duplicated in BOTH tables.
user_counts.merge(personality_counts, on='id')
user_1 = '77f0be1043bff8c9a56eade3b14ae1d3'
user_2 = '8015c0d8fc1e5cacfc646805a107a774'
# So we have two users with a unique user id who have more than one entry. Let's explore why that is the case
users_df[users_df['UserIdentifier']==user_1]
users_df[users_df['UserIdentifier']==user_2]
# ### hence we can see that this is because their financial situation has changed, let's see if this had an impact on their personality
personality_df[personality_df['UserIdentifier']==user_1]
personality_df[personality_df['UserIdentifier']==user_2]
# ## we can see that their psychological profiles may differ slightly, but given the small number of such anomalies we will drop them from further analysis
# Remove both anomalous multi-entry users from the two tables before merging.
users_df = users_df[~users_df['UserIdentifier'].isin([user_1, user_2])]
personality_df = personality_df[~personality_df['UserIdentifier'].isin([user_1, user_2])]
# #### personality nan values
# Rows with any NaN personality score. Boolean-mask indexing replaces the
# original `.iloc[pd.isnull(df).any(1).nonzero()[0]]`: Series.nonzero() was
# removed in pandas 1.0 and the positional `any(1)` argument is deprecated;
# the mask selects exactly the same rows.
nan_per = personality_df[personality_df.isnull().any(axis=1)]
print(nan_per.shape)
nan_per.head()
# Hence we can fill these values with the column mean
personality_df[['A', 'B', 'C', 'D', 'E']] = personality_df[['A', 'B', 'C', 'D', 'E']].apply(lambda x: x.fillna(x.mean()),axis=0)
# Sanity check: no NaNs should remain in either table.
personality_df.shape, personality_df.dropna().shape
users_df.shape, users_df.dropna().shape
# ## Now we can proceed to join the two dataframes
#
# Inner-join demographics with personality scores on the shared user id.
df = personality_df.merge(users_df, on='UserIdentifier')
# ## et voila, the final dataframe
df.head()
# ---------
# # Psychological data analysis
# # pair plot for correlation check
# X: the five personality scores; Y: every demographic/browser feature.
X = df[['A', 'B', 'C', 'D', 'E']].astype(np.float32)
Y = df.drop(['UserIdentifier','A', 'B', 'C', 'D', 'E'], axis=1).astype(np.float32)
sns.pairplot(X)
def corr_heatmap(df):
    """Plot the lower triangle of df's pairwise correlation matrix as a heatmap.

    Parameters
    ----------
    df : pandas.DataFrame
        Numeric columns; correlations are computed with DataFrame.corr().
    """
    sns.set(style="white")
    # Compute the correlation matrix
    corr = df.corr()
    # Mask the upper triangle — it mirrors the lower one.
    # `bool` replaces the alias np.bool, removed in NumPy >= 1.24.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(11, 9))
    # Diverging colormap centred on zero correlation.
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
corr_heatmap(X)
corr_heatmap(Y)
corr_heatmap(df.drop(['UserIdentifier'], axis=1))
# #### Since we need to find the correlation between two sets of variables, we can't use something like multiple-multiple regression.
#
# Instead we can use CCA, a multivariate method for comparing sets of continuous or categorical variables to each other. It can be used (instead of multiple regression, for example) when you suspect that the variables in the sets you're looking at are significantly correlated. Canonical correlation accounts for multicollinearity and covariance.
# #### 1. Czy istnieje związek pomiędzy posiadanymi przez nas informacjami o ankietowanych, a ich profilem psychologicznym?
# # ...
# Hence, we are asking the question if there is a relationship between the user information based off their connected devices and personal status, and the users psychological profiles
from sklearn.cross_decomposition import CCA
# Single-component CCA: project X and Y each onto one canonical variate and
# measure how correlated the two projections are.
cca = CCA(n_components=1, scale=True, max_iter=3000)
cca.fit(X, Y)
X_c, Y_c = cca.transform(X, Y)
plt.scatter(X_c, Y_c)
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1])
# ##### hence we can see that there is a correlation between the two multivariate datasets
# To get the significance we can perform the Shapiro-Wilk test
from scipy import stats
# Shapiro-Wilk normality test on the first canonical variate.
shapiro_test = stats.shapiro(X_c[:, 0])
# Bug fix: scipy.stats.shapiro returns (statistic, p-value); the original
# printed shapiro_test[0] twice, so the reported "p-value" was the statistic.
print(f"statistic {shapiro_test[0]}\t p-value {shapiro_test[1]}")
cca.score(X, Y)
# ### redundancy analysis
# when we have results, we get structural coefficients. Here we can see the influence of each of the variables on the cross-variate relationship
# Structure (loading) coefficients: the contribution of each original
# variable to its canonical variate.
x_load = pd.DataFrame(cca.x_loadings_).T
x_load.columns = list(X.columns)
x_load.T
y_load = pd.DataFrame(cca.y_loadings_).T
y_load.columns = list(Y.columns)
y_load.T
# 2. Czy możemy podzielić ankietowanych na grupy osób o podobnym profilu psychologicznym? Jakie to grupy, co wyróżnia każdą z nich, jaka jest ich charakterystyka demograficzna?
# #### Hence from this we can deduce that the generation to which a user belongs has the biggest influence over their psychological profile, and certainly NOT their Professional status.
# #### These groups that differ in psychological profile are separated by the generation they belong to, in other words the range of years they were born in. Therefore age has significant influence over mentality.
# Cluster on the raw personality scores; the generation code is the label.
extracted = df[['A', 'B', 'C', 'D', 'E']]
y = df['Generation']
# Map the numeric generation code back to its name for the plot legend.
labels = df['Generation'].apply(lambda x: next((k for k, v in year_of_birth_mapper_to_numerical.items() if x==v), 0)).values
# +
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
# -
np.unique(labels)
# # T-sne decomposition for showing clusters in 2D (with the help of PCA).
# # Generation vs Psychological profile
# Psychological profile
# Toggle PCA pre-reduction before t-SNE.
use_PCA=True
# +
import time  # bug fix: time.time() is used below but was never imported
from matplotlib import cm  # bug fix: cm.Spectral is used below but was never imported

if use_PCA:
    # Optional PCA pre-reduction to denoise/speed up t-SNE.
    pca_50 = PCA(n_components=4)
    extracted = pca_50.fit_transform(extracted)
    # Message fixed: it previously hard-coded "50 principal components"
    # although only 4 are kept — report the actual count.
    print('Cumulative explained variation for {} principal components: {}'.format(
        pca_50.n_components_, np.sum(pca_50.explained_variance_ratio_)))
# computing t-SNE
time_start = time.time()
tsne = TSNE(n_components=2, verbose=3, perplexity=10, n_iter=500,learning_rate=200)
tsne_results = tsne.fit_transform(extracted)
print ("t-SNE done! Time elapsed: {} seconds".format(time.time()-time_start))
# plotting part: one colour per generation class.
# Bug fix: the original looped over range(num_classes) (0..4) although the
# class codes are 1..5 — class 5 was never drawn and i=0 drew nothing — and
# it used labels[i], which picks the label of the i-th SAMPLE, not of class i.
# Iterate the actual class values and take the label from a member row.
class_values = np.unique(y)
num_classes = len(class_values)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
colors = cm.Spectral(np.linspace(0, 1, num_classes))
xx = tsne_results[:, 0]
yy = tsne_results[:, 1]
for idx, cls in enumerate(class_values):
    sel = (y == cls).values
    ax.scatter(xx[sel], yy[sel], color=colors[idx], label=labels[sel][0], s=30)
plt.title("t-SNE dimensions colored by class")
plt.axis('tight')
plt.legend(loc='best', scatterpoints=1, fontsize=10,prop={'size': 12})
# plt.savefig("presentation_images/t-sne"+type_+".png")
plt.xlabel('$x$ t-SNE')
plt.ylabel('$y$ t-SNE')
plt.show()
# -
# # Summary to the management
#
# * we have analysed data of
# * Dataset 1: user profiles such Education, Professional status, City size, Financial_situation, Size of Household
# * Dataset 2: corresponding psychological profile information
# * In terms of coding we have cleaned the data, converted "word-like" features (categorical) into "number-like" features (numerical) to be able to perform statistical tests
#
# * We have found a correlation of 0.42 between the two datasets using Canonical Correlation Analysis.
# * Canonical Correlation Analysis is a multivariate method for comparing sets of continuous or categorical variables to each other
# * The overwhelmingly influential factor among the user features, which is responsible for the correlation of the two datasets, is the generation to which the user belongs. Example: whether they were born in the 60s or in the 80s.
# * Hence we can see that we need to target groups and custom-make ads for age groups instead of simply looking at variables such as wealth status, where they live, etc.
#
| Zadanie.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GnvNDv2t5tDc"
# # WikiRecs Part 1 - Data Ingestion, Loading and Cleaning
# > In this first part of the two part series, we will handle the data side. We will fetch data from wikipedia, store in feather format and register the combined data on recochef. After that, we will perform EDA and extensive cleaning.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [Ingestion, EDA, DataCleaning]
# - author: "<a href='https://towardsdatascience.com/how-can-you-tell-if-your-recommender-system-is-any-good-e4a6be02d9c2'><NAME></a>"
# - image:
# + [markdown] id="1rxKKMQB2Ygc"
# ## Data ingestion
#
# Downloading data from Wikipedia using Wiki API and storing in Google drive. There are more than 11 millions records which would take 6-8 hours in single colab session, so used 10 colab workers to fetch all the data within 30-40 mins.
# + id="EOyiO5ce2TcZ"
# !git clone https://github.com/sparsh-ai/reco-wikirecs
# %cd /content/reco-wikirecs/
# !pip install -r requirements.txt
# + id="c_uAz5OS4sm3" executionInfo={"status": "ok", "timestamp": 1625763148431, "user_tz": -330, "elapsed": 1759, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import yaml
import os
from wiki_pull import *  # project module: Wikipedia API ingestion helpers
from itables.javascript import load_datatables
load_datatables()
# + id="iT-JOJiF3G8M" executionInfo={"status": "ok", "timestamp": 1625762573030, "user_tz": -330, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Load the ingestion configuration (paths, lookback window, chunk sizes).
with open('config.yaml') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="tQPeT_sk7Mud" executionInfo={"status": "ok", "timestamp": 1625755605162, "user_tz": -330, "elapsed": 432198, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="a248ea52-7dd1-4633-bd10-bc24d0201250"
# Sample the set of recently-active users whose edit histories will be pulled.
get_sample_of_users(config['edit_lookback'], config['outfile'])
# + [markdown] id="26kXx5-b3EUd"
# ### Start the ingestion
#
# I ran the same code from start till this cell, in 10 different colab notebooks, changing start position. The design is a master-worker setup where 1 notebooks was the master one, and 9 are workers.
#
# In master, start=0. In worker 1, start=5000. In worker 2, start=10000, and so on. This start value indicates the number of users. Since there are 54K users, each worker handled 5000 users on average.
# + id="-vNLJxSLB2m8"
# Pull the full edit history for each sampled user, writing feather chunks.
# `start` offsets into the user list so parallel colab workers don't overlap
# (worker k runs with start=k*5000, per the markdown above).
pull_edit_histories(
    config['outfile'],
    os.path.join(config['file_save_path'],config['edit_histories_file_pattern']),
    config['users_per_chunk'],
    config['earliest_timestamp'],
    start=0,
)
# + [markdown] id="K8K_erP24SfD"
# ## Data storage
#
# During ingestion, we stored data in feather format parts. Now, we will combine the data and store in compressed parquet format.
#
# We will also register this data on recochef so that we can easily load it anywhere and also make it reusable for future use cases.
# + id="ZsdBdQU4SlPy"
import os
import yaml
import pandas as pd
from pyarrow import feather
# + id="wXZfzk8rTYX6"
# Re-load the configuration in this fresh session.
with open('config.yaml') as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
# + id="8k7gkgbdSYrF"
# Load every per-chunk feather file produced by the ingestion workers.
all_histories = [
    feather.read_feather(os.path.join(config['file_save_path'], fname))
    for fname in os.listdir(config['file_save_path'])
    if 'feather' in fname
]
# + id="gMtXFbWLTpJT"
# Stitch the chunks into a single edit-history frame.
all_histories = pd.concat(all_histories, ignore_index=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Zg-ZRYUFVW33" executionInfo={"status": "ok", "timestamp": 1625768191836, "user_tz": -330, "elapsed": 415, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="76249d15-9f6e-45ef-8863-41cb6eef25d8"
all_histories.head()
# + colab={"base_uri": "https://localhost:8080/"} id="hU_SbzmmVeG1" executionInfo={"status": "ok", "timestamp": 1625768201872, "user_tz": -330, "elapsed": 634, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1820caac-cb05-4c4a-a62d-064a88338d00"
all_histories.shape
# + colab={"base_uri": "https://localhost:8080/"} id="UYjNXm6bVy2o" executionInfo={"status": "ok", "timestamp": 1625768283454, "user_tz": -330, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="17921817-c2e3-47b5-a75a-4ff45afe8ebc"
all_histories.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Z6lerT-PVgXn" executionInfo={"status": "ok", "timestamp": 1625768227959, "user_tz": -330, "elapsed": 2973, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b2ba7815-305e-49b5-e527-8591fbcefe16"
all_histories.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="mvU0BTqOVmXs" executionInfo={"status": "ok", "timestamp": 1625768275900, "user_tz": -330, "elapsed": 27586, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="cea875e4-9b28-4a45-f68c-cf6f9fd15562"
all_histories.describe(include=['O'])
# + id="muyaq_HuVsBL"
all_histories.to_parquet('wikirecs.parquet.gzip', compression='gzip')
# + [markdown] id="q_s-Fib4XF31"
# > Note: Data is also registered with [recochef](https://github.com/sparsh-ai/recochef/blob/master/src/recochef/datasets/wikirecs.py) for easy access
# + [markdown] id="hMXlY1JF5Lvi"
# ## EDA and Data cleaning
# + id="LLMOakVK7lZg"
# !git clone https://github.com/sparsh-ai/reco-wikirecs
# %cd /content/reco-wikirecs/
# !pip install -r requirements.txt
# !pip install -q git+https://github.com/sparsh-ai/recochef.git
# + id="xX-hzqMQ5Odd"
# Imports for the EDA/cleaning section; utils and wiki_pull are project modules.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from recochef.datasets.wikirecs import WikiRecs
from utils import *
from wiki_pull import *
# + id="X1XNTud2orfP"
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# + [markdown] id="ssPq8Lv5heto"
# ### Data loading
# + id="19-zA66p5Odf"
# Dataset wrapper that loads the registered WikiRecs interaction table.
wikidata = WikiRecs()
# + colab={"base_uri": "https://localhost:8080/"} id="ClFOpx4g5Odf" executionInfo={"status": "ok", "timestamp": 1625770074180, "user_tz": -330, "elapsed": 37009, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0a11cd60-7888-4f05-9a59-e857705c98d4"
df = wikidata.load_interactions()
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="1xeCAd1jcgvx" executionInfo={"status": "ok", "timestamp": 1625770883423, "user_tz": -330, "elapsed": 654, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2d7289fc-a8bf-4f7a-f6c4-c26f550cfe3d"
df.head()
# + [markdown] id="1Qu0d-0Qhg23"
# ### EDA
# + colab={"base_uri": "https://localhost:8080/", "height": 435} id="KeeLRV8tfvOd" executionInfo={"status": "ok", "timestamp": 1625771142694, "user_tz": -330, "elapsed": 1774, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9b81f205-5de1-4460-a325-531eaf30eda3"
# Look at the distribution of edit counts
# Edits per user, plotted twice: full range and zoomed in below 200 edits.
edit_counts = df.groupby('USERID').USERID.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
# NOTE(review): sns.distplot is deprecated in modern seaborn (histplot is the
# replacement); left as-is here.
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
# Share of users with more than `thres` edits.
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# + colab={"base_uri": "https://localhost:8080/"} id="Y36Q_2ZTgeMl" executionInfo={"status": "ok", "timestamp": 1625771206828, "user_tz": -330, "elapsed": 1866, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="2ef045d2-d681-49c6-892b-f9e19fceb0de"
# Most edits by user
# Rank (USERID, USERNAME) pairs by total edit count, descending.
df.groupby(['USERID','USERNAME']).USERID.count().sort_values(ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="DEWSXfV9g94l" executionInfo={"status": "ok", "timestamp": 1625771245998, "user_tz": -330, "elapsed": 1418, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3f5c41c6-8c59-40a7-d961-2d43ef9c02ee"
# Find the elbow in number of edits
# Sorted per-user edit counts; the elbow suggests a cutoff for power users.
plt.plot(df.groupby(['USERID','USERNAME']).USERID.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# + colab={"base_uri": "https://localhost:8080/"} id="z-Yg51cShHdT" executionInfo={"status": "ok", "timestamp": 1625771301248, "user_tz": -330, "elapsed": 16636, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="b1e55e48-974b-44f2-c224-7c6ea76a7df5"
# What are the most popular pages (edited by the most users)
# Deduplicate per (TITLE, USERNAME) first so each user counts once per page.
page_popularity = df.drop_duplicates(subset=['TITLE','USERNAME']).groupby('TITLE').count().USERNAME.sort_values()
page_popularity.iloc[-1000:].iloc[::-1]
# + colab={"base_uri": "https://localhost:8080/"} id="T0p3yQorpFWs" executionInfo={"status": "ok", "timestamp": 1625773347902, "user_tz": -330, "elapsed": 689, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3136f4a0-038c-4722-cf18-8cad5199fd3f"
df.sample().USERNAME
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="f5zITFxcozuU" executionInfo={"status": "ok", "timestamp": 1625773446016, "user_tz": -330, "elapsed": 932, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="92908c8e-2847-4d48-f1f9-3178a3bc5d53"
# Pull one user's raw edit history straight from the API as a sanity check.
cols = ['userid', 'user', 'pageid', 'title',
        'timestamp', 'sizediff']
oneuser = get_edit_history(user="SanAnMan",
                           latest_timestamp="2021-07-08T22:02:09Z",
                           earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
oneuser
# + [markdown] id="aF2RyBNxhiXv"
# ### Data cleaning
# + [markdown] id="Qu3Pz_tS-WMQ"
# #### Remove consecutive edits and summarize runs
# + id="7hWZQkiX-WMR" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625771653795, "user_tz": -330, "elapsed": 64566, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="98ac4ee0-37a6-44cb-d89e-e28d209ba3ee"
# %%time
def remove_consecutive_edits(df):
    """Collapse runs of consecutive edits by one user to one page into single rows.

    Rows are grouped while (USERID, ITEMID) stays the same between adjacent
    rows. Each run is summarized by the identifying fields of its first row,
    the timestamps of its last and first rows, the summed absolute size
    change, and the run length.
    """
    col_idx = {name: i for i, name in enumerate(df.columns)}

    def run_key(row):
        # Adjacent rows with the same (user, page) belong to the same run.
        return (row[col_idx['USERID']], row[col_idx['ITEMID']])

    def summarize(run):
        head, tail = run[0], run[-1]
        return [
            head[col_idx['USERID']],
            head[col_idx['USERNAME']],
            head[col_idx['ITEMID']],
            head[col_idx['TITLE']],
            tail[col_idx['TIMESTAMP']],
            head[col_idx['TIMESTAMP']],
            sum(abs(r[col_idx['SIZEDIFF']]) for r in run),
            len(run),
        ]

    rows = df.values.tolist()
    summarized = [summarize(list(run)) for _, run in itertools.groupby(rows, key=run_key)]
    return pd.DataFrame(
        summarized,
        columns=['USERID', 'USER', 'ITEMID', 'TITLE',
                 'FIRST_TIMESTAMP', 'LAST_TIMESTAMP',
                 'SUM_SIZEDIFF', 'CONSECUTIVE_EDITS'])
clean_df = remove_consecutive_edits(df)
# + [markdown] id="ybEnBJ3p-WMT"
# #### Remove top N most popular pages
# + id="jOEyyMOs-WMU" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625771697564, "user_tz": -330, "elapsed": 18456, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="372cec11-d432-46f4-bf33-d52962a4a222"
# Get the top most popular pages
TOPN = 20
# Deduplicate per (TITLE, ITEMID, USERID) so each user counts once per page.
popularpages = df.drop_duplicates(subset=['TITLE','ITEMID','USERID']).groupby(['TITLE','ITEMID']).count().USERNAME.sort_values()[-TOPN:]
popularpages
# + id="uSz0qP6g-WMV" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625772016143, "user_tz": -330, "elapsed": 1917, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f8dddfa2-c74d-4216-8492-6b6605b95a86"
# Remove those popular pages
before_count = len(df)
popular_pageids = popularpages.index.get_level_values(level='ITEMID').values
is_popular_page_edit = clean_df.ITEMID.isin(popular_pageids)
clean_df = clean_df.loc[~is_popular_page_edit].copy()
# Release the raw per-edit frame; it is no longer needed.
all_histories = None
after_count = len(clean_df)
# NOTE(review): the percentage mixes tables — the numerator counts rows of
# clean_df while the denominator is len(df) (the raw table); confirm whether
# len(clean_df) was intended as the denominator.
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
# + id="YLAq2a7I-WMZ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625772019859, "user_tz": -330, "elapsed": 687, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="860cc588-196c-4d76-dc7e-67257d66a41a"
print("Number of unique page ids: {}".format(len(clean_df.ITEMID.unique())))
# + [markdown] id="R8hqTBZZ-WMa"
# #### Remove users with too many or too few edits
# + id="WajFz7Bz-WMc"
# Bounds for keeping a user: at least MIN_EDITS and at most MAX_EDITS edits.
MIN_EDITS = 5
MAX_EDITS = 10000
# + id="SnbOrhH9-WMd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625772042100, "user_tz": -330, "elapsed": 1259, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="17467712-d2cd-4ee8-81fe-73901448d891"
# Per-user edit counts, indexed by (USERID, USER).
all_user_edit_counts = clean_df.groupby(['USERID','USER']).USERID.count()
counts = all_user_edit_counts.values
names = all_user_edit_counts.index.get_level_values(1).values
# Flag bot accounts by the "bot" substring in the username.
is_bot = np.array(['bot' in name.lower() for name in names])
# Keep non-bot users whose edit count lies within [MIN_EDITS, MAX_EDITS].
keep_user = (counts >= MIN_EDITS) & (counts <= MAX_EDITS) & ~is_bot
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# + id="nWX-64bD-WMf"
# Restrict the edit table to the retained users and reindex from zero.
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_df = clean_df.loc[clean_df.USERID.isin(userids_to_keep)].reset_index(drop=True)
# + id="WbfD8r0w-WMg" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625772079295, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0c2dc2e6-ab40-4d48-ff4d-4415f45f459a"
print("Length after removing users: {}".format(len(clean_df)))
# + [markdown] id="RjNaQy7Y-WMi"
# ### Build lookup tables
# + id="EGhN2Znk-WMj"
# Bidirectional lookup tables between raw ids and human-readable names.
# Page id <-> title (first occurrence kept by drop_duplicates).
lookup = clean_df.drop_duplicates(subset=['ITEMID']).loc[:,['ITEMID','TITLE']]
p2t = {pid: title for pid, title in zip(lookup.ITEMID, lookup.TITLE)}
t2p = {title: pid for pid, title in zip(lookup.ITEMID, lookup.TITLE)}
# User id <-> user name
lookup = clean_df.drop_duplicates(subset=['USERID']).loc[:,['USERID','USER']]
u2n = {uid: name for uid, name in zip(lookup.USERID, lookup.USER)}
n2u = {name: uid for uid, name in zip(lookup.USERID, lookup.USER)}
# + id="lOfK-7xE-WMk"
# Dense matrix indices: sorted ids mapped to positions 0..N-1, and back.
pageids = np.sort(clean_df.ITEMID.unique())
userids = np.sort(clean_df.USERID.unique())
p2i = dict(zip(pageids, range(len(pageids))))
u2i = dict(zip(userids, range(len(userids))))
i2p = dict(zip(p2i.values(), p2i.keys()))
i2u = dict(zip(u2i.values(), u2i.keys()))
# + id="KOSXJpAr-WMl"
# User name / page title to matrix index and back, via the id lookups above.
n2i = {name: u2i[uid] for name, uid in n2u.items() if uid in u2i}
t2i = {title: p2i[pid] for title, pid in t2p.items() if pid in p2i}
i2n = dict(zip(n2i.values(), n2i.keys()))
i2t = dict(zip(t2i.values(), t2i.keys()))
# + [markdown] id="QSrLvJ9W-WMn"
# ### Build test and training set
# + id="ay5BdOLB-WMo"
# Make a test set from the most recent edit by each user
# (.first() yields the latest edit, which assumes rows are in
# reverse-chronological order per user — TODO confirm from ingestion)
histories_test = clean_df.groupby(['USERID','USER'],as_index=False).first()
# + id="ab8sxNLt-WMo"
# Subtract it from the rest to make the training set
# (dataframe_set_subtract is a project helper from utils)
histories_train = dataframe_set_subtract(clean_df, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# + id="ZVWFlnSK-WMp"
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['USERID','USER'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="peWz8aTJmNGt" executionInfo={"status": "ok", "timestamp": 1625772590008, "user_tz": -330, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="765d7fb4-2f3f-49cb-a2ad-b6675b7fa2db"
# Sizes of the three splits.
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
# + id="cYDo1XJM-WMr" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625772682855, "user_tz": -330, "elapsed": 12712, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="1c7d52b3-b616-4ca8-c810-4e0567a33a9d"
# Coverage statistics of the final training set.
print("Number of pages in training set: {}".format(len(histories_train.ITEMID.unique())))
print("Number of users in training set: {}".format(len(histories_train.USERID.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['TITLE','USER']).groupby('TITLE').count().USER > 1)))
# + id="ht3O-0DL-WMx" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1625773152503, "user_tz": -330, "elapsed": 4494, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="365a7edf-d948-4739-8705-5c1129ea27fe"
# Partition users by whether their held-out dev edit revisits a page from
# their training history ("resurfaced") or is new ("discovered") —
# get_resurface_discovery is a project helper from utils.
resurface_userids, discovery_userids = get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
# + [markdown] id="sgzzNkOxr8Z9"
# ### Build matrix for implicit collaborative filtering
# + id="tKvJEuJNrrs-"
# Get the user/page edit counts
for_implicit = histories_train.groupby(["USERID","ITEMID"]).count().FIRST_TIMESTAMP.reset_index().rename(columns={'FIRST_TIMESTAMP':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
# + id="78pLFLfesDF1"
# Sparse pages-by-users matrix of edit counts for implicit-feedback
# collaborative filtering (rows = page indices, columns = user indices).
row = np.array([p2i[p] for p in for_implicit.ITEMID.values])
col = np.array([u2i[u] for u in for_implicit.USERID.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
# + [markdown] id="2GtDLqdKsx1d"
# ### Saving artifacts
# + id="cX7sQzl_nNx3"
# Persist the lookup tables, id lists, user partition and matrix for the
# modeling notebooks (save_pickle is a project helper from utils).
save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), 'lookup_tables.pickle')
save_pickle((userids, pageids), 'users_and_pages.pickle')
save_pickle((resurface_userids, discovery_userids), 'resurface_discovery_users.pickle')
save_pickle(implicit_matrix,'implicit_matrix.pickle')
| _docs/nbs/wikirecs-01-data-ingestion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GroupBy
#
# "Group by" refers to an implementation of the "split-apply-combine" approach known from [pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html) and [xarray](http://xarray.pydata.org/en/stable/groupby.html).
# Scipp currently supports only a limited number of operations that can be applied.
#
# ## Grouping based on label values
#
# Suppose we have measured data for a number of parameter values, potentially repeating measurements with the same parameter multiple times:
# + tags=[]
import numpy as np
import scipp as sc
# Fix the RNG so the generated "measurements" are reproducible.
np.random.seed(0)
# -
# Six measurements along 'x', each a 16-point 'y' profile; 'param' holds the
# underlying parameter value of each measurement (values repeat).
param = sc.Variable(dims=['x'], values=[1,3,1,1,5,3])
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(6,16))
values += 1.0 + param
# If we store this data as a data array we obtain the following plot:
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(6)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
# Note that we chose the "measured" values such that the three distinct values of the underlying parameter are visible.
# We can now use the split-apply-combine mechanism to transform our data into a more useful representation.
# We start by storing the parameter values (or any value to be used for grouping) as a non-dimension coordinate:
data.coords['param'] = param
# Next, we call `scipp.groupby` to split the data and call `mean` on each of the groups:
grouped = sc.groupby(data, group='param').mean('x')
sc.plot(grouped)
# Apart from `mean`, `groupby` also supports `sum`, `concat`, and more. See [GroupByDataArray](../generated/classes/scipp.GroupByDataArray.rst) and [GroupByDataset](../generated/classes/scipp.GroupByDataset.rst) for a full list.
#
# ## Grouping based on binned label values
#
# Grouping based on non-dimension coordinate values (also known as labels) is most useful when labels are strings or integers.
# If labels are floating-point values or cover a wide range, it is more convenient to group values into bins, i.e., all values within certain bounds are mapped into the same group.
# We modify the above example to use a contiuously-valued parameter:
param = sc.Variable(dims=['x'], values=np.random.rand(16))
values = sc.Variable(dims=['x', 'y'], values=np.random.rand(16,16))
values += 1.0 + 5.0*param
data = sc.DataArray(
values,
coords={
'x': sc.Variable(dims=['x'], values=np.arange(16)),
'y': sc.Variable(dims=['y'], values=np.arange(16))
})
sc.plot(data)
# We create a variable defining the desired binning:
bins = sc.Variable(dims=["z"], values=np.linspace(0.0, 1.0, 10))
# As before, we can now use `groupby` and `mean` to transform the data:
data.coords['param'] = param
grouped = sc.groupby(data, group='param', bins=bins).mean('x')
sc.plot(grouped)
# The values in the white rows are `NaN`.
# This is the result of empty bins, which do not have a meaningful mean value.
# Alternatively, grouping can be done based on groups defined as Variables rather than strings. This, however, requires bins to be specified, since bins define the new dimension label.
grouped = sc.groupby(data, group=param, bins=bins).mean('x') # note the lack of quotes around param!
sc.plot(grouped)
# ## Usage examples
#
# ### Filtering a variable using `groupby.copy`
#
# Apart from reduction operations discussed above, `groupby` also supports `copy`, which allows us to extract a group without changes.
# We can use this, e.g., to filter data.
# This can be used for filtering variables:
var = sc.array(dims=['x'], values=np.random.rand(100))
select = var < 0.5 * sc.Unit('')
# We proceed as follows:
#
# 1. Create a helper data array with a dummy coord that will be used to group the data elements.
# 2. Call `groupby`, grouping by the `dummy` coord. Here `select` contains two distinct values, `False` and `True`, so `groupby` returns an object with two groups.
# 2. Pass `1` to `copy` to extract the second group (group indices start at 0) which contains all elements where the dummy coord value is `True`.
# 3. Finally, the `data` property returns only the filtered variable without the temporary coords that were required for `groupby`.
helper = sc.DataArray(var, coords={'dummy':select})
grouped = sc.groupby(helper, group='dummy')
filtered_var = grouped.copy(1).data
filtered_var
# Note that we can also avoid the named helpers `helper` and `grouped` and write:
filtered_var = sc.groupby(sc.DataArray(var, coords={'dummy':select}), group='dummy').copy(1).data
| docs/user-guide/groupby.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="width:1000 px">
#
# <div style="float:right; width:98 px; height:98px;">
# <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/src/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
# </div>
#
# <h1>Matplotlib Basics</h1>
# <h3>Unidata Python Workshop</h3>
#
# <div style="clear:both"></div>
# </div>
#
# <hr style="height:2px;">
#
# <div style="float:right; width:250 px"><img src="https://matplotlib.org/_static/logo2.png" alt="Matplotlib Logo" style="height: 150px;"></div>
#
# ### Questions
# 1. How are line plots created using Matplotlib?
# 1. What methods exist to customize the look of these plots?
#
# ### Objectives
# 1. Create a basic line plot.
# 1. Add labels and grid lines to the plot.
# 1. Plot multiple series of data.
# 1. Plot imshow, contour, and filled contour plots.
# ## Plotting with Matplotlib
# Matplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms.
# The first step is to set up our notebook environment so that matplotlib plots appear inline as images:
# %matplotlib inline
# Next we import the matplotlib library's `pyplot` interface; this interface is the simplest way to create new Matplotlib figures. To shorten this long name, we import it as `plt` to keep things short but clear.
import matplotlib.pyplot as plt
import numpy as np
# Now we generate some data to use while experimenting with plotting:
# Forecast valid times (hours) and matching temperatures (values fall in the
# 285-317 range, so presumably Kelvin -- confirm against the data source).
times = np.array([ 93., 96., 99., 102., 105., 108., 111., 114., 117.,
                  120., 123., 126., 129., 132., 135., 138., 141., 144.,
                  147., 150., 153., 156., 159., 162.])
temps = np.array([310.7, 308.0, 296.4, 289.5, 288.5, 287.1, 301.1, 308.3,
                  311.5, 305.1, 295.6, 292.4, 290.4, 289.1, 299.4, 307.9,
                  316.6, 293.9, 291.2, 289.8, 287.1, 285.8, 303.3, 310.])
# Now we come to two quick lines to create a plot. Matplotlib has two core objects: the `Figure` and the `Axes`. The `Axes` is an individual plot with an x-axis, a y-axis, labels, etc; it has all of the various plotting methods we use. A `Figure` holds one or more `Axes` on which we draw; think of the `Figure` as the level at which things are saved to files (e.g. PNG, SVG)
#
# 
#
# Below the first line asks for a `Figure` 10 inches by 6 inches. We then ask for an `Axes` or subplot on the `Figure`. After that, we call `plot`, with `times` as the data along the x-axis (independent values) and `temps` as the data along the y-axis (the dependent values).
# +
# Create a figure
fig = plt.figure(figsize=(10, 6))
# Ask, out of a 1x1 grid, the first axes.
ax = fig.add_subplot(1, 1, 1)
# Plot times as x-variable and temperatures as y-variable
ax.plot(times, temps)
# -
# -
# From there, we can do things like ask the axis to add labels for x and y:
# +
# Add some labels to the plot
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
# Prompt the notebook to re-display the figure after we modify it
fig
# -
# We can also add a title to the plot:
# +
ax.set_title('GFS Temperature Forecast', fontdict={'size':16})
fig
# -
# Of course, we can do so much more...
# Set up more temperature data
# Second series: temperatures at the 1000 mb pressure level (same times).
temps_1000 = np.array([316.0, 316.3, 308.9, 304.0, 302.0, 300.8, 306.2, 309.8,
                       313.5, 313.3, 308.3, 304.9, 301.0, 299.2, 302.6, 309.0,
                       311.8, 304.7, 304.6, 301.8, 300.6, 299.9, 306.3, 311.3])
# Here we call `plot` more than once to plot multiple series of temperature on the same plot; when plotting we pass `label` to `plot` to facilitate automatic legend creation. This is added with the `legend` call. We also add gridlines to the plot using the `grid()` call.
# +
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Plot two series of data
# The label argument is used when generating a legend.
ax.plot(times, temps, label='Temperature (surface)')
ax.plot(times, temps_1000, label='Temperature (1000 mb)')
# Add labels and title
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
ax.set_title('Temperature Forecast')
# Add gridlines
ax.grid(True)
# Add a legend to the upper left corner of the plot
ax.legend(loc='upper left')
# -
# We're not restricted to the default look of the plots, but rather we can override style attributes, such as `linestyle` and `color`. `color` can accept a wide array of options for color, such as `red` or `blue` or HTML color codes. Here we use some different shades of red taken from the Tableau color set in matplotlib, by using `tab:red` for color.
# +
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Specify how our lines should look
ax.plot(times, temps, color='tab:red', label='Temperature (surface)')
ax.plot(times, temps_1000, color='tab:red', linestyle='--',
        label='Temperature (isobaric level)')
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Temperature')
ax.set_title('Temperature Forecast')
ax.grid(True)
ax.legend(loc='upper left')
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>Use add_subplot to create two different subplots on the figure.</li>
# <li>Create one subplot for temperature, and one for dewpoint.</li>
# <li>Set the title of each subplot as appropriate.</li>
# <li>Use ax.set_xlim and ax.set_ylim to control the plot boundaries.</li>
# <li><b>BONUS:</b> Experiment with passing sharex and sharey to add_subplot to <a href="https://matplotlib.org/gallery/subplots_axes_and_figures/shared_axis_demo.html#sphx-glr-gallery-subplots-axes-and-figures-shared-axis-demo-py">share plot limits</a></li>
# </ul>
# </div>
# +
# Fake dewpoint data to plot
dewpoint = 0.9 * temps
dewpoint_1000 = 0.9 * temps_1000
# Create the figure
fig = plt.figure(figsize=(10, 6))
# YOUR CODE GOES HERE
# -
# <div class="alert alert-info">
# <b>SOLUTION</b>
# </div>
# # %load solutions/subplots.py
# ## Scatter Plots
# Maybe it doesn't make sense to plot your data as a line plot, but with markers (a scatter plot). We can do this by setting the `linestyle` to none and specifying a marker type, size, color, etc.
# +
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Specify no line with circle markers
ax.plot(temps, temps_1000, linestyle='None', marker='o', markersize=5)
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
# -
# You can also use the `scatter` methods, which is slower, but will give you more control, such as being able to color the points individually based upon a third variable.
# +
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# Use the scatter method (allows per-point coloring via the `c` argument)
ax.scatter(temps, temps_1000)
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
# -
# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>Beginning with our code above, add the `c` keyword argument to the `scatter` call and color the points by the difference between the surface and 1000 hPa temperature.</li>
# <li>Add a 1:1 line to the plot (slope of 1, intercept of zero). Use a black dashed line.</li>
# <li><b>BONUS:</b> Change the color map to be something more appropriate for this plot.</li>
# <li><b>BONUS:</b> Try to add a colorbar to the plot (have a look at the matplotlib documentation for help).</li>
# </ul>
# </div>
# +
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
# YOUR CODE GOES HERE
ax.set_xlabel('Temperature (surface)')
ax.set_ylabel('Temperature (1000 hPa)')
ax.set_title('Temperature Cross Plot')
ax.grid(True)
# -
# <div class="alert alert-info">
# <b>SOLUTION</b>
# </div>
# # %load solutions/color_scatter.py
# ## imshow/contour
# - `imshow` displays the values in an array as colored pixels, similar to a heat map.
# - `contour` creates contours around data.
# - `contourf` creates filled contours around data.
#
# First let's create some fake data to work with - let's use a bivariate normal distribution.
# Grid spanning [-3, 3) in both directions at 0.025 resolution.
x = y = np.arange(-3.0, 3.0, 0.025)
X, Y = np.meshgrid(x, y)
# Two Gaussian bumps; their difference gives positive and negative regions.
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
# Let's start with a simple imshow plot.
fig, ax = plt.subplots()
im = ax.imshow(Z, interpolation='bilinear', cmap='RdYlGn',
               origin='lower', extent=[-3, 3, -3, 3])
# We can also create contours around the data.
fig, ax = plt.subplots()
ax.contour(X, Y, Z)
# Labeled contours at a fixed 0.25 interval.
fig, ax = plt.subplots()
c = ax.contour(X, Y, Z, levels=np.arange(-2, 2, 0.25))
ax.clabel(c)
# Filled contours.
fig, ax = plt.subplots()
c = ax.contourf(X, Y, Z)
# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>Create a figure using imshow and contour that is a heatmap in the colormap of your choice. Overlay black contours with a 0.5 contour interval.</li>
# </ul>
# </div>
#
# +
# YOUR CODE GOES HERE
# -
# <div class="alert alert-info">
# <b>SOLUTION</b>
# </div>
# +
# # %load solutions/imshow_contour.py
# -
# ## Resources
#
# The goal of this tutorial is to provide an overview of the use of the Matplotlib library. It covers creating simple line plots, but it is by no means comprehensive. For more information, try looking at the:
# - [Matplotlib Documentation](http://matplotlib.org)
# - [Matplotlib `plot` documentation](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot)
| pages/workshop/Matplotlib/Matplotlib Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HW2
#
#
# Before submitting your **HTpy-HW2.ipynb** file, perform the following
# * `Kernel>Restart & Run All`
# * Make sure that there are no errors
#
# The following includes problems that need coding and others that only need to perform simple operations (python as a calculator).
from Libraries import thermodynamics as thermo
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as csts
# ## 1 Insulation
#
# This problem is an expansion of PB 3 in `HTpy-HW2.ipynb`
#
# Assuming that the outer surface temperature of the pipe is $150^\circ C$, find an insulation that works (i.e. an insulation that will not melt, for which you can find the price and thermal conductivity online). Derive the necessary equations that allow for determination of the different costs as a function of the thickness. Generate a plot for at least 3 different thicknesses.
#
# Hint: Derive the heat loss for a variable thickness of insulation, then apply this formula for a range of thicknesses. Compare savings of gas vs cost of insulation.
# ### Solution
# Assumptions:
# <ul>
# <li>Heat transfer is steady and 1D</li>
# </ul>
# #define all variables
# d=.1
# r_inner=.05
# r_insulation=[0,0.0127,0.0254,0.0381,0.0508]
# r_outer=[r_inner+x for x in r_insulation]
# print(r_outer)
# +
import math

# --- Geometry, material properties and boundary conditions -------------------
d = .1                 # pipe outer diameter, m (= 2*r_inner; kept for reference)
L = 25                 # pipe length, m
r_inner = .05          # insulation inner radius (pipe outer radius), m
# Candidate insulation thicknesses: 0", 0.5", 1", 1.5", 2" converted to meters
r_insulation = [0,0.0127,0.0254,0.0381,0.0508]
r_outer = [r_inner+x for x in r_insulation]
# Outer surface area for each candidate thickness, m^2
A = [2*math.pi*L*x for x in r_outer]
emissivity = 0.8       # insulation surface emissivity
h = 10                 # outer convection coefficient, W/(m^2 K)
T_inf = 25+273.15      # ambient air temperature, K
T_sur = 25+273.15      # surroundings temperature for radiation, K
sbc = 5.67*(10**-8)    # Stefan-Boltzmann constant, W/(m^2 K^4)
k = .056               # insulation thermal conductivity, W/(m K)
T_pipe = 150+273.15    # pipe outer-surface temperature, K

from Libraries import HT_thermal_resistance as res
Rth = []
Rth.append(res.Resistance("$R'_{cond,2}$",'W/m'))
Rth.append(res.Resistance("$R'_{conv,o}$",'W/m'))
Rth.append(res.Resistance("$R'_{rad,o}$",'W/m'))

# --- Thermal resistances ------------------------------------------------------
# Linearized radiation resistance 1/(h_r*A).
# NOTE(review): h_r is evaluated here from T_sur and T_inf (both ambient);
# the usual linearization involves the *surface* temperature -- confirm intent.
R_rad=[1/(emissivity*sbc*(T_sur+T_inf)*(T_sur**2+T_inf**2)*x) for x in A]
R_conv=[1/(h*x) for x in A]
# Conduction through a cylindrical shell: R = ln(r_o/r_i)/(2*pi*k*L).
# BUG FIX: the original expression `math.log(x/r_inner)/2*pi*k*L` divided by 2
# and then *multiplied* by pi*k*L because of a missing parenthesis.
# (math.pi also replaces the truncated constant 3.1415.)
R_cond=[math.log(x/r_inner)/(2*math.pi*k*L) for x in r_outer]

# Radiation and convection act in parallel, in series with conduction.
vector1 = np.array(R_rad)
vector2 = np.array(R_cond)
vector2 = np.array(R_conv)
vector3 = np.array(R_cond)
R_eq = np.reciprocal(np.reciprocal(vector1)+np.reciprocal(vector2)) + vector3

# --- Costs --------------------------------------------------------------------
# Heat loss for each configuration, W (positive out of the pipe).
# The original used a negative q with a negative heat price; both signs are
# flipped here for clarity -- the products below are unchanged.
q = (T_pipe-T_inf)/R_eq
# Up-front insulation price for each thickness, $ per unit length
insulation_cost=[0,16.25,18.50,25.72,36.94]
costofheat = 0.02*10**-6      # cost of delivered heat, $/J
efficiency=0.9                # furnace efficiency
heatloss_cost=q*costofheat/efficiency          # $/s lost to the environment
heatloss_cost_per_year=heatloss_cost*31536000  # 31,536,000 s per year
# TODO(review): the factor 28 looks like a length multiplier, but L = 25 m --
# confirm the intended pipe run.
total_insulation_cost=[28*x for x in insulation_cost]
yearly=np.array(heatloss_cost_per_year)
initial=np.array(total_insulation_cost)
# One row per thickness: [up-front insulation cost, yearly heating cost]
pairs=np.vstack((initial,yearly)).T
print(pairs)

# --- Plot cumulative cost over 20 years for each insulation width -------------
time = np.linspace(0,20,5)    # years
# Dummy (white) series used only to insert a title row into the legend.
legend_title = (heatloss_cost_per_year*time)+(total_insulation_cost[0])+20
width1 = (pairs[1][0]+pairs[1][1]*time)
width2 = (pairs[2][0]+pairs[2][1]*time)
width3 = (pairs[3][0]+pairs[3][1]*time)
width4 = (pairs[4][0]+pairs[4][1]*time)
plt.figure(figsize=(6,4), dpi=100)
# Plot each cumulative-cost series
plt.plot(time,legend_title,color='white', label='Insulation Width (In.)')
plt.plot(time,width1, lw=2, label='0.5')
plt.plot(time,width2, lw=2, label='1')
plt.plot(time,width3, lw=2, label='1.5')
plt.plot(time,width4, lw=2, label='2')
plt.title('Total Cost Over 20 Years')
plt.xlabel('Time (years)')
plt.ylabel('Total Cost (USD)')
plt.legend()
plt.show()
# -
# +
import schemdraw as schem
import schemdraw.elements as e
# Create a new circuit
# Thermal-resistance network sketch: conduction in series with the parallel
# convection/radiation pair (mirrors the Rth list built above: cond, conv, rad).
# NOTE(review): `d` shadows the pipe diameter variable defined earlier in the
# notebook -- consider renaming the drawing object.
d = schem.Drawing()
L1=d.add( e.LINE, d = 'right')
d.labelI(L1,'q')
#create a dot for inside temperature
d.add( e.DOT, label='$T_{\infty,i}$')
#create the first resistance
R0 = d.add( e.RES, d='right', label=Rth[0].name )
d.add( e.DOT, label='$T_1$')
# Save the current position so both parallel branches start at this junction.
d.push()
d.add(e.LINE, l = 1.5, d = 'up')
R3 = d.add( e.RES, d='right', label=Rth[1].name )
d.add( e.DOT, label='$T_{\infty,o}$')
# Return to the saved junction for the lower (radiation) branch.
d.pop()
d.add(e.LINE, d='down', l = 1.5)
R4 = d.add( e.RES, d='right', label=Rth[2].name )
d.add( e.DOT, label='$T_{\infty,o}$')
#L1 = d.add(e.LINE, toplabel = "$q'$", endpts = [[-0.25, 0], [-2.25, 0]])
#d.labelI(L1, arrowofst = 0)
d.draw()
# -
# ## 2 Window
#
# Set up equations and apply realistic numerical values to them to discuss heat losses of a single pane window, a single pane window with storm window and a double paned window with air trapped at a vacuum of $10^{-3} \mathrm{torr}$ in a gap of $5\mathrm{mm}$. Do not consider the effects of radiation for any of the window.
#
# <img src="ThermalCondAir.png" alt="my awesome sketch" width=100% >
# ### Solution
# Assumptions:
# <ul>
# <li>Heat transfer is steady and 1D</li>
# <li>No radiation</li>
# <li>Air in storm window is stagnant</li>
# </ul>
#
# DISCUSSION: As we can see, the heat flux through the single pane window was by far the largest. This is unsurprising, as the heat has the fewest barriers to transfer through on its way outside. The storm window had a very small rate of heat transfer because the air inside was very stagnant, meaning that there was no convection, and the heat had to be conducted through the air. The heat flux through the window with the vacuum between the panes had a somewhat comparable rate of heat transfer to the storm window. However, the gap between the panes of the vacuumed window was about 5% as large as it was for the storm window, meaning that the vacuum caused the interpane spacing to be less conductive.
# +
# Boundary conditions and material properties
h = 20       # outdoor free-convection coefficient, W/(m^2 K)
k_s = .026   # conductivity of stagnant air at 1 atm (~16-18 C), W/(m K)
k_v = .0015  # effective conductivity of air at ~10^-3 torr, W/(m K)
L_s = .1     # pane spacing of the storm window, m
L_v = .005   # pane spacing of the vacuum window, m
T_w = thermo.C2K(18)   # window surface temperature, K
T_inf = thermo.C2K(5)  # outdoor air temperature, K

# Driving temperature difference (negative: heat flows outdoors)
dT = T_inf - T_w

# Single pane: outdoor convection only
q_sp = h*dT
# Storm window: conduction across the air gap in series with convection
q_sw = (1/(L_s/k_s + 1/h))*dT
# Vacuum window: same series network with the low-pressure gap
q_vw = (1/(L_v/k_v + 1/h))*dT

print('The heat flux through a single pane window is:', q_sp, 'W/M^2 \n')
print('The heat flux through a storm window is:', q_sw, 'W/M^2 \n')
print('The heat flux through a vacuum window is:', q_vw, 'W/M^2 \n')
# -
| HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/HW 2 Assignment_Team 8_attempt_2021-03-01-23-54-36_HTpy-HW2-Final-Team8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 3_Convert_ONNX_to_TensorRT
#
# This notebook will convert ONNX model to TensorRT serialized engine.
#
# The batch size is chosen to be 1.
# Conversion from ONNX to TensorRT would use the ONNXClassifierWrapper from Nvidia TensorRT repository.
#
# https://github.com/NVIDIA/TensorRT/tree/master/quickstart/IntroNotebooks
#
# Please see onnx_helper.py.
# Important modification to note is, explicit batch must be set.
#
# **explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)**
#
# We can also use the trtexec command to convert.
#
# trtexec --onnx=[ONNX file name.onnx] --saveEngine=[TensorRT engine.trt] --explicitBatch
#
# ### Jetson Nano Specific Information:
# In Jetson Nano, during the conversion, RAM might run out.
# Hence please set up swap space in the device.
#
# Please refer to the setup instructions [jetson_nano_setup_instructions.md] in this repository.
#Import packages
import numpy as np
from onnx_helper import ONNXClassifierWrapper,convert_onnx_to_engine
import torch
import json
from PIL import Image
from torchvision import transforms
#Set constants
BATCH_SIZE=1
N_CLASSES=1000
PRECISION=np.float32
# NOTE(review): N_CLASSES, PRECISION and json are not used in the visible
# cells -- presumably consumed via ONNXClassifierWrapper; confirm.
image_size=224
#PyTorch transform
# Standard ImageNet preprocessing: resize, center-crop to 224x224, convert to
# tensor, then normalize with the usual ImageNet channel means/stds.
tfms=transforms.Compose([transforms.Resize(image_size),
                         transforms.CenterCrop(image_size),
                         transforms.ToTensor(),
                         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
# +
# Build a (BATCH_SIZE, 3, 224, 224) numpy batch from the sample image.
img=Image.open('panda.jpg')
img=tfms(img)#.unsqueeze(0)
img=torch.unsqueeze(img, 0)
print("Img shape",img.shape)
BATCH_SIZE=1
dummy_batch=np.zeros((BATCH_SIZE,3,224,224))
for idx in range(BATCH_SIZE):
    dummy_batch[idx]=img
print(dummy_batch.shape)
# -
# Convert the ONNX model to a serialized TensorRT engine (see onnx_helper.py).
ONNX_PATH='models/efficientnetb2_batch1.onnx'
BATCH_SIZE=1
TRT_PATH='models/efficientnetb2_batch1.trt'
trt_engine=convert_onnx_to_engine(ONNX_PATH, TRT_PATH)
| 3_Convert_ONNX_to_TensorRT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Don't forget to delete the hdmi_out and hdmi_in when finished
# # RGB Filter Example
# In this notebook, we will explore the colors that are used to create an image. Although humans are able to see a multitude of colors, our eyes are technically only capable of detecting red, green, and blue. Every other color that we see is a composition of these three primary colors. Black is the absence of any color while white is the combination of all colors.
#
# <img src="data/AdditiveColor.png"/>
# *This diagram shows how colors are added together to create new colors.*
#
# This notebook will use a video filter that will allow for the addition and removal of colors from a live video feed. This will help show how colors are formed from the three primary colors: red, green, and blue.
# #### 1. Download base overlay to the board
# Ensure that the camera is not connected to the board. Run the following script to provide the PYNQ with its base overlay.
# +
from pynq.drivers.video import HDMI
from pynq import Bitstream_Part
from pynq.board import Register
from pynq import Overlay
# Load the base overlay (full FPGA bitstream) onto the board.
Overlay("demo.bit").download()
# -
# #### 2. Connect camera
# Physically connect the camera to the HDMI-in port of the PYNQ. Run the following code to instruct the PYNQ to capture the video from the camera and to begin streaming video to your monitor (connected to the HDMI-out port).
# Pass-through streaming: the output shares the input's frame buffer list.
hdmi_in = HDMI('in')
hdmi_out = HDMI('out', frame_list=hdmi_in.frame_list)
hdmi_out.mode(2)
hdmi_out.start()
hdmi_in.start()
# #### 3. Program board with RGB Filter
# Run the following script to download the RGB Filter to the PYNQ. This will allow us to modify the colors of the video stream.
# Partial reconfiguration with the RGB filter bitstream.
Bitstream_Part("rgb_p.bit").download()
# #### 4. Create a user interface
# We will communicate with the filter using a nice user interface. Run the following code to activate that interface.
# +
import ipywidgets as widgets
from ipywidgets import Button, HBox, VBox, Label

words = ['HDMI Reset']
items = [Button(description=w) for w in words]

def on_hdmi_clicked(b):
    """Restart both HDMI streams (recovers a glitched video pipeline)."""
    hdmi_out.stop()
    hdmi_in.stop()
    hdmi_out.start()
    hdmi_in.start()

# One filter control register per color channel; 255 = full intensity.
R0=Register(0)  # red
R1=Register(1)  # green
R2=Register(2)  # blue
for _reg in (R0, R1, R2):
    _reg.write(255)

def _make_slider(description, color):
    """Build a vertical 0-255 intensity slider for one color channel.

    Consolidates the three previously copy-pasted IntSlider definitions;
    all keyword arguments match the originals exactly.
    """
    return widgets.IntSlider(
        value=255,
        min=0,
        max=255,
        step=1,
        description=description,
        disabled=False,
        continuous_update=True,
        orientation='vertical',
        readout=True,
        readout_format='i',
        slider_color=color
    )

R0_s = _make_slider('Red:', 'red')
R1_s = _make_slider('Green:', 'green')
R2_s = _make_slider('Blue:', 'blue')

# Push slider changes to the hardware registers. The default arguments bind
# each register/slider pair at definition time, avoiding the late-binding
# closure pitfall of a bare loop variable.
for _reg, _slider in ((R0, R0_s), (R1, R1_s), (R2, R2_s)):
    def _update(*args, _reg=_reg, _slider=_slider):
        _reg.write(_slider.value)
    _slider.observe(_update, 'value')

items[0].on_click(on_hdmi_clicked)
# Lay out the reset button next to the three channel sliders.
widgets.HBox([VBox([items[0]]),R0_s,R1_s,R2_s])
# -
# #### 5. Exploration
# Feel free to play with the sliders above. As the slider decreases in value, the color associated with that slider will be removed from the video. Likewise, increasing the slider value will add color back into the image.
#
# Notice that when all sliders are reduced to 0 that the image is black. Now, increase the red slider. The image should only include various shades of red. Add green into the image. The video should now include shades of red and green, but also yellow! This is because yellow is the combination of red and green.
# #### 6. Clean up
# When you are done playing with the RGB filter, run the following code to stop the video stream
# Stop the video pipeline before releasing the interfaces.
hdmi_out.stop()
hdmi_in.stop()
# Delete the objects so the HDMI ports can be re-initialized later.
del hdmi_out
del hdmi_in
| Pynq-Z1/notebooks/Video_PR/RGB_Filter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
import json
# training_data = {'params.json': [], "progress.csv": []}
# Collect every training-progress CSV under the results directory, tagging
# each with the battery size parsed from its run-directory name.
# NOTE(review): run directories are assumed to contain "battery_size=<v>_" in
# their path -- the split below raises IndexError otherwise.
df_list = []
for root, dirs, files in os.walk('/Users/alexanderkell/Documents/PhD/Projects/18-battery-optimisation/data/models/new'):
    for name in files:
        file = "{}/{}".format(root,name)
        if name.endswith(("csv")):
            d = pd.read_csv(file)
            # Extract the value between "battery_size=" and the next "_".
            d['battery_size'] = root.split("battery_size=")[1].split("_")[0]
            # d['lr'] = root.split(",lr=")[1].split("_")[0]
            # training_data['progress.csv'].append(d)
            df_list.append(d)
training = pd.concat(df_list)
training.head()
# battery_size was parsed from a path string; convert for numeric hue/plotting.
training['battery_size'] = pd.to_numeric(training['battery_size'])
training.to_csv("/Users/alexanderkell/Documents/PhD/Projects/18-battery-optimisation/data/results/training/training_results.csv")
# Learning curves: mean episode reward vs. iteration, one line per battery size.
sns.lineplot(data=training, x="training_iteration", y='episode_reward_mean', hue="battery_size")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel("Total training iterations")
plt.ylabel("Mean episode reward")
| notebooks/results/4.0-ajmk-visualise-training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/deansadang/Linear-Algebra-58020/blob/main/Application_of_Linear_Systems.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="ItPRAyW1o0Vy" outputId="6add354b-04da-4aa3-809b-e516507beec5"
import numpy as np
from scipy.linalg import solve

# 2x2 linear system A @ X = B:
#   4x + 5y = 7
#   3x - 2y = 11
A = np.array([[4, 5],
              [3, -2]])
print(A)
B = np.array([[7],
              [11]])
print(B)
# Method 1: multiply by the inverse inline, X = A^-1 @ B.
X = np.linalg.inv(A).dot(B)
print(X)
# + colab={"base_uri": "https://localhost:8080/"} id="l1sYAMS_o1jd" outputId="3f32891b-39a2-4394-cfc5-f68d4d7c9238"
# Method 2: compute the inverse first, then multiply.
inv_A = np.linalg.inv(A)
print(inv_A)
X = inv_A.dot(B)
print(X)
# + colab={"base_uri": "https://localhost:8080/"} id="wPKoUxAMo3gL" outputId="aab5f0da-805e-4a64-96c0-76a223b5362b"
# Method 3: numpy's dedicated solver (no explicit inverse).
X = np.linalg.solve(A, B)
print(X)
# + colab={"base_uri": "https://localhost:8080/"} id="1L049ZHxo7Y6" outputId="9cb08961-e172-4bf7-d5da-a32169b7cfdf"
# Method 4: scipy's solver, imported above.
X = solve(A, B)
print(X)
| Application_of_Linear_Systems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the weather-history dataset from the parent directory.
df = pd.read_csv('../weatherHistory.csv')
df.head()
df.shape
df.dtypes
df.describe()
df.info()
# Per-column counts of null vs. non-null entries.
dfnulls = df.isnull()
for column in dfnulls.columns.values.tolist():
    print(column)
    print(dfnulls[column].value_counts(), '\n')
df.isna().sum()
df['Precip Type'].value_counts()
# Fill missing precipitation type with the placeholder string "unknow".
df['Precip Type'].replace(np.nan, "unknow", inplace=True)
df.head()
# Drop 'Loud Cover' (inspected via value_counts above before removal).
df['Loud Cover'].value_counts()
df.drop(['Loud Cover'], axis=1, inplace=True)
df.head()
# Max-normalize the numeric feature columns.
# NOTE(review): dividing by the column max only maps to [0, 1] when values
# are non-negative -- confirm for the wind and pressure columns.
df['Wind Speed (km/h)'] = df['Wind Speed (km/h)']/df['Wind Speed (km/h)'].max()
df['Wind Bearing (degrees)'] = df['Wind Bearing (degrees)']/df['Wind Bearing (degrees)'].max()
df['Visibility (km)'] = df['Visibility (km)']/df['Visibility (km)'].max()
df['Pressure (millibars)'] = df['Pressure (millibars)']/df['Pressure (millibars)'].max()
dfnorm = df[['Wind Speed (km/h)', 'Wind Bearing (degrees)', 'Visibility (km)', 'Pressure (millibars)']]
df['Wind Speed (km/h)']
#OHE
#ONE HOT ENCODER
# One-hot encode the precipitation type ('rain', 'snow', and the 'unknow'
# placeholder introduced above for missing values).
dummy = pd.get_dummies(df['Precip Type'])
dummy.value_counts()
# BUG FIX: the original rename used the misspelled keys 'precipt-type-snow'
# and 'unkwnow'. pandas' rename silently ignores keys that match no column,
# so the 'unknow' column was never renamed and the snow column got an
# inconsistent prefix.
dummy.rename(columns={'rain':'precip-type-rain', 'snow':'precip-type-snow', 'unknow':'precip-type-unknow'}, inplace=True)
dummy
df = pd.concat([df, dummy], axis=1)
# Correlation matrix of the numeric columns (duplicate call removed).
df.corr()
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Correlation between the chosen predictor/target pair before fitting.
df[['Humidity', 'Temperature (C)']].corr()
sns.regplot(x='Humidity', y='Temperature (C)', data=df)
plt.ylim(0,)
# Same regression plot on a random sample of 1000 rows for readability/speed.
dfSample = df.sample(1000)
sns.regplot(x='Humidity', y='Temperature (C)', data=dfSample)
plt.ylim(0,)
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm
# Simple linear regression: Humidity -> Temperature (C).
X = df[['Humidity']]
Y = df['Temperature (C)']
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.30) #holdout
lm.fit(X_train, Y_train)
lm.coef_
# R^2 on the training split.
lm.score(X_train, Y_train)
y_pred = lm.predict(X_test)
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# +
# y_pred, y_test
# Held-out evaluation metrics.
print('R2:', r2_score(Y_test, y_pred))
print('MAE: ', mean_absolute_error(Y_test, y_pred))
print('MSE:', mean_squared_error(Y_test, y_pred))
# +
# RMSE
| desafio-09-03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <span style="font-family:Papyrus; font-size:3em;" >Estimating Parameter Confidence Intervals With Bootstrapping</span>
# This notebook demonstrates the calculations required to do confidence interval constructions.
# 1. Construct a good model. This means checking that we get good $R^2$ values (or other model quality metrics) for each fold in a cross validation.
# 1. Compute residuals for the good model.
# 1. Construct a collection of parameter estimates. That is, for many repetitions
# 1. Construct new observations (by using randomly selected residuals)
# 1. Estimate parameter values
# 1. Construct the confidence interval
# # Programming Preliminaries
# Toggle: running on Google Colab vs. a standalone machine.
IS_COLAB = False
#
if IS_COLAB:
    # !pip install matplotlib
    # !pip install numpy
    # !pip install tellurium
    # !pip install SBstoat
#
# Constants for standalone notebook
if not IS_COLAB:
    CODE_DIRS = ["/home/ubuntu/advancing-biomedical-models/common"]
    CODE_DIRS.append("/home/ubuntu/SBstoat")
    DATA_DIR = "/home/ubuntu/advancing-biomedical-models/lecture_12"
else:
    from google.colab import drive
    drive.mount('/content/drive')
    CODE_DIRS = ["/content/drive/MyDrive/Winter 2021/common"]
    DATA_DIR = "/content/drive/MyDrive/Modeling_Class/Lecture Notes/12_lecture"
# Make the course code (and SBstoat, when standalone) importable.
import sys
for path in CODE_DIRS:
    sys.path.insert(0, path)
# Other constants
# Name of the time column/axis used when indexing a NamedTimeseries below.
TIME = "time"
# %matplotlib inline
import numpy as np
import lmfit # Fitting lib
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import tellurium as te
from SBstoat.modelFitter import ModelFitter
from SBstoat.observationSynthesizer import ObservationSynthesizerRandomizedResiduals, ObservationSynthesizerRandomErrors
from SBstoat.namedTimeseries import NamedTimeseries
# # Model and Data
# Model used in this example
MODEL = """
A -> B; k1*A
B -> C; k2*B
A = 5;
B = 0;
C = 0;
k1 = 0.1
k2 = 0.2
"""
PARAMETERS_TO_FIT = ["k1", "k2"]
# Globals
NUM_POINT = 30
SIM_TIME= 30
NOISE_STD = 0.3
rr = te.loada(MODEL)
fittedData = rr.simulate(0, SIM_TIME, NUM_POINT)
fittedTS = NamedTimeseries(namedArray=fittedData)
synthesizer = ObservationSynthesizerRandomErrors(fittedTS=fittedTS, std=NOISE_STD)
OBS_TS = synthesizer.calculate()
OBS_TS
# +
def plotData(observedTS):
    """
    Create a scatter plot of every column of the observed timeseries
    against its TIME column.

    Parameters
    ----------
    observedTS: NamedTimeseries

    Returns
    -------
    matplotlib.figure.Figure
    """
    fig, axis = plt.subplots(1)
    timeValues = observedTS[TIME]
    for name in observedTS.colnames:
        _ = axis.scatter(timeValues, observedTS[name])
    _ = plt.legend(observedTS.colnames)
    return fig
# Tests: plotData must hand back a matplotlib Figure.
figure = plotData(OBS_TS)
assert(isinstance(figure, matplotlib.figure.Figure))
_ = figure.clear()
# -
_ = plotData(OBS_TS)
# # Bootstrapping Workflow
# ## Construct a good model.
# First, we need to estimate the parameter values to use in our model. To this end, we do a fit on the full set of data.
FITTER = ModelFitter(MODEL, OBS_TS, parametersToFit=["k1", "k2"])
FITTER.fitModel()
FITTED_TS = FITTER.fittedTS
PARAMETER_ESTIMATES = FITTER.params
PARAMETER_ESTIMATES
print(FITTER.reportFit())
FITTER.plotFitAll()
# **Question**
# 1. What are the parameter estimates if the NOISE_STD is 0.8?
# ## Compute the Residuals
# Residuals need to be calculated by chemical species since they may be in very different units.
# Note that the residuals for the chemical species differ. Compare the residuals for A (1st col) with
# the residuals for C (3rd col)
columns = FITTED_TS.colnames
RESIDUALS_TS = OBS_TS.copy() # Create the shape of the residuals
RESIDUALS_TS[columns] -= FITTED_TS[columns]
RESIDUALS_TS
# The standard deviation of the residuals should be approximately the same as the standard deviation
# of the random noise we injected in the construction of the observations.
np.std(RESIDUALS_TS.flatten())
# A validation of our codes and workflow is that the standard deviation of the residuals is close to the standard deviation of the noise used in the observational data.
# ## Construct a Collection of Parameter Estimates
# ### Step 3a: Construct Synthetic Observations
# We define a function that constructs a set of observations from residuals and a model.
synthesizer = ObservationSynthesizerRandomizedResiduals(fittedTS=FITTED_TS,
observedTS=OBS_TS)
synthesizer.calculate()
# Show three independent resamplings to illustrate the randomness of the
# synthesized observations.
size = 3
fig, axes = plt.subplots(size)
for idx in range(size):
    ax = axes[idx]
    residualsTS = synthesizer.calculate()
    xvalues = residualsTS["time"]
    for col in residualsTS.colnames:
        ax.scatter(xvalues, residualsTS[col])
# ### Repeatedly estimate parameter values
# +
def estimateParameters(model, observedTS, parametersToFit, numIteration=10):
    """
    Bootstrap parameter estimates by repeatedly refitting the model on
    observations synthesized from randomized residuals.

    Parameters
    ----------
    model: str
        Antimony model source.
    observedTS: NamedTimeseries
        Observed data to fit against.
    parametersToFit: list-of-str
    numIteration: int
        Number of bootstrap refits.

    Returns
    -------
    dict: parameter name -> list of numIteration estimates
    """
    estimates = {name: [] for name in parametersToFit}
    # Fit once on the real observations to obtain starting parameter values.
    baseFitter = ModelFitter(model, observedTS, parametersToFit=parametersToFit)
    baseFitter.fitModel()
    startingParams = baseFitter.params
    # Synthesizer that resamples residuals around the base fit.
    resampler = ObservationSynthesizerRandomizedResiduals(fittedTS=baseFitter.fittedTS, observedTS=observedTS)
    # Refit repeatedly on resampled observations, collecting each estimate.
    for _ in range(numIteration):
        syntheticTS = resampler.calculate()
        bootFitter = ModelFitter(model, syntheticTS, parametersToFit=parametersToFit)
        bootFitter.fitModel(params=startingParams)
        valueDct = bootFitter.params.valuesdict()
        for name in parametersToFit:
            estimates[name].append(valueDct[name])
    return estimates
# Tests: the bootstrap should return one estimate per iteration for every parameter.
numIteration = 5
estimateDct = estimateParameters(MODEL, OBS_TS, PARAMETERS_TO_FIT, numIteration=numIteration)
assert(len(estimateDct[PARAMETERS_TO_FIT[0]]) == len(estimateDct[PARAMETERS_TO_FIT[1]]))
assert(len(estimateDct[PARAMETERS_TO_FIT[0]]) == numIteration)
# -
# Histograms of the bootstrap estimates.
# BUG FIX: the original referenced ESTIMATE_DCT, which is never defined
# anywhere in the notebook (NameError); the dict created above is estimateDct.
_ = plt.hist(estimateDct["k1"], bins=40)
_ = plt.hist(estimateDct["k2"], bins=40)
# **Questions**
# 1. How do the quality of the parameter estimates change with the number of simulation points? With the simulation time?
# ## Compute Confidence Intervals
# Demonstrate np.quantile: the 10th and 90th percentiles of 0..9.
np.quantile(range(10), [.10, .90])
# **Question**
# 1. Calculate 95% confidence intervals for the parameters.
# 1. How many iterations should you use?
# ### Solution
# Solution
# The 95% bootstrap confidence interval is the 2.5th and 97.5th percentiles
# of the bootstrap estimates for each parameter.
for parameter in estimateDct.keys():
    quantile = np.quantile(estimateDct[parameter], [0.025, 0.975])
    print("%s: %s" % (parameter, str(quantile)))
# **Questions**
# 1. Why does ``k1`` have such a wide confidence interval?
# # Exercise
#
# TRUE MODEL:
#
# - A -> B
# - A -> C
# - B + C -> D
#
# All kinetics are mass action. The kinetics constants are (in order of the reactions): 0.5, 0.5, 1.0. The initial concentration of A is 5. Consider a time course of duration 30 with 20 points.
#
#
# 1. Generate synthetic observations using this model using a normally distributed noise with a standard deviation
# of 0.1.
# 1. Using the true model (the one above), find the $R^2$ values in a cross validation with 4 folds.
# 1. Construct confidence intervals for the parameters.
# Model used in this example. $-prefixed species ($A, $E) are boundary
# (fixed-concentration) species in Antimony syntax.
NEW_MODEL = """
$A -> B; k1*$A
$A -> C; k2*$A
C -> D; k3*C
D + B -> $E; k4*D*B
A = 5;
B = 10;
C = 8;
D = 4;
E = 2
k1 = 0.5
k2 = 0.1
k3 = 1.3;
k4 = 1;
"""
# Solution: Constants
NEW_NUM_POINT = 100  # simulation points
NEW_END_TIME = 5     # simulation end time
NEW_NOISE_STD = 0.4  # std of synthetic observation noise
# Plot of the ground truth model (std=0.0 means no noise is added).
fittedTS = ModelFitter.runSimulation(roadrunner=NEW_MODEL, numPoint=NEW_NUM_POINT, endTime=NEW_END_TIME,
                                     returnDataFrame=False)
synthesizer = ObservationSynthesizerRandomErrors(fittedTS=fittedTS, std=0.0)
_ = plotData(synthesizer.calculate())
# We are going to estimate the parameters ``k1``, ``k2``, ``k3``, and the constant value of ``A``.
NEW_PARAMETERS_TO_FIT = ["k1", "k2", "k3", "A"]
# ## Generate randomized observations
# **Question**
# 1. Generate data with 100 points over 30 s and a standard deviation of 0.4.
# +
# Solution: simulate the ground truth and overlay random noise.
fittedTS = ModelFitter.runSimulation(roadrunner=NEW_MODEL, numPoint=NEW_NUM_POINT, endTime=NEW_END_TIME,
                                     returnDataFrame=False)
synthesizer = ObservationSynthesizerRandomErrors(fittedTS=fittedTS, std=NEW_NOISE_STD)
NEW_OBS_TS = synthesizer.calculate()
_ = plotData(NEW_OBS_TS)
# -
# **Questions**
# 1. For what part of these data will it be difficult to estimate the dynamics and therefore the kinetics constants?
# ## Construct a good fit of the model
# **Question**
# 1. Find a good fit for the model.
# 1. How good are the estimates relative to ground truth? If there are bad estimates, explain why.
# 1. Plot the model using the estimated values.
# Solution: fit the noisy observations, then re-simulate with the estimates.
NEW_FITTER = ModelFitter(NEW_MODEL, NEW_OBS_TS, parametersToFit=NEW_PARAMETERS_TO_FIT)
NEW_FITTER.fitModel()
NEW_FITTED_TS = NEW_FITTER.fittedTS
NEW_PARAMETER_ESTIMATES = NEW_FITTER.params
print(NEW_FITTER.reportFit())
fittedTS = ModelFitter.runSimulation(roadrunner=NEW_MODEL, numPoint=NEW_NUM_POINT, endTime=NEW_END_TIME,
                                     returnDataFrame=False, parameters=NEW_PARAMETER_ESTIMATES)
_ = plotData(fittedTS)
# ## Estimate the residuals
# ## Construct bootstrap estimates
# # Bootstrapping With SBstoat
# SBstoat can run the whole bootstrap workflow above in one call.
NEW_FITTER = ModelFitter(NEW_MODEL, NEW_OBS_TS, parametersToFit=NEW_PARAMETERS_TO_FIT)
NEW_FITTER.fitModel()
NEW_FITTER.bootstrap(numIteration=500, reportInterval=100)
print(NEW_FITTER.reportBootstrap())
| archived_lectures/Winter 2021/lecture_12/bootstrapping/12_stimating_Parameter_Confidence_Intervals_With_Bootstrapping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Window Functions
#
# Spark also supports window functions for aggregations. Window functions allow more complex aggregations like sliding windows or ranking, where for each row a set of 'surrounding' rows are used for calculating an additional metric.
#
# In this example, we will use the weather data and add a sliding average temparature to the existing columns. The result DataFrame shall have both metrics: The actual temperature (as stored in the original records) and an averaged value.
# # 1 General Preparations
#
# First we enable Matplot inline graphics and set the base location of all data
# %matplotlib inline
storageLocation = "s3://dimajix-training/data/weather"
# # 2 Loading Data
#
# Again we load data for the single year 2003 from S3 (or whatever storage location is used)
# +
from pyspark.sql.functions import *
from pyspark.sql.types import *
rawWeatherData = spark.read.text(storageLocation + "/2003")
weatherData = rawWeatherData.select(
substring(col("value"), 5, 6).alias("usaf"),
substring(col("value"), 11, 5).alias("wban"),
to_timestamp(substring(col("value"), 16, 12), "yyyyMMddHHmm").alias("timestamp"),
to_timestamp(substring(col("value"), 16, 12), "yyyyMMddHHmm")
.cast("long")
.alias("ts"),
substring(col("value"), 42, 5).alias("report_type"),
substring(col("value"), 61, 3).alias("wind_direction"),
substring(col("value"), 64, 1).alias("wind_direction_qual"),
substring(col("value"), 65, 1).alias("wind_observation"),
(substring(col("value"), 66, 4).cast("float") / lit(10.0)).alias("wind_speed"),
substring(col("value"), 70, 1).alias("wind_speed_qual"),
(substring(col("value"), 88, 5).cast("float") / lit(10.0)).alias("air_temperature"),
substring(col("value"), 93, 1).alias("air_temperature_qual"),
)
# -
# Make the weather data available as a temporary view
weather_all = weatherData.cache()
weather_all.createOrReplaceTempView("weather_all")
# Peek inside the data, just to make sure everything looks right
spark.sql(
"""
SELECT * FROM weather_all LIMIT 10
"""
).toPandas()
# # 3 Pick a single station
#
# For our first steps, we limit ourselves to a single weather station. We pick one with `usaf='954920'` and `wban='99999'`. This is enough to demonstrate the basic functions of window functions for a sliding average.
weather_single = weatherData.where("usaf='954920' and wban='99999'").cache()
weather_single.createOrReplaceTempView("weather_single")
spark.sql(
"""
SELECT * FROM weather_single LIMIT 10
"""
).toPandas()
# # 4 Sliding Average
#
# Now we want to calculate the sliding average of the temperature as an additional metric. First we use SQL for that task, later we will see how to use the DataFrame API for performing the same task.
#
# In order to perform a windowed aggregation, you use the following syntax to specify a column expression:
# ```
# AGGREGATE_FUNCTION(columns) OVER(window_specification)
# ```
# The term `window_specification` is constructed from the following components:
# ```
# PARTITION BY category
# ORDER BY ordering_column [ASC|DESC]
# RANGE BETWEEN start PRECEEDING AND end FOLLOWING
# ```
#
# * `PARTITION BY` works similar to a `GROUP BY` operation. It controls which rows will be in the same partition with the given row. Also, the user might want to make sure all rows having the same value for the category column are collected to the same machine before ordering and calculating the frame. If no partitioning specification is given, then all data must be collected to a single machine. - it filters records which are used for creating each window
# * `ORDER BY` sorts all records of a single window accordingly. It controls the way that rows in a partition are ordered, determining the position of the given row in its partition.
# * `RANGE BETWEEN` states which rows will be included in the frame for the current input row, based on their relative position to the current row. For example, “the three rows preceding the current row to the current row” describes a frame including the current input row and three rows appearing before the current row.
#
# ### Frame Types
#
# As an alternative to `RANGE BETWEEN` there is also `ROWS BETWEEN`. While `RANGE BETWEEN` refers to the values of the sorting column, `ROWS BETWEEN` simply counts the number of rows. Both window types have their use: `RANGE BETWEEN` is perfect for sliding averages over time windows of constant duration, while `ROWS BETWEEN` is useful for ordered entries lacking a proper arithmetic scale.
#
# ### Boundaries
# Both frame types (range and rows) support different boundary types:
# * `UNBOUNDED PRECEDING`
# * `UNBOUNDED FOLLOWING`
# * `CURRENT ROW`
# * `<value> PRECEDING`
# * `<value> FOLLOWING`
# ## 4.1 Sliding average calculation
# +
result = spark.sql(
"""
SELECT
*,
CASE WHEN air_temperature_qual = 1 THEN air_temperature END AS temp,
AVG(CASE WHEN air_temperature_qual = 1 THEN air_temperature END)
OVER (ORDER BY ts ASC RANGE BETWEEN 36000 PRECEDING AND 36000 FOLLOWING) AS avg_temp
FROM
weather_single
ORDER BY ts
LIMIT 300
"""
).toPandas()
result
# -
# ### Draw a picture
#
# In order to verify our approach, let's draw a picture with Matplotlib, which shows the current temperature and the sliding average in a single plot.
result.plot(x='timestamp', y=['temp', 'avg_temp'], figsize=[16, 8])
# ## 4.2 Window Aggregation Functions
#
# We already used simple standard aggregation functions, which are also available without windows. But there are also some special aggregation functions, which were specifically designed to be used with windowed aggregation and cannot be used without a window definition.
#
# These are
#
# Function class | SQL | DataFrame Function | Description
# ---------------|-----|--------------------|-------------
# Ranking functions|rank|ranke|Get rank in window
# |dense_rank|denseRank|
# |percent_rank|percentRank|
# |ntile|ntile|
# |row_number|rowNumber|Get row number in window
# Analytic functions|cume_dist|cumeDist|
# |first_value|first|Pick first value in window
# |last_value|last|Pick last value in window
# |lag|lag|Pick preceeding value
# |lead|lead|Pick following value
# ## 4.3 Exercise: Comparing to pervious day
#
# Another use case for window functions is to compare todays temperature to yesterday at the same time. This can be achived by using the function `FIRST_VALUE` together with an appropriate window with a range from 86400 (number of seconds of one day) preceeding and the current row.
#
# **Exercise**: Create a DataFrame with the columns `timestamp`, `temp` (current temperature) and `prev_temp` (previous temperature) and plot the first 300 records.
# +
result = spark.sql(
"""
SELECT
*,
CASE WHEN air_temperature_qual = 1 THEN air_temperature END AS temp,
FIRST_VALUE(CASE WHEN air_temperature_qual = 1 THEN air_temperature END)
OVER (ORDER BY ts ASC RANGE BETWEEN 86400 PRECEDING AND CURRENT ROW) AS prev_temp
FROM
weather_single
ORDER BY ts
LIMIT 300
"""
).toPandas()
result
# -
# ### Draw a picture
#
# Again, draw a picture of the result.
result.plot(x='timestamp', y=['temp', 'prev_temp'], figsize=[16, 8])
# # 5 DataFrame Window API
#
# In addition to the SQL interface, there is also a direct Python interface for creating windowed aggregations. Let us reformulate the initial sliding window average aggregation using the Spark DataFrame API instead of SQL.
# ## 5.1 Sliding average
# +
from pyspark.sql.window import Window
# Same sliding average as the SQL version: a +/-36000 second window ordered
# by the epoch-seconds column ts.
window_spec = Window.orderBy(weather_single.ts.asc()).rangeBetween(-36000, 36000)
result = (
    weather_single.select(
        weather_single["ts"],
        weather_single["timestamp"],
        # Quality-1 readings only; when() without otherwise() yields NULL,
        # which avg() ignores.
        when(
            weather_single["air_temperature_qual"] == 1, weather_single["air_temperature"]
        ).alias("temp"),
        avg(
            when(
                weather_single["air_temperature_qual"] == 1,
                weather_single["air_temperature"],
            )
        )
        .over(window_spec)
        .alias("avg_temp"),
    )
    .orderBy(weather_single['ts'])
    .limit(300)
    .toPandas()
)
result
# -
# ### Draw a picture
#
# Using Matplotlib, let's make a picture containing the current temperature and the average temperature in a single plot.
result.plot(x='timestamp', y=['temp', 'avg_temp'], figsize=[16, 8])
# ## 5.2 Exercise: Compare temperature to previous day
#
# Now perform the same task as the previous exercise: Make a plot of the current temperature and the one 24h ago using the `first` function. But this time, use the DataFrame API instead of SQL.
# +
from pyspark.sql.window import Window
# Look back one day (86400 s) up to the current row; first() then picks the
# oldest non-NULL reading in that window, i.e. the value ~24h ago.
window_spec = Window.orderBy(weather_single.ts.asc()).rangeBetween(-86400, 0)
result = (
    weather_single.select(
        weather_single["ts"],
        weather_single["timestamp"],
        when(
            weather_single["air_temperature_qual"] == 1, weather_single["air_temperature"]
        ).alias("temp"),
        first(
            when(
                weather_single["air_temperature_qual"] == 1,
                weather_single["air_temperature"],
            )
        )
        .over(window_spec)
        .alias("prev_temp"),
    )
    .orderBy(weather_single['ts'])
    .limit(300)
    .toPandas()
)
result
# -
# ### Draw a picture
#
# In order to verify our approach, let's draw a picture with Matplotlib, which shows the current temperature and the previous temperature in a single plot.
result.plot(x='timestamp', y=['temp', 'prev_temp'], figsize=[16, 8])
# ## 5.3 Partitioned Windows
#
# So far we only used windows covering a specific time range. This was good enough, since we were only looking at a single station. But in most cases, you want to perform analyses covering multiple different entitites (different weather stations in this example). In these cases you also need to *partition* the aggregation window, such that only records from the same entity are processed.
#
# Let us calculate the difference of the current temperature to the average of the last day, but this time for all stations at once.
# +
from pyspark.sql.window import Window
# Partition by station id (usaf, wban) so each station's window only sees
# its own records; look back one day from each row.
window_spec = (
    Window.orderBy(weather_all.ts.asc())
    .partitionBy(weather_all.usaf, weather_all.wban)
    .rangeBetween(-86400, 0)
)
# Common column expression for valid temperature value or NULL otherwise
valid_temp = when(weather_all.air_temperature_qual == 1, weather_all.air_temperature)
# Difference between the current temperature and the trailing 24h average.
result = weather_all.select(
    weather_all.usaf,
    weather_all.wban,
    weather_all.ts,
    weather_all.timestamp,
    valid_temp.alias("temp"),
    (valid_temp - avg(valid_temp).over(window_spec)).alias("temp_avg_diff"),
).orderBy(weather_all.ts)
result.limit(300).toPandas()
# -
# ### Draw a Picture
#
# In order to check the result, we again pick a single station. But this time, we pick it from the final result and not from the input data
pdf = result.where("usaf='954920' and wban='99999'").limit(300).toPandas()
pdf.plot(x='timestamp', y=['temp', 'temp_avg_diff'], figsize=[16, 8])
# ## 5.4 Exercise: Min/Max Change Analysis
#
# Now we want to calculate for every weather station:
# * The maximum upward difference of temperature within 5 days
# * The maximum downward difference of temperature within 5 days
#
# Logically, we want to perform the following steps for every weather station:
# 1. For every measurement, look back five days
# 2. Within these five days, find the minimum and maximum temperature
# 3. Calculate the difference of the current temepature and the minimum and maximum. Store these in `temp_rise` and `temp_fall`
# 4. Calculate the overall maximum of `temp_rise` and `temp_fall` per station for the whole year
# Calculate the number of seconds for five days
one_day = 24 * 60 * 60
five_days = 5 * one_day
five_days
# +
# Create a window, which creates a new partition per weather station and looks back 5 days
window_spec = (
    Window.orderBy(weather_all.ts.asc())
    .rangeBetween(-five_days, 0)
    .partitionBy(weather_all.wban, weather_all.usaf)
)
# Create a column representing a valid temperature or NULL otherwise
valid_temp = when(
    weather_all.air_temperature_qual == 1, weather_all.air_temperature
).alias("temp_valid")
# Calculate the difference for each day from the maximum and minimum temperature of the last five days using the window
# The resulting DataFrame should have the following columns:
#   timestamp
#   usaf
#   wban
#   temp_rise = valid_temp - min(valid_temp).over(window_spec)
#   temp_fall = max(valid_temp).over(window_spec) - valid_temp
# NOTE: min/max here are pyspark.sql.functions (the star import above shadows
# the Python builtins), so they operate per-window, not on Python iterables.
weather_rise_fall = weather_all.select(
    weather_all.timestamp,
    weather_all.usaf,
    weather_all.wban,
    valid_temp,
    (valid_temp - min(valid_temp).over(window_spec)).alias("temp_rise"),
    (max(valid_temp).over(window_spec) - valid_temp).alias("temp_fall"),
)
# Calculate the maximum raise and fall for each station for the whole year. This should be done by a simple grouped aggregation
# The groups are determined by the weather station id, which is given by usaf and wban
result = weather_rise_fall.groupBy(weather_all.usaf, weather_all.wban).agg(
    max(weather_rise_fall.temp_rise).alias("max_temp_rise"),
    max(weather_rise_fall.temp_fall).alias("max_temp_fall"),
)
# Finally show the whole result by converting it to a Pandas DataFrame
result.toPandas()
# -
| spark-training/spark-python/jupyter-advanced-windows/Weather Analysis - Full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Gradient Descent (Gradienten-Abstieg):
#
# $x = x - \eta \cdot \nabla f(x)$
# $f(x)$ ist die Funktion (z.B. Fehlerfunktion)
# $x$ ist der Parameter (z.B. Gewicht im NN)
#
# $f(x) = 100 \cdot (x_0^2 - x_1)^2 + (x_0 -1)^2$
#
# $f'(x_0) = 2 \cdot (200 \cdot x_0 \cdot (x_0^2 - x_1) + x_0 - 1)$
# $f'(x_1) = -200 \cdot (x_0^2 - x_1)$
# +
# Rosenbrock function and its partial derivatives.
def f(x0, x1):
    """Rosenbrock function value at (x0, x1)."""
    inner = x0 ** 2 - x1
    return 100 * inner ** 2 + (x0 - 1) ** 2

def f_prime_x0(x0, x1):
    """Partial derivative of f with respect to x0."""
    inner = x0 ** 2 - x1
    return 2 * (200 * x0 * inner + x0 - 1)

def f_prime_x1(x0, x1):
    """Partial derivative of f with respect to x1."""
    return -200 * (x0 ** 2 - x1)

# Global minimum at x = (1, 1); the descent below starts at (-1, -1).
print("Minimum: ", f(1, 1))
print("Starte bei: ", f(-1, -1))
# +
# 3D surface plot of the Rosenbrock function with the global minimum and the
# gradient-descent starting point marked.
# #%matplotlib notebook
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(7, 7))
# BUG FIX: fig.gca(projection='3d') stopped accepting keyword arguments in
# matplotlib 3.4 and was removed in 3.6; add_subplot is the supported way.
ax = fig.add_subplot(projection='3d')
s = 0.3  # grid step size
X = np.arange(-2, 2.+s, s)
Y = np.arange(-2, 3.+s, s)
#Create the mesh grid(s) for all X/Y combos.
X, Y = np.meshgrid(X, Y)
#Rosenbrock function w/ two parameters using numpy Arrays
Z = f(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, alpha=0.8)
# Global minimum
ax.scatter(1, 1, f(1, 1), color="red", marker="*", s=200)
# Starting point
ax.scatter(-1, -1, f(-1, -1), color="green", marker="o", s=200)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# +
# Plain gradient descent on the Rosenbrock function, recording every 100th
# iterate for plotting.
eta = 1/1000       # learning rate
x0 = -1
x1 = -1
# BUG FIX: was f(x0, x0); the starting function value is f(x0, x1).
y = f(x0, x1)
stop_conv = 1e-6   # stop when the function value has converged
stop_div = 1e+6    # stop when the iteration diverges
stop_iter = 1e4    # hard cap on the number of iterations
it = 0
downhill_points = []
while y > stop_conv and y < stop_div and it < stop_iter:
    # BUG FIX: evaluate both gradient components at the *current* point
    # (simultaneous update, matching x = x - eta * grad f(x) above); the
    # original updated x0 first and then used the new x0 inside f_prime_x1.
    grad_x0 = f_prime_x0(x0, x1)
    grad_x1 = f_prime_x1(x0, x1)
    x0 = x0 - eta * grad_x0
    x1 = x1 - eta * grad_x1
    it += 1
    # BUG FIX: the loop condition tests y, but the original never updated it
    # inside the loop, so the loop always ran the full stop_iter iterations
    # regardless of convergence or divergence.
    y = f(x0, x1)
    if it % 100 == 0:
        downhill_points.append([x0, x1])
fx = y
print("Solution: ", fx)
print("X0 = ", x0)
print("X1 = ", x1)
# +
# Replot the surface, overlaying the recorded gradient-descent iterates.
# #%matplotlib notebook
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(7, 7))
# BUG FIX: fig.gca(projection='3d') stopped accepting keyword arguments in
# matplotlib 3.4 and was removed in 3.6; add_subplot is the supported way.
ax = fig.add_subplot(projection='3d')
eps = 0  # optional z-offset to lift markers above the surface
s = 0.3  # grid step size
X = np.arange(-2, 2.+s, s)
Y = np.arange(-2, 3.+s, s)
#Create the mesh grid(s) for all X/Y combos.
X, Y = np.meshgrid(X, Y)
#Rosenbrock function w/ two parameters using numpy Arrays
Z = f(X, Y)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, alpha=0.5)
# Global minimum
ax.scatter(1, 1, f(1, 1)+eps, color="red", marker="*", s=100)
# Starting point
ax.scatter(-1, -1, f(-1, -1)+eps, color="green", marker="o", s=100)
# Plot Updated Points
for (x0, x1) in downhill_points:
    ax.scatter(x0, x1, f(x0, x1)+eps, color="green", marker="o", s=50)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
# -
# Compare convergence of gradient descent for several learning rates.
for eta in [1e-7, 1e-6, 1e-5, 1e-4, 1e-3]:
    x0 = -1
    x1 = -1
    # BUG FIX: was f(x0, x0); the starting function value is f(x0, x1).
    y = f(x0, x1)
    stop_conv = 1e-6
    stop_div = 1e+6
    stop_iter = 1e4
    it = 0
    while y > stop_conv and y < stop_div and it < stop_iter:
        # BUG FIX: simultaneous gradient update (the original used the
        # already-updated x0 when computing the x1 step), and the loop
        # condition variable y is now refreshed each iteration — the original
        # never updated y, so every eta ran the full stop_iter iterations.
        grad_x0 = f_prime_x0(x0, x1)
        grad_x1 = f_prime_x1(x0, x1)
        x0 = x0 - eta * grad_x0
        x1 = x1 - eta * grad_x1
        it += 1
        y = f(x0, x1)
    fx = y
    print("Eta = ", format(eta, 'e'))
    print("Solution: ", fx)
    print("X0 = ", x0)
    print("X1 = ", x1, "\n")
| Chapter12_NN/GradientDescent/GradientDescent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Widen the notebook container and show full DataFrames.
from IPython.core.display import display, HTML
display(HTML("<style>.container {width:100% !important;}</style>"))
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# # queries to SQL nutrient-foods tables
# +
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
import pymysql
# SECURITY NOTE(review): database credentials are hard-coded in the URL;
# move them to an environment variable or config file before sharing.
# NOTE(review): Engine.table_names() is deprecated/removed in SQLAlchemy 1.4+
# (use sqlalchemy.inspect(engine).get_table_names()) — confirm the pinned version.
engine = create_engine("mysql+pymysql://root:tennis33@localhost/dashcnf?charset=utf8mb4")#, echo=True)
print(engine.table_names())
# -
import pandas as pd
table_name = 'calcium_foods'
sql = "SELECT * from " + table_name
#query = '''
#SELECT * from calcium-foods
#'''
print(sql)
df = pd.read_sql_query(sql, engine)
#conn = engine.connect()
#table_name = 'calcium-foods'
#df = pd.read_sql_query(query, engine )
print(df.head())
df.columns
df.iloc[0]
# # Test LIKE %food name% with CNF
# record num matches, num mismatches, USDA food matches
#
# # Need USDA database in MySQL (link also has CNF in MySQL)
#
# https://github.com/m5n/nutriana.git
# ! git clone https://github.com/m5n/nutriana.git
# ### mysql command
# source file.sql
# +
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
import pymysql
# SECURITY NOTE(review): hard-coded credentials again — see note above for
# the dashcnf engine; same applies here.
engine_usda = create_engine("mysql+pymysql://root:tennis33@localhost/usda_nndsr")#, echo=True)
print(engine_usda.table_names())
# +
import pandas as pd
# Pull the three USDA reference tables we join on:
# want Long_Desc and NDB_No
sql = "SELECT * from FOOD_DES"
# want NDB_No, Nutr_No, Nutr_Val
sql2 = "SELECT * from NUT_DATA"
# want Nutr_No, NutrDesc, Units
sql3 = "SELECT * from NUTR_DEF"
food_desc_df = pd.read_sql_query(sql, engine_usda)
nut_data_df = pd.read_sql_query(sql2, engine_usda)
nutr_def_df = pd.read_sql_query(sql3, engine_usda)
# -
# find lens
print(len(food_desc_df.index))
print(len(nut_data_df.index))
print(len(nutr_def_df.index))
# +
#CNF mysql (Canadian Nutrient File)
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
import pymysql
engine_cnf = create_engine("mysql+pymysql://root:tennis33@localhost/canadian_nf")#, echo=True)
print(engine_cnf.table_names())
# +
import pandas as pd
# want Long_Desc and NDB_No
sql_cnf = "SELECT * from FOOD_NAME"
cnf_foodname_df = pd.read_sql_query(sql_cnf, engine_cnf)
# +
# Substring search (non-regex) for the target food in the CNF food names.
target = 'Cheese, swiss'
cnf_foodname_df[cnf_foodname_df['FoodDescription'].str.contains(target, regex=False)]
# -
# ## CNF hits 3 times, if miss can send error message "search similar term in input" or also search USDA
#search usda
# NOTE(review): unlike the CNF search above, this one omits regex=False, so
# the comma in `target` is treated as a literal here only by accident.
food_desc_df[food_desc_df['Long_Desc'].str.contains(target)]
# # Search foods by nutrients conclusion
# -connect this search to main search, list nutrients, either by using the default MongoDB CNF if CNF hit, otherwise new search on USDA MySQL
#
# Exact-match lookup of one food, then fetch its nutrient rows by NDB_No.
mask = food_desc_df['Long_Desc'] == "Cheese, swiss"
food = food_desc_df[mask]
print(type(food))
print(food)
ndb_no=food['NDB_No'].values
print(len(ndb_no), type(ndb_no))
print(str(ndb_no))
print(nut_data_df.columns)
'''
# want NDB_No, Nutr_No, Nutr_Val
sql2 = "SELECT * from NUT_DATA"
# want Nutr_No, NutrDesc, Units
sql3 = "SELECT * from NUTR_DEF"
'''
nut_data_df.head()
print(ndb_no[0])
# Unwrap the single-element array into a scalar food id.
ndb_no = ndb_no[0]
mask2 = nut_data_df['NDB_No'] == ndb_no
nutrient_rows = nut_data_df[mask2]
#print(type(nutrient_rows))
# Keep only the nutrient id and value columns for this food.
nut_num_and_val = nutrient_rows.loc[:, 'Nutr_No': 'Nutr_Val']
#nut_num_and_val.head()
# +
# step 3, get the nutrient names and units
#nutr_def_df
# add 2 cols to nut_num_and_val, iterate rows and for each Nutr_No,
# get NutrDesc and units and fill the two cols
# -
| 1_foodsByNutrients/queryNutrientFoodsAndUSDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
# Make the course-local pyutils package importable for the imports below.
sys.path.append('../../pyutils')
import numpy as np
import sklearn.metrics
import torch
import metrics
import revdiff as rd
import utils
# Fix the RNG seed so the synthetic labels generated below are reproducible.
np.random.seed(12)
# -
# # Metrics
# ## Binary classification
# +
## Generate random y_true / y_pred
ACC_APPROX = 0.84  # target fraction of predictions matching the true label
ZERO_PROP = 0.65   # approximate proportion of 0-labels
# BUG FIX: np.long was deprecated in NumPy 1.20 and removed in 1.24;
# use the explicit np.int64 instead.
y_true = (np.random.rand(12745) > ZERO_PROP).astype(np.int64)
y_pred = y_true.copy()
# Flip roughly (1 - ACC_APPROX) of the predictions to simulate classifier errors.
for i in range(len(y_pred)):
    if np.random.rand() > ACC_APPROX:
        y_pred[i] = not y_true[i]
# -
print(y_pred[:30])
print(y_true[:30])
# ## Accuracy
#
# $$\text{accuracy} = \frac{\text{nb true preditions}}{\text{total predictions}}$$
# Also works for multi-label classification
# +
def accuracy(y_true, y_pred):
    """Return the fraction of positions where y_true equals y_pred."""
    hits = y_true == y_pred
    return np.average(hits)
# Sanity check: our implementation agrees with scikit-learn's.
print(accuracy(y_true, y_pred))
print(sklearn.metrics.accuracy_score(y_true, y_pred))
# -
# ## Precision, Recall and F1-Score
#
# $tp$ (true positives): number of example with label 1 that are corectly classified.
# $fp$ (false positives): number of example with label 1 that are incorectly classified.
# $tn$ (true negatives): number of example with label 0 that are corectly classified.
# $fn$ (false negatives): number of example with label 0 that are incorectly classified.
#
# $$\text{precision} = \frac{tp}{tp + fp}$$
# $$\text{recall} = \frac{tp}{tp + fn}$$
# $$\text{F1} = 2 * \frac{\text{precision} * \text{recall}}{\text{precision }+ \text{recall}}$$
# +
def precision(y_true, y_pred):
    """tp / (tp + fp): fraction of predicted positives that are correct."""
    predicted_pos = y_pred == 1
    true_pos = (predicted_pos & (y_true == y_pred)).sum()
    return true_pos / predicted_pos.sum()

def recall(y_true, y_pred):
    """tp / (tp + fn): fraction of actual positives that are found."""
    true_pos = ((y_pred == 1) & (y_true == y_pred)).sum()
    actual_pos = (y_true == 1).sum()
    return true_pos / actual_pos

def f1_score(y_true, y_pred):
    """Harmonic mean of precision and recall."""
    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    return 2 * (prec * rec) / (prec + rec)
# Sanity check: each metric agrees with its scikit-learn counterpart.
print(precision(y_true, y_pred))
print(sklearn.metrics.precision_score(y_true, y_pred))
print(recall(y_true, y_pred))
print(sklearn.metrics.recall_score(y_true, y_pred))
print(f1_score(y_true, y_pred))
print(sklearn.metrics.f1_score(y_true, y_pred))
# -
| courses/ml/metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 10)
import json
# +
# Load the Nigeria LGA category metadata and keep the value/label columns.
with open('nga_lga.json') as json_data:
    data = json.load(json_data)
data_subset = data['var_catgry']
df = pd.DataFrame(data_subset)
df.head()
# -
df = df[['value','labl']]
df.head()
# Tokenize each label and drop its first token.
# NOTE(review): presumably the leading token is a numeric/state prefix —
# confirm against the source labels.
labl_split = list(df['labl'].str.split())
for sublist in labl_split:
    sublist.pop(0)
labl_split[:10]
# Re-join multi-word names so each entry is a single string (single-word
# names are left as one-element lists and flattened below).
for i in np.arange(len(labl_split)):
    if len(labl_split[i]) > 1:
        labl_split[i] = ' '.join(labl_split[i])
labl_split[:10]
def flatten_list(multilist):
    """Flatten one level of nesting: items of inner lists are spliced into
    the result, non-list elements are kept as-is."""
    flattened = []
    for entry in multilist:
        # Strict `type(...) is list` check: only plain lists are spliced,
        # other iterables (tuples, strings) are appended unchanged.
        if type(entry) is list:
            flattened.extend(entry)
        else:
            flattened.append(entry)
    return flattened
# Collapse the mixed list (strings and one-element lists) into flat strings.
new_labels = flatten_list(labl_split)
new_labels[:10]
df['lga'] = new_labels
df.head()
# Normalize capitalization to Title Case for consistent display/joins.
df['lga'] = df['lga'].str.title()
df.head()
df.to_csv('nga_lga_labels.csv', index=False)
| src/data_processing/nga_lga_cleanup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="XoS0ZzVRRrsq"
# # Paddy Doctor - Paddy Disease Classification
#
# > "A Multi Class Classification problem, for classifying the diseases in the Paddy Plant"
#
# - toc: true
# - branch: master
# - badges: true
# - comments: true
# - categories: [paddy, plant, disease, classification, multi, class, kaggle, convolutional, neural, network]
# - hide: false
# + id="mZdZ3o8AR_XV"
# Required modules
import os
import cv2
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from zipfile import ZipFile
from matplotlib import pyplot as plt
# + id="oPNZva7RSKUj"
# Config
# %matplotlib inline
plt.rcParams['figure.figsize'] = (12, 12)  # default figure size for all plots
# + [markdown] id="SuAyvXST3goH"
# Before running the below cell, upload your kaggle token, to make sure an error doesn't pop up.
# + colab={"base_uri": "https://localhost:8080/"} id="1SmGGqHmSKYm" outputId="e1850900-4313-45e9-a6a3-2af5ad69d3a8"
# Create kaggle folder
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# + colab={"base_uri": "https://localhost:8080/"} id="wYF67PGbSkbB" outputId="cf666fe4-a752-477c-eef9-8aa3bf6fb6fe"
# Test the command
# !kaggle competitions download -c paddy-disease-classification
# + id="MEK4jkJQS0fX"
# Extract the zip file
with ZipFile('/content/paddy-disease-classification.zip', 'r') as zf:
zf.extractall('./')
# + [markdown] id="w5QtnKGG3VZ3"
# ## Loading the data
# + id="WR3HJnXiCWB1"
# Load Constants
img_dim = 256
batch_size = 32
training_dir = './train_images'
testing_dir = './test_images'
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="wbZsjCQ2TBjO" outputId="8ae03160-8bbe-48f6-80b6-6395323e9668"
# Load the data
train = pd.read_csv('train.csv')
train.head()
# + colab={"base_uri": "https://localhost:8080/"} id="c6jC3ulMBO6t" outputId="fc2334cb-35b6-4fb2-9ed6-18e9f11ea106"
# Load the image data
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1/255.,
horizontal_flip=True,
vertical_flip=True,
validation_split=0.2
)
train_generator = datagen.flow_from_directory(
training_dir,
subset='training',
seed=88,
target_size=(img_dim, img_dim),
batch_size=batch_size,
class_mode='categorical'
)
valid_generator = datagen.flow_from_directory(
training_dir,
subset='validation',
seed=88,
target_size=(img_dim, img_dim),
batch_size=batch_size,
class_mode='categorical'
)
# + colab={"base_uri": "https://localhost:8080/"} id="DNBkmG1_BO-K" outputId="bb665a41-4406-4014-8ee6-2f4069fa55bc"
# Class Indices
train_generator.class_indices
# + [markdown] id="7s3tfaGn7yhT"
# ## Modelling
# + [markdown] id="v0ZC8jlf2h-8"
# ### Approach-1
#
#
# + [markdown] id="aGzAVbL-2lEU"
# Use of a hand-picked model for modelling the problem
# + colab={"base_uri": "https://localhost:8080/"} id="wqB4wkg1DO3b" outputId="e6a22de8-5f9c-4d5f-fb7d-c2e209ae29aa"
# Defining the model
model = tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(256, 256, 3)),
tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax'),
])
model.summary()
# + [markdown] id="Zp9my_0lfcm3"
# ### Approach-2
# + colab={"base_uri": "https://localhost:8080/"} id="bvRHzrkY70qP" outputId="3d59f038-b7df-4ff6-cf44-e0f49c3bd62a"
# Loading the pre-trained model: a frozen MobileNetV2 backbone with a small
# fully-connected classification head for the 10 paddy disease classes.
input_layer = tf.keras.layers.Input(shape=(img_dim, img_dim, 3))
pretrained = tf.keras.applications.mobilenet_v2.MobileNetV2(include_top=False, weights='imagenet', input_tensor=input_layer, classes=10)
# Freeze the backbone so only the new head is trained.
for layer in pretrained.layers:
    layer.trainable = False
custom_network = tf.keras.layers.Flatten()(pretrained.output)
custom_network = tf.keras.layers.Dense(32, activation='relu')(custom_network)
custom_network = tf.keras.layers.Dense(16, activation='relu')(custom_network)
# BUG FIX: the output layer previously used 'relu', but the model is compiled
# with categorical_crossentropy (which expects probabilities, not logits).
# 'softmax' makes the 10-way output a valid probability distribution.
custom_network = tf.keras.layers.Dense(10, activation='softmax')(custom_network)
model = tf.keras.models.Model(inputs=input_layer, outputs=custom_network)
model.summary()
# + id="HyI8synbFH0u"
# Callbacks
model_checkpoint = tf.keras.callbacks.ModelCheckpoint('custom_model_checkpoint.hdf5', save_best_only=True)
# + colab={"base_uri": "https://localhost:8080/"} id="2IcMZXPDMrNW" outputId="1222a4f9-c017-44eb-e159-720cd645eeb8"
# Compile the model.
# FIX: `lr` is a deprecated alias (removed in recent tf.keras releases);
# the supported keyword is `learning_rate`.
optim = tf.keras.optimizers.Adam(learning_rate=0.001)
loss = tf.keras.losses.categorical_crossentropy
metrics = ['acc']
model.compile(optimizer=optim, loss=loss, metrics=metrics)
# + colab={"base_uri": "https://localhost:8080/"} id="PtQK0ZLRQ7-Y" outputId="14c17d47-49e6-4b2c-d199-229f71b00e48"
# Fitting the model
epochs = 15
batch_size = 64
model.fit(train_generator, validation_data=valid_generator, epochs=epochs, batch_size=batch_size, shuffle=True, callbacks=[model_checkpoint])
# + id="6Kjgr590MypO"
# Loading the best model
model = tf.keras.models.load_model('custom_model_checkpoint.hdf5')
# + colab={"base_uri": "https://localhost:8080/"} id="B1GsK4GWj2Pq" outputId="cbff3e86-d4e9-4a33-9988-bf64b0e65857"
# Loading the test data
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255).flow_from_directory(
directory=testing_dir,
target_size=(256, 256),
classes=['.'],
shuffle=False,
seed=88
)
# + colab={"base_uri": "https://localhost:8080/"} id="-NJUfjJRSmsW" outputId="50ab0b6b-0d53-4205-af99-50d9716cc767"
# Getting test predictions
test_preds = model.predict(test_generator, verbose=1)
test_preds = np.argmax(test_preds, axis=1)
# + id="16sIVsXSStDz"
# Mapping the labels to disease names
inverse_map = {v: k for k, v in train_generator.class_indices.items()}
test_preds = [inverse_map[idx] for idx in test_preds]
# + colab={"base_uri": "https://localhost:8080/"} id="8lLJCaLNStF4" outputId="b089a730-a62c-42b3-b8ef-f3527e0517c0"
# Saving the output
filenames = test_generator.filenames
output = pd.DataFrame({"image_id": filenames, "label": test_preds})
output.image_id = output.image_id.str.replace('./', '')
output.to_csv("submission.csv",index=False)
# + colab={"base_uri": "https://localhost:8080/"} id="xlEr8HgAeQiu" outputId="20d9ca93-a3e1-4369-e584-a0ed411babd6"
# Submission
# !kaggle competitions submit -c paddy-disease-classification -f submission.csv -m "Custom Model with best saved V2 Network, epochs=25"
# + [markdown] id="c6nzJyMpN8NV"
# Accuracy: 0.71
| _notebooks/2022-05-12-Paddy-Doctor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ATSC 500 Assignment IX (structure function)
# +
import numpy as np
from numba import jit
from glob import glob
from matplotlib import pyplot as plt
% matplotlib inline
# -
name = glob('_data/ATSC-500/aircraft.npz')
print(name)
data = np.load(name[0])
wvel = data['wvel']
wvel = wvel - np.mean(wvel)
# $$
# S_p = \frac{1}{N}\sum_{i=0}^{N}\left[f(x + L) - f(x)\right]^p
# $$
#
# where $L$ is a given constant.
@jit(nopython=True)
def strfun(data, steps, p):
    """
    Order-p structure function of a 1-D series.

    Adapted from https://clouds.eos.ubc.ca/~phil/courses/atsc500/html/strfun_fft.html

    For each separation L = 1..steps, computes the mean of
    |f(x) - f(x - L)|**p over all valid x in the first half of the record.
    Returns an array of length `steps`.
    """
    L = len(data)
    L2 = np.int32(L/2.)
    # Candidate separation distances (in samples); only the first half of the
    # record is scanned so every separation has pairs available.
    X = np.arange(1, L2)
    strfun = np.zeros(X.shape)
    count = np.zeros(X.shape)
    for str_ind, step in enumerate(X[:steps]):
        data_inds = np.arange(step, L2)
        for ind in data_inds:
            # Accumulate |f(x) - f(x - step)|^p for this separation.
            strfun[str_ind] += np.abs(data[ind] - data[ind - step])**p
            count[str_ind] += 1
        # Convert the accumulated sum into a mean.
        strfun[str_ind] = strfun[str_ind]/count[str_ind]
    return strfun[:steps]
L = 999
P = np.arange(2, 6)  # structure-function orders 2..5
xi = np.zeros(len(P))*np.nan
strfuns = np.zeros([L, len(P)])*np.nan
scaling = np.zeros([L, len(P)])*np.nan
K34scale = np.zeros([L, len(P)])*np.nan
X = np.arange(1, L+1)
for i, p in enumerate(P):
    strfuns[:, i] = strfun(wvel, L, p)
    # Least-squares fit of log(S_p/S_p(1)) ~ xi * log(L) through the origin.
    # NOTE(review): lstsq returns its solution as a (1, 1) array here, and
    # assigning that into the scalar slot xi[i] relies on NumPy's size-1
    # array coercion (deprecated in recent NumPy); also no rcond= is passed,
    # which triggers a FutureWarning on modern NumPy.  Confirm before upgrading.
    xi[i], _, _, _ = np.linalg.lstsq(np.log(X).reshape(-1, 1), np.log(strfuns[:, i]/strfuns[0, i]).reshape(-1, 1))
    scaling[:, i] = strfuns[0, i]*X**(xi[i])
    K34scale[:, i] = strfuns[0, i]*X**(p/3)  # Kolmogorov p/3 scaling for comparison
# +
# Compare the measured structure functions with the least-squares fit and
# Kolmogorov's p/3 scaling law, one panel per order.
R1 = plt.cm.RdBu(25)
B1 = plt.cm.RdBu(250)
B2 = plt.cm.RdBu(175)

fig, AX = plt.subplots(1, len(P), figsize=(15.5, 3.75))
AX = AX.flat
strs = ['(a)', '(b)', '(c)', '(d)']

for i, ax in enumerate(AX):
    ax.grid(linestyle=':')
    ax.spines["top"].set_visible(False)
    #ax.spines["left"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.xaxis.set_tick_params(labelsize=14)
    ax.yaxis.set_tick_params(labelsize=14)
    #ax.set_ylim([1e-4, 1e2])
    [j.set_linewidth(2.5) for j in ax.spines.values()]
    # NOTE(review): string "on"/"off" values for tick_params are deprecated in
    # newer matplotlib (booleans expected); kept as-is for compatibility with
    # the environment this notebook was written against.
    ax.tick_params(axis="both", which="both", bottom="off", top="off", \
                   labelbottom="on", left="off", right="off", labelleft="on")
    ax.set_title(strs[i]+' Order-{} str. fun.'.format(i+2), fontsize=14)
    ax.loglog(X, strfuns[:, i], '-', color=R1, lw=4, label='Miami data')
    ax.loglog(X, scaling[:, i], '--', color=B1, lw=2.5, label='Least square fit')
    ax.loglog(X, K34scale[:, i], '--', color=B2, lw=2.5, label="Kolmogorov's scaling law")
    # Typo fix: "Fited" -> "Fitted" in the annotation text.
    ax.text(0, -0.3, "Fitted scaling {:.2f}\nKolmogorov's scaling {:.2f}".format(xi[i], (i+2)/3), \
            fontsize=14, transform=ax.transAxes)

AX[0].text(0.05, 0.925, 'Unit [m/s]', fontsize=14, transform=AX[0].transAxes)
AX[0].spines["left"].set_visible(True)
AX[0].tick_params(labelleft="on")
LG = AX[0].legend(bbox_to_anchor=(1.05, -0.35), prop={'size':14}); LG.draw_frame(False)
plt.tight_layout()
# -
| ATSC_500/ATSC_500_Assignment_IX_structure_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nyp-sit/it3103-tutors/blob/main/transformer_overview.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="el1cqlSpTcWe"
# # A Quick Tour of What Transformers can do?
#
# In this exercise, we will use Hugging Face Transformer library to perform some common NLP tasks.
#
# We will use the pipeline() function, which supports several NLP tasks such as classification, summarization, machine translation and so on. For a list of support tasks see the documentation [here](https://huggingface.co/transformers/main_classes/pipelines.html#transformers.pipeline). Pipeline connects a task-specific model with its necessary preprocessing and postprocessing steps, allowing us to directly input any text and get an intelligible answer.
# + [markdown] id="-nWsvPM7TcWg"
# ### Installation of Transformers library
#
# Install the Transformers and Datasets libraries to run this notebook.
# + id="3IfOjzLPTcWh"
# install the extra package "sentencepiece" required for machine translation tasks
# !pip install datasets transformers[sentencepiece]
# + [markdown] id="pnczDwT-b9cp"
# ### Sentiment Analysis
#
# This pipeline uses the default model fine-tuned on Stanford Sentiment Treebank v2 dataset - "distilbert-base-uncased-finetuned-sst-2-english" to classify if a text express positive or negative sentiment.
#
# If you want to use other models available from Hugging Face models library, you can specify it in the parameter `pipeline("sentiment-analysis", model=???)`.
#
# + id="RllwJ1cdTcWi"
from transformers import pipeline
classifier = pipeline("sentiment-analysis")
classifier("I fell asleep during the movie.")
# + [markdown] id="VmNqgbWbelAF"
# ### Zero-shot classification
#
# Zero-shot-classification pipeline allows you to specify which labels to use for the classification, so you don’t have to rely on the labels of the pretrained model. You can classify the text using the labels you specified.
#
# This is especially useful for real-world projects where you have a lot of unlabelled data and do not have the resources to annotate your data. Zero-shot classification allows you to quickly annotate your dataset.
# + id="oWQTitzLTcWj"
from transformers import pipeline
classifier = pipeline("zero-shot-classification")
classifier(
"The CO2 emission has been growing at an alarming rate.",
candidate_labels=["environment", "politics", "business"],
)
# + [markdown] id="-Gk1zo84gCoC"
# ### Text Generation
#
# Now let’s see how to use a pipeline to generate some text. The main idea here is that you provide a prompt and the model will auto-complete it by generating the remaining text. This is similar to the predictive text feature that is found on many phones. Text generation involves randomness, so it’s normal if you don’t get the same results as shown below. The default model used is gpt-2.
#
# You can control how many different sequences are generated with the argument `num_return_sequences` and the total length of the output text with the argument `max_length`.
#
#
# + id="iZylpuzgTcWk"
from transformers import pipeline
generator = pipeline("text-generation")
generator("<NAME> whipped out his wand and", max_length=50, num_return_sequences=2)
# + [markdown] id="KZLHEFBoqwyX"
# Try generating text in another language.
#
# Go to the [Hugging Face Model Hub](https://huggingface.co/models) and click on the corresponding tag on the left to display only the supported models for text generation task. You can then refine your search for a model by clicking on the language tags, and pick a model that will generate text in another language.
# + [markdown] id="Kfx7cu_yoQd4"
# ### Mask filling
#
# The next pipeline you’ll try is fill-mask. The idea of this task is to fill in the blanks in a given text.
#
# The top_k argument controls how many possibilities you want to be displayed. Note that here the model fills in the special <mask> word, which is often referred to as a mask token. Other mask-filling models might have different mask tokens, so it’s always good to verify the proper mask word when exploring other models.
# + id="QH0gRd3vTcWl"
from transformers import pipeline
unmasker = pipeline("fill-mask")
unmasker("The tech giant has been accused of trademark <mask> by other companies.", top_k=2)
# + [markdown] id="_4MrbU1nt_sn"
# ### Named Entity Recognition
#
# Named entity recognition (NER) is a task where the model has to find which parts of the input text correspond to entities such as persons, locations, or organizations. For example, `<NAME>` was the ex-mayor of `New York`. <NAME> will be identified as PER, whereas New York will be identified as LOC.
#
# We pass the option `grouped_entities=True` in the pipeline creation function to tell the pipeline to regroup together the parts of the sentence that correspond to the same entity, e.g. "Michael" and "Bloomberg" are parts that refer to the same person.
# + id="TrEVHBKTTcWm"
from transformers import pipeline
ner = pipeline("ner", grouped_entities=True)
ner("<NAME> previously lived at 38 Oxley Road.")
# + [markdown] id="euYWzKg3wPBg"
# ### Question Answering
#
# The question-answering pipeline answers questions using information from a given context. Note that the answer is extracted from the given context and not generated. The `start` and `end` in the example below tells you the span of the text in the context that provide the answer.
# + id="CSgyTlnRTcWm"
from transformers import pipeline
question_answerer = pipeline("question-answering")
question_answerer(
question="What course I am studying?",
context="I am currently studying part time in NYP, taking a course in Specialist Diploma in Applied AI.",
)
# + [markdown] id="Ym_9-2BhxjIn"
# ### Summarization
#
# Summarization is the task of reducing a text into a shorter text while keeping all (or most) of the important aspects referenced in the text. Like with text generation, you can specify a `max_length` or a `min_length` for the result.
# + id="iuBDVgJ-TcWn"
from transformers import pipeline
summarizer = pipeline("summarization")
summarizer(
"""
America has changed dramatically during recent years. Not only has the number of
graduates in traditional engineering disciplines such as mechanical, civil,
electrical, chemical, and aeronautical engineering declined, but in most of
the premier American universities engineering curricula now concentrate on
and encourage largely the study of engineering science. As a result, there
are declining offerings in engineering subjects dealing with infrastructure,
the environment, and related issues, and greater concentration on high
technology subjects, largely supporting increasingly complex scientific
developments. While the latter is important, it should not be at the expense
of more traditional engineering.
Rapidly developing economies such as China and India, as well as other
industrial countries in Europe and Asia, continue to encourage and advance
the teaching of engineering. Both China and India, respectively, graduate
six and eight times as many traditional engineers as does the United States.
Other industrial countries at minimum maintain their output, while America
suffers an increasingly serious decline in the number of engineering graduates
and a lack of well-educated engineers.
"""
)
# + [markdown] id="JiajlXFFx35s"
# ### Translation
#
# You can choose the model that corresponds to the language pair you want to translate. For example, if you want to translate from fr to en, you need to choose the model that has a naming like "mt-fr-en". Similary, if I want to translate from English to Chinese, then it should be "mt-en-zh".
# + id="5xC6xIXtTcWn"
from transformers import pipeline
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-zh")
translator("US government has been slow in responding to the threat of pandemic.")
| transformer_overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python3
# name: python3
# ---
# # Theano Variables
# Theano Tensor
import theano.tensor as T
import pydot
import graphviz
import matplotlib.pyplot as plt
# We can create a scalar, vector, and matrix as follows:
c = T.scalar('c')
v = T.vector('v')
A = T.matrix('A')
# We also have Tensors, which work with dimensionality 3 and up. This is commonly used when dealing with images that have _not_ been flattened. For instance, if we had a 28x28 image, and we wanted to store the images as squares and we had $N$ images, we would have an $Nx28x28$ (3 dimensional) tensor.
#
# Notice that the variables we have created so far _do not have values_, they are just symbols. This means we can even do algebra on them:
# Dot production
w = A.dot(v)
# How do we actually set values to these variables? This is where _theano functions_ come into play.
import theano
matrix_times_vector = theano.function(inputs=[A,v], outputs=w)
# Now we can import numpy so we can create real arrays and call the function:
import numpy as np
# +
A_val = np.array([[1,2], [3,4]])
v_val = np.array([5,6])
w_val = matrix_times_vector(A_val, v_val)
w_val
# -
# One of the greatest benefits of theano is that it links all of these variables up into a graph. We can use that structure to calculate gradients for you, using the chain rule! In theano, regular variables are _not_ updateable. In order for a variable to be updateable it must be a _shared_ variable.
x = theano.shared(20.0, 'x')
y = theano.shared(20.0)
y
# We can now create a simple cost function that we can solve ourselves, and that we know has a global minimum.
cost = x*x + x + 1
theano.printing.pydotprint(cost, var_with_name_simple=True)
# <img src="https://drive.google.com/uc?id=1g-p9MGa2TLbzVvXofLdY7vIMyZNe46Dm">
# Now we can tell theano how to update $x$ by giving it an update expression:
x_update = x - 0.3*T.grad(cost, x)
# What is nice about theano is that it calculates gradients automatically. The `grad` function takes in two parameters. The first is the function you want to take the gradient of, and the second is the variable you want the gradient with respect to.
#
# We can now create a theano train function. It will be like the previous function we created, except we are going to add a new argument which is updates. The updates argument takes in a list of tuples, and each tuple has two things in it:
# 1. The shared variable to update.
# 2. The update expression to use.
train = theano.function(inputs=[], outputs=cost, updates=[(x, x_update)])
# We have created a function to train, but we haven't actually called it yet. Notice that $x$ is not an input, it is the thing that we update. In later examples the inputs will be the data and labels. So, the inputs param takes in data and labels, and the updates param takes in your model parameters with their updates.
#
# Now we can write a loop to call the training function. Each iteration of the training function is going to calculate the `outputs`, then perform the `updates`. In this case the `updates` is setting `x` to the value resulting in the evaluation of `x_update`. The value returned from the evaluation of `x_update` is the current `x` value minus the learning rate, `0.3`, times the gradient of the `cost` with respect to `x`.
x_val_list = []
for i in range(10):
print('Iteration: ', i)
print('x value before calling train: ', x.get_value())
x_val_list.append(x.get_value().tolist())
cost_val = train()
print('cost: ',cost_val)
print('x value after calling train: ', x.get_value())
print('-----------------')
# We converge very quickly to the expected cost. We can print the optimal value of $x$ using the `get_value` function:
x.get_value()
# +
# Plot the cost surface and overlay the x values visited during training.
x_axis = np.arange(-30, 30, 0.1)

def cost_func(x):
    """The cost being minimised above: f(x) = x^2 + x + 1."""
    return x*x + x + 1

plt.figure(figsize=(8, 6))
for x_val in x_val_list:
    plt.plot(x_val, cost_func(x_val), 'or')
# BUG FIX: the original passed the undefined name `cost_function` to
# plt.plot (NameError); evaluate cost_func over the x axis instead.
plt.plot(x_axis, cost_func(x_axis))
# -
| notebooks/Programming Appendix/Theano Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/pymedphys/pymedphys/blob/master/examples/protyping/tensorflow/001-DeepMind_Images.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={} colab_type="code" id="smtp_dgDUB0x"
# # !pip install pynrrd
# # !git clone https://github.com/deepmind/tcia-ct-scan-dataset.git
# # !curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
# # !sudo apt install git-lfs
# # !cd tcia-ct-scan-dataset && git lfs pull
# + colab={} colab_type="code" id="CdF_1jruFr8u"
# # !pip install tf-nightly
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wrodJ26tUe3S" outputId="95ba0706-94fd-497a-c911-cc735c4c8d45"
import os
import pathlib
import functools
import random
import tqdm
import matplotlib.pyplot as plt
import numpy as np
import nrrd
# # %tensorflow_version 2.x
import tensorflow as tf
tf.__version__ # Expect '2.2.0-dev20200311'
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="pWpz0FLSImkl" outputId="138f0b5f-4289-497d-cb1c-35d9152b5557"
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
# + colab={"base_uri": "https://localhost:8080/", "height": 595} colab_type="code" id="kLmqnl_pIq5V" outputId="4c40ead3-094e-4bbe-e34e-c75e6f5587b1"
strategy = tf.distribute.experimental.TPUStrategy(resolver)
# + colab={} colab_type="code" id="2hXVAX7GY8_8"
data_root = pathlib.Path('tcia-ct-scan-dataset/nrrds')
test_data_roots = list(data_root.glob('test/*/*'))
validation_data_roots = list(data_root.glob('validation/*/*'))
# + colab={} colab_type="code" id="rS1odfgTZi9t"
# all_ct_files = list(data_root.glob('*/*/*/CT_IMAGE.nrrd'))
# all_ct_shapes = []
# for ct_file in tqdm.tqdm(all_ct_files):
# data, header = nrrd.read(ct_file)
# all_ct_shapes.append(np.shape(data))
# all_ct_shapes
# + colab={} colab_type="code" id="7GYTX2EFaIBt"
GRID_SIZE = 512
Z_CONTEXT_DISTANCE = 3
BATCH_SIZE = 8
TENSOR_TYPE = tf.bfloat16
# + colab={} colab_type="code" id="<KEY>"
# @functools.lru_cache() # This uses too much RAM
def load_data(path):
    # Thin wrapper over nrrd.read; returns a (data, header) tuple.
    # (Caching was tried — see the commented lru_cache above — but used too much RAM.)
    return nrrd.read(path)
# + colab={} colab_type="code" id="ZkHgtz8ycHuw"
def get_random_dataset(paths, structure_name, z_context_distance, tensor_type):
    """Sample one (CT slice stack, structure mask) training pair at random.

    Repeatedly picks a random scan and axial slice until the slice's
    presence/absence of the structure matches a random coin flip, giving a
    roughly balanced positive/negative sampling.
    """
    while True:
        data_path = random.choice(paths)
        structure_data, structure_header = load_data(data_path.joinpath(f'segmentations/{structure_name}.nrrd'))
        slices = np.shape(structure_data)[-1]
        # Keep z_context_distance slices of margin at each end of the scan.
        slice_choice = random.randint(z_context_distance, slices - 1 - z_context_distance)
        to_contain_structure = bool(random.randint(0, 1))
        no_structure_found = np.all(structure_data[:, :, slice_choice] == 0)
        # Accept the slice when the coin flip agrees with its content
        # (structure wanted and present, or not wanted and absent).
        if to_contain_structure != no_structure_found:
            break

    ct_data, ct_header = load_data(data_path.joinpath('CT_IMAGE.nrrd'))

    # 2*d + 1 consecutive CT slices centred on the chosen slice.
    slice_to_use = slice(
        slice_choice - z_context_distance,
        slice_choice + z_context_distance + 1
    )

    ct_slices = ct_data[:, :, slice_to_use]
    structure_slice = structure_data[:, :, slice_choice]

    # Input: (H, W, 2*d+1, 1); target: the centre slice's mask, (H, W, 1, 1).
    model_input = tf.convert_to_tensor(ct_slices[:,:,:,None], dtype=tensor_type)
    model_output = tf.convert_to_tensor(structure_slice[:,:,None,None], dtype=tensor_type)

    return model_input, model_output
# get_random_dataset(test_data_roots, 'Spinal-Cord', Z_CONTEXT_DISTANCE, TENSOR_TYPE)
# + colab={} colab_type="code" id="_mZcKQX1g-2i"
def create_dataset_pipeline(paths, batch_size, grid_size, structure_name, z_context_distance, tensor_type):
    """Build an endlessly-repeating, batched tf.data pipeline of random samples."""
    def dataset_generator():
        # Each generator invocation yields a single (input, target) pair.
        yield get_random_dataset(paths, structure_name, z_context_distance, tensor_type)

    slices_in = z_context_distance * 2 + 1
    output_shapes = (
        tf.TensorShape([grid_size, grid_size, slices_in, 1]),
        tf.TensorShape([grid_size, grid_size, 1, 1])
    )
    pipeline = tf.data.Dataset.from_generator(
        dataset_generator,
        (tensor_type, tensor_type),
        output_shapes
    )
    return pipeline.repeat().batch(batch_size)
# Bind the fixed hyper-parameters once; each call then only needs the paths.
create_dataset = functools.partial(
    create_dataset_pipeline,
    batch_size=BATCH_SIZE,
    grid_size=GRID_SIZE,
    structure_name='Spinal-Cord',
    z_context_distance=Z_CONTEXT_DISTANCE,
    tensor_type=TENSOR_TYPE
)

test_dataset = create_dataset(test_data_roots)
validation_dataset = create_dataset(validation_data_roots)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="JnD2LWrNiKWZ" outputId="95adf1c6-06db-405e-b3e8-c674b9ec577a"
# Smoke-test the input pipeline: pull one batch and report tensor shapes.
for model_input, model_output in test_dataset.take(1):
    # BUG FIX: `.shape` is an attribute, not a method — calling it as
    # `model_input.shape()` raises TypeError.
    print(f"{model_input.shape}, {model_output.shape}")
# + colab={} colab_type="code" id="e3Hrrfzuimwi"
tf.keras.backend.clear_session()
initializer = tf.random_normal_initializer(0., 0.02)
def down_block(x, depth, m, n, channels, pool):
    """Encoder block: residual convolution stack plus optional average-pool.

    Returns (pooled output, pre-pool tensor) — the latter is kept as the
    U-Net skip connection for the matching up_block.
    """
    convolution_sequence = tf.keras.Sequential(name=f'down-convolution-d{depth}')
    convolution_sequence.add(
        tf.keras.layers.ReLU()
    )
    # m in-plane 3x3 convolutions; 'same' padding preserves all dimensions.
    for _ in range(m):
        convolution_sequence.add(
            tf.keras.layers.Conv3D(
                channels, (3, 3, 1), strides=1, padding='same',
                kernel_initializer=initializer, use_bias=False)
        )
    # n rounds of (3x3 in-plane + 1x1x3 'valid') convolutions; each round
    # shrinks the z dimension by 2, which the short circuit crops to match.
    for i in range(n):
        convolution_sequence.add(
            tf.keras.layers.Conv3D(
                channels, (3, 3, 1), strides=1, padding='same',
                kernel_initializer=initializer, use_bias=False)
        )
        convolution_sequence.add(
            tf.keras.layers.Conv3D(
                channels, (1, 1, 3), strides=1, padding='valid',
                kernel_initializer=initializer, use_bias=False)
        )
        if i != n - 1:
            convolution_sequence.add(
                tf.keras.layers.ReLU()
            )
    # Residual path: crop z by n on each side to match the conv stack's
    # output, then a 1x1x1 projection to `channels`.
    short_circuit_sequence = tf.keras.Sequential(name=f'down-short-circuit-d{depth}')
    short_circuit_sequence.add(tf.keras.layers.Cropping3D((0,0,n)))
    short_circuit_sequence.add(
        tf.keras.layers.Conv3D(
            channels, (1, 1, 1), strides=1, padding='same',
            kernel_initializer=tf.ones_initializer(),
            use_bias=False)
    )
    x = tf.keras.layers.Add()(
        [convolution_sequence(x), short_circuit_sequence(x)]
    )
    unet_short_circuit = x
    # pool == 0 disables pooling (used at the deepest level).
    if pool != 0:
        x = tf.keras.layers.AveragePooling3D((pool, pool, 1), strides=None, padding='valid')(x)
    return x, unet_short_circuit
def fully_connected_block(x, input_size, internal_channels, output_channels):
    """Bottleneck block: collapse the spatial grid, apply residual dense
    layers, then expand back to an (input_size, input_size) grid."""
    # A Conv3D with kernel == full spatial extent acts as a fully-connected
    # layer over the (input_size x input_size) grid.
    x = tf.keras.layers.Conv3D(
        internal_channels,
        (input_size, input_size, 1),
        strides=1,
        padding='valid',
        kernel_initializer=initializer,
        use_bias=True
    )(x)
    # Two residual Dense layers.
    repeats = 2
    for _ in range(repeats):
        short_circuit = x
        x = tf.keras.layers.ReLU()(x)
        x = tf.keras.layers.Dense(internal_channels)(x)
        x = tf.keras.layers.Add()([x, short_circuit])
    x = tf.keras.layers.ReLU()(x)
    # Project back out and reshape into a spatial grid for the decoder.
    x = tf.keras.layers.Dense(input_size * input_size * output_channels)(x)
    x = tf.keras.layers.Reshape((input_size, input_size, 1, output_channels))(x)
    return x
def up_block(x, unet_short_circuit, depth, cropping, m, channels, up_scale):
    """Decoder block: upsample, merge the encoder skip connection, then a
    residual convolution stack."""
    # Crop the skip tensor's z extent to line up with the decoder tensor.
    unet_short_circuit = tf.keras.layers.Cropping3D((0,0,cropping))(
        unet_short_circuit
    )
    # up_scale == 0 disables upsampling (used at the deepest level).
    if up_scale != 0:
        x = tf.keras.layers.UpSampling3D(size=(up_scale, up_scale, 1))(x)
    # Merge along the z axis (axis=-2), not the channel axis.
    x = tf.keras.layers.Concatenate(axis=-2)([x, unet_short_circuit])
    convolution_sequence = tf.keras.Sequential(name=f'up-convolution-d{depth}')
    convolution_sequence.add(
        tf.keras.layers.ReLU()
    )
    # m rounds of in-plane 3x3 convolution + ReLU.
    for _ in range(m):
        convolution_sequence.add(
            tf.keras.layers.Conv3D(
                channels, (3, 3, 1), strides=1, padding='same',
                kernel_initializer=initializer, use_bias=False)
        )
        convolution_sequence.add(
            tf.keras.layers.ReLU()
        )
    # Residual 1x1x1 projection around the conv stack.
    internal_short_circuit = tf.keras.Sequential(name=f'up-short-circuit-d{depth}')
    internal_short_circuit.add(
        tf.keras.layers.Conv3D(
            channels, (1, 1, 1), strides=1, padding='same',
            kernel_initializer=tf.ones_initializer(),
            use_bias=False)
    )
    x = tf.keras.layers.Add()(
        [convolution_sequence(x), internal_short_circuit(x)]
    )
    return x
def Model(grid_size=GRID_SIZE, z_context_distance=Z_CONTEXT_DISTANCE, batch_size=BATCH_SIZE):
    """Assemble the full U-Net-style segmentation model.

    Each down_block entry is (depth, (m, n, channels, pool)); each up_block
    entry is (depth, (cropping, m, channels, up_scale)).  The inline
    comments note the expected (spatial size, z size) after each block.
    """
    down_block_params = [ # Start at 512, 3
        (0, (3, 0, 32, 2)), # 256, 3
        (1, (3, 1, 32, 2)), # 128, 2
        (2, (3, 1, 64, 4)), # 32, 1
        (3, (3, 1, 64, 4)), # 8, 1
        (4, (3, 0, 128, 0)), # 8, 0
    ]
    fully_connected_params = (8, 512, 128)
    up_block_params = [
        (4, (0, 4, 64, 0)),
        (3, (0, 4, 64, 4)),
        (2, (1, 4, 32, 4)),
        (1, (2, 4, 32, 2)),
        (0, (3, 4, 32, 2)),
    ]
    inputs = tf.keras.layers.Input(
        shape=[grid_size, grid_size, z_context_distance * 2 + 1, 1],
        batch_size=batch_size
    )
    x = inputs
    # Encoder: collect one skip tensor per depth.
    unet_short_circuits = []
    for depth, down_block_param in down_block_params:
        m, n, channels, pool = down_block_param
        x, unet_short_circuit = down_block(x, depth, m, n, channels, pool)
        unet_short_circuits.append(unet_short_circuit)
    input_size, internal_channels, output_channels = fully_connected_params
    x = fully_connected_block(x, input_size, internal_channels, output_channels)
    # Decoder: consume the skip tensors deepest-first.
    unet_short_circuits = reversed(unet_short_circuits)
    for unet_shot_circuit, (depth, up_block_param) in zip(unet_short_circuits, up_block_params):
        cropping, m, channels, up_scale = up_block_param
        x = up_block(x, unet_shot_circuit, depth, cropping, m, channels, up_scale)
    # Collapse the remaining z extent to 1 and squash to (0, 1).
    x = tf.keras.layers.Conv3D(
        1, (1, 1, 6), strides=1, padding='valid',
        kernel_initializer=tf.ones_initializer(),
        use_bias=False
    )(x)
    x = tf.keras.activations.sigmoid(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
# Build and compile the model under the TPU distribution strategy.
with strategy.scope():
    model = Model()
    # NOTE(review): the model ends in a sigmoid (probabilities), but the loss
    # is SparseCategoricalCrossentropy(from_logits=True); for a binary
    # segmentation mask a BinaryCrossentropy loss would be the expected
    # pairing — confirm intent before training.
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['sparse_categorical_accuracy'])
# + colab={} colab_type="code" id="1YoYCkZBAjPy"
tf.keras.utils.plot_model(model, show_shapes=True, dpi=64)
# + colab={} colab_type="code" id="QAMLAv2a_Iic"
# + colab={} colab_type="code" id="JPBauA9f_Jao"
# + colab={} colab_type="code" id="zRDfvvFE_NKP"
# + colab={} colab_type="code" id="hncaH1s9_NMy"
model.fit(test_dataset, epochs=5, validation_data=validation_dataset)
# + colab={} colab_type="code" id="r7SBTgZJ_NPR"
# + colab={} colab_type="code" id="b44pxFbS_NRK"
# + colab={} colab_type="code" id="PUef6h2YY1t8"
| examples/protyping/tensorflow/001-DeepMind_Images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy import optimize
import copy
# Global matplotlib font sizes applied to every figure in this notebook.
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
# +
# Compute per-position amino-acid frequencies of the untreated phage library,
# then sample random sequences from those frequencies to build a sequence logo.
aa_string = "ACDEFGHIKLMNPQRSTVWY"           # the 20 canonical amino acids
aa_dict = dict([(a, 0) for a in aa_string])  # zeroed count table for one position
# One independent count table per position of the 12-mer peptide.
all_aa = [copy.deepcopy(aa_dict) for _ in range(12)]

# Read the untreated library counts for each amino acid.
# File format: "<12-mer sequence> <count>" per line; '#' lines are comments.
with open("../fig_s2/untreated-library.counts") as f:
    for line in f:
        if line.startswith("#") or line.strip() == "":
            continue
        fields = line.split()  # split once instead of twice per line
        seq = fields[0]
        count = int(fields[1])
        for i, aa in enumerate(seq):
            all_aa[i][aa] += count

# Record counts as a 20 (amino acids) x 12 (positions) array.
# FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# float gives the same float64 dtype.
data = np.zeros((20, 12), dtype=float)
for i in range(12):
    for j in range(20):
        data[j, i] = all_aa[i][aa_string[j]]

# Normalize so each column sums to 1.0 (per-position frequency distribution).
data = data/np.sum(data, axis=0)
aa_to_grab = np.array(list(aa_string))

# Generate random sequences sampling from this library.
# (np.random is intentionally left unseeded; each run yields a fresh sample.)
gen_seq = []
for i in range(12):
    gen_seq.append(np.random.choice(aa_to_grab, size=10000, p=data[:, i]))

# Write the sampled sequences as FASTA; each sequence doubles as its own header.
gen_seq = np.array(gen_seq)
with open("seq-for-logo.fasta", "w") as out:
    for i in range(gen_seq.shape[1]):
        seq_as_string = "".join(gen_seq[:, i])
        out.write(f">{seq_as_string}\n{seq_as_string}\n")
# -
| fig_2a/fig_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating a Molecular Function Reference for OmicsIntegrator2
# +
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
# %matplotlib inline
from collections import defaultdict
def flatten(list_of_lists):
    """Concatenate a list of lists into a single flat list."""
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
import mygene
# -
# Gene/GO-term evidence records exported from the GONN repository
# (columns used below: GO_ID, GeneSymbol, Evidence).
genes = pd.read_csv('../../GONN/GO/molecular_function.csv')
genes.head()
# ## I. Evidence Codes
#
# Copied from http://geneontology.org/page/guide-go-evidence-codes
#
# ### Experimental Evidence codes
# Use of an experimental evidence code in a GO annotation indicates that the cited paper displayed results from a physical characterization of a gene or gene product that has supported the association of a GO term. The Experimental Evidence codes are:
#
# - Inferred from Experiment (EXP)
# - Inferred from Direct Assay (IDA)
# - Inferred from Physical Interaction (IPI)
# - Inferred from Mutant Phenotype (IMP)
# - Inferred from Genetic Interaction (IGI)
# - Inferred from Expression Pattern (IEP)
#
# ### High Throughput (HTP) evidence codes
# High throughput (HTP) evidence codes may be used to make annotations based upon high throughput methodologies. Use of HTP evidence codes should be carefully considered and follow the GOC's guidelines for their use. The High Throughput Evidence Codes are:
#
# - Inferred from High Throughput Experiment (HTP)
# - Inferred from High Throughput Direct Assay (HDA)
# - Inferred from High Throughput Mutant Phenotype (HMP)
# - Inferred from High Throughput Genetic Interaction (HGI)
# - Inferred from High Throughput Expression Pattern (HEP)
#
# ### Computational Analysis evidence codes
# Use of the computational analysis evidence codes indicates that the annotation is based on an in silico analysis of the gene sequence and/or other data as described in the cited reference. The evidence codes in this category also indicate a varying degree of curatorial input. The Computational Analysis evidence codes are:
#
# - Inferred from Sequence or structural Similarity (ISS)
# - Inferred from Sequence Orthology (ISO)
# - Inferred from Sequence Alignment (ISA)
# - Inferred from Sequence Model (ISM)
# - Inferred from Genomic Context (IGC)
# - Inferred from Biological aspect of Ancestor (IBA)
# - Inferred from Biological aspect of Descendant (IBD)
# - Inferred from Key Residues (IKR)
# - Inferred from Rapid Divergence (IRD)
# - Inferred from Reviewed Computational Analysis (RCA)
#
# ### Author statement evidence codes
# Author statement codes indicate that the annotation was made on the basis of a statement made by the author(s) in the reference cited. The Author Statement evidence codes are:
#
# - Traceable Author Statement (TAS)
# - Non-traceable Author Statement (NAS)
#
# ### Curator statement evidence codes
# Use of the curatorial statement evidence codes indicates an annotation made on the basis of a curatorial judgement that does not fit into one of the other evidence code classifications. The Curatorial Statement codes:
#
# - Inferred by Curator (IC)
# - No biological Data available (ND)
#
# ### Electronic Annotation evidence code
# All of the above evidence codes are assigned by curators. However, GO also uses one evidence code that is assigned by automated methods, without curatorial judgement. The Automatically-Assigned evidence code is
#
# - Inferred from Electronic Annotation (IEA)
# +
# Bucket GO evidence codes by trustworthiness of the annotation:
# experimental/author statements > high-throughput/curator > computational/electronic.
solid_codes = ['EXP','IDA','IPI','IMP','IGI','IEP','TAS','NAS']
sketchy_codes = ['HTP','HDA','HMP','HGI','HEP','IC']
bad_codes = ['ISS','ISO','ISA','ISM','IGC','IBA','IBD','IKR','IRD','RCA','IEA','ND']
# Count how many annotation rows fall into each bucket.
{'solid': len(genes[genes['Evidence'].isin(solid_codes)]), 'sketchy': len(genes[genes['Evidence'].isin(sketchy_codes)]), 'bad': len(genes[genes['Evidence'].isin(bad_codes)])}
# -
# #### We'll try with all the codes first, and then try with just the solid codes if we're dissatisfied.
# ## II. Find good ontology depth
# Directed GO molecular-function ontology graph; nodes carry 'depth' and 'name'
# attributes (used below to pick an annotation granularity).
g = nx.read_gpickle('../../GONN/GO/GO_molecular_function.pickle')
g
# Node-attribute table: one row per GO term, columns include 'depth' and 'name'.
df = pd.DataFrame.from_dict(dict(g.nodes(data=True))).transpose()
df.head()
# Inspect the terms available at each ontology depth before choosing one.
df[df.depth == 0]
df[df.depth == 1]
df[df.depth == 2]
# #### Depth 1 seems good.
# ## III. Build a mapping from genes to terms via subterms
# #### We need to find a list of terms, and for each term, all subterms. Then we can map all genes to the list of terms
# All depth-1 GO terms and, for each, every descendant term reachable in the DAG.
level1_terms = df[df.depth == 1].index.tolist()
terms_and_subterms = {term: np.unique(flatten(list(nx.dfs_successors(g, term).values()))).tolist() for term in level1_terms}
terms = [item for l in [subterms+[term] for term, subterms in list(terms_and_subterms.items())] for item in l]
# Sanity check: subterm lists overlap, so len(terms) exceeds the unique count.
len(terms), len(np.unique(terms)), len(df)
# Map each level-1 term to every (gene symbol, evidence code) pair annotated to
# the term itself or to any of its subterms.
terms_and_genes = {term: genes[genes.GO_ID.isin(subterms+[term])][['GeneSymbol', 'Evidence']].values.tolist() for term, subterms in terms_and_subterms.items()}
# NOTE(review): the loop variable `genes` in this comprehension shadows the
# global annotations DataFrame -- it works, but is fragile to refactoring.
genes_and_terms = flatten([[(gene, term, evidence) for [gene, evidence] in genes] for term, genes in terms_and_genes.items()])
# Collect all evidence codes per (gene, term) pair.
evidence = pd.DataFrame(genes_and_terms, columns=['gene','GO_ID','Evidence']).groupby(['gene', 'GO_ID'])['Evidence'].apply(list).to_frame()
evidence.head()
# #### We need to score the evidence for each term for each gene, in cases when a gene maps to two terms
# Numeric reliability score per evidence code (3 = solid ... 1 = bad).
# NOTE(review): the comprehension variable `type` shadows the builtin.
score = {**{type: 3 for type in solid_codes}, **{type: 2 for type in sketchy_codes}, **{type: 1 for type in bad_codes}}
# NOTE(review): returns a *nested* single-element list [[...]]; the apply(...)
# below relies on this shape to produce a one-column frame -- behavior is
# pandas-version-sensitive, confirm before simplifying.
def evidence_list_to_score_list(evidence_list): return [[score[evidence] for evidence in evidence_list]]
evidence_scores = evidence.apply(lambda row: evidence_list_to_score_list(row['Evidence']), axis=1)
evidence_scores.head()
# Total evidence score per (gene, term) pair.
evidence_scores = evidence_scores.apply(lambda row: sum(row['Evidence']), axis=1).to_frame().rename(columns={0:'Evidence'})
evidence_scores.head()
# Keep, per gene, the term(s) achieving the maximal total evidence score.
best_evidence = evidence_scores[evidence_scores['Evidence'] == evidence_scores.groupby(['gene'])['Evidence'].transform(max)]
best_evidence.head()
# #### Although we don't see them here, we need to deal with ties
len(best_evidence), len(best_evidence.reset_index().drop_duplicates('gene'))
# Break ties arbitrarily: keep the first tied term per gene.
best_evidence = best_evidence.reset_index().drop_duplicates('gene').set_index(['gene', 'GO_ID'])
best_evidence.head()
# One chosen level-1 GO term per gene.
gene_to_function_term = best_evidence.reset_index()[['gene', 'GO_ID']].set_index('gene')
gene_to_function_term.head()
# Human-readable names for the level-1 terms; drop the ' activity' suffix.
functions = df[df.depth == 1]['name'].to_frame()
functions
functions['name'] = functions['name'].str.replace(' activity','')
molecular_functions = gene_to_function_term.merge(functions, how='left', left_on='GO_ID', right_index=True)[['GO_ID','name']]
molecular_functions.head()
# ## IV. Add "Specific Function" information to each gene
# Repeat the term/subterm expansion one level deeper (depth-2 terms) to attach
# a more specific function label to each gene.
level2_terms = df[df.depth == 2].index.tolist()
terms_and_subterms = {term: np.unique(flatten(list(nx.dfs_successors(g, term).values()))).tolist() for term in level2_terms}
terms = [item for l in [subterms+[term] for term, subterms in list(terms_and_subterms.items())] for item in l]
len(terms), len(np.unique(terms)), len(df)
terms_and_genes = {term: genes[genes.GO_ID.isin(subterms+[term])][['GeneSymbol', 'Evidence']].values.tolist() for term, subterms in terms_and_subterms.items()}
genes_and_terms = flatten([[(gene, term, evidence) for [gene, evidence] in genes] for term, genes in terms_and_genes.items()])
evidence = pd.DataFrame(genes_and_terms, columns=['gene','GO_ID','Evidence']).groupby(['gene', 'GO_ID'])['Evidence'].apply(list).to_frame()
evidence.head()
# #### We've made a commitment for each gene to belong to a single level1 term, so let's remove all the level2 terms which aren't subterms of the previously selected level1 term for each gene
# Map each level-2 term to its (assumed unique) level-1 parent in the DAG.
predecessors = {term: list(g.predecessors(term)) for term in level2_terms}
predecessors = {term: [parent for parent in parents if parent in level1_terms] for term, parents in predecessors.items()}
# NOTE(review): parents[0] assumes at least one level-1 parent exists per
# level-2 term -- would raise IndexError otherwise.
predecessors = {term: parents[0] for term, parents in predecessors.items()}
predecessors = pd.Series(predecessors).rename_axis('level2_term').rename('level1_term').to_frame()
predecessors.head()
evidence = evidence.reset_index().merge(predecessors, how='left', left_on='GO_ID', right_index=True).set_index(['gene', 'level1_term', 'GO_ID'])
evidence.head()
# Keep only level-2 candidates consistent with the gene's chosen level-1 term.
evidence = evidence.reset_index().merge(molecular_functions['GO_ID'].rename('chosen_level1').to_frame(), how='left', left_on='gene', right_index=True)
evidence = evidence[evidence.level1_term == evidence.chosen_level1]
evidence = evidence.set_index(['gene','GO_ID'])['Evidence'].to_frame()
evidence.head()
# Same scoring / argmax / tie-breaking pipeline as for level-1 terms above.
evidence_scores = evidence.apply(lambda row: evidence_list_to_score_list(row['Evidence']), axis=1)
evidence_scores.head()
evidence_scores = evidence_scores.apply(lambda row: sum(row['Evidence']), axis=1).to_frame().rename(columns={0:'Evidence'})
evidence_scores.head()
best_evidence = evidence_scores[evidence_scores['Evidence'] == evidence_scores.groupby(['gene'])['Evidence'].transform(max)]
best_evidence.head()
len(best_evidence), len(best_evidence.reset_index().drop_duplicates('gene'))
best_evidence = best_evidence.reset_index().drop_duplicates('gene').set_index(['gene', 'GO_ID'])
best_evidence.head()
gene_to_function_term = best_evidence.reset_index()[['gene', 'GO_ID']].set_index('gene')
gene_to_function_term.head()
functions = df[df.depth == 2]['name'].to_frame()
functions
functions['name'] = functions['name'].str.replace(' activity','')
specific_molecular_functions = gene_to_function_term.merge(functions, how='left', left_on='GO_ID', right_index=True)[['GO_ID','name']]
specific_molecular_functions.head()
# Combine general (level-1) and specific (level-2) labels into one table
# and persist it for OmicsIntegrator2.
molecular_functions = molecular_functions.rename(columns={'GO_ID':'general_function_GO_ID', 'name':'general_function'})
molecular_functions.head()
molecular_functions = molecular_functions.merge(specific_molecular_functions, how='left', left_index=True, right_index=True).rename(columns={'GO_ID':'specific_function_GO_ID', 'name':'specific_function'})
molecular_functions.head()
molecular_functions.to_pickle('molecular_function_gene_annotation.pickle')
| src/annotation/molecular_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # String
"Hello World"
'Hello World'
'123'
123
type('Hello')
type('123')
type(123)
var = 'Hello'
print (var)
string1 = 'Hello'
string2 = 'World'
string1
string2
string1 + ' ' + string2
type(' ')
first_name = 'Ashish'
last_name = "Jangra"
first_name
last_name
full_name = first_name + ' ' + last_name
first_name + ' ' + last_name
full_name
full_name[:]
full_name[-13]
full_name[6:]
len(string2)
string2
string = "Hello World. How are you. I'm doing great."
string.split('.')
# # Integers
type(123)
type(-123)
type(0)
type("123")
12 + 13
'12' + '13'
12-13
31-12
12/4
12%5
12*5
2*2*2*2*2
2**5
3**5
num1 = 12
num2 = 13
num3 = num1 + 1
num1
num1 = num1 + 1
num1 *= 2
num1
# # Floats
type(12)
type(12.0)
-22.0/7
22%7.0
12.4 + 12.5
12.4 - 12.5
12.4 * 12.5
2.1**4
# # Boolean
type(True)
type(False)
True == 1
False == 0
1 == 1.0000001
var = False
type(var)
| Module 1/Data Types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 1. Data processing and exploratory analysis
# ### Download and Load ready-to-use data
import pandas as pd
import scTenifold as st
from scTenifold.data import list_data, fetch_data
# ### Get datasets from another repo
# Download and load the specified dataset if it is not in local directory, otherwise just load the dataset
# List the datasets scTenifold can fetch from its companion data repo.
list_data()
# get dict of data
AD_datasets = fetch_data("AD")
# + jupyter={"outputs_hidden": true} tags=[]
AD_datasets["AD"]
# -
# ### Use simulated data
from scTenifold.data import get_test_df
# 100 genes x 100 cells simulated expression matrix.
sim_data = get_test_df(100, 100)
# + jupyter={"outputs_hidden": true} tags=[]
sim_data.head(10)
# -
# ### Data visualization
from scTenifold.plotting import plot_embedding
# Split the 100 cells into three ad-hoc groups and embed with PCA (3D), t-SNE, UMAP.
plot_embedding(sim_data, groups={"G1": sim_data.columns.to_list()[:33],
                                 "G2": sim_data.columns.to_list()[33:66],
                                 "G3": sim_data.columns.to_list()[66:]},
               method='PCA',
               title="PCA result", size=15, plot_2D=False)
plot_embedding(sim_data, groups={"G1": sim_data.columns.to_list()[:33],
                                 "G2": sim_data.columns.to_list()[33:66],
                                 "G3": sim_data.columns.to_list()[66:]},
               method='TSNE',
               title="TSNE result", size=15)
plot_embedding(sim_data, groups={"G1": sim_data.columns.to_list()[:33],
                                 "G2": sim_data.columns.to_list()[33:66],
                                 "G3": sim_data.columns.to_list()[66:]},
               method='UMAP',
               title="UMAP result", size=15)
| docs/source/1_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.4 64-bit (''python39-2vM0bQN-'': pipenv)'
# name: python3
# ---
# ## Party 2 - Facebook's team in Menlo Park
import syft as sy
import numpy as np
# Silence syft's default logger for a cleaner demo output.
sy.logger.remove()
# ## Dataset Creation
# +
# Toy XOR training inputs (2 samples x 3 features) held by the Facebook party.
training_data_fb = np.array([
    [1,0,1],
    [1,1,1]]
)
training_targets_fb = np.array([[1,1]]).T
# -
# ## Logging into the domain
# Let's login into the domain
fb = sy.login(email="<EMAIL>", password="<PASSWORD>", port=8082)
# ## Upload the dataset to Domain node
#DP metadata Addition
# Wrap the raw arrays as private tensors with differential-privacy metadata:
# value bounds and the data-subject entity for each row.
training_data_fb = sy.Tensor(training_data_fb).private(
    min_val=0,
    max_val=1,
    entities=["Rasswanth"] * training_data_fb.shape[0],
    ndept=True
)
training_targets_fb = sy.Tensor(training_targets_fb).private(
    min_val=0,
    max_val=1,
    entities=["Rasswanth"] * training_targets_fb.shape[0],
    ndept=True
)
# +
# Upload a private dataset to the Domain object, as the root owner
fb.load_dataset(
    assets={
        "training_data":training_data_fb,
        "training_targets":training_targets_fb
    },
    name="Our training data for XOR networks!",
    description="Collected on Jan 27 2022"
)
# -
fb.datasets
# ### Create a Data Scientist User
#change budget before demo
# Account the data scientist will use; the budget caps their privacy spend.
fb.users.create(
    **{
        "name": "<NAME>",
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
        "budget":9_999_999
    }
)
fb.users
# ### Accept/Deny Requests to the Domain
fb.requests.pandas
# +
# fb.requests[-1].accept()
# -
| notebooks/smpc/Training Demo/Data Owner - Facebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8AXXi8pFSuyQ" colab_type="text"
#
# # **Assignment - 3: Improving Graphs**
#
# ---
# Course website: [SHALA-2020](https://shala2020.github.io/)
#
# This assignment will get you familiarized with improving plots/graphs in Python.
# + [markdown] id="zfzGTdwUS-eP" colab_type="text"
# ---
# ## Part 1 - Importing packages and loading data
# ---
# + [markdown] id="YQIKucddS_nA" colab_type="text"
# ###Import the following libraries:
#
# * ```numpy``` with an alias name ```np```,
# * ```pandas``` with an alias name ```pd```,
# * ```matplotlib.pyplot``` with an alias name ```plt```, and
# * ```seaborn``` with an alias name ```sns```.
#
# *You are free to use any other popular libraries*.
# + id="0BJxaUABR-4q" colab_type="code" outputId="ee400b7f-af4e-44b2-c2cf-450f6fba38d6" colab={"base_uri": "https://localhost:8080/", "height": 71}
# Load the libraries with their aliases
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] id="DCfU-kQrTa4h" colab_type="text"
# ### Using the files ```train.csv``` and ```moviesData.csv```, perform the following:
#
# * Load these file as ```pandas``` dataframes and store it in variables named ```train``` and ```movies``` respectively.
# * Print the first ten rows of ```train``` and last ten rows of ```movies```.
# + id="xIaAWjd_Tl_s" colab_type="code" outputId="61b34950-ad60-4939-91f8-f851d5f00b3a" colab={"base_uri": "https://localhost:8080/", "height": 549}
# Load train.csv and movies.csv
url_train = "https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Assignments/DataScience/L2/train.csv"
train = pd.read_csv(url_train)
# Print the first ten rows of train
train.head(10)
# + id="0o_LlJ04fzgK" colab_type="code" outputId="3c6418a0-469c-4cc1-a697-e58ad33ffc66" colab={"base_uri": "https://localhost:8080/", "height": 634}
# Load movies.csv
url_movies = "https://raw.githubusercontent.com/shala2020/shala2020.github.io/master/Lecture_Materials/Google_Colab_Notebooks/DataScience/L2/moviesData.csv"
movies = pd.read_csv(url_movies)
# Print the last ten rows of movies
movies.head(10)
# + [markdown] id="6fyLpq2LVuDl" colab_type="text"
# ---
# ## Part 2 - Drawing correlation and heatmap
# ---
# + [markdown] id="yFkmY_N9ThPT" colab_type="text"
# ### Using the correlation matrix, perform the following:
#
# * Find the correlation matrix for the ```movies```.
# * Draw a **heatmap** with the correlation matrix and display the values of correlation coefficients for each pair of features. Make sure the values are properly visible. You might want to adjust the figure size for this.
# * Create a ```mask``` to remove the duplicate values from the **heatmap**.
# + id="3JhqkVZ7VNej" colab_type="code" outputId="26f96367-e10b-4c3a-e24e-a9cb56c0f34e" colab={"base_uri": "https://localhost:8080/", "height": 272}
# Find out the correlation matrix
correlation_matrix = movies.corr()
print(correlation_matrix)
# + id="-MLMNlVQ4Qhm" colab_type="code" outputId="ad12c93a-4d0c-42aa-be20-ee381149e4c1" colab={"base_uri": "https://localhost:8080/", "height": 868}
# Draw the heatmap
plt.figure(figsize=(20,15))
sns.heatmap(correlation_matrix, annot=True)
plt.show()
# + id="m-U-oootigJQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f3f6367a-872b-43db-e6eb-dcd6da7e4199"
# Creating a mask for removing duplicates
# https://gist.github.com/Swarchal/e29a3a1113403710b6850590641f046c
# correlation_matrix.loc[:, :] = np.tril(correlation_matrix, k=-1)
# correlation_matrix
correlation_matrix = correlation_matrix[abs(correlation_matrix) >= 0.01].stack().reset_index()
correlation_matrix = correlation_matrix[correlation_matrix['level_0'].astype(str)!=correlation_matrix['level_1'].astype(str)]
# filtering out lower/upper triangular duplicates
correlation_matrix['ordered-cols'] = correlation_matrix.apply(lambda x: '-'.join(sorted([x['level_0'],x['level_1']])),axis=1)
correlation_matrix = correlation_matrix.drop_duplicates(['ordered-cols'])
correlation_matrix.drop(['ordered-cols'], axis=1, inplace=True)
correlation_matrix.sort_values(by=[0], ascending=False).head(10)
correlation_matrix
# + [markdown] id="I-VzLRngWuoq" colab_type="text"
# ---
# ## Part 3 - Drawing plots and customizing these
# ---
# + [markdown] id="syeshF3sW180" colab_type="text"
# ### Using a histogram, perform the following:
#
# * Draw a histogram of all the numeric features in ```train```. Please note that this can be done in a single command.
#
# * Find out whether there is any redundant features in ```train```, which can be dropped while building a model.
# + id="owsgy5JLZIIH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 298} outputId="81348d82-76eb-4f2e-f225-edd0fb136585"
# Draw a histogram
plt.figure(figsize=(15,10))
train._get_numeric_data().hist()
plt.show()
# + id="barZsZAj5S17" colab_type="code" colab={}
# Find out the redundant features
# Answer : DistanceFromHome,Education,EducationNumber
# + [markdown] id="g2hQHS-rZdbE" colab_type="text"
# ### Using a scatter plot, perform the following:
#
# * Draw a scatter plot between ```imdb_rating``` and ```audience_score```, and also fit a regression line.
# * Change the color of marker to red, size to 200, transparency to 0.3.
#
#
# + id="H-C3T6PHb5qT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="84c0a812-9759-457d-a430-7fa8fdc512db"
# Scatter plot to fit the regression line
sns.regplot(x = movies["imdb_rating"],
y = movies["audience_score"], ci = 95, color=(1,0,0,0.3))
plt.show()
# + id="UY_R6uF3kInV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 281} outputId="960a49c9-3b09-43b5-9cd3-852ae70a6dbc"
# Change marker of the scatter plot
sns.scatterplot(x = movies["imdb_rating"],
y = movies["audience_score"], ci = 95, color=(1,0,0,0.3),marker='+')
plt.show()
# + [markdown] id="4Qg3SvfpmNFj" colab_type="text"
# ### Using a scatter plot, perform the following:
#
# * Load the dataset ```iris```. This has been done for you.
# * Draw a scatter plot between ```petal_length``` and ```petal_width``` and color it according to the ```species```.
# * Change the size of marker to 100 and place the legend in ```lower right```.
# + id="7D_r9PIBlOe8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 280} outputId="187d0af1-3efd-468b-e228-6d5a608389a1"
# library & dataset
import seaborn as sns
df = sns.load_dataset('iris')
# Use the 'hue' argument to provide a factor variable
# Move the legend to an empty part of the plot
sns.scatterplot(df.petal_length,df.petal_width,hue=df.species)
plt.legend(loc='lower right')
plt.show()
# + [markdown] id="20Tunkqin5WZ" colab_type="text"
# ---
# ## Part 4 - Density Plots
# ---
# + [markdown] id="2IdfsDTgoX-O" colab_type="text"
# ### Using density plots, perform the following:
#
# * Load the dataset ```iris```. This has been done for you.
# * Draw the density plots for all the features ```sepal_width```, ```sepal_length```, ```petal_width```, and ```petal_length```. You may need to use subplots for plotting all the density plots.
# * Try drawing a [CDF](https://en.wikipedia.org/wiki/Cumulative_distribution_function) of these features. There is a [blog on medium](https://medium.com/@rishav.jnit/exploratory-data-analysis-eda-on-iris-dataset-using-python-cadd850c1fc6) on how to plot CDF. It will be covered in tutorial.
# + id="hZEjXe29n4r4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="9913f775-173c-4f01-aee9-d5f23d1a1e72"
# library and dataset
df = sns.load_dataset('iris')
p1=sns.kdeplot(df['sepal_width'],color='r',shade=True)
p2=sns.kdeplot(df['sepal_length'],color='g',shade=True)
p3=sns.kdeplot(df['petal_width'],color='b',shade=True)
p4=sns.kdeplot(df['petal_length'],color='y',shade=True)
# + [markdown] id="nj5n8rdKKgKe" colab_type="text"
# ### Using violin plot, perform the following:
#
# * Load the dataset ```iris```. This has been done for you.
# * Draw the violin plot of ```species``` versus ```sepal_length```.
# * Assign each group a color like
#
# * versicolor - green,
# * setosa - blue,
# * virginica - m
# + id="BRIpHxsgKgnG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 369} outputId="817e1ae5-fd95-41f3-9155-4de29eea5e7d"
# Draw the Scatter plots
sns.lmplot( x="sepal_length", y="sepal_width", data=df, fit_reg=False, hue='species', legend=False, palette=dict(setosa="blue", virginica="magenta", versicolor="green"))
plt.legend(loc='lower right')
plt.show()
# + id="zDRZgdJ8MMbs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="de318d73-86fe-4fa1-b901-47c6d15fe012"
# Draw a violin plot
# Change the color of each group
sns.violinplot(x=df["species"], y=df["sepal_length"],palette=dict(setosa="blue", virginica="magenta", versicolor="green"))
plt.show()
| 03-Assignment/DS_L3_20_April_Assignment_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ""
# +
import geomloss
import matplotlib.pyplot as plt
import numpy as np
import ot
import pandas as pd
import scipy.stats as st
import seaborn
import torch
import tqdm
from utils.fix_for_geomloss import sinkhorn_loop
# Monkey-patch geomloss's Sinkhorn loop with the local, gradient-correct version.
geomloss.sinkhorn_samples.sinkhorn_loop = sinkhorn_loop
# This is to fix the missing gradient for weights
# -
# $X \sim \sum_i w_i, \delta_{X_i}$ where the $X_i$'s are i.i.d. $U([-2, 2])$ and $w_i \propto f(X_i)$ where $f$ is the pdf of a normal distribution $ \mathcal{N}(0, \theta ^ 2)$ where $\theta \in \mathbb{R}^+$
# Global seed so the notebook's tensor draws are reproducible.
_ = torch.random.manual_seed(31415926)
def transport_from_potentials(x, f, g, eps, w, N):
    """Apply the entropic OT plan implied by dual potentials (f, g).

    x: (N, 1) particle locations; f, g: (N, 1) Sinkhorn potentials;
    eps: blur parameter; w: (N,) source weights; N: particle count.
    Returns the transported particles (N, 1) and uniform weights (N,).
    """
    # Quadratic ground cost between every pair of particles.
    pairwise_cost = (x.T - x) ** 2 / 2.
    # Primal plan recovered from the duals: exp((f + g - C) / eps^2) * w.
    dual_sum = f.T + g
    plan = torch.exp((dual_sum - pairwise_cost)/eps**2) * w.unsqueeze(1)
    transported = plan.T @ x
    uniform_weights = torch.full_like(f, 1/N).squeeze()
    return transported, uniform_weights
# +
# def get_mean_and_grad(N, loc_val, times=10, seed = 27182):
# _ = torch.random.manual_seed(seed)
# res = []
# loc = torch.tensor(loc_val, requires_grad=True)
# scale = 1.
# norm_dist = torch.distributions.Normal(loc, scale)
# for _ in range(times):
# X = torch.rand(N, requires_grad=True) * 4 - 2.
# weights = norm_dist.log_prob(X)
# max_weight = weights.max()
# stable_weights = (weights - max_weight).exp()
# scaled_weights = stable_weights / stable_weights.sum()
# uniform_weights = torch.full_like(scaled_weights, 1/N, requires_grad=True)
# epsilon = 0.01
# biasedSampleLoss = geomloss.SamplesLoss(reach=None, potentials=True, debias=False, scaling=0.9, blur=epsilon)
# alpha, beta = biasedSampleLoss(uniform_weights, X.unsqueeze(1), scaled_weights, X.unsqueeze(1))
# X_tilde, w_tilde = transport_from_potentials(X.unsqueeze(1), alpha, beta, epsilon, scaled_weights, N)
# res.append((X_tilde.mean(), torch.autograd.grad(X_tilde.mean(), [loc])[0], np.average(X.detach().numpy(), weights=scaled_weights.detach().numpy())))
# return res
def get_grad(N, loc_val, scale_val, seed = 27182):
    """Autodiff gradient of the reweighted-particle mean w.r.t. the location.

    Draws N uniform particles on [-2, 2], importance-weights them under a
    Normal(loc_val, scale_val) density, transports them back to uniform
    weights with entropy-regularised OT (geomloss Sinkhorn potentials), and
    differentiates the transported mean w.r.t. the location parameter.

    Returns the summed gradient as a plain float/numpy scalar.
    """
    _ = torch.random.manual_seed(seed)  # reproducible particle draw
    loc = torch.tensor(loc_val, requires_grad=True)
    scale = scale_val
    norm_dist = torch.distributions.Normal(loc, scale)
    X = torch.rand(N, requires_grad=True) * 4 - 2.
    # Stabilised exponentiation of the log-density -> normalised weights.
    weights = norm_dist.log_prob(X)
    max_weight = weights.max()
    stable_weights = (weights - max_weight).exp()
    scaled_weights = stable_weights / stable_weights.sum()
    uniform_weights = torch.full_like(scaled_weights, 1/N, requires_grad=True)
    epsilon = 0.01
    # Debiased=False + potentials=True returns the raw Sinkhorn duals.
    biasedSampleLoss = geomloss.SamplesLoss(reach=None, potentials=True, debias=False, scaling=0.9, blur=epsilon)
    alpha, beta = biasedSampleLoss(uniform_weights, X.unsqueeze(1), scaled_weights, X.unsqueeze(1))
    X_tilde, w_tilde = transport_from_potentials(X.unsqueeze(1), alpha, beta, epsilon, scaled_weights, N)
    return torch.autograd.grad(X_tilde.mean(), [loc])[0].detach().numpy().sum()
# -
# Fresh random seed for the smoke test below.
random_int = np.random.randint(1, 1e6, )
# NOTE(review): bare `torch.Tensor` expression is a no-op notebook leftover.
torch.Tensor
# +
# NOTE(review): finite_diff / auto_diff are never appended to below -- leftovers
# from an earlier version of this experiment.
finite_diff = []
auto_diff = []
seed = random_int
grad = get_grad(200, 0., 0.2, seed)
# -
def compute_grad_numerically(loc, scale):
    """Grid-based gradient of the weighted mean w.r.t. `loc`.

    Uses the score-function identity: the integrand is
    x * d/dloc log N(x; loc, scale) * N(x; loc, scale),
    averaged over a fixed 100-point grid on [-2, 2].
    """
    grid = np.linspace(-2, 2, 100)
    densities = st.norm.pdf(grid, loc, scale)
    integrand = grid * (grid - loc) / (scale ** 2) * densities
    return np.mean(integrand)
def compute_grad_diff(loc, scale, eps=1e-4):
    """Monte-Carlo forward finite-difference gradient of the weighted mean.

    Draws 10k uniform samples on [-2, 2] (unseeded, so the output is
    stochastic) and bumps `loc` by `eps` to approximate the derivative of
    the importance-weighted sample mean.
    """
    samples = np.random.uniform(-2, 2, 10000)

    def weighted_mean(center):
        # Importance-weight the fixed samples under N(center, scale).
        densities = st.norm.pdf(samples, center, scale)
        return np.average(samples, weights=densities)

    return (weighted_mean(loc + eps) - weighted_mean(loc)) / eps
# Compare the three gradient estimators over a grid of (loc, scale) pairs.
locs = [-0.5, -0.25, 0., 0.25, 0.5]
scales = [0.25, 0.5, 1., 1.5, 2.]
theoretical_gradients = [ [ compute_grad_numerically(loc, scale) for loc in locs ] for scale in scales ]
autodiff_gradients = [ [ get_grad(500, loc, scale ) for loc in locs ] for scale in scales ]
difference_gradients = [ [ compute_grad_diff(loc, scale) for loc in locs ] for scale in scales ]
np.array(theoretical_gradients)
np.array(difference_gradients)
# Tabulate (rows = scales, columns = locs) and dump LaTeX for the paper.
autodiff_gradients = pd.DataFrame(np.array(autodiff_gradients), columns = locs, index = scales )
theoretical_gradients = pd.DataFrame(np.array(theoretical_gradients), columns = locs, index = scales )
print(autodiff_gradients.to_latex())
print()
print(theoretical_gradients.to_latex())
# NOTE(review): `grads_plus`, `grads`, and `diff_grad` are not defined anywhere
# in this notebook -- the remaining cells are broken leftovers and will raise
# NameError if executed.
grads_plus.mean()
grads = np.array([ k.detach().numpy().sum() for k in grads ])
plt.hist(diff_grad / grads - 1)
diff_grad
grads
| TransportPlanGradient.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/elleneng/photo-colorization/blob/master/_notebooks/2020-08-03-post_no_blog.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="juR6VH7yBEbX" colab_type="code" colab={}
# + id="-5Wqx0NSBIYT" colab_type="code" colab={}
# + [markdown] id="WG5y9PJgBKlc" colab_type="text"
# # crianças tomando sorvete no verão de Nova Iorque
# foto tirada por xxx
# [fonte da imagem](https://i.redd.it/57ed044lifd51.jpg)
#
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - categories: crianças, sorvete
#
#
#
# + id="-HXSBdCOC9t7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="da57c1f1-2afd-4b51-cd85-9c81ddbf49f9"
# !wget https://raw.githubusercontent.com/awarischool/br-data-science/master/image-colorizer/deoldify_wrapper.py
# + id="OTguktexDFeG" colab_type="code" colab={}
# + id="BC9SaCYHDhjq" colab_type="code" colab={}
from google.colab import drive
# Mount Google Drive so colorized outputs can be persisted.
drive.mount('/content/drive')
# + [markdown] id="g9Ebx2nhD5XA" colab_type="text"
# deoldify
# + id="sTFg9wXED-cp" colab_type="code" colab={}
from deoldify_wrapper import DeOldify
# + id="GiA2MhDcE2-G" colab_type="code" colab={}
#hide
# Instantiate the DeOldify colorization model wrapper.
deo = DeOldify()
# + id="37Z2QvMwG8QM" colab_type="code" colab={}
#hide
# Colorize the black-and-white photo fetched from this URL.
url = 'https://i.redd.it/57ed044lifd51.jpg'
deo.colorize(url)
# + id="Y3B1g7TaHoZT" colab_type="code" colab={}
| _notebooks/2020-08-03-post_no_blog.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# ## General Equilibrium: The Aiyagari Model with endogenous labor supply
#
# This code solves for the market clearing interest rate in the Aiyagari model. It uses the EGM routines for the policy functions and Young's method for the distribution, as well as a bisection procedure to obtain the interest rate.
# +
#this notebook solves for the general equilibrium of the Aiyagari model using EGM with endogenous labor supply
using NBInclude
@nbinclude("EGM_labor.ipynb") # include EGM functions (make sure it is in your working directory)
# +
# Calibrated parameter bundle for the Aiyagari household problem, built with
# Parameters.jl's @with_kw so any field can be overridden by keyword,
# e.g. Household(beta = 0.99). Derived fields (sd, grids, transition matrix)
# are recomputed from the primitives on construction.
Household = @with_kw (apoints = 500, #asset grid
                    amax = 150, # asset max
                    beta = 0.986, # discount factor
                    alpha = 0.11, # capital share
                    deprec = 0.025, # depreciation rate
                    gamma = 2.0, # inverse elasticity of substitution
                    bc = 0, # borrowing constraint (must be weakly negative)
                    rho = 0.966, # autocorr of income process
                    uncond_sd = 0.5, # unconditional sd
                    num_states = 7, # number of states for income process
                    sd = sqrt(1-rho^2)*uncond_sd, # stand. dev. of deviation process
                    mean = 0, # mean of income process
                    transition_matrix = rowenhorst(mean, uncond_sd, rho, num_states)[1], # transition matrix
                    ygrid = rowenhorst(mean, uncond_sd, rho, num_states)[2], # grid for income process
                    Amat = setgrids(bc, apoints, amax, num_states, ygrid)[1], # asset grid
                    Ymat = setgrids(bc, apoints, amax, num_states, ygrid)[2], # income grid
                    phi = 0.8, # disutility factor
                    frisch = 2.0) # inverse frisch elasticity for labor supply
hh = Household()
# -
iterate_egm(hh, r = 0.005) # making sure everything works
# +
function get_kernel(hh; policyfun)
    """
    get transition kernel for asset-income distribution using Young's method
    #### Fields
    - 'hh': household tuple
    - 'policyfun': savings function, array na x ny
    #### Returns
    - 'kernel': na*ny x na*ny array row-stochastic transition kernel
    """
    @unpack apoints, Amat, num_states, Ymat, transition_matrix = hh
    # Q[i,k,j]: probability mass moved to asset node i given current asset
    # node k and income state j (filled below via Young's lottery).
    Q = zeros(apoints, apoints, num_states)
    # index of the grid node closest to t
    findnearest(A,t) = findmin(abs.(A.-t))[2]
    # For every (asset, income) node, find the two grid neighbours that
    # bracket the policy choice a'(k,j).
    next = similar(Amat)
    previous = similar(Amat)
    for j = 1:num_states
        for k = 1:apoints
            ind = findnearest(Amat[:,j],policyfun[k,j])
            if policyfun[k,j] - Amat[ind,j] > 0
                previous[k,j] = Amat[ind,j]
                if ind == apoints
                    # policy above the top node: pile mass on the boundary
                    next[k,j] = Amat[ind,j]
                else
                    next[k,j] = Amat[ind+1,j]
                end
            elseif policyfun[k,j] == Amat[ind,j]
                # policy sits exactly on a node: both neighbours collapse to it
                previous[k,j] = policyfun[k,j]
                # BUG FIX: was `policyfun[k,]`, which linear-indexes into the
                # first column of the matrix; must use the same (k,j) entry.
                next[k,j] = policyfun[k,j]
            else
                next[k,j] = Amat[ind,j]
                if ind == 1
                    previous[k,j] = Amat[ind,j]
                else
                    previous[k,j] = Amat[ind-1,j]
                end
            end
        end
    end
    # construct transition matrix for assets.
    # Q[i,j,k] is the probability of going to agrid[i] when current assets are agrid[j] and income is ygrid[k];
    # mass is split between the bracketing nodes in proportion to distance.
    for k = 1:apoints
        for j = 1:num_states
            if next[k,j] == previous[k,j]
                Q[Amat[:,1].==previous[k,j],k,j] .= 1
            else
                Q[Amat[:,1].==previous[k,j],k,j] .= (next[k,j]-policyfun[k,j])./(next[k,j]-previous[k,j])
                Q[Amat[:,1].==next[k,j],k,j] .= 1 - (next[k,j]-policyfun[k,j])./(next[k,j]-previous[k,j])
            end
        end
    end
    # combine asset transitions with the income transition_matrix block-wise
    # (Kronecker-style), then transpose to the stated row-stochastic layout
    kernel = zeros(apoints*num_states,apoints*num_states)
    for j=1:num_states
        for i=1:num_states
            kernel[(i-1)*apoints+1:i*apoints,(j-1)*apoints+1:j*apoints]=Q[:,:,j].*transition_matrix[j,i];
        end
    end
    kernel = kernel'
    return kernel
end
# -
function histogram_method(hh; kernel, tol=1e-8, maxiter=10000)
    # Iterate a distribution forward with the Markov transition kernel (from
    # `get_kernel`) until it converges to the invariant distribution.
    # Returns the stationary distribution as a vector of length na*ny.
    @unpack apoints, num_states = hh
    # initial guess: uniform distribution over all (asset, income) nodes
    init = ones(apoints*num_states)
    init = init./sum(init)
    # (removed unused `dist` and `counter` locals from the original)
    for _ in 1:maxiter
        initnext = (init'*kernel)'
        # sup-norm convergence check
        if norm(init-initnext,Inf) < tol
            println("distribution iteration successful!")
            return initnext
        else
            init = initnext
        end
    end
    error("distribution did not converge!")
end
# Given an interest rate r, solve the household problem, compute the
# stationary distribution, and return the implied capital-labor ratio K/L.
function ss(hh; r)
    @unpack beta, Ymat = hh
    @assert r < 1/(beta)-1 "r too large for convergence"
    # converged policy functions: consumption, savings, labor supply
    consumption, savings_policy, labor_policy = iterate_egm(hh; r = r)
    transition = get_kernel(hh; policyfun = savings_policy)
    stationary = histogram_method(hh; kernel = transition)
    # aggregate capital and effective labor under the stationary distribution
    capital = getagg(; policyfun = savings_policy, dist = stationary)
    labor = getagg(; policyfun = labor_policy.*Ymat, dist = stationary)
    return capital/labor
end
# +
# Plot per-capita capital supply (household side) against capital demand
# (firm's first-order condition) over a grid of net interest rates.
function plot_market_clearing(hh)
    @unpack alpha, deprec = hh
    rates = -0.004:0.002:0.009
    # household capital supply at each candidate rate
    supply = [ss(hh; r=rate) for rate in rates]
    plot(rates, supply, label = "capital supply per capita")
    # TFP normalization such that output Y = 1
    Z(r) = ((r + deprec)/alpha)^alpha
    K(r) = ((Z(r)*alpha)/(r+deprec))^(1/(1-alpha))
    plot!(rates,K.(rates), label= "capital demand per capita")
    xlabel!("r-δ")
    ylabel!("capital")
end
# -
plot_market_clearing(hh)
# Damped fixed-point iteration on the interest rate until the household's
# implied capital supply clears the firm's capital demand.
function market_clearing(hh; r = 0.0, tol = 1e-5, maxiter = 20, bisection_param = 0.8)
    @unpack alpha, deprec = hh
    for _ in 1:maxiter
        print("r=$r: ")
        capital_supply = ss(hh; r=r)
        # TFP normalized so that Y = 1
        productivity = ((r + deprec)/alpha)^alpha
        # rate implied by the firm's first-order condition at this capital stock
        implied_rate = productivity*alpha*(1/capital_supply)^(1-alpha) - deprec
        if abs(r-implied_rate) < tol
            return (r+implied_rate)/2
        end
        # damped update: keep a share `bisection_param` of the old guess
        r = (bisection_param)*r + (1-bisection_param)*implied_rate
    end
    error("no convergence: did not find market clearing real rate")
end
@time market_clearing(hh)
| Aiyagari_with_labor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demonstrations of Python's comparison operators; each bare expression
# evaluates to a boolean that the notebook cell displays.
10==20  # equality -> False
10!=10  # inequality -> False
1!=2  # inequality -> True
100>50  # greater than -> True
100<50  # less than -> False
18>=10+10  # greater than or equal (18 >= 20) -> False
15<=10+5  # less than or equal (15 <= 15) -> True
| Comparison Operators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Scratch notebook wiring up the tushare and baostock market-data providers
# plus the project's local sz.stock_data package.
import tushare as ts
import plotly.graph_objects as go
import plotly.io as pio
import plotly.express as px
import pandas as pd
import numpy as np
import baostock as bao
from baostock.data.resultset import ResultData
import baostock.common.contants as bao_consts
from pandas import Timestamp
from sz.stock_data.toolbox.data_provider import ts_code
from datetime import date, datetime, timedelta
# '<KEY>' is a redacted placeholder — a real tushare API token must be
# supplied before ts_api calls will work.
ts.set_token('<KEY>')
ts_api = ts.pro_api()
# Log into baostock and echo the status for debugging.
blg = bao.login()
print('baostock login => error_code: %s error_msg: %s\n' %
      (blg.error_code, blg.error_msg))
from sz.stock_data.stock_data import StockData
# Point the local data cache at an external volume.
StockData().setup(data_dir = '/Volumes/USBDATA/stock_data')
from sz.stock_data.stocks.stock_daily import StockDaily
from sz.stock_data.toolbox.helper import need_update_by_trade_date
# -
# Sanity check: most recent trading day from the cached trade calendar.
StockData().trade_calendar.latest_trade_day()
# $ y=x^{\frac{2}{3}}+0.9\sqrt{3.3-x^{2}}\sin \left( 24\pi x \right) $
StockData
| src/tmp_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''base'': conda)'
# name: python3710jvsc74a57bd0b3ba2566441a7c06988d0923437866b63cedc61552a5af99d1f4fb67d367b25f
# ---
# + _uuid="b3414a6501e086b4ae0c1e5e1d07c6063deb56a7"
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras import backend as K
import os
import numpy as np
# BUG FIX: was `import pandas as np`, which silently rebound the `np` alias
# to pandas and shadowed numpy for the rest of the notebook; pandas
# conventionally aliases to `pd`.
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# + _uuid="f0cc54a0947052f0904611ce913fbb39d503a093"
# Display a sample chest X-ray from the NORMAL training class.
img_name = 'NORMAL2-IM-0588-0001.jpeg'
img_normal = load_img('chest_xray/train/NORMAL/' + img_name)
print('NORMAL')
plt.imshow(img_normal)
plt.show()
# + _uuid="46397323a451df86eb2414bfeecb1e5fbc41a51d"
# Display a sample chest X-ray from the PNEUMONIA training class.
img_name = 'person63_bacteria_306.jpeg'
img_pneumonia = load_img('chest_xray/train/PNEUMONIA/' + img_name)
print('PNEUMONIA')
plt.imshow(img_pneumonia)
plt.show()
# + _uuid="a7b3d31411d932082af8199660bd47b6d6e4d7a4"
# dimensions of our images.
img_width, img_height = 150, 150
# + _uuid="707187c665591bd285e646b99c2fef818de42ea6"
# Dataset locations and training hyperparameters.
train_data_dir = 'chest_xray/train'
validation_data_dir = 'chest_xray/val'
test_data_dir = 'chest_xray/test'
nb_train_samples = 5217  # TODO confirm these counts match the directories on disk
nb_validation_samples = 17
epochs = 11
batch_size = 16
# + _uuid="805bef231f3f9237172fe564ecd6586c8cd656bd"
# Keras backends may order image tensors channels-first or channels-last;
# build the input shape to match the active backend.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
# + [markdown] _uuid="8528318adfc90bcfbf1148b818ccf85dffd2a23e"
# ### Create Sequential model
# + _uuid="46a2e630a0cd7409e3fea068c24da29138d5b1ce"
# CNN: three Conv/ReLU/MaxPool stages, then a dense head with dropout and a
# single sigmoid unit for binary (NORMAL vs PNEUMONIA) classification.
# Identical architecture to the incremental .add() form, expressed as a
# layer list passed to the Sequential constructor.
model = Sequential([
    Conv2D(32, (3, 3), input_shape=input_shape),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(32, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(64),
    Activation('relu'),
    Dropout(0.5),
    Dense(1),
    Activation('sigmoid'),
])
# + [markdown] _uuid="3058007ead9f68d02f4c5e37215973265ff08f85"
# ### Check information about model
# + _uuid="e46ec3381202bb48c4b5b8757ae216d3419fa461"
# Bare expressions below display the model's structure in notebook output.
model.layers
# + _uuid="f8ad81e7e7fbaf8227caed6560c2eb6d0165d8d7"
model.input
# + _uuid="a377c5e20a51d28d97803c261243ae024fa9305b"
model.output
# + [markdown] _uuid="aaaed230c5eccc5d0e902156620d4cf765eea513"
# ### Compile
# + _uuid="b265d352b0452be0932a3f88dad1799aa863bb22"
# Binary classification head -> binary cross-entropy loss with RMSprop.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
# + [markdown] _uuid="4cc666baf4aee1d9dd5b271433492fe49ed7a091"
# ### Upload img
# + _uuid="2554b9e92e24b54bd81b9e7bcdfc1020f1b2d013"
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# + _uuid="2b91d7ce6e027528166d0c3d57a0d27ecf91e202"
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
# + _uuid="e03c9d64258481f6219088322d415148be92d524"
# Batched, augmented image stream from the training directory.
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# + _uuid="a12b0ea2b165084571020837b0ac850b58ad809b"
# Validation and test streams use the rescale-only generator.
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# + _uuid="c949cb908a73cd619e98e2e7681fdedd988e0630"
test_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
# + [markdown] _uuid="7273a0f54ce7c7eb42807b95a356574281bf67f8"
# ### Fit model
# + _uuid="2f308ed21aa10578b5e28426c739f9da842b5de0"
# NOTE(review): fit_generator is deprecated in modern tf.keras; model.fit
# accepts generators directly — consider migrating.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
# +
# plot model performance
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(1, len(history.epoch) + 1)
# Side-by-side accuracy and loss curves for train vs validation sets.
plt.figure(figsize=(15,5))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Train Set')
plt.plot(epochs_range, val_acc, label='Val Set')
plt.legend(loc="best")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Model Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Train Set')
plt.plot(epochs_range, val_loss, label='Val Set')
plt.legend(loc="best")
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Model Loss')
plt.tight_layout()
plt.show()
# + _uuid="5cdacc7c7f29c9de102c8718721e1c2ee2831577"
# Persist the trained model weights + architecture.
model.save('pneumonia_model.h5')
# + _uuid="afcd3112828276f65d79b8bd6ae86c8bc190791b"
# evaluate the model
# NOTE(review): evaluate_generator is deprecated in modern tf.keras; use
# model.evaluate. scores[1] is the second compiled metric (accuracy).
scores = model.evaluate_generator(test_generator)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# -
| Healthcure/pneumonia/keras-nn-x-ray-predict-pneumonia-86-54.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lithosphere]
# language: python
# name: conda-env-lithosphere-py
# ---
# # Bending of the lithosphere under seamounts
#
# **Goal:** Implement the finite-difference solution to the 1D flexure equation and compare its predictions against observations of gravity disturbances and bathymetry.
#
# With the appropriate boundary condition, we can use the 1D flexure equation to model the bending of the oceanic lithosphere under the load of a seamount or island chain. **Our assumption is that the plate is perfectly elastic and structures are infinite perpendicular to the profile.**
#
# <img style="width: 500px; margin: 10px auto;" src="https://github.com/leouieda/lithosphere/raw/be03c3400f4850de815a71b8536874ee2af4308b/lectures/assets/flexure-seamount.svg">
# <p style="text-align: left; width: 500px; max-width: 100%; margin: 30px auto;">
# Figure caption: Sketch of the elastic plate flexure for a load $q(x)$.
# </p>
# ## Governing equations
#
# The equation describing the deflection ($w$) of an elastic plate subject to a vertical load is:
#
# $$ D\dfrac{\partial^4 w}{\partial x^4} = q(x) - g (\rho_m - \rho_w) w $$
#
# in which $x$ is the position along the profile, $D$ is the flexural rigidity of the plate, $\rho_w$ and $\rho_m$ are the water and mantle densities, $g$ is gravity (for these purposes, assume $9.8\ m/s^2$), and $q(x)$ is a force density applied by the load on the plate (the weight of the topography). The term $g (\rho_m - \rho_w) w$ is the restoring hydrostatic force (buoyancy) from replacing mantle material at depth with water on top because of the flexure.
#
# The **flexural rigidity** $D$ can be described in terms of the plate's elastic parameters:
#
# $$ D = \dfrac{E T_e^3}{12(1 - \nu^2)} $$
#
# where $E$ is [Young's modulus](https://en.wikipedia.org/wiki/Young's_modulus), $\nu$ is [Poisson's ratio](https://en.wikipedia.org/wiki/Poisson's_ratio), and $T_e$ is the *effective elastic thickness*.
#
# The **load force per unit area** $q(x)$ is the weight of the load applied on the lithosphere. For a load of height $h$ and density $\rho_c$, the weight per unit area is (negative because the force is applied downwards):
#
# $$ q(x) = -g \rho_c h(x) $$
# ## Boundary conditions
#
# If we assume that the load is applied somewhere in the middle of the plate, we can consider the boundaries as stable lithosphere under no load. In that case, we don't expect to see any deflection at the boundaries. We can also impose that the first derivative of deflection is zero at the boundaries (the plate approaches the boundaries horizontally, not at an angle). This leads to the boundary conditions:
#
# $$
# \begin{align}
# w(x=0) &= 0 \\
# w(x=x_{max}) &= 0 \\
# \dfrac{\partial w}{\partial x}(x=0) &= 0 \\
# \dfrac{\partial w}{\partial x}(x=x_{max}) &= 0
# \end{align}
# $$
# ## The data
#
# **Download** (if you haven't already): [global-geophysical-data.nc](https://github.com/leouieda/lithosphere/raw/main/data/global-geophysical-data.nc) (place it in the **same folder** as this notebook)
#
# The data grids are stored in a [netCDF](https://en.wikipedia.org/wiki/NetCDF) file, which is a very common standard for storing and distributing gridded data. It's used throughout the Earth, ocean, and climate sciences and can be read and manipulated with most software platforms. It contains global grids of gravity, topography, and heat flow. All grids were interpolated onto the same resolution of 0.2 degrees (a compromise between resolution and size of the file) so that we can easily compare and use all of the data together.
#
# * **Gravity**: generated from the [EIGEN-6C4 spherical harmonic model](https://doi.org/10.5880/icgem.2015.1) through the [ICGEM webservice](http://icgem.gfz-potsdam.de/home). It includes two grids: the gravity disturbance calculated with the WGS84 ellipsoid and the Bouguer gravity disturbance calculated using a topography density of 2670 kg/m³. The data for both grids was generated on top of the Earth's surface (the water surface in the oceans and topography on land).
# * **Topography and bathymetry**: a downsampled version of [ETOPO1](https://doi.org/10.7289/V5C8276M) and includes a version smoothed using a Gaussian filter with 1 degree width (~100 km).
# * **Heat flow**: an upsampled version of the compilation by [Lucazeau (2019)](https://doi.org/10.1029/2019GC008389) (originally 0.5 degree resolution).
# * **Lithosphere age (oceanic)**: a downsampled version of the grid by [Müller et al. (2008)](https://doi.org/10.1029/2007GC001743) (originally 6 arc-minutes).
# ## Import the required libraries
#
# Load the required software to load the data, make maps, and perform calculations.
# For arrays and linear algebra
import numpy as np
# To make figures and maps
import matplotlib.pyplot as plt
# To load and manipulate grids
import xarray as xr
# Import our own module (lithosphere.py)
import lithosphere
# ## Load the data grids
#
# Once again, we'll use xarray to load the data from the netCDF file.
# ## Extracting a profile
#
# Once again, we'll extract a profile from our data grids. This time, we will use the function that we placed in `lithosphere.py` so we don't have to copy the code into this notebook.
#
# First, let's slice a region in the North Atlantic where there are some nice seamounts.
# Now extract a profile that cuts across the seamount chain.
# Plot the location of the profile.
# And the profile topography and gravity disturbance data.
# We will need the height of the seamount load above the background bathymetry for our modeling. So let's see if we can calculate that.
# ## Solving the flexure equation by finite-differences
#
# The finite-difference solution to the flexure equation is the solution (the $w_i$ terms) to the equation system:
#
# $$
# \begin{align}
# D w_{4} - 4 D w_{3} + \left[6D + \Delta x^4 (\rho_m - \rho_w) g\right] w_2 - 4 D w_{1} + D w_{0} &= \Delta x^4 q_2 \\
# D w_{5} - 4 D w_{4} + \left[6D + \Delta x^4 (\rho_m - \rho_w) g\right] w_3 - 4 D w_{2} + D w_{1} &= \Delta x^4 q_3 \\
# \vdots & \\
# D w_{N-1} - 4 D w_{N-2} + \left[6D + \Delta x^4 (\rho_m - \rho_w) g\right] w_{N-3} - 4 D w_{N-4} + D w_{N-5} &= \Delta x^4 q_{N-3} \\
# w_0 &= 0 \\
# w_{N-1} &= 0 \\
# w_1 - w_0 &= 0 \\
# w_{N-1} - w_{N-2} &= 0
# \end{align}
# $$
#
# Here, the continuous $w$ and $q$ have been discretized into $N$ values with a spacing of $\Delta x$ between them.
#
# Another way of writing this equation system is using a matrix notation:
#
# $$
# \underbrace{
# \begin{bmatrix}
# D & -4D & \left[6D + \Delta x^4 (\rho_m - \rho_w) g\right] & -4D & D & 0 & 0 & \ldots & 0 & 0 \\
# 0 & D & -4D & \left[6D + \Delta x^4 (\rho_m - \rho_w) g\right] & -4D & D & 0 & \ldots & 0 & 0 \\
# \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
# 1 & 0 & 0 & 0 & 0 & 0 & 0 & \ldots & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & \ldots & 0 & 1 \\
# -1 & 1 & 0 & 0 & 0 & 0 & 0 & \ldots & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & \ldots & -1 & 1
# \end{bmatrix}
# }_{\bar{\bar{A}}}
# \underbrace{
# \begin{bmatrix}
# w_0 \\
# w_1 \\
# w_2 \\
# w_3 \\
# w_4 \\
# w_5 \\
# w_6 \\
# \vdots \\
# w_{N-1}
# \end{bmatrix}
# }_{\bar{w}}
# =
# \underbrace{
# \begin{bmatrix}
# \Delta x^4 q_2 \\
# \Delta x^4 q_3 \\
# \vdots \\
# 0\\
# 0\\
# 0\\
# 0
# \end{bmatrix}
# }_{\bar{b}}
# $$
#
# $$
# \bar{\bar{A}} \bar{w} = \bar{b}
# $$
#
# We'll need to figure out a way to build the $\bar{\bar{A}}$ matrix and $\bar{b}$ vector. Once we have those, we can the deflection $\bar{w}$ using `numpy.linalg.solve`.
#
# Let's write a function that builds the matrix and vector and then solves the system.
# We'll assume that the flexural parameters of the lithosphere are:
#
# * Poisson's ratio: $\nu = 0.25$
# * Young's modulus: $E = 70 \times 10^9\ \frac{kg}{s^2 m}$
#
# We'll vary the effective elastic thickness $T_e$ as needed but it will probably be around 20-40 km.
# With these, we can calculate the flexural rigidity $D$.
# Now we can solve for $w$.
# ## Predicting gravity
#
# We can also predict the observed gravity disturbances from the calculated deflection. To do so, we derive the density anomalies caused by the downward deflection of the oceanic crust. The figure below illustrate a conceptual model of the density anomalies causing the observed data.
#
# <img style="width: 500px; margin: 10px auto;" src="https://github.com/leouieda/lithosphere/raw/be03c3400f4850de815a71b8536874ee2af4308b/lectures/assets/disturbance-flexure-model.svg">
# <p style="text-align: left; width: 500px; max-width: 100%; margin: 30px auto;">
# Figure caption: Model of the density anomalies causing the observed gravity disturbance due to the flexure and the presence of the seamount.
# </p>
#
# The gravitational effect of these density anomalies can be estimated using a **Bouguer plate approximation** by carefully assigning the correct density contrasts to each part of the model:
#
# $$ g \approx 2 \pi G \Delta\rho |w| $$
#
# To make things easier, we can split the effect into 3 parts which can be added together to produce the final result:
#
# 1. The seamount
# 2. The deflection at the surface
# 3. The deflection at the Moho
#
# Let's write a function that calculates the predicted gravity for our seamount flexure.
# Now we can predict the gravity disturbance and plot it against our observed data.
# The fit is not great. Our predictions seem to be a bit too small. There are 2 effects that we neglected in our gravity calculations above:
#
# 1. The effect of the surface deflection should not be included underneath the seamount
# 2. The downwards flexure at the surface is not filled with water (look at the bathymetry data). It's actually been packed with sediments.
#
# Let's make a new function that takes these 2 effects into account.
# Recalculate the predicted gravity and plot it along side the data and our previous estimate.
# ---
#
# ## **Your turn!**
#
# Try out the new modelling tools to investigate a group of seamounts in the Southwest Pacific ocean. The age of the lithosphere is much larger than the example from the Atlantic we just saw (which was next to the mid-ocean ridge). So we expect the lithosphere to be thicker, colder, and denser.
#
# In groups:
#
# 1. Extract a profile cutting across one (or more) seamounts
# 1. Plot the observed gravity and bathymetry along the profile
# 1. Calculate the deflection caused by the load of the seamount(s)
# 1. Use the deflection to predict the gravity disturbance and compare against observations (you may have to adjust the values of density and effective elastic thickness)
# 1. Discuss:
# 1. What we can learn from these models about seamounts and elastic properties of the oceanic lithosphere
# 1. The main limitations of the 1D modelling approach and the approximations we used for the gravity calculations
#
# Share with the class:
#
# 1. The plots of your profile data, location, and model predictions
# 1. The key discussion points
# NOTE(review): `data` is expected to be the xarray Dataset loaded from
# global-geophysical-data.nc in the (currently empty) "Load the data grids"
# section above — it is not defined anywhere in this notebook as written,
# so this cell fails until that loading cell is filled in.
# Slice the Southwest Pacific study region and plot its topography grid.
pacific = data.sel(longitude=slice(150, 180), latitude=slice(5, 20))
pacific.topography.plot(figsize=(14, 8))
| practicals/practical4-empty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# 
# # Automated Machine Learning
# **Continuous retraining using Pipelines and Time-Series TabularDataset**
# ## Contents
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Compute](#Compute)
# 4. [Run Configuration](#Run-Configuration)
# 5. [Data Ingestion Pipeline](#Data-Ingestion-Pipeline)
# 6. [Training Pipeline](#Training-Pipeline)
# 7. [Publish Retraining Pipeline and Schedule](#Publish-Retraining-Pipeline-and-Schedule)
# 8. [Test Retraining](#Test-Retraining)
# ## Introduction
# In this example we use AutoML and Pipelines to enable continuous retraining of a model based on updates to the training dataset. We will create two pipelines, the first one to demonstrate a training dataset that gets updated over time. We leverage time-series capabilities of `TabularDataset` to achieve this. The second pipeline utilizes pipeline `Schedule` to trigger continuous retraining.
# Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.
# In this notebook you will learn how to:
# * Create an Experiment in an existing Workspace.
# * Configure AutoML using AutoMLConfig.
# * Create data ingestion pipeline to update a time-series based TabularDataset
# * Create training pipeline to prepare data, run AutoML, register the model and setup pipeline triggers.
#
# ## Setup
# As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# +
import logging
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
# -
# This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
# Print the authoring SDK version next to the installed one so mismatches are visible.
print("This notebook was created using version 1.30.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# Accessing the Azure ML workspace requires authentication with Azure.
#
# The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.
#
# If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:
# ```
# from azureml.core.authentication import InteractiveLoginAuthentication
# auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')
# ws = Workspace.from_config(auth = auth)
# ```
# If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:
# ```
# from azureml.core.authentication import ServicePrincipalAuthentication
# auth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')
# ws = Workspace.from_config(auth = auth)
# ```
# For more details, see aka.ms/aml-notebook-auth
# +
# Connect to the workspace (may prompt for interactive Azure login) and grab
# the default blob datastore used by the pipelines below.
ws = Workspace.from_config()
dstor = ws.get_default_datastore()
# Choose a name for the run history container in the workspace.
experiment_name = 'retrain-noaaweather'
experiment = Experiment(ws, experiment_name)
# Summarize workspace/experiment details as a one-column DataFrame.
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
# BUG FIX: passing -1 to 'display.max_colwidth' was deprecated in pandas 1.0
# and raises ValueError in current releases; None is the documented value for
# "no truncation".
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# -
# ## Compute
#
# #### Create or Attach existing AmlCompute
#
# You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
#
# > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
#
# #### Creation of AmlCompute takes approximately 5 minutes.
# If the AmlCompute with that name is already in your workspace this code will skip the creation process.
# As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
amlcompute_cluster_name = "cont-cluster"
# Verify that cluster does not exist already
try:
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # No cluster with this name yet: provision a small AmlCompute cluster
    # (up to 4 STANDARD_D2_V2 nodes) and block until provisioning completes.
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                           max_nodes=4)
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
    compute_target.wait_for_completion(show_output=True)
# -
# ## Run Configuration
# +
from azureml.core.runconfig import CondaDependencies, RunConfiguration
# create a new RunConfig object
conda_run_config = RunConfiguration(framework="python")
# Set compute target to AmlCompute
conda_run_config.target = compute_target
conda_run_config.environment.docker.enabled = True
# Python dependencies for the pipeline steps: AutoML SDK + open datasets,
# with numpy pinned and the SDK version left unpinned.
cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'applicationinsights', 'azureml-opendatasets', 'azureml-defaults'],
                              conda_packages=['numpy==1.16.2'],
                              pin_sdk_version=False)
conda_run_config.environment.python.conda_dependencies = cd
print('run config is ready')
# -
# ## Data Ingestion Pipeline
# For this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.
#
# The name and target column of the Dataset to create
dataset = "NOAA-Weather-DS4"
target_column_name = "temperature"
#
# ### Upload Data Step
# The data ingestion pipeline has a single step with a script to query the latest weather data and upload it to the blob store. During the first run, the script will create and register a time-series based `TabularDataset` with the past one week of weather data. For each subsequent run, the script will create a partition in the blob store by querying NOAA for new weather data since the last modified time of the dataset (`dataset.data_changed_time`) and creating a data.csv file.
# +
from azureml.pipeline.core import Pipeline, PipelineParameter
from azureml.pipeline.steps import PythonScriptStep
# Dataset name is a pipeline parameter so the published pipeline can be
# re-run against a different dataset without editing the step.
ds_name = PipelineParameter(name="ds_name", default_value=dataset)
# allow_reuse=False forces the upload script to run on every submission.
upload_data_step = PythonScriptStep(script_name="upload_weather_data.py",
                                    allow_reuse=False,
                                    name="upload_weather_data",
                                    arguments=["--ds_name", ds_name],
                                    compute_target=compute_target,
                                    runconfig=conda_run_config)
# -
# ### Submit Pipeline Run
# Single-step ingestion pipeline; block (quietly) until it finishes.
data_pipeline = Pipeline(
    description="pipeline_with_uploaddata",
    workspace=ws,
    steps=[upload_data_step])
data_pipeline_run = experiment.submit(data_pipeline, pipeline_parameters={"ds_name":dataset})
data_pipeline_run.wait_for_completion(show_output=False)
# ## Training Pipeline
# ### Prepare Training Data Step
#
# Script to check if new data is available since the model was last trained. If no new data is available, we cancel the remaining pipeline steps. We need to set allow_reuse flag to False to allow the pipeline to run even when inputs don't change. We also need the name of the model to check the time the model was last trained.
# +
from azureml.pipeline.core import PipelineData
# The model name with which to register the trained model in the workspace.
model_name = PipelineParameter("model_name", default_value="noaaweatherds")
# -
# check_data.py cancels the remaining pipeline steps when no new data has
# arrived since the model was last trained; allow_reuse=False so it runs
# even when its inputs are unchanged.
data_prep_step = PythonScriptStep(script_name="check_data.py",
                                  allow_reuse=False,
                                  name="check_data",
                                  arguments=["--ds_name", ds_name,
                                             "--model_name", model_name],
                                  compute_target=compute_target,
                                  runconfig=conda_run_config)
from azureml.core import Dataset
# Training data: the registered time-series TabularDataset, minus the
# partition bookkeeping column.
train_ds = Dataset.get_by_name(ws, dataset)
train_ds = train_ds.drop_columns(["partition_date"])
# ### AutoMLStep
# Create an AutoMLConfig and a training step.
# +
from azureml.train.automl import AutoMLConfig
from azureml.pipeline.steps import AutoMLStep
# AutoML regression settings: short per-iteration and overall timeouts,
# 3-fold CV, R^2 as the selection metric, early stopping enabled.
automl_settings = {
    "iteration_timeout_minutes": 10,
    "experiment_timeout_hours": 0.25,
    "n_cross_validations": 3,
    "primary_metric": 'r2_score',
    "max_concurrent_iterations": 3,
    "max_cores_per_iteration": -1,
    "verbosity": logging.INFO,
    "enable_early_stopping": True
}
automl_config = AutoMLConfig(task = 'regression',
                             debug_log = 'automl_errors.log',
                             path = ".",
                             compute_target=compute_target,
                             training_data = train_ds,
                             label_column_name = target_column_name,
                             **automl_settings
                            )
# +
from azureml.pipeline.core import PipelineData, TrainingOutput
# Named pipeline outputs for the AutoML step: run metrics and the best model.
metrics_output_name = 'metrics_output'
best_model_output_name = 'best_model_output'
metrics_data = PipelineData(name='metrics_data',
                           datastore=dstor,
                           pipeline_output_name=metrics_output_name,
                           training_output=TrainingOutput(type='Metrics'))
model_data = PipelineData(name='model_data',
                          datastore=dstor,
                          pipeline_output_name=best_model_output_name,
                          training_output=TrainingOutput(type='Model'))
# -
# allow_reuse=False so retraining actually runs on each triggered submission.
automl_step = AutoMLStep(
    name='automl_module',
    automl_config=automl_config,
    outputs=[metrics_data, model_data],
    allow_reuse=False)
# ### Register Model Step
# Script to register the model to the workspace.
# register_model.py registers the best model (passed in via `model_data`)
# in the workspace under `model_name`.
register_model_step = PythonScriptStep(script_name="register_model.py",
                                       name="register_model",
                                       allow_reuse=False,
                                       arguments=["--model_name", model_name, "--model_path", model_data, "--ds_name", ds_name],
                                       inputs=[model_data],
                                       compute_target=compute_target,
                                       runconfig=conda_run_config)
# ### Submit Pipeline Run
# Three-step retraining pipeline: check for new data, train with AutoML,
# register the resulting model.
training_pipeline = Pipeline(
    description="training_pipeline",
    workspace=ws,
    steps=[data_prep_step, automl_step, register_model_step])
training_pipeline_run = experiment.submit(training_pipeline, pipeline_parameters={
    "ds_name": dataset, "model_name": "noaaweatherds"})
training_pipeline_run.wait_for_completion(show_output=False)
# ### Publish Retraining Pipeline and Schedule
# Once we are happy with the pipeline, we can publish the training pipeline to the workspace and create a schedule to trigger on blob change. The schedule polls the blob store where the data is being uploaded and runs the retraining pipeline if there is a data change. A new version of the model will be registered to the workspace once the run is complete.
# +
pipeline_name = "Retraining-Pipeline-NOAAWeather"
# Publish the training pipeline so a schedule can trigger it.
published_pipeline = training_pipeline.publish(
    name=pipeline_name,
    description="Pipeline that retrains AutoML model")
published_pipeline
# -
from azureml.pipeline.core import Schedule
# Poll the datastore and run the retraining pipeline when the blob store
# contents change. polling_interval=1440 — presumably minutes (i.e. daily);
# confirm against the Schedule.create API docs.
schedule = Schedule.create(workspace=ws, name="RetrainingSchedule",
                           pipeline_parameters={"ds_name": dataset, "model_name": "noaaweatherds"},
                           pipeline_id=published_pipeline.id,
                           experiment_name=experiment_name,
                           datastore=dstor,
                           wait_for_provisioning=True,
                           polling_interval=1440)
# ## Test Retraining
# Here we setup the data ingestion pipeline to run on a schedule, to verify that the retraining pipeline runs as expected.
#
# Note:
# * Azure NOAA Weather data is updated daily and retraining will not trigger if there is no new data available.
# * Depending on the polling interval set in the schedule, the retraining may take some time to trigger after the data ingestion pipeline completes.
# +
pipeline_name = "DataIngestion-Pipeline-NOAAWeather"
# BUG FIX: this cell publishes the *data ingestion* pipeline (per the name,
# the description, and the markdown above: "Here we setup the data
# ingestion pipeline to run on a schedule"), but the original code
# published `training_pipeline` a second time. Publish `data_pipeline`,
# built earlier from `upload_data_step`.
published_pipeline = data_pipeline.publish(
    name=pipeline_name,
    description="Pipeline that updates NOAAWeather Dataset")
published_pipeline
# -
from azureml.pipeline.core import Schedule
# Schedule the ingestion pipeline against the same datastore; when it
# uploads new data, the retraining schedule above will in turn fire.
schedule = Schedule.create(workspace=ws, name="RetrainingSchedule-DataIngestion",
                           pipeline_parameters={"ds_name":dataset},
                           pipeline_id=published_pipeline.id,
                           experiment_name=experiment_name,
                           datastore=dstor,
                           wait_for_provisioning=True,
                           polling_interval=1440)
| how-to-use-azureml/automated-machine-learning/continuous-retraining/auto-ml-continuous-retraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Unit 1 Lecture Notes
# Feel free to follow along at TBD
# + [markdown] deletable=true editable=true
#
# ### Problem 1: Math!
#
# Write a program that asks for two integers and spits out the following computations:
# - Addition (+)
# - Subtraction (-)
# - Multiplication (*)
# - Division (/)
#
# Here are two examples of the program running. User input is shown in **bold**:
#
# First example
#
# >Please enter two integers.
# >
# >First integer: **3**
# >
# >Second integer: **7**
# >
# >3 + 7 = 10
# >
# >3-7 = -4
# >
# >3 * 7 = 21
# >
# >3 / 7 = 0.42857142857142855
#
# Second Example
#
# >Please enter two integers.
# >
# >First integer: **12**
# >
# >Second integer: **18**
# >
# >12 + 18 = 30
# >
# >12-18 = -6
# >
# >12 * 18 = 216
# >
# >12 / 18 = 0.6666666666666666
# + [markdown] deletable=true editable=true
# ### Problem 2: Digital Mad Lib
#
# Create a madlib generator that follows the following format
#
# > Alas, poor PROPER NOUN! I knew him, Horatio: a fellow
# > of ADJECTIVE NOUN, of most excellent fancy: he hath
# > VERB me on his back a thousand times; and now, how
# > abhorred in my imagination it is!
#
# Your program will ask for the following
# - Noun
# - Adjective
# - Proper Noun
# - Verb
#
# -
# ### Problem 3 Tip Calculator
# Given a decimal number input representing a restaurant bill, tips.py should report tip amounts for 10%, 15%, and 20%, along with the totals. Note: you are not required to format decimals to round to the nearest penny. We haven’t learned how to do that yet.
#
# >How much was the bill? **10.00**
# >
# >
# >10% tip: \$1.0, Total: \$11.0
# >
# >15% tip: \$1.5, Total: \$11.5
# >
# >20% tip: \$2.0, Total: \$12.0
#
# ### Problem 4: Wooooooooooo!
# *Note* You need to know loops to do this one
#
# > How many wooos? **4**
# >
# > woo!
# >
# > wooo!!
# >
# > woooo!!!
# >
# > wooooo!!!!
#
# Your program should have the following features:
#
# - The user selects the number of lines to print. You can assume the user enters a positive integer.
#
# - Each line should consist of a "woo" followed by a number of exclamation points.
#
# - The first line should contain one exclamation point; the second line two exclamation points, and so on.
#
# - Each line should contain one more o than !.
# ### Problem 5: The Square
#
# The program should then
#
# - print out the side length for the square,
#
# - print out the perimeter of the square (the perimiter of a square is the sum of the side lengths), and
#
# - use @ symbols to print out a text square. The length of the text square should be the largest integer less than or equal to the actual side length of the square.
#
# > Enter the area of your square: **36**
# >
# > Your square has side length: 6.0.
# >
# > Your square has perimeter: 24.0.
# >
#
# > @@@@@@
# >
# > @@@@@@
# >
# > @@@@@@
# >
# > @@@@@@
# >
# > @@@@@@
# >
# > @@@@@@
#
| Unit_1/u1_activities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Astro
# language: python
# name: astro
# ---
# +
# %load_ext autoreload
# %autoreload 2
import numpy as np
import torch
import sys
sys.path.append("../src")
from plasticc_dataset_torch import get_plasticc_datasets
path_to_plasticc = "/home/shared/astro/PLAsTiCC/" # CHANGE ME
# -
# Build the dataset twice — lazy (default) and eager (lazy_loading=False) —
# then sanity-check that item 100's data agrees between the two.
torch_dataset_lazy = get_plasticc_datasets(path_to_plasticc)
torch_dataset_eager = get_plasticc_datasets(path_to_plasticc, lazy_loading=False)
np.allclose(torch_dataset_lazy.__getitem__(100)[0], torch_dataset_eager.__getitem__(100)[0])
# +
# %%timeit -r10
# Time fetching a single 256-item batch from the lazy dataset.
train_loader = torch.utils.data.DataLoader(torch_dataset_lazy, batch_size=256, shuffle=True, num_workers=0)
for lc_data, lc_labels, _ in train_loader:
    break
# +
# %%timeit -r10
# Time fetching a single 256-item batch from the eager dataset.
train_loader = torch.utils.data.DataLoader(torch_dataset_eager, batch_size=256, shuffle=True, num_workers=0)
for lc_data, lc_labels, _ in train_loader:
    break
# -
| notebooks/dataset_eager_vs_lazy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
df = pd.read_csv('dimensions-copy.csv')
df
# Each dimension appears in two columns (e.g. 'pdi' and 'pdi-x'); per row,
# keep the one whose value is numeric. Rows are built as a list of Series
# and then transposed back into a column-per-dimension frame.
# NOTE(review): str.isnumeric() is False for negative/decimal strings —
# confirm the raw columns only hold non-negative integers or placeholders.
condensed = pd.DataFrame([
    df['ctr'],
    df['country'],
    df['continent'],
    df['population'],
    np.where(df['pdi'].str.isnumeric(), df['pdi'], df['pdi-x']),
    np.where(df['idv'].str.isnumeric(), df['idv'], df['idv-x']),
    np.where(df['mas'].str.isnumeric(), df['mas'], df['mas-x']),
    np.where(df['uai'].str.isnumeric(), df['uai'], df['uai-x']),
    np.where(df['ltowvs'].str.isnumeric(), df['ltowvs'], df['lto-x']),
    np.where(df['ivr'].str.isnumeric(), df['ivr'], df['ivr-x'])]).T
condensed.columns = ['Country Code', 'Country Name', 'Continent', 'Population',
                     'Power Distance', 'Individualism', 'Masculinity', 'Uncertainty Avoidance',
                     'Long Term Orientation', 'Indulgence']
condensed
condensed.to_json('data.json', orient='records')
# +
# Earlier rename-in-place approach, kept commented for reference:
# df = df.replace('#NULL!', np.NaN)
# df = df.rename(columns={'ctr': 'Country Code', 'country': 'Country',
#                         'pdi': 'Power Distance',
#                         'idv': 'Individualism',
#                         'mas': 'Masculinity',
#                         'uai': 'Uncertainty Avoidance',
#                         'ltowvs': 'Long Term Orientation',
#                         'ivr': 'Indulgence'})
# # df = df.set_index('Country Code')
# df.to_json(orient='records')
# df.to_json('data.json', orient='records')
# -
# Round-trip the JSON to normalise its formatting.
df = pd.read_json('./data.json')
df.to_json('data_new.json', orient='records')
# Re-key the old dimension data by country code ('records' -> 'index').
df = pd.read_json('./dimension-data-old.json')
df.set_index('Country Code').to_json('dimension-data-new.json', orient='index')
| data-parsing/convert-table-format.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Processor temperature
#
# We have a temperature sensor in the processor of our company's server. We want to analyze the data provided to determinate whether we should change the cooling system for a better one. It is expensive and as a data analyst we cannot make decisions without a basis.
#
# We provide the temperatures measured throughout the 24 hours of a day in a list-type data structure composed of 24 integers:
# ```
# temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,69,80,83,68,79,61,53,50,49,53,48,45,39]
# ```
#
# ## Goals
#
# 1. Treatment of lists
# 2. Use of a loop or list comprehension
# 3. Calculation of the mean, minimum and maximum.
# 4. Filtering of lists.
# 5. Interpolate an outlier.
# 6. Logical operators.
# 7. Print
# ## Temperature graph
# To facilitate understanding, the temperature graph is shown below. You do not have to do anything in this section. The test starts in **Problem**.
# +
# import
import matplotlib.pyplot as plt
# %matplotlib inline
# axis x, axis y
# NOTE(review): this illustrative series differs from the temperatures_C
# list used in the exercise below (e.g. indices 11 and 14); it is only for
# the intro figure.
y = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39]
x = list(range(len(y)))
# plot the day's readings with a 70 ºC reference line
plt.plot(x, y)
plt.axhline(y=70, linewidth=1, color='r')
plt.xlabel('hours')
plt.ylabel('Temperature ºC')
plt.title('Temperatures of our server throughout the day')
# -
# ## Problem
#
# If the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature above 80ºC or the average exceeds 65ºC throughout the day, we must give the order to change the cooling system to avoid damaging the processor.
#
# We will guide you step by step so you can make the decision by calculating some intermediate steps:
#
# 1. Minimum temperature
# 2. Maximum temperature
# 3. Temperatures equal to or greater than 70ºC
# 4. Average temperatures throughout the day.
# 5. If there was a sensor failure at 03:00 and we did not capture the data, how would you estimate the value that we lack? Correct that value in the list of temperatures.
# 6. Bonus: Our maintenance staff is from the United States and does not understand the international metric system. Pass temperatures to Degrees Fahrenheit.
#
# Formula: F = 1.8 * C + 32
#
# web: https://en.wikipedia.org/wiki/Conversion_of_units_of_temperature
#
# +
temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,69,80,83,68,79,61,53,50,49,53,48,45,39]
# The 03:00 sensor reading (index 3) failed and recorded 0; replace it with
# the average of its two neighbours, leaving every other reading untouched.
new_temperatures_C = list(temperatures_C)
new_temperatures_C[3] = (temperatures_C[2] + temperatures_C[4]) / 2
print("The estimate temperature at 3:00 is ", new_temperatures_C[3], "ºC")
print("New temperatures list:", new_temperatures_C)
# Extremes of the corrected series.
print("Minimum temperature:", min(new_temperatures_C), "ºC.")
print("Maximum temperature:", max(new_temperatures_C), "ºC.")
# Daily average of the corrected series.
mean_temperature = sum(new_temperatures_C) / len(new_temperatures_C)
print("Mean temperature: ", mean_temperature, "ºC.")
# Hours (indices) and values at or above 70 ºC.
greater_than_70 = [hour for hour, temp in enumerate(new_temperatures_C) if temp >= 70]
temp_greater_than_70 = [new_temperatures_C[hour] for hour in greater_than_70]
print("The hours when the temperature was greater than 70ºC:", greater_than_70)
print("The temperatures above 70ºC: ", temp_greater_than_70)
print("Greater than 70ºC occurrences:", len(greater_than_70))
# Hours (indices) at or above 80 ºC.
greater_than_80 = [hour for hour, temp in enumerate(new_temperatures_C) if temp >= 80]
print("Greater than 80º occurrences:", len(greater_than_80))
# +
# Convert the corrected Celsius readings for the US maintenance staff,
# using F = 1.8 * C + 32.
new_temperatures_F = [1.8 * celsius + 32 for celsius in new_temperatures_C]
print("Temperatures list in Farenheit: ", new_temperatures_F)
# -
# ## Take the decision
# Remember that if the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature higher than 80ºC or the average was higher than 65ºC throughout the day, we must give the order to change the cooling system to avoid the danger of damaging the equipment:
# * more than 4 hours with temperatures greater than or equal to 70ºC
# * some temperature higher than 80ºC
# * average was higher than 65ºC throughout the day
# If any of these three is met, the cooling system must be changed.
#
# Count how many of the three replacement criteria are met.
change_needed = 0
# +
# NOTE(review): the problem statement says "more than 4 hours" and
# "temperature higher than 80ºC", while the code tests >= 4 occurrences and
# uses greater_than_80 built with >= 80 — confirm the intended boundaries.
if change_needed >= 1 or True:  # placeholder-free original logic below
    pass
if len(greater_than_70) >= 4:
    change_needed += 1
    # NOTE(review): message wording is off — this branch counts hours at or
    # above 70 ºC, not the average temperature.
    print("The average temperature is above 70ºC for", len(greater_than_70), " hours, what is above the maximum allowed.")
if len(greater_than_80) >= 1:
    change_needed += 1
    print("The temperature was above 80ºC.")
if mean_temperature > 65:
    change_needed += 1
    print("The average temperature is above 65ºC")
# Any single criterion is enough to require the change.
if change_needed >= 1:
    print("The cooling system must be changed!")
else:
    print("No change needed now.")
# -
# ## Future improvements
# 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC
# 2. Check that more than 4 of those hours are consecutive, not simply the total count across the whole day. Is this condition met?
# 3. Average of each of the lists (ºC and ºF). How they relate?
# 4. Standard deviation of each of the lists. How they relate?
#
print("The hours when the temperature was greater than 70ºC:", greater_than_70)
# Scan for a run of >= 4 consecutive hourly readings at or above 70 ºC;
# the counter resets whenever a reading drops below 70 ºC.
# NOTE(review): the message prints once per hour from the fourth of a run
# onward, so a 5-hour run would print it twice.
consecutive_hours_greater_than_70 = 0
for i in range(0,len(new_temperatures_C)):
    if new_temperatures_C[i] >=70:
        consecutive_hours_greater_than_70 +=1
        if consecutive_hours_greater_than_70 >= 4:
            print("The temperature exceeds 70ºC for 4 hours consecutive or more.")
    else:
        consecutive_hours_greater_than_70 = 0
# +
# Mean in Fahrenheit two ways: convert the Celsius mean, and average the
# converted list — linearity of F = 1.8*C + 32 makes them identical.
mean_temperature_F = 1.8 * mean_temperature + 32
mean_temperature_F_2 = sum(new_temperatures_F)/len(new_temperatures_F)
print("The mean temperature in Farenheit is ", mean_temperature_F, "ºF, which is exactly the same mean in Celsius, but converted.")
# +
# 4. Standard deviation of each of the lists. How they relate?
| temperature/temperature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seismic acquisition fiddling
# The idea is to replicate what we've done so far but with 3 enhancements:
#
# - With a Survey object to hold the various features of a survey.
# - With more GeoPandas stuff, and less fussing with (x,y)'s directly.
# - Making bins and assigning midpoints to them.
# We'll start with the usual prelims...
# +
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point, LineString
import geopandas as gpd
import pandas as pd
from fiona.crs import from_epsg
# %matplotlib inline
# -
# ## Survey object
class Survey:
    """
    A seismic survey laid out on a regular rectangular grid.

    `params` is a dict providing:
      corner        : (x, y) of the survey origin
      size          : (x, y) extent of the survey
      line_spacing  : (source-line, receiver-line) spacing
      point_spacing : (source-point, receiver-point) spacing
      epsg          : integer EPSG code of the coordinate reference system
    """
    def __init__(self, params):
        # Assign the variables from the parameter dict,
        # using dict.items() for Python 3 compatibility.
        for k, v in params.items():
            setattr(self, k, v)
        # These are just a convenience; we could use the
        # tuples directly, or make objects with attrs.
        self.xmi = self.corner[0]
        self.ymi = self.corner[1]
        self.x = self.size[0]
        self.y = self.size[1]
        self.SL = self.line_spacing[0]
        self.RL = self.line_spacing[1]
        self.si = self.point_spacing[0]
        self.ri = self.point_spacing[1]
        # Half-interval jogs used to offset the receiver grid from the
        # source grid so the two station sets do not coincide.
        self.shiftx = -self.si/2.
        self.shifty = -self.ri/2.
    @property
    def lines(self):
        """
        Returns number of (src, rcvr) lines.
        """
        slines = int(self.x/self.SL) + 1
        rlines = int(self.y/self.RL) + 1
        return slines, rlines
    @property
    def points_per_line(self):
        """
        Returns number of (src, rcvr) points per line.
        """
        spoints = int(self.y/self.si) + 2
        rpoints = int(self.x/self.ri) + 2
        return spoints, rpoints
    @property
    def src(self):
        """
        Source points as a GeoSeries in the survey's CRS.
        """
        s = [Point(self.xmi+line*self.SL, self.ymi+s*self.si)
             for line in range(self.lines[0])
             for s in range(self.points_per_line[0])
             ]
        S = gpd.GeoSeries(s)
        # BUG FIX: this was hard-coded as from_epsg(26911), silently
        # ignoring the survey's `epsg` parameter; `rcvr` and `layout`
        # already use self.epsg, so use it here too for consistency.
        S.crs = from_epsg(self.epsg)
        return S
    @property
    def rcvr(self):
        """
        Receiver points as a GeoSeries in the survey's CRS.
        """
        r = [Point(self.xmi + r*self.ri + self.shiftx, self.ymi + line*self.RL - self.shifty)
             for line in range(self.lines[1])
             for r in range(self.points_per_line[1])
             ]
        R = gpd.GeoSeries(r)
        R.crs = from_epsg(self.epsg)
        return R
    @property
    def layout(self):
        """
        Provide a GeoDataFrame of all points,
        labelled as columns and in hierarchical index.
        """
        # Feels like there might be a better way to do this...
        sgdf = gpd.GeoDataFrame({'geometry': self.src, 'station': 'src'})
        rgdf = gpd.GeoDataFrame({'geometry': self.rcvr, 'station': 'rcvr'})
        # Concatenate with a hierarchical index
        layout = pd.concat([sgdf,rgdf], keys=['sources','receivers'])
        layout.crs = from_epsg(self.epsg)
        return layout
# Perhaps s and r should be objects too. I think you might want to have survey.receivers.x for the list of x locations, for example.
# ## Instantiate and plot
# +
# Survey geometry: 3 km x 1.8 km, 600 m line spacing, 100 m point spacing,
# coordinates in NAD83 / UTM zone 11N.
params = {'corner': (5750000,4710000),
          'size': (3000,1800),
          'line_spacing': (600,600),
          'point_spacing': (100,100),
          'epsg': 26911 # http://spatialreference.org/ref/epsg/26911/
          }
survey = Survey(params)
# -
s = survey.src
r = survey.rcvr
r[:10]
layout = survey.layout
layout[:10]
# With a hierarchical index you can do cool things, e.g. show the last five sources:
# NOTE(review): DataFrame.ix was removed in pandas 1.0 — on modern pandas
# use layout.loc['sources'] instead.
layout.ix['sources'][-5:]
layout.crs
ax = layout.plot()
# Export GeoDataFrames to GIS shapefile.
# +
# gdf.to_file('src_and_rcvr.shp')
# -
# ## Midpoint calculations
# We need midpoints. There is a midpoint between every source-receiver pair.
#
# Hopefully it's not too inelegant to get to the midpoints now that we're using this layout object thing.
# Midpoint of every (receiver, source) pair: interpolate halfway along the
# straight line between them.
# NOTE(review): .ix was removed in pandas 1.0 — use .loc on modern pandas.
midpoint_list = [LineString([r, s]).interpolate(0.5, normalized=True)
                 for r in layout.ix['receivers'].geometry
                 for s in layout.ix['sources'].geometry
                 ]
# As well as knowing the (x,y) of the midpoints, we'd also like to record the distance from each *s* to each live *r* (each *r* in the live patch). This is easy enough to compute:
#
#     Point(x1, y1).distance(Point(x2, y2))
#
# Then we can make a list of all the offsets when we count the midpoints into the bins.
# Source-receiver distance per pair, in the same order as midpoint_list.
offsets = [r.distance(s)
           for r in layout.ix['receivers'].geometry
           for s in layout.ix['sources'].geometry
           ]
# Azimuth in degrees per pair.
# NOTE(review): arctan of (dx/dy) would divide by zero when r.y == s.y and
# folds opposite quadrants together; np.arctan2 would be the robust choice
# — confirm the intended azimuth convention before changing.
azimuths = [(180.0/np.pi) * np.arctan((r.x - s.x)/(r.y - s.y))
            for r in layout.ix['receivers'].geometry
            for s in layout.ix['sources'].geometry
            ]
# Decompose each offset into components using the azimuth.
offsetx = np.array(offsets)*np.cos(np.array(azimuths)*np.pi/180.)
offsety = np.array(offsets)*np.sin(np.array(azimuths)*np.pi/180.)
# Make a Geoseries of the midpoints, offsets and azimths:
midpoints = gpd.GeoDataFrame({
    'geometry' : midpoint_list,
    'offset' : offsets,
    'azimuth': azimuths,
    'offsetx' : offsetx,
    'offsety' : offsety
})
midpoints[:5]
ax = midpoints.plot()
# Save to a shapefile if desired.
# +
#midpt.to_file('CMPs.shp')
# -
# ## Spider plot
midpoints[:5].offsetx # Easy!
midpoints.ix[3].geometry.x # Less easy :(
# We need lists (or arrays) to pass into the [matplotlib quiver plot](http://matplotlib.org/examples/pylab_examples/quiver_demo.html). This takes four main parameters: *x, y, u,* and *v*, where *x, y* will be our coordinates, and *u, v* will be the offset vector for that midpoint.
#
# We can get at the GeoDataFrame's attributes easily, but I can't see how to get at the coordinates in the geometry GeoSeries (seems like a user error — it feels like it should be really easy) so I am resorting to this:
# Pull midpoint coordinates into plain lists for matplotlib's quiver.
# NOTE(review): midpoints.geometry.x / .y would do this without iterrows.
x = [m.geometry.x for i, m in midpoints.iterrows()]
y = [m.geometry.y for i, m in midpoints.iterrows()]
fig = plt.figure(figsize=(12,8))
# One offset vector per midpoint, centred on the midpoint, no arrowhead.
plt.quiver(x, y, midpoints.offsetx, midpoints.offsety, units='xy', width=0.5, scale=1/0.025, pivot='mid', headlength=0)
plt.axis('equal')
plt.show()
# ## Bins
# The bins are a new geometry, related to but separate from the survey itself, and the midpoints. We will model them as a GeoDataFrame of polygons. The steps are:
#
# 1. Compute the bin centre locations with our usual list comprehension trick.
# 1. Buffer the centres with a square.
# 1. Gather the buffered polygons into a GeoDataFrame.
# +
# Factor to shift the bins relative to source and receiver points
jig = survey.si / 4.
# Bin centres on a grid at half the station spacing, covering the survey.
bin_centres = gpd.GeoSeries([Point(survey.xmi + 0.5*r*survey.ri + jig, survey.ymi + 0.5*s*survey.si + jig)
                             for r in range(2*survey.points_per_line[1] - 3)
                             for s in range(2*survey.points_per_line[0] - 2)
                             ])
# Buffers are diamond shaped so we have to scale and rotate them.
scale_factor = np.sin(np.pi/4.)/2.
# buffer(..., 1) approximates the circle with 4 sides (a diamond);
# rotating by -45 degrees turns each one into an axis-aligned square bin.
bin_polys = bin_centres.buffer(scale_factor*survey.ri, 1).rotate(-45)
bins = gpd.GeoDataFrame(geometry=bin_polys)
bins[:3]
# -
ax = bins.plot()
# ## New spatial join
# Thank you to <NAME> and <NAME> for this code snippet, and many pointers.
#
# This takes about 20 seconds to run on my iMac, compared to something close to 30 minutes for the old nested loops.
# Give each bin an explicit id column so group stats can be joined back.
reindexed = bins.reset_index().rename(columns={'index':'bins_index'})
# Spatial join: attach every midpoint to the bin polygon containing it.
joined = gpd.tools.sjoin(reindexed, midpoints)
# Fold (midpoint count) and minimum offset per bin.
# NOTE(review): passing a dict of output names to .agg was deprecated in
# pandas 0.25 and later removed — use named aggregation on modern pandas.
bin_stats = joined.groupby('bins_index')['offset']\
                  .agg({'fold': len, 'min_offset': np.min})
bins = gpd.GeoDataFrame(bins.join(bin_stats))
joined[:10]
bins[:10]
ax = bins.plot(column="fold")
ax = bins.plot(column="min_offset")
| Seismic_acquisition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# ## Gattiker: Demo a method for running parallel chains.
# -
# ### This uses the setup for the ball_drop_1 notebook example, but will not focus on the setup, rather only the sampling.
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os.path
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
from GenDataBallDrop1 import gen_data, plot_data
from sepia.SepiaModel import SepiaModel
from sepia.SepiaData import SepiaData
import sepia.SepiaPlot as SepiaPlot
from sepia.SepiaPredict import SepiaEmulatorPrediction
from time import time, sleep
# +
#
# Set up ball-drop-1 example dataset
#
et = 0.01 # observation error
data_dict = gen_data(et)
# field data
R = data_dict['R'] # radii of balls .1,.2,.4 (m)
h_field = data_dict['h_field'] # observed heights 5,10,15,20 (m)
y_field = data_dict['y_field'] # observed times
# sim data
sim_design = data_dict['sim_design']
R_sim = sim_design[:,0]   # first design column: radius
C_sim = sim_design[:,1]   # second design column: the C parameter
h_sim = data_dict['h_sim']
y_sim = data_dict['y_sim']
# Bundle sim and field observations: x is ball radius, t is the
# simulator-only C input, y are drop times indexed by height.
data = SepiaData(x_sim = np.reshape(R_sim,(len(R_sim),1)),
                 t_sim = np.reshape(C_sim,(len(C_sim),1)),
                 y_sim = y_sim, y_ind_sim = h_sim,
                 x_obs = np.reshape(R,(len(R),1)), y_obs = y_field, y_ind_obs=h_field)
data.transform_xt()
data.standardize_y()
# +
# Build a 2-component K basis for the simulation output.
data.create_K_basis(2)
# Generate D matrix with normal kernels
D_grid = h_sim # locations on which the kernels are centered
D_width = 1.5  # width of each kernel
pv = len(D_grid)
D_obs = np.zeros(shape=(data.obs_data.y_ind.shape[0],pv))
D_sim = np.zeros(shape=(data.sim_data.y_ind.shape[0],pv))
h_dense = data_dict['h_dense']
D_dense = np.zeros(shape=(h_dense.shape[0],pv))
# One Gaussian kernel per grid location, evaluated on the field, sim and
# dense height grids.
for j in range(pv):
    D_obs[:,j] = norm.pdf(h_field, D_grid[j], D_width)
    D_sim[:,j] = norm.pdf(h_sim, D_grid[j],D_width)
    D_dense[:,j] = norm.pdf(h_dense, D_grid[j],D_width)
data.create_D_basis(D_obs=D_obs.T,D_sim=D_sim)
#
# Data setup completed
#
# +
nmcmc=1000
#
# Standard mcmc reference model setup and sampling
#
model_ref = SepiaModel(data)
model_ref.tune_step_sizes(50, 20, verbose=False)
# burn in the model. This is qualitative, and needs to be assessed on trace plots
# This model is actually OK, but do 10 samples for 'burn-in'
model_ref.do_mcmc(10, prog=False)
# and discard those samples
model_ref.clear_samples()
tref=time() # timing start
# Single-process baseline: draw all nmcmc samples in this process.
model_ref.do_mcmc(nmcmc)
sleep(0.1) # This is strictly for output formatting - tqdm seems to need time to recover.
print('\nSingle-process mcmc took %f s \n'%(time()-tref), flush=True)
#
# Multiprocessing - perform the same operations with parallel chains
#
# NOTE(review): `multiprocess` is a third-party fork of the stdlib
# `multiprocessing` package — presumably used here for its broader
# pickling support, since each worker receives a full SepiaModel.
import multiprocess as mp
print('Note that the multiprocessing library is developing quickly, and may require a recent python version')
print('This example was created in v3.8')
#
# identical model setup
#
model = SepiaModel(data) # new model instance
model.tune_step_sizes(50, 20, verbose=False) # optimize step sizes
model.do_mcmc(10, prog=False) # The same burn-in process
model.clear_samples() # reset the model's sample set, leaving the model state
tref0=time() # timing checkpoint
# user needs to decide how many parallel tasks to use
# It is not productive to use "virtual" or "hyperthreading" cores for this,
# so typically, divide the total number of cores reported by 2
ptasks=int(mp.cpu_count()/2) # number of parallel jobs
# split up the mcmc loops
total_mcmc=nmcmc # total samples desired
each_mcmc=int(total_mcmc/ptasks) # the number of samples desired from each worker
# NOTE(review): ptasks workers x (total_mcmc//ptasks) samples can slightly
# undershoot total_mcmc when the division is not exact.
# define a worker function to do some mcmc and return the samples dictionary in a queue
def worker_mcmc(model,nmcmc,res): # Worker gets a copy of the model, the number of mcmc, and a results list
    """Run nmcmc MCMC samples on this worker's model copy and append the
    resulting samples dict to the shared list `res`."""
    np.random.seed() # Worker must randomize the random number generator to not get identical results
    model.do_mcmc(10, prog=False) # should do at least one effective sample to diverge from other worker states
    model.clear_samples() # and discard the divergence-phase samples
    model.do_mcmc(nmcmc) # worker does the work
    res.append(model.get_samples()) # put the results in the multiprocessing shared list that was passed in
# Create a manager for shared data with the processes
manager=mp.Manager()
# and a shared list to take the results
resList=manager.list()
# It's not necessary to have the original datasets for sampling; and it could
# be a problem to have them if they're large. So, remove them from model (temporarily)
sim_data_ref=model.data.sim_data
model.data.sim_data=[]
obs_data_ref=model.data.obs_data
model.data.obs_data=[]
# Run the mcmc worker processes [could be more compact with listcomprehensions]
# First, define the processes as ptasks number of workers with appropriate arguments
procs=[]
for _ in range(ptasks):
    procs.append( mp.Process(target=worker_mcmc, args=(model,each_mcmc,resList)) )
# Start the processes
tref1=time() # timing checkpoint
for p in procs:
    p.start()
# Wait for process completion
for p in procs:
    p.join()
tref2=time() # timing checkpoint
# Add the samples to the model object (which currently has no samples)
for r in resList:
    model.add_samples(r)
model.get_last_sample_ind()
# Set the model state to the last sample inserted
model.set_model_to_sample()
# Restore the datasets that were detached before sampling
model.data.sim_data=sim_data_ref
model.data.obs_data=obs_data_ref
tref3=time()
print('Multi-process worker mcmc and samples return took %f s'%(tref2-tref1))
print(' with overhead of %f s'%( (tref1-tref0) + (tref3-tref2) ) )
print(' total = %f s'%(tref3-tref0))
#
# The samples from the parallel chains are in the Sepia model object now
# Can proceed with the Sepia model object as normal
#
# Compare the trace plots for qualitative equivalence of samples
# between the "regular" model object and the model object constituted with samples
# +
samples_dict_ref = model_ref.get_samples()
samples_dict = model.get_samples()
theta_names = ['C']
# Trace plots and summary stats for the single-process reference run versus
# the model reassembled from parallel-chain samples; they should agree
# qualitatively (not sample-for-sample, since the chains are independent).
mcmc_trace_ref = SepiaPlot.mcmc_trace(samples_dict_ref,theta_names)
mcmc_trace = SepiaPlot.mcmc_trace(samples_dict ,theta_names)
p_stats_ref = SepiaPlot.param_stats(samples_dict_ref,theta_names=theta_names,q1=.05,q2=.95,digits=4)
p_stats = SepiaPlot.param_stats(samples_dict ,theta_names=theta_names,q1=.05,q2=.95,digits=4)
print(p_stats_ref)
print(p_stats)
pass # finished
# -
| examples/Ball_Drop/ball_drop_1_parallelchains.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from selenium import webdriver
# +
# Launch Firefox, cap page loads at 50 s, and open Amazon's home page.
driver = webdriver.Firefox()
driver.set_page_load_timeout(50)
driver.get('https://www.amazon.com/')
# Types an empty string into the search box — effectively a no-op.
# NOTE(review): find_element_by_id was removed in Selenium 4; use
# driver.find_element(By.ID, "twotabsearchtextbox") there.
driver.find_element_by_id("twotabsearchtextbox").send_keys('')
# -
# NOTE(review): incomplete statement — `driver.fin` raises AttributeError
# at runtime. Probably an unfinished find_element call (or driver.quit());
# complete or remove before running.
driver.fin
| Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## kNN近邻算法
#
# **欧式距离**:二维空间,两点间距离公式A(x1,y1),B(x2,y2)
#
# $$\sqrt{(x_{2}-x_{1})^{2}+(y_{2}-y_{1})^{2}}$$
import numpy as np
import matplotlib.pyplot as plt
# Raw training data: 10 points with 2 features each (plain Python lists).
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]
              ]
# Binary labels: first five points are class 0, last five are class 1.
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)
# Position of every training point in the feature plane.
plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], color='g')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], color='r')
# New sample to classify (which class: benign or malignant tumour?).
x = np.array([8.093607318, 3.365731514])
# Plot the new sample (blue) alongside the training data.
plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], color='g')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], color='r')
plt.scatter(x[0], x[1], color='b')
# ### kNN的过程
# Euclidean distance from the new sample x to every training point.
distance = [np.sqrt(np.sum((x - x_train) ** 2)) for x_train in X_train]
distance
# +
# Equivalent one-liners for the Euclidean distance, kept for reference:
# [np.sqrt(np.sum((x - x_train)**2)) for x_train in X_train]
# [np.linalg.norm(x - x_train) for x_train in X_train]
# -
# Indices of the training points, sorted from nearest to farthest.
nearest = np.argsort(distance)
# the four nearest points are 8, 7, 5, 6
nearest
k = 6
# Labels of the k nearest neighbours.
topK_y = [y_train[i] for i in nearest[:k]]
topK_y
from collections import Counter
# Frequency of each label among the neighbours.
Counter(topK_y)
# Vote: the majority label wins.
votes = Counter(topK_y)
votes.most_common(1)
# Final prediction.
predict_y = votes.most_common(1)[0][0]
predict_y
| ch02-KNN/01-kNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Numpy Introduction
# What makes numpy popular: it provides a convenient Python interface for working with multi-dimensional array data structures efficiently; the numpy array data structure is also called <b>ndarray</b>, which is short for n-dimensional array
# ### Why numpy is efficient for numerical computations?
#
# Numpy arrays use contiguous blocks of memory that can be efficiently cached by the CPU. In contrast, Python lists are arrays of pointers to objects in random locations in memory, which cannot be easily cached and come with a more expensive memory lookup.
#
#
# Drawback of numpy: numpy arrays have a fixed size and are homogeneous, which means that all elements must have the same type. Homogeneous ndarray objects have the advantage that numpy can carry out operations using efficient C loops and avoid expensive type checks and other Python API overhead. Altering the size of a numpy array is expensive, since it requires creating a new array and carrying over the contents of the old array that we want to expand or shrink.
#
# ### N-dimensional Arrays
#
# we can think of a one-dimensional numpy array as a data structure to represent a vector of elements. Likewise, we can think of a two-dimensional array as a data struture to represent a matrix or a Python list of lists. Numpy can have up to 32 dimensions, if it was compiled without alternations to the source code.
# array function example: np.array converts a (nested) Python list into an ndarray
import numpy as np
lst = [[1,2,3],[4,5,6]]
aryld = np.array(lst)
print(aryld)
# ### Handy Attributes of Numpy
# itemsize attribute
ary2d = np.array([[1,2,3],[4,5,6]], dtype = 'int64')
ary2d.itemsize # returns the number of bytes per element; 1 byte equals 8 bits, so int64 -> 8 bytes
ary2d.dtype
# without an explicit dtype, numpy infers one from the data
ary = np.array([[1,2,3],[4,5,6]])
ary.dtype
# size attribute
ary2d.size # returns the total number of elements (rows * columns)
# shape attribute
print(ary2d.shape) # returns the dimensions of an array as a tuple, e.g. (2, 3)
print(np.array([1,2,3]).shape) # a 1-d array -> (3,)
print(np.array(5).shape) # a scalar (0-d) array has an empty shape tuple
print(np.array(5).ndim) # ... and 0 dimensions
# ### Array Construction Routines
# the array function works with most iterables in Python, including lists, tuples, and range objects; however, array does not support generator expressions. We can use the <b>fromiter</b> function as demonstrated below to parse generators directly:
# +
# np.fromiter can consume a generator directly (np.array cannot)
def generator():
    """Yield the odd integers in the interval [0, 10)."""
    for value in range(1, 10, 2):
        yield value
gen = generator()
np.fromiter(gen, dtype=int)
# -
# the same sequence written as a generator expression
generator_expression = (value for value in range(1, 10, 2))
np.fromiter(generator_expression, dtype=int)
# ones function: 3x3 array filled with 1.0
np.ones((3,3))
# zeros function: 3x3 array filled with 0.0
np.zeros((3,3))
# empty function: allocates memory without initializing the values
np.empty((3,3))
# The empty function creates the array with non-sensical (uninitialized) values from memory. We can think of zeros as a function that creates the array via empty and then sets all its values to 0.
# eye function: 3x3 identity matrix
np.eye(3)
# diag function: square matrix with the given values on the diagonal
np.diag((3,3,3))
# arange function: like range(), but returns an ndarray
np.arange(4,10) # start is inclusive, stop is exclusive
np.arange(5)
# linspace: num evenly spaced values over the closed interval [start, stop]
np.linspace(0,1,num=5)
np.linspace(0,1) # the default num is 50
# ### Array Indexing
ary = np.array([1,2,3])
ary[0] # single element
ary[:2] # slice of the first two elements
ary = np.array([[1,2,3],[4,5,6]])
ary[0,0] # row 0, column 0
ary[-1,-1] # negative indices count from the end
ary[0] # entire first row
ary[:,0] # entire first column
# ### Array Math and Universal Functions
#
# One of the features of NumPy that makes working with ndarray so efficient and convenient: <b>vectorization</b>. Vectorization is used to speed up the code without using loops. NumPy provides vectorized wrappers for performing element-wise operations implicitly via so-called ufuncs --short for universal functions.
#
#
# There are more than 60 ufuncs available in NumPy; ufuncs are implemented in compiled C code and are very fast and efficient compared to vanilla Python.
# +
# for-loop approach: add 1 to every element, mutating the nested list in place
lst = [[1,2,3],[4,5,6]]
for row_idx, row_val in enumerate(lst):
    for col_idx, col_val in enumerate(row_val):
        lst[row_idx][col_idx] += 1
lst
# -
# list comprehensions approach: builds a new nested list instead of mutating
lst = [[1,2,3],[4,5,6]]
[[cell + 1 for cell in row] for row in lst]
# NumPy's ufunc approach: one vectorized call, no explicit Python loop
ary = np.array([[1,2,3],[4,5,6]])
ary = np.add(ary,1)
ary
# The ufuncs for basic arithmetic operations are add, subtract, divide, multiply, and exp. We can use the equivalent mathematical operators (+, -, /, *, and **)
ary + 1
ary ** 2
# Terms:
#
#
# <b>unary funcs</b>: perform computations on a single array
#
#
# <b>binary funcs</b>: perform computations between two input arguments
# +
# ufunc's reduce operation
# role: compute the sum or product of array elements along a given axis
ary = np.array([[1,2,3],[4,5,6]])
np.add.reduce(ary) # by default, reduce applies an operation along the first axis (axis=0), i.e. column sums
# -
np.add.reduce(ary, axis = 1) # compute the row sums of the array
# sum -- equivalent to add.reduce
print(ary.sum(axis = 0))
print(ary.sum(axis = 1))
# np.sum(ary, ...) = ary.sum(...)
# product or sum both compute the product or sum of the entire array if we do not specify an axis
print(ary.sum())
print(np.sum(ary, axis = 0))
print(np.sum(ary))
# +
## other useful unary ufuncs are:
# mean
# std
# var
# np.sort
# np.argsort (returns indices that would sort an array)
# np.min
# np.max
# np.argmin
# np.argmax
# array_equal (checks if two arrays have the same shape and elements)
# -
# ### Broadcasting
#
# Broadcasting allows us to perform vectorized operations between two arrays even if their dimensions do not match by creating implicit multidimensional grids.
# +
# element-wise addition of two arrays with identical shapes
ary1 = np.array([1,2,3])
ary2 = np.array([4,5,6])
print(ary1 + ary2)
# broadcasting: the (3,) vector is implicitly repeated for each row of the (2, 3) array
ary3 = np.array([[4,5,6],[7,8,9]])
print(ary3 + ary1)
# -
# a (2, 1) column is broadcast across the columns of the (2, 3) array
ary3 + np.array([[1],[2]])
np.array([[1],[2]]).shape
np.array([1,2]).shape # Attention!: we cannot broadcast a (2,) vector to ary3's (2, 3) shape
np.array([1,2,3]).shape # (3,) is compatible with (2, 3)
# ### Advanced Indexing - Memory Views and Copies
# Basic integer-based indexing and slicing create views of NumPy arrays in memory. Working with views can be highly desirable since it avoids making unnecessary copies of arrays to save memory space.
ary = np.array([[1,2,3],[4,5,6]])
first_row = ary[0] # a view, not a copy
first_row += 99 # in-place update therefore modifies ary as well
ary
# We can see that changing the value of first_row also affected the original array. The reason for this is that ary[0] created a view of the first row, not a copy.
#
#
# The same concept applies to slicing operations. <b>Slicing creates views</b>. Sometimes it is desirable since it can speed up our code and save computational resources by avoiding to create unnecessary copies in memory.
# # copy method to get a copy of an array
ary = np.array([[1,2,3],[4,5,6]])
second_row = ary[1].copy() # explicit copy: detached from ary
second_row += 99 # ary stays unchanged
ary
# We can use NumPy's <b>may_share_memory</b> function to check if two arrays might share memory.
print(np.may_share_memory(first_row, ary)) # True: first_row is a view
print(np.may_share_memory(second_row, ary)) # False: second_row is a copy
# In addition to basic single-integer indexing and slicing operations, NumPy supports advanced indexing routines called <b>fancy indexing</b>. Via fancy indexing, we can use tuple or list objects of non-contiguous integer indices to return desired array elements. Since fancy indexing can be performed with non-contiguous sequences, it cannot return a view - a contiguous slice from memory. Thus, fancy indexing returns a copy of an array.
ary = np.array([[1,2,3],[4,5,6]])
ary[:,[0,2]] # fancy indexing: select columns 0 and 2 (returns a copy)
# We can also use Boolean masks for indexing.
mask = ary > 3 # element-wise comparison yields a boolean array of the same shape
print(mask)
print(ary[mask]) # 1-d array of the elements where mask is True
print((ary > 3) & (ary % 2 == 0)) # masks combine with bitwise operators (&, |, ~), not `and`/`or`
# Using Boolean indexing also returns a copy of the array and not a view.
# ### Comparison Operators and Mask
ary = np.array([1,2,3,4])
mask = ary > 2 # ary>2 creates a boolean mask of that array which consists of True and False elements
ary[mask]
# Boolean masks have some handy and useful methods
# sum method (True counts as 1, False as 0)
mask = ary > 2
mask.sum() # count how many elements in an array meet a certain condition
# nonzero method
mask.nonzero() # get the index positions of array elements that meet a certain condition
# An alternative approach to the index selection by a condition is using the <b>np.where</b> method:
np.where(ary>2)
# np.where(condition, x, y) can be interpreted as: If condition is True, yield x, otherwise yield y.
np.where(ary > 2, 1, 0)
# This can also be achieved by using Boolean masks "manually"
ary[mask] = 1 # set masked positions to 1 ...
ary[~mask] = 0 # ... and the complement (~ inverts the mask) to 0
ary
# We can also use logical operators to create more complex Boolean masks.
ary = np.array([1,2,3,4])
print((ary > 3) | (ary < 2)) # element-wise OR
print(~((ary > 3) | (ary < 2))) # element-wise NOT of the combined mask
# ### Random Number Generators
#
# NumPy has a random subpackage to create random numbers and samples.
# np.random.seed seeds the *global* pseudo-random number generator
np.random.seed(123)
np.random.rand(3)
np.random.rand(3) # a second draw differs: the global state has advanced
np.random.seed(123)
np.random.rand(3) # re-seeding reproduces the first draw exactly
# np.random.RandomState: a self-contained generator that does not touch the global state
np.random.RandomState(seed = 123).rand(3)
np.random.RandomState(seed = 123).rand(3) # a fresh RandomState with the same seed gives the same numbers
rng = np.random.RandomState(seed = 123)
rng.rand(3)
print(rng.rand(3)) # consecutive draws from one RandomState keep advancing its state
print(rng.rand(3))
rng = np.random.RandomState(seed = 123)
rng.rand(3) # re-creating the RandomState restarts the sequence
# standard-normal samples: 100 points in 2 dimensions
rng2 = np.random.RandomState(seed = 123)
z_scores = rng2.randn(100,2)
import matplotlib.pyplot as plt
plt.scatter(z_scores[:,0], z_scores[:,1])
plt.show()
# shift/scale the standard-normal samples to mean 5 and standard deviation 2
rng3 = np.random.RandomState(seed = 123)
scores = 2 * rng3.randn(100, 2) + 5
plt.scatter(scores[:,0], scores[:,1])
plt.show()
# ### Reshaping Arrays
#
# The size of arrays is fixed but the shape is not. We can use <b>reshape</b> method to obtain a view of an array with a different shape.
aryld = np.array([1,2,3,4,5,6])
ary2d_view = aryld.reshape(2,3) # reshape returns a view when possible, not a copy
ary2d_view
np.may_share_memory(ary2d_view, aryld) # True: the 2x3 view shares memory with the 1-d array
# When using the reshape method, we do not need to specify the number of elements in each axis; -1 means "infer this axis".
aryld.reshape(2,-1)
# use reshape to flatten an array
aryld.reshape(-1)
# use ravel() to flatten an array
aryld.ravel()
# A function related to ravel is flatten. In contrast to ravel, flatten returns a copy.
np.may_share_memory(aryld.flatten(), aryld) # False: flatten copies
np.may_share_memory(aryld.ravel(), aryld) # True: ravel returns a view when it can
# When merging different arrays, we have to create a new array, since NumPy arrays have a fixed size.
# concatenate function to combine two or more arrays
ary = np.array([1,2,3])
np.concatenate((ary, ary))
ary = np.array([[1,2,3]])
np.concatenate((ary, ary), axis = 0) # stack as rows -> shape (2, 3)
np.concatenate((ary, ary), axis = 1) # stack as columns -> shape (1, 6)
# ### Linear Algebra with NumPy Arrays
# <i>Attention</i>:
#
# NumPy matrix objects are analogous to numpy arrays but are restricted to two dimensions. Also, matrices define certain operations differently than arrays, ie... the * operator performs matrix multiplication instead of element-wise multiplication. However, NumPy matrix is less popular than array data structure. So we will only focus on array data structures.
row_vector = np.array([1,2,3])
# Below are three equivalent ways to reshape a one-dimensional array into a two-dimensional column vector
print(row_vector.reshape(-1,1))
print(row_vector[:,np.newaxis]) # adding a new axis
print(row_vector[:, None]) # np.newaxis is an alias for None
row_vector # the original array is unchanged
# In NumPy, we can perform matrix multiplication via the <b>matmul</b> function (@)
matrix = np.array([[1,2,3],[4,5,6]])
column_vector = np.array([[1],[2],[3]])
np.matmul(matrix, column_vector) # (2, 3) @ (3, 1) -> (2, 1)
# If we work with matrices and vectors, NumPy can be quite forgiving if the dimensions of matrices and one-dimensional arrays do not match exactly -- thanks to broadcasting. But it returns a one-dimensional array instead of a two-dimensional one
np.matmul(matrix, row_vector) # (2, 3) @ (3,) -> (2,)
# Similarly, we can compute the dot-product between two vectors.
np.matmul(row_vector, row_vector)
row_vector @ row_vector # @ is the operator form of matmul
# NumPy has a special <b>dot</b> function that behaves similar to matmul on pairs of one- or two-dimensional arrays. (When dimensions > 3, dot differs from matmul)
print(np.dot(row_vector, row_vector))
print(np.dot(matrix, row_vector))
print(np.dot(matrix, column_vector))
# transpose function (T)
matrix = np.array([[1,2,3],[4,5,6]])
print(matrix.transpose())
print(matrix.T) # .T is shorthand for transpose()
# ### Set Operations
# A set is essentially a collection of unique elements. Given an array, we can generate such a "set" using the <b>np.unique</b> function.
ary = np.unique([1,1,2,3,1,5]) # deduplicated and sorted
ary_set = np.unique(ary) # NOTE(review): this second np.unique call is redundant -- ary is already unique
ary_set
# Remember: the output of np.unique is a regular NumPy array, not a specialized data structure that disallows duplicate entries. The set operations, for example set union (np.union1d), set difference (np.setdiff1d), or set intersection (np.intersect1d), would return the same results whether array elements are unique or not. However, setting their optional <i>assume_unique</i> argument can speed up the computation.
ary1 = np.array([1,2,3])
ary2 = np.array([3,4,5,6])
np.intersect1d(ary1, ary2, assume_unique = True) # elements present in both arrays
np.setdiff1d(ary1, ary2, assume_unique = True) # elements in ary1 but not in ary2
np.union1d(ary1, ary2) # all distinct elements of both arrays
# Note that NumPy does not have a function for the symmetric set difference, but it can be computed by composition.
np.union1d(np.setdiff1d(ary1, ary2, assume_unique = True), np.setdiff1d(ary2, ary1, assume_unique = True))
# ### Serializing NumPy Arrays
# In computer science, serialization refers to storing data or objects in a different format that can be used for reconstruction later. For example, in Python, we can use the pickle library to write Python objects as bytecode to a local drive. Numpy offers a data storage format (NPY) that is especially well-suited (compared to regular pickle files) for storing array data.
# np.save function -- save a single array to a so-called .npy file:
ary1 = np.array([1, 2, 3])
np.save('ary-data.npy', ary1)
np.load('ary-data.npy')
# np.savez is slightly more powerful than the np.save function as it generates an archive consisting of 1 or more .npy files and thus allows us to save multiple arrays at once.
ary2 = np.array([4, 5, 6])
np.savez('ary-data.npz', ary1, ary2)
d = np.load('ary-data.npz')
d['arr_0'] # arrays passed positionally are stored under the default keys arr_0, arr_1, ...
| NumPy_Intro.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# ## Generalized phase sensitivity function of modified Stuart-Landau (MSL) model
#
# $$
# \begin{aligned}
# \dot{x} &= e^{2I}(x-y-I)-\left((x-I)^2+y^2\right)(x-I)\\
# \dot{y} &= e^{2I}(x+y-I)-\left((x-I)^2+y^2\right)y
# \end{aligned}
# $$
# ### Setup
using GeneralizedPhaseReduction
using DifferentialEquations, ForwardDiff, PyPlot
# +
# Right-hand side of the modified Stuart-Landau model; X = [x, y], I is the input parameter
dxdt(X, I) = exp(2I)*(X[1]-X[2]-I)-((X[1]-I)^2+X[2]^2)*(X[1]-I)
dydt(X, I) = exp(2I)*(X[1]+X[2]-I)-((X[1]-I)^2+X[2]^2)*X[2]
# Vector field F(X, I) = [dx/dt, dy/dt]
F(X, I) = [dxdt(X, I), dydt(X, I)]
# +
# Input-parameter grid and solver settings
Imin = 0.0; Imax = 1.0; dI = 0.2
D = 2 # state-space dimension
Nθ = 1000 # number of phase grid points
Irange = Imin:dI:Imax
dt = 1e-4 # integration time step
alg = Tsit5() # ODE solver algorithm from DifferentialEquations.jl
# analytical phase zero points -- presumably the phase origin is defined by state
# component 2 (y) crossing the threshold 0.0; confirm against the package docs
origin_val_idx = 2
origin_thr = 0.0
itp = "linear" # interpolation mode
# -
# Normally you need not specify a point of phase zero, but define it here for later comparison with the analytical solution.
# ### `generalized_phase_sensitivity_func` function
# +
# Compute the angular frequency ω(I) and the generalized phase sensitivity functions ζ(θ, I), ξ(θ, I)
ωI, ζθI, ξθI, _ = generalized_phase_sensitivity_func(F, Imin, Imax, dI, D, Nθ, nothing, dt, alg, origin_val_idx, origin_thr, itp)
println("ωI:", size(ωI))
println("ζθI:", size(ζθI))
println("ξθI:", size(ξθI))
# -
# These results are interpolated with `Interpolations.jl`.
# Irange = 0.0:0.2:1.0, so grid index 3 corresponds to I = 0.4 and index 4 to I = 0.6
println("ω(I=0.4):", ωI(0.4), "=", ωI[3]) # I = 0.4 (on the grid: interpolation equals the stored value)
println("ω(I=0.6):", ωI(0.6), "=", ωI[4]) # I = 0.6 (original comment said 0.4 -- copy-paste slip)
println("ω(I=0.5):", ωI(0.5)) # I = 0.5 lies between grid points, so this value is interpolated
# ### Comparing numerical and analytical results of MSL model
# Compare the numerically computed ω(I) with the analytical result ω(I) = e^{2I}
figure(figsize=(5, 3))
title("Comparing numerical and analytical ω(I) of MSL model")
plot(Irange, ωI, "o-", label="numerical")
plot(Irange, exp.(2*Irange), "k--", label="analytical")
grid(); xlabel("I"); ylabel(L"$\omega(I)$"); legend(); tight_layout()
# +
# Compare ζ(θ, I) and ξ(θ, I) against their analytical forms
# (ζ = 2e^{2I} - e^{I}cosθ and ξ = e^{-I}sinθ, derived in the appendix below)
θ = range(0, 2π, length=Nθ)
figure(figsize=(8, 3))
subplot(1,2,1)
suptitle("Comparing numerical and analytical ζ(θ, I) and ξ(θ, I) of MSL model \n (solid: numerical, dashed: analytical)")
for i = 1:size(Irange)[1]
    I = Irange[i]
    ζ_ = 2exp(2I) .- exp(I) * cos.(θ) # analytical ζ(θ, I)
    plot(θ, ζθI(θ, Irange[i]), label="I="*string(I))
    plot(θ, ζ_, "k--")
end
legend(); xlim(0, 2pi); xticks([0, 0.5pi, pi, 1.5pi, 2pi], ["0", "π/2", "π", "3/2π", "2π"]);
grid(); xlabel(L"$\theta$"); ylabel(L"\zeta(\theta, I)")
subplot(1,2,2)
for i = 1:size(Irange)[1]
    I = Irange[i]
    ξ_ = exp(-I) * sin.(θ) # analytical ξ(θ, I)
    plot(θ, ξθI(θ, Irange[i]), label="I="*string(I))
    plot(θ, ξ_, "k--")
end
xlim(0, 2pi); xticks([0, 0.5pi, pi, 1.5pi, 2pi], ["0", "π/2", "π", "3/2π", "2π"]);
grid(); xlabel(L"$\theta$"); ylabel(L"\xi(\theta, I)");
tight_layout(rect=[0,0,1,0.9])
# -
# ### Comparing approximated numerical and analytical results of MSL model
# +
# Evaluate the interpolants on a 4x finer grid than the one they were computed on
Irange_ = Imin:(dI/4):Imax
figure(figsize=(9, 3))
# fix: "appriximated" -> "approximated" in the user-facing figure title
suptitle("Comparing approximated numerical and analytical ω(I), ζ(θ, I) and ξ(θ, I) of MSL model")
subplot(1,3,1)
plot(Irange_, ωI(Irange_), "o-", label="numerical approx.")
plot(Irange_, exp.(2*Irange_), "k--", label="analytical")
grid(); xlabel("I"); ylabel(L"$\omega(I)$"); legend()
subplot(1,3,2)
# ζ evaluated at the fixed phase θ = π across the fine I grid
plot(Irange_, ζθI(π, Irange_), "o-", label="numerical approx.")
plot(Irange_, 2exp.(2Irange_) .- exp.(Irange_) * cos(π), "k--", label="analytical")
grid(); xlabel("I"); ylabel(L"$\zeta(\pi, I)$")
subplot(1,3,3)
# ξ evaluated at the fixed phase θ = 3π/2 across the fine I grid
plot(Irange_, ξθI(3π/2, Irange_), "o-", label="numerical approx.")
plot(Irange_, exp.(-Irange_) * sin(3π/2), "k--", label="analytical")
grid(); xlabel("I"); ylabel(L"$\xi(3/2\pi, I)$");
tight_layout(rect=[0,0,1,0.95])
# -
# ### Appendix : analytical solutions of MSL model
#
# #### Model definition
#
# $$
# \begin{aligned}
# \dot{x} &= e^{2I}(x-y-I)-\left((x-I)^2+y^2\right)(x-I)\\
# \dot{y} &= e^{2I}(x+y-I)-\left((x-I)^2+y^2\right)y
# \end{aligned}
# $$
#
# #### Vector fields
#
# $$
# F(X, I):=\left[e^{2I}(x-y-I)-\left((x-I)^2+y^2\right)(x-I), e^{2I}(x+y-I)-\left((x-I)^2+y^2\right)y\right]^\top
# $$
#
# #### Stable solution
# $$
# \begin{aligned}
# x_0 &= e^{I}\cos \theta+I\\
# y_0 &= e^{I}\sin \theta
# \end{aligned}
# $$
#
# and let $X_0(\theta, I):=[x_0, y_0]$.
#
# #### Phase
# $$
# \Theta(X, I)=\tan^{-1}\left(\frac{y}{x-I}\right)
# $$
#
# In general, $\Theta$ cannot be obtained in numerical models or real experimental systems.
#
# #### Angular frequency
#
# $$
# \begin{aligned}
# \omega(I)&=\frac{\partial \Theta(X, I)}{\partial X}\cdot F(X, I)\\
# &=\left[\frac{\partial \Theta(X, I)}{\partial x}, \frac{\partial \Theta(X, I)}{\partial y}\right]^\top\cdot F(X, I)\\
# &=\left[-\frac{y}{(x-I)^2+y^2}, \frac{x-I}{(x-I)^2+y^2}\right]^\top \cdot \left[e^{2I}(x-y-I)-\left((x-I)^2+y^2\right)(x-I), e^{2I}(x+y-I)-\left((x-I)^2+y^2\right)y\right]^\top\\
# &=e^{2I}
# \end{aligned}
# $$
# where $\cdot$ means dot product.
#
# #### Phase sensitivity function
#
# $$
# \begin{aligned}
# Z(\theta, I)&=\frac{\partial \Theta(X, I)}{\partial X}\biggm|_{\left(X_0(\theta, I), I\right)}=\left[-\frac{y}{(x-I)^2+y^2}, \frac{x-I}{(x-I)^2+y^2}\right]^\top\biggm|_{\left(X_0(\theta, I), I\right)}=\left[-e^{-I}\sin \theta, e^{-I}\cos\theta \right]^\top\\
# \xi(\theta, I)&=\frac{\partial \Theta(X, I)}{\partial I}\biggm|_{\left(X_0(\theta, I), I\right)}=\frac{y}{(x-I)^2+y^2}\biggm|_{\left(X_0(\theta, I), I\right)}=e^{-I}\sin\theta\\
# G\left(X(\theta, I), I\right)&=\left[\frac{\partial F_i(X, I)}{\partial I_j}\right]_{ij}=\left[e^{2I}(2x-2I-2y-1)+3(x-I)^2+y^2, e^{2I}(2x-2I+2y-1)+2y(x-I)\right]^\top\\
# G\left(X_0(\theta, I), I\right)&=\left[2e^{3I}(\cos\theta-\sin\theta)+2e^{2I}\cos^2\theta, 2e^{3I}(\cos\theta+\sin\theta)+e^{2I}(2\cos\theta\sin\theta-1)\right]^\top\\
# \zeta(\theta, I)&=G\left(X_0(\theta, I), I\right)^\top Z(\theta, I)=2e^{2I}-e^{I}\cos\theta
# \end{aligned}
# $$
#
| examples/tutorials/MSLmodel_generalized_phase_sensitivity_func.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#Complex()
#Complex() accepts either a string or a pair of
#numbers as an input and returns a complex number.
#Full definition:
#complex() returns a complex number with the
#value real + imag*1j or converts a string or number
#to a complex number.
#If the first parameter is a string, it will be
#interpreted as a complex number and the function
#must be called without a second parameter. The second
#parameter can never be a string. Each argument may
#be any numeric type (including complex). If imag is
#omitted, it defaults to zero and the constructor
#serves as a numeric conversion like int and float.
#If both arguments are omitted, returns 0j.
#If you are doing math or engineering that requires
#complex numbers (such as dynamics, control systems,
#or impedance of a circuit) this is a useful tool to
#have in Python.
# +
# Easier with examples... put the real part first, then the imaginary part (3 here):
complex(2,3)
# -
# complex() can read strings as well. If passing a string,
# don't pass a second parameter, since you are already
# passing the imaginary part inside the string.
complex('10+2j')
# +
#That's all.
# -
| Section7.7 complex().ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np  # fix: was `import numpy as py`; the alias is never used below, so use the conventional np
import matplotlib.pyplot as plt
import seaborn as sns
# Load the IPL match results (one row per match, 2008-2020 seasons).
data = pd.read_csv("IPL Matches 2008-2020.csv")
data.head(-5)  # preview: all rows except the last 5 -- head(5) may have been intended, TODO confirm
data.info()  # fix: the original `data.info` only displayed the bound method and never called it
# ### Most Wins in IPL
# +
# Count how many matches each team has won and show the shares as a pie chart.
temp = pd.DataFrame({"Winner": data['winner']})
count_wins = temp.value_counts()
print(count_wins)
# value_counts on a DataFrame is indexed by tuples; keep the first element (the team name) of each key
labels = [x[0] for x in count_wins.keys()]
labels
bar , ax = plt.subplots(figsize=(20,12))
ax = plt.pie(x = count_wins, autopct = "%.1f%%" ,labels= labels)
plt.title("Most Win in IPL", fontsize = 17)
plt.show()
# -
# ### Most wins in Eliminator
# Count wins per team restricted to matches decided in an eliminator.
# NOTE(review): passing the selection positionally relies on an older seaborn
# countplot signature; newer seaborn versions expect keyword arguments (x=...) -- confirm version
sns.countplot(data[ 'winner' ][data[ 'eliminator' ]=='Y'], data = data)
plt.title("Most wins in Eliminator", fontsize = 17)
plt.xticks(rotation = 90)
plt.show()
# ### Toss Decision
# Build a long-format table: for every team, how often it won the toss and
# chose to bat vs to field.
teams = data['toss_winner'].unique()
decision_making = pd.DataFrame([], columns= ['Toss Winner', 'Decision', 'Times'])
for element in teams:
    temp_bat = data[(data['toss_winner']== element) & (data['toss_decision'] == 'bat')]
    # fix: compare against 'field' -- the original used 'fields ' with a trailing
    # space, which matches no rows (TODO confirm the exact label in the CSV)
    temp_fields = data[(data['toss_winner']== element) & (data['toss_decision'] == 'field')]
    decision_making = decision_making.append({'Toss Winner': element, 'Decision' : 'bat', 'Times' : temp_bat['toss_winner'].count()}, ignore_index= True)
    # fix: the fielding row previously reused temp_bat's count (copy-paste bug),
    # so both decisions always reported identical counts
    decision_making = decision_making.append({'Toss Winner': element, 'Decision' : 'fields', 'Times' : temp_fields['toss_winner'].count()}, ignore_index= True)
decision_making
# +
# Grouped bar chart: how often each toss winner chose to bat vs to field.
sns.catplot(x= "Toss Winner" , y = 'Times' , hue= "Decision" , data = decision_making, kind= 'bar', height= 5, aspect= 2 )
plt.xticks(rotation = 90)
plt.title("Toss Decision of Teams")
plt.xlabel("IPL Teams ")
plt.ylabel("Toss Decision ")
plt.show()
# -
# ### Famous Venue
# +
# Horizontal bar chart of the eight most-used venues (x = match counts, y = venue names).
sns.barplot(x = data['venue'].value_counts().head(8).values,
            y = data['venue'].value_counts().head(8).index,
            data = data
           )
plt.title(" Famous Venue ")
plt.xlabel("Venue Count")
plt.ylabel("Venue")
# -
# ### Top 5 Umpire_1
# Vertical bar chart: the five umpires who officiated the most matches as umpire1.
sns.barplot(x = data['umpire1'].value_counts().head().index,
            y = data['umpire1'].value_counts().head().values,
            data = data
           )
plt.xlabel("umpire 1")
plt.ylabel("Match Count")
plt.title(" Top 5 Umpire_1")
plt.show()
# Horizontal bar chart: the five umpires who officiated the most matches as
# umpire2. Unlike the umpire1 plot above, x carries the counts and y the names.
sns.barplot(x = data['umpire2'].value_counts().head().values,
            y = data['umpire2'].value_counts().head().index,
            data = data
           )
plt.xticks(rotation = 90)
# fix: the axis labels were copy-pasted from the umpire1 cell; in this plot the
# x-axis is the match count and the y-axis lists the umpire2 names
plt.xlabel("Match Count")
plt.ylabel("umpire 2")
plt.title("Top 5 Umpire_2")
plt.show()
| Ipl Matches.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.11 64-bit (''test'': conda)'
# language: python
# name: python3
# ---
# # Automatic Transcription by Wav2Vec2, HuBERT, & Speech2Text
import gradio as gr
import librosa
import torch
from gradio.mix import Parallel
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC, HubertForCTC, Speech2TextProcessor, Speech2TextForConditionalGeneration
# The `Wav2Vec2Processor` can be used for tokenization as well as feature extraction depending on the `__call__`. More info [here](https://huggingface.co/transformers/model_doc/wav2vec2.html#transformers.Wav2Vec2Processor.__call__)
# load the models and their processors (downloads pretrained weights from the Hugging Face hub on first use)
processor_wav = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-robust-ft-libri-960h")
model_wav = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-robust-ft-libri-960h")
processor_hub = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
model_hub = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
model_s2t = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-large-librispeech-asr")
processor_s2t = Speech2TextProcessor.from_pretrained("facebook/s2t-large-librispeech-asr")
# define speech-to-text function for wav2vec2
def wav_asr_transcript(audio_file):
    """Transcribe an audio file with Wav2Vec2, streaming it chunk by chunk."""
    pieces = []
    chunks = librosa.stream(
        audio_file, block_length=20, frame_length=16000, hop_length=16000
    )
    for chunk in chunks:
        # collapse stereo input to a single channel by summing the two channels
        if len(chunk.shape) > 1:
            chunk = chunk[:, 0] + chunk[:, 1]
        features = processor_wav(chunk, sampling_rate=16_000, return_tensors="pt").input_values
        with torch.no_grad():
            logits = model_wav(features).logits
        best_ids = torch.argmax(logits, dim=-1)
        pieces.append(processor_wav.batch_decode(best_ids)[0].lower() + ". ")
    return "".join(pieces)
# define speech-to-text function for HuBERT
def hub_asr_transcript(audio_file):
    """Transcribe an audio file with HuBERT, streaming it chunk by chunk."""
    pieces = []
    chunks = librosa.stream(
        audio_file, block_length=20, frame_length=16000, hop_length=16000
    )
    for chunk in chunks:
        # collapse stereo input to a single channel by summing the two channels
        if len(chunk.shape) > 1:
            chunk = chunk[:, 0] + chunk[:, 1]
        features = processor_hub(chunk, sampling_rate=16_000, return_tensors="pt").input_values
        with torch.no_grad():
            logits = model_hub(features).logits
        best_ids = torch.argmax(logits, dim=-1)
        pieces.append(processor_hub.batch_decode(best_ids)[0].lower() + ". ")
    return "".join(pieces)
# define speech-to-text function for Speech2Text
def s2t_asr_transcript(audio_file):
    """Transcribe an audio file with Speech2Text, streaming it chunk by chunk."""
    pieces = []
    chunks = librosa.stream(
        audio_file, block_length=20, frame_length=16000, hop_length=16000
    )
    for chunk in chunks:
        # collapse stereo input to a single channel by summing the two channels
        if len(chunk.shape) > 1:
            chunk = chunk[:, 0] + chunk[:, 1]
        model_inputs = processor_s2t(chunk, sampling_rate=16_000, return_tensors="pt")
        generated_ids = model_s2t.generate(input_ids=model_inputs["input_features"], attention_mask=model_inputs["attention_mask"])
        # unlike the CTC models above, the decoded seq2seq text keeps its original case
        pieces.append(processor_s2t.batch_decode(generated_ids)[0] + ". ")
    return "".join(pieces)
# One Gradio interface per model. NOTE(review): each sub-interface declares a
# Textbox input, but Parallel() below supplies the shared Audio input that is
# actually fed to the transcription functions -- confirm against the
# gradio.mix.Parallel documentation.
wav_summ = gr.Interface(
    fn=wav_asr_transcript,
    inputs=gr.inputs.Textbox(),
    outputs=gr.outputs.Textbox(label="Transcribed by facebook/wav2vec2-large-robust-ft-libri-960h"),
)
hub_summ = gr.Interface(
    fn=hub_asr_transcript,
    inputs=gr.inputs.Textbox(),
    outputs=gr.outputs.Textbox(label="Transcribed by facebook/hubert-large-ls960-ft"),
)
s2t_summ = gr.Interface(
    fn=s2t_asr_transcript,
    inputs=gr.inputs.Textbox(),
    outputs=gr.outputs.Textbox(label="Transcribed by facebook/s2t-large-librispeech-asr"),
)
# Run all three interfaces side by side on the same uploaded audio file.
Parallel(
    wav_summ,
    hub_summ,
    s2t_summ,
    title="Automatic Transcription by Wav2Vec2, HuBERT & Speech2Text",
    inputs=gr.inputs.Audio(label="Upload Audio File", type="filepath"),
).launch()
| notebooks/gradio_app_w_3_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import metrics
# %matplotlib inline
# use the fivethirtyeight style for every figure in this notebook
plt.style.use('fivethirtyeight')
# # Load data
# +
# Predicted 30-day mortality scores produced by each classifier (CSV files with no header row).
y_score_30_linreg = pd.read_csv("plot/y_score_linear.csv",header = None)
y_score_30_logreg = pd.read_csv("plot/y_score_lg.csv",header = None)
y_score_30_GNB = pd.read_csv("plot/y_score_gnb.csv",header = None)
y_score_30_svm = pd.read_csv("plot/y_score_svm.csv",header = None)
y_score_30_kNN = pd.read_csv("plot/y_score_knn.csv",header = None)
# -
# Ground-truth labels shared by all classifiers above.
y_test_30 = pd.read_csv("plot/y_test.csv",header = None)
y_score_30_kNN.shape # quick shape check
# # Plot ROC of different models predicting 1 year mortality.
def plot_ROC(fpr, tpr, auc, name='', title='ROC curve'):
    """Add one ROC trace (plus the chance diagonal) to the current figure."""
    # dashed black diagonal = performance of a random classifier
    plt.plot([0, 1], [0, 1], 'k--')
    curve_label = '{}: (AUC = {:.3f})'.format(name, auc)
    plt.plot(fpr, tpr, label=curve_label)
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title(title)
    plt.legend(loc='best')
    plt.rcParams.update({'font.size': 18})
# +
# plt.rcParams.update({'font.size': 18})
# plt.figure(figsize=(16,12))
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_vgg)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'VGG')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_simple)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'Simple CNN')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_linreg)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'Linear Reg')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_logreg)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'Logistic Reg')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_kNN)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'kNN')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_GNB)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'GNB')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_xgtree)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'xgtree')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_1y, y_score_1y_rf)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'Random Forest',title = 'ROC of different model predicting 1 year mortality')
# -
# # Plot ROC of different models predicting 30 day mortality.
# +
# Overlay the ROC curves of all five classifiers on one figure;
# the final plot_ROC call also sets the figure title.
plt.figure(figsize=(16,12))
fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_linreg)
auc = metrics.auc(fpr, tpr)
plot_ROC(fpr, tpr, auc,name = 'Linear ')
fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_logreg)
auc = metrics.auc(fpr, tpr)
plot_ROC(fpr, tpr, auc,name = 'Logistic ')
fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_kNN)
auc = metrics.auc(fpr, tpr)
plot_ROC(fpr, tpr, auc,name = 'kNN ')
fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_GNB)
auc = metrics.auc(fpr, tpr)
plot_ROC(fpr, tpr, auc,name = 'NB ')
fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_svm)
auc = metrics.auc(fpr, tpr)
plot_ROC(fpr, tpr, auc,name = 'SVM ', title = 'ROC for all classifiers')
# fpr, tpr, thresholds = metrics.roc_curve(y_test_30, y_score_30_rf)
# auc = metrics.auc(fpr, tpr)
# plot_ROC(fpr, tpr, auc,name = 'Random Forest',title = 'ROC of different model predicting 30 day mortality')
# -
# # Plot Precision-Recall Curve of different models predicting 1 year mortality.
# +
def plot_PRC(precision, recall, ap, name='', title='Precision-Recall Curve'):
    """Add one precision-recall trace to the current figure."""
    curve_label = '{}: (AP = {:.4f})'.format(name, ap)
    plt.plot(recall, precision, lw=2, label=curve_label)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title(title)
    plt.legend(loc='lower left')
    plt.rcParams.update({'font.size': 18})
# -
# # Plot Precision-Recall Curve of different models predicting 30 day mortality.
# +
# Overlay the precision-recall curves of all five classifiers on one figure;
# the final plot_PRC call also sets the figure title.
plt.figure(figsize=(16,12))
precision, recall, _ = metrics.precision_recall_curve(y_test_30, y_score_30_logreg)
average_precision = metrics.average_precision_score(y_test_30, y_score_30_logreg)
plot_PRC(precision, recall, average_precision,name = 'Logistic ')
precision, recall, _ = metrics.precision_recall_curve(y_test_30, y_score_30_linreg)
average_precision = metrics.average_precision_score(y_test_30, y_score_30_linreg)
plot_PRC(precision, recall, average_precision,name = 'Linear ')
precision, recall, _ = metrics.precision_recall_curve(y_test_30, y_score_30_kNN)
average_precision = metrics.average_precision_score(y_test_30, y_score_30_kNN)
plot_PRC(precision, recall, average_precision,name = 'kNN ')
precision, recall, _ = metrics.precision_recall_curve(y_test_30, y_score_30_GNB)
average_precision = metrics.average_precision_score(y_test_30, y_score_30_GNB)
plot_PRC(precision, recall, average_precision,name = 'NB ')
precision, recall, _ = metrics.precision_recall_curve(y_test_30, y_score_30_svm)
average_precision = metrics.average_precision_score(y_test_30, y_score_30_svm)
plot_PRC(precision, recall, average_precision,name = 'SVM '
         , title = 'Precision-Recall Curve for all classifers')
# -
# # Confusion matrix
# +
def probability_to_label(probabilities, threshold=0.5):
    """Binarize scores: 1 where the score strictly exceeds *threshold*, else 0."""
    return [1 if score > threshold else 0 for score in probabilities]
def plot_cm(y_test, y_score, title='Confusion Matrix', cmap=plt.cm.Blues):
    """Plot a row-normalized confusion matrix for scores thresholded at 0.5."""
    y_pred = probability_to_label(y_score, threshold=0.5)
    sns.set_style('white')
    cm = metrics.confusion_matrix(y_test, y_pred)
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # normalize each row to proportions
    from sklearn.utils.multiclass import unique_labels
    classes = unique_labels(y_test, y_pred)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    plt.rcParams.update({'font.size': 10})
    # Loop over data dimensions and create text annotations.
    fmt = '.2f'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # white text on dark cells, black text on light cells, for readability
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
# +
# Binarise each model's scores at 0.5 and draw a confusion matrix per model.
# NOTE(review): plot_cm already applies probability_to_label internally, so
# the explicit conversion here thresholds twice.  This is idempotent for
# 0/1 values, but the y_pred_* variables are redundant as plot_cm inputs.
y_pred_30_vgg = probability_to_label(y_score_30_vgg.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_vgg,title = 'Confusion Matrix of VGG for 30 days')
y_pred_30_rf = probability_to_label(y_score_30_rf.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_rf,title = 'Confusion Matrix of Random Forest for 30 days')
y_pred_30_simple = probability_to_label(y_score_30_simple.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_simple,title = 'Confusion Matrix of CNN for 30 days')
y_pred_30_linreg = probability_to_label(y_score_30_linreg.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_linreg,title = 'Confusion Matrix of Linear Regression for 30 days')
y_pred_30_logreg = probability_to_label(y_score_30_logreg.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_logreg,title = 'Confusion Matrix of Logistic Regression for 30 days')
y_pred_30_kNN = probability_to_label(y_score_30_kNN.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_kNN,title = 'Confusion Matrix of k-NN for 30 days')
y_pred_30_GNB = probability_to_label(y_score_30_GNB.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_GNB,title = 'Confusion Matrix of GNB for 30 days')
y_pred_30_xgtree = probability_to_label(y_score_30_xgtree.values, threshold=0.5)
plot_cm(y_test_30, y_pred_30_xgtree,title = 'Confusion Matrix of xgtree for 30 days')
# -
| Plot ROC and PRC CM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: work
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mission to Mars:
# ## Jet Propulsion Laboratory
# ### California Institute of Technology
#Import Splinter, BeautifulSoup, and Pandas
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
from webdriver_manager.chrome import ChromeDriverManager
#Set the executable path and initialize the chrome browser in splinter
# ChromeDriverManager().install() downloads (and caches) a chromedriver
# matching the locally installed Chrome; headless=False opens a visible
# browser window so the scraping can be watched.
path = {"executable_path": ChromeDriverManager().install()}
browser = Browser("chrome",**path,headless=False)
# ## Visit the NASA mars news site
# +
#Visit the mars nasa news site
# NOTE(review): the CSS selectors below match the 2020-era layout of
# mars.nasa.gov -- re-verify them if the page markup has changed.
url_news = "https://mars.nasa.gov/news/"
browser.visit(url_news)
#Add delay for loading the page (wait up to 2s for the article list)
browser.is_element_present_by_css("ul.item_list li.slide", wait_time=2)
# +
#Converting the browser html to a soup object
html_news = browser.html
soup_news = bs(html_news, 'html.parser')
# Grab only the first (most recent) article slide
article_one = soup_news.select_one('ul.item_list li.slide')
# -
#Find NASA News Titles with content title
nasa_news = article_one.find('div', class_='content_title')
nasa_news
#Find NASA News content title text
nasa_news_title = article_one.find('div', class_='content_title').get_text()
nasa_news_title
#Find NASA News Stories with article teaser body
nasa_story = soup_news.find('div', class_='article_teaser_body')
nasa_story
#Find NASA News Story paragraph text
nasa_story_article = soup_news.find('div', class_='article_teaser_body').get_text()
nasa_story_article
# ## JPL Space Images Featured Image
#Visit JPL space images Mars URL
url_img = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html'
browser.visit(url_img)
# +
# The commented-out cells below are kept as a record of selectors that no
# longer match the page; only the CSS selector further down works.
#Find and click the full image button, doesn't work
#full_image_elem = browser.find_by_tag('button')[1]
#full_image_elem
# +
#Find and click the full image button, doesn't work
# full_img = browser.find_by_id('full_image')
# full_img.is_empty()
# +
#Find and click the full image button, doesn't work
# full_image = browser.find_by_xpath('/html/body/div[1]/div/a')
# full_image.is_empty()
# -
#Find and click the full image button
find_image = browser.find_by_css('a[class="showimg fancybox-thumbs"]')
find_image.is_empty()
#Find the image url from the anchor's href attribute
html_img= find_image[0]['href']
html_img
# +
#Find and click more info, more info button no longer exists
# +
#Parse the resulting html with soup
# html_img = browser.html
# soup_img = bs(html_img, 'html.parser')
# +
#Find the relative image url
# img_url = soup_img.select_one('figure.lede a img').get("src")
# img_url
# -
# Use the base url to create an absolute url
# NOTE(review): the featured image URL is hard-coded rather than scraped
# (the relative-url scrape above is commented out) -- confirm this is the
# intended final behaviour.
featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars3.jpg'
# ## Mars Facts
#Visit the mars facts site
url_facts = "https://space-facts.com/mars/"
browser.visit(url_facts)
#Dataframe of space-facts.com mars page.
# read_html returns every <table> on the page; the first holds the
# planet-profile facts.  Reuse url_facts instead of repeating the literal.
df = pd.read_html(url_facts)[0]
df
#Format dataframe: give the two unnamed columns meaningful headers
df.columns=['Description','Value']
# df.set_index('Description',inplace=True)
df
#Make dataframe an html table, bolding the Description column
mars_facts = df.to_html(formatters={'Description': lambda x: '<b>' + x + '</b>'}, escape=False, index=False)
mars_facts
# ## Hemispheres
#Visit the USGS astrogeology page for hemisphere data from Mars
url_hem = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url_hem)
# +
#Create for loop to grab hemisphere titles and url links
hem_urls = []
#Find list of hemispheres
links = browser.find_by_css("a.product-item h3")
#Start loop, click link, find anchor and return href and title.
# The links are re-queried inside the loop because navigating away and
# back invalidates the element references obtained above.
for title in range(len(links)):
    hemisphere ={}
    #Each individual hemisphere on click
    browser.find_by_css("a.product-item h3")[title].click()
    #Each hemisphere on click name
    # NOTE(review): find_link_by_text is deprecated in newer splinter
    # releases (use browser.links.find_by_text) -- confirm the installed
    # version still supports it.
    hem_link = browser.find_link_by_text('Sample').first
    #Find hemisphere title
    hemisphere['title'] = browser.find_by_css("h2.title").text
    #Find hemisphere url
    hemisphere['img_url'] = hem_link['href']
    #Create dictionary of objects to list
    hem_urls.append(hemisphere)
    #Navigate backwards to the results list for the next iteration
    browser.back()
# -
#Print/view hemisphere titles and urls
hem_urls
#Quit Browser
browser.quit()
| mission_to_mars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### Required Libraries ###
import math
from datetime import datetime

from dateutil.relativedelta import relativedelta
### Functionality Helper Functions ###
def parse_int(n):
    """
    Securely converts a non-integer value to integer.

    Returns float("nan") when the value cannot be converted (for example
    None, or a non-numeric string), so callers can test the result
    instead of handling exceptions.
    """
    try:
        return int(n)
    except (ValueError, TypeError):
        # int(None) raises TypeError rather than ValueError; treat any
        # unconvertible input uniformly as NaN.
        return float("nan")
def build_validation_result(is_valid, violated_slot, message_content):
    """
    Build a validation result structured as a Lex response.

    The "message" key is omitted entirely when message_content is None.
    """
    result = {"isValid": is_valid, "violatedSlot": violated_slot}
    if message_content is not None:
        result["message"] = {
            "contentType": "PlainText",
            "content": message_content,
        }
    return result
def validate_data(age, investment_amount, intent_request):
    """
    Validates the data provided by the user.

    Age must be strictly between 0 and 65; the investment amount must be
    an integer of at least 5000 USD.  Returns a Lex validation result
    built by build_validation_result.
    """
    # Validate that the user is less than 65 years old
    if age is not None:
        if age >= 65 or age <= 0:
            return build_validation_result(
                False,
                "age",
                "You should be less than 65 years old and greater than 0 years old to use this investing service, "
                "please provide another age.",
            )

    # Validate the investment amount,
    # it should be equal or greater than 5000
    if investment_amount is not None:
        # Slot values arrive as strings, so cast first.  parse_int yields
        # NaN for non-integer input (e.g. "5,000.50"), and NaN compares
        # False against 5000 -- it must therefore be rejected explicitly,
        # otherwise invalid amounts would silently pass validation.
        amount = parse_int(investment_amount)
        if math.isnan(amount) or amount < 5000:
            return build_validation_result(
                False,
                "investmentAmount",
                "The amount to invest should be more than 5000, "
                "kindly provide a proper amount in USD to invest.",
            )

    # A True result is returned if age and amount are valid
    return build_validation_result(True, None, None)
### Dialog Actions Helper Functions ###
def get_slots(intent_request):
    """
    Return the slot dictionary of the intent currently being processed.
    """
    current_intent = intent_request["currentIntent"]
    return current_intent["slots"]
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """
    Build an ElicitSlot dialog-action response asking the user to
    re-supply the given slot.
    """
    dialog_action = {
        "type": "ElicitSlot",
        "intentName": intent_name,
        "slots": slots,
        "slotToElicit": slot_to_elicit,
        "message": message,
    }
    return {"sessionAttributes": session_attributes, "dialogAction": dialog_action}
def delegate(session_attributes, slots):
    """
    Build a Delegate dialog-action response, handing control back to Lex
    to choose the next course of action.
    """
    action = {"type": "Delegate", "slots": slots}
    return {"sessionAttributes": session_attributes, "dialogAction": action}
def close(session_attributes, fulfillment_state, message):
    """
    Build a Close dialog-action response that ends the conversation with
    the given fulfillment state and message.
    """
    return {
        "sessionAttributes": session_attributes,
        "dialogAction": {
            "type": "Close",
            "fulfillmentState": fulfillment_state,
            "message": message,
        },
    }
### Intents Handlers ###
def recommend_portfolio(intent_request):
    """
    Performs dialog management and fulfillment for recommending a portfolio.

    On DialogCodeHook invocations the slots are validated and either an
    ElicitSlot (invalid data) or Delegate (all valid) response is
    returned.  On fulfillment, a Close response with the recommended
    AGG/SPY allocation for the requested risk level is returned.
    """
    # Gets slots' values
    first_name = get_slots(intent_request)["firstName"]
    age = get_slots(intent_request)["age"]
    investment_amount = get_slots(intent_request)["investmentAmount"]
    risk_level = get_slots(intent_request)["riskLevel"]
    source = intent_request["invocationSource"]

    # Slot values arrive as strings; normalise before validating.
    if age:
        age = int(age)
    if risk_level:
        risk_level = risk_level.lower()

    if source == "DialogCodeHook":
        # Perform basic validation on the supplied input slots and use
        # the elicitSlot dialog action to re-prompt for the first
        # violation detected.
        slots = get_slots(intent_request)
        validation_result = validate_data(age, investment_amount, intent_request)
        if not validation_result["isValid"]:
            slots[validation_result["violatedSlot"]] = None  # Cleans invalid slot
            # Returns an elicitSlot dialog to request new data for the invalid slot
            return elicit_slot(
                intent_request["sessionAttributes"],
                intent_request["currentIntent"]["name"],
                slots,
                validation_result["violatedSlot"],
                validation_result["message"],
            )

        # Fetch current session attibutes
        output_session_attributes = intent_request["sessionAttributes"]
        # Once all slots are valid, a delegate dialog is returned
        # to Lex to choose the next course of action.
        return delegate(output_session_attributes, get_slots(intent_request))

    # Map each risk level to an initial AGG (bonds) / SPY (equities) split.
    recommendations = {
        "very high": "0% bonds (AGG), 100% equities (SPY)",
        "high": "20% bonds (AGG), 80% equities (SPY)",
        "medium": "40% bonds (AGG), 60% equities (SPY)",
        "low": "60% bonds (AGG), 40% equities (SPY)",
        "very low": "80% bonds (AGG), 20% equities (SPY)",
        "none": "100% bonds (AGG), 0% equities (SPY)",
    }
    # Fall back to the most conservative split instead of raising NameError
    # (the original if/elif chain left initial_recommendation unbound when
    # an unexpected risk level slipped through validation).
    initial_recommendation = recommendations.get(
        risk_level, "100% bonds (AGG), 0% equities (SPY)"
    )

    # Return a message with the initial recommendation based on the risk level.
    return close(
        intent_request["sessionAttributes"],
        "Fulfilled",
        {
            "contentType": "PlainText",
            "content": """{} thank you for your information;
            based on the risk level you defined, our recommendation is to select an investment portfolio with {}
            """.format(
                first_name, initial_recommendation
            ),
        },
    )
### Intents Dispatcher ###
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot; routes the
    request to the matching intent handler or raises for unknown intents.
    """
    intent_name = intent_request["currentIntent"]["name"]
    # Guard clause: reject anything but the single supported intent.
    if intent_name != "RecommendPortfolio":
        raise Exception("Intent with name " + intent_name + " not supported")
    return recommend_portfolio(intent_request)
### Main Handler ###
def lambda_handler(event, context):
    """
    AWS Lambda entry point.

    The JSON body of the Lex request is provided in ``event``; routing
    based on the requested intent is delegated to ``dispatch``.
    """
    response = dispatch(event)
    return response
| Lambda.Code.Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## What don't you like about python?
#
# **Note this is a personal type question, but you should have a great understanding of the language to be able to criticize it well.**
| worded_questions/answers/what-dont-you-like-about-python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Text Corpora and Lexical Resources
#
# based on the NLTK book:
#
# ["Accessing Text Corpora and Lexical Resources"](https://www.nltk.org/book/ch02.html)
import nltk
# ## NLTK Text Corpora
#
# NLTK includes many text collections (corpora) and other language resources, listed here: http://www.nltk.org/nltk_data/
#
# Additional information:
# * [NLTK Corpus How-to](http://www.nltk.org/howto/corpus.html)
#
# In order to use these resources you may need to download them using `nltk.download()`
#
# ---
#
# **NLTK book: ["Text Corpus Structure"](https://www.nltk.org/book/ch02#text-corpus-structure)**
#
# There are different types of corpora:
# * simple collections of text (e.g. Gutenberg corpus)
# * categorized (texts are grouped into categories that might correspond to genre, source, author)
# * temporal, demonstrating language use over a time period (e.g. news texts)
#
# 
#
# There are also annotated text corpora that contain linguistic annotations, representing POS tags, named entities, semantic roles, etc.
# ### 1) Gutenberg Corpus
#
# NLTK includes a small selection of texts (= multiple files) from the Project Gutenberg electronic text archive:
# +
# let's explore its contents (file IDs of the bundled Gutenberg texts):
nltk.corpus.gutenberg.fileids()
# +
# "Emma" by Jane Austen, as a lazy word (token) sequence
emma = nltk.corpus.gutenberg.words('austen-emma.txt')
print(emma)
# +
# you can access corpus texts as characters, words (tokens) or sentences:
file_id = 'austen-emma.txt'
print("\nSentences:")
print( nltk.corpus.gutenberg.sents(file_id)[:3] )
print("\nWords:")
print( nltk.corpus.gutenberg.words(file_id)[:10] )
print("\nChars:")
print( nltk.corpus.gutenberg.raw(file_id)[:50] )
# -
# See https://www.nltk.org/book/ch02#gutenberg-corpus on how to compute statistics of words, sentences and characters (e.g. avg words per sentence).
#
# ---
# ### 2) Brown corpus
#
# The Brown Corpus was the first million-word electronic corpus of English, created in 1961 at Brown University.
#
# This corpus contains text from 500 sources, and the sources have been categorized by genre, such as news, editorial, and so on.
# +
# Brown corpus categories list:
from nltk.corpus import brown
brown.categories()
# +
# We can filter the corpus by (a) one or more categories or (b) file IDs:
print(brown.sents(categories='science_fiction')[:2])
# -
print(brown.sents(categories=['news', 'editorial', 'reviews']))
print(brown.words(fileids=['cg22']))
# We can use NLTK **ConditionalFreqDist** to collect statistics on the corpus distribution across genres and other properties:
# +
# Count every (genre, word) pair across the whole corpus -- this iterates
# all ~1M words, so it takes a little while.
cfd = nltk.ConditionalFreqDist(
    (genre, word)
    for genre in brown.categories()
    for word in brown.words(categories=genre))
genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']
modals = ['can', 'could', 'may', 'might', 'must', 'will']
# Tabulate modal-verb frequencies per genre.
cfd.tabulate(conditions=genres, samples=modals)
# -
# #### Brown corpus contains tags with part-of-speech information
#
# [Working with Tagged Corpora](https://www.nltk.org/book/ch05#tagged-corpora) (NLTK book)
# (word, POS-tag) pairs using the simplified universal tagset
words = nltk.corpus.brown.tagged_words(tagset='universal')
words
# +
# islice() lets us read a part of the corpus
from itertools import islice
# note: islice returns a one-shot iterator, consumed by list() below
words = islice(words, 300)
# let's convert it to a list
word_list = list(words)
word_list
# +
# find all words with POS tag "ADJ"
tag = 'ADJ'
[item[0] for item in word_list if item[1] == tag]
# -
# **Additional examples** (using FreqDist, ...):
#
# [Working with Tagged Corpora](https://www.nltk.org/book/ch05#tagged-corpora)
# ### 3) NLTK Corpus functionality
# * fileids() = the files of the corpus
# * fileids([categories]) = the files of the corpus corresponding to these categories
#
# * categories() = the categories of the corpus
# * categories([fileids]) = the categories of the corpus corresponding to these files
#
# * raw() = the raw content of the corpus
# * raw(fileids=[f1,f2,f3]) = the raw content of the specified files
# * raw(categories=[c1,c2]) = the raw content of the specified categories
#
# * words() = the words of the whole corpus
# * words(fileids=[f1,f2,f3]) = the words of the specified fileids
# * words(categories=[c1,c2]) = the words of the specified categories
#
# * sents() = the sentences of the whole corpus
# * sents(fileids=[f1,f2,f3]) = the sentences of the specified fileids
# * sents(categories=[c1,c2]) = the sentences of the specified categories
#
# * abspath(fileid) = the location of the given file on disk
# * encoding(fileid) = the encoding of the file (if known)
# * open(fileid) = open a stream for reading the given corpus file
# * root = if the path to the root of locally installed corpus
#
# * readme() = the contents of the README file of the corpus
#
# **Note: if you want to explore these corpora using `nltk.Text` functionality (e.g. as in the Introduction part) you will need to load them into `nltk.Text`**
# # Your turn!
#
# Choose one of NLTK corpora and **explore it using NLTK** (following examples here and in the NLTK book).
#
# Also apply what you learned (FreqDist, ...) in section "Computing with Language: Statistics".
#
# ---
#
# **Write code in notebook cells below**.
# * add more cells (use "+" icon) if necessary
# ## Lexical Resources
#
# A lexicon, or lexical resource, is a collection of words and/or phrases along with associated information such as part of speech and sense definitions.
#
# https://www.nltk.org/book/ch02#lexical-resources
#
# We already used NLTK lexical resources (stopwords and common English words).
# ## WordNet
#
# WordNet is a semantically-oriented dictionary of English, similar to a traditional thesaurus but with a richer structure. NLTK includes the English WordNet, with 155,287 words and 117,659 synonym sets.
from nltk.corpus import wordnet as wn
# +
# a collection of synonym sets related to "wind"
wn.synsets('wind')
# +
# words (lemmas) in one of the synsets:
wn.synset('wind.n.08').lemma_names()
# -
# gloss (definition) and usage examples for the same synset
wn.synset('wind.n.08').definition()
wn.synset('wind.n.08').examples()
# +
# let's explore all the synsets for this word
for synset in wn.synsets('wind'):
    print(synset.lemma_names())
# +
# see all synsets that contain a given word
wn.lemmas('curve')
# -
# ### Try it yourself!
# ---
#
# **Additional WordNet examples:**
# * https://www.nltk.org/book/ch02#wordnet
| nlp/NLTK - Using NLTK Corpora.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="xyKUpqG3HlOp"
# # **Proyek : Klasifikasi Gambar**
# Deteksi jaringan saraf tiruan menggunakan TensorFlow bentuk tangan yang membentuk gunting, batu, atau kertas
#
# 1. Dataset dibagi menjadi train set dan validation set
# 2. Mengimplementasikan augmentasi gambar
# 3. Menggunakan image data generator
# 4. Menggunakan model sequential
# 5. Akurasi dari model minimal 85%
# + [markdown] id="EyjswI4E7rjw"
# # **Biodata**
#
# Nama : <NAME>
#
# email : <EMAIL>
#
# Google Collab Link : https://colab.research.google.com/drive/1PfGYjVUsvOJvzptwSFhIOJQW8BFF0eGn?usp=sharing
# + [markdown] id="Yw5LwvN076Pl"
# # **Import Library**
# + id="1jq_zmTJqcPU"
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import zipfile, os, shutil
# + [markdown] id="lzMjGySP7-mB"
# # **Download Dataset**
# + colab={"base_uri": "https://localhost:8080/"} id="-6Kr_42l7pMa" outputId="1193e080-5466-4df0-a145-4066ee809b00"
# !wget --no-check-certificate https://dicodingacademy.blob.core.windows.net/picodiploma/ml_pemula_academy/rockpaperscissors.zip -O rockpaperscissors.zip
# + [markdown] id="dp8vss_r8KyP"
# # **Ekstract & Inisialisasi Data**
# + colab={"base_uri": "https://localhost:8080/"} id="N6b53Lu-8EHs" outputId="2c71cedb-2796-4ee8-87a9-7bb40daa2406"
# Extract the downloaded dataset archive into /submission.
local_zip = 'rockpaperscissors.zip'
# Use a context manager so the archive handle is closed even if
# extraction fails (the original explicit close() could be skipped
# on error).
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/submission')
base_dir = '/submission/rockpaperscissors'
os.listdir(base_dir)
# + id="bvmZ2Gn7RKDV" colab={"base_uri": "https://localhost:8080/"} outputId="d6e0a3ed-5549-42fa-e192-f1cf7c0afd84"
# Remove the redundant unsplit copy of the images and its README so only
# the rock/paper/scissors class folders remain under base_dir.
shutil.rmtree(os.path.join(base_dir, 'rps-cv-images'))
os.remove(os.path.join(base_dir, 'README_rpc-cv-images.txt'))
os.listdir(base_dir)
# + [markdown] id="_GMs_tZk8rNk"
# # **Split Folder**
# + id="lrGkDgIB8p0u"
# Expected locations for a physical train/validation split.
train_dir = os.path.join(base_dir,'train')
validation_dir = os.path.join(base_dir, 'val')
# NOTE(review): the train_* paths below are built from base_dir (not
# train_dir), and no files are ever copied into train/ or val/ in this
# notebook -- the later cells read straight from base_dir with
# subset='training'/'validation'.  Confirm these variables are needed.
train_rock_dir = os.path.join(base_dir,'rock')
train_paper_dir = os.path.join(base_dir, 'paper')
train_scissors_dir = os.path.join(base_dir, 'scissors')
validation_rock_dir = os.path.join(validation_dir, 'rock')
validation_scissors_dir = os.path.join(validation_dir, 'scissors')
validation_paper_dir = os.path.join(validation_dir, 'paper')
# + [markdown] id="m0oeVpPkWDGm"
# # **Image Generator**
# + colab={"base_uri": "https://localhost:8080/"} id="J6WGwV9oXHAp" outputId="f809911d-f165-44e9-ee65-3befc137c60d"
# Fix: train_datagen was referenced below but never defined, so this cell
# raised NameError.  Define it here with pixel rescaling, image
# augmentation and a 40% validation split (a validation_split is required
# because the flow_from_directory calls below request
# subset='training' / subset='validation').
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
    validation_split=0.4)

train_generator = train_datagen.flow_from_directory(
    base_dir, # training data directory
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical',
    shuffle = True,
    subset='training')
val_generator = train_datagen.flow_from_directory(
    base_dir, # validation data directory
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical',
    shuffle = True,
    subset='validation')
# + [markdown] id="F428LVQdYY2a"
# # **CNN**
# + colab={"base_uri": "https://localhost:8080/"} id="CPvdKbH3XwBJ" outputId="3a596f2a-8de2-47e5-baf2-800237299f9b"
# Sequential CNN: four conv/pool stages progressively shrink the
# 150x150x3 input while increasing the channel count, followed by a
# dense classifier head.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(512, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    # Three output units (rock / paper / scissors) with softmax.
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
# + [markdown] id="nIskkKS-YnQh"
# # **Compile model**
# + id="uZy1r0MPXy0T"
# Categorical cross-entropy matches the one-hot labels produced by
# class_mode='categorical' in the generators above.
model.compile(loss='categorical_crossentropy',
              optimizer=tf.optimizers.Adam(),
              metrics=['accuracy'])
# + [markdown] id="VQ3uHdpE9iZh"
# # **Callback**
# + id="3kd5UN_r9JzN"
class Callback(tf.keras.callbacks.Callback):
    """Stop training early once training accuracy exceeds 97%."""

    def on_epoch_end(self, epoch, logs=None):
        # Avoid the mutable default argument and guard against a missing
        # 'accuracy' entry: logs.get('accuracy') can be None, and the
        # original `None > 0.97` comparison raised TypeError.
        logs = logs or {}
        accuracy = logs.get('accuracy')
        if accuracy is not None and accuracy > 0.97:
            print("\nAkurasi di atas 97%, training dihentikan")
            self.model.stop_training = True
callback = Callback()
# + [markdown] id="_jZQqUeX1LnD"
# # **Train Model**
# + colab={"base_uri": "https://localhost:8080/"} id="asHFtIbjYVIh" outputId="dd54575f-176d-4b05-edc2-863619615f35"
# Train for up to 20 epochs; the accuracy callback above may stop
# training earlier.  Steps are fixed constants rather than derived from
# the generator length (25 steps x batch 32 = 800 images per epoch).
hasil = model.fit(
    train_generator,
    steps_per_epoch=25,
    epochs=20,
    validation_data=val_generator,
    validation_steps=5,
    verbose=2,
    callbacks = [callback]
)
# + [markdown] id="-NniRIQwwiX1"
# # **Hasil Prediksi**
# + id="6BIfgJS4jmNp"
import matplotlib.pyplot as plt
# Pull the per-epoch metric histories recorded by model.fit.
akurasi = hasil.history['accuracy']
validasi_akurasi = hasil.history['val_accuracy']
loss = hasil.history['loss']
validasi_loss = hasil.history['val_loss']
# + [markdown] id="SbE_E0IFw5BB"
# Akurasi Model
# + id="Oyay8nuhu3Hj" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="f5554b87-8260-4b45-855c-48f539594265"
from matplotlib import rcParams
rcParams['figure.figsize']= 5 , 3
# Training vs validation accuracy per epoch.
plt.plot(akurasi, color='purple')
plt.plot(validasi_akurasi, color='red')
plt.title('Model Akurasi')
plt.ylabel('Akurasi')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# + [markdown] id="lV3fa1S8w_4T"
# Model Loss
# + id="uz_Xj9vju5XN" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="321a33ac-8ca5-4121-a7ce-846ed4ce3fba"
# Training vs validation loss per epoch.
plt.plot(loss, color='green')
plt.plot(validasi_loss, color='red')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
# + [markdown] id="E1cdKOf-wxHf"
# # **Test Model**
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 341} id="yma3eDa9llmq" outputId="3c95cfbd-daf1-4f71-bae6-5fecbab9ad25"
import numpy as np
from google.colab import files
from keras.preprocessing import image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# (removed the accidental duplicate matplotlib import lines)
# %matplotlib inline

# Let the user upload one or more images, then classify each one.
uploaded = files.upload()
for fn in uploaded.keys():
    # predicting images: load at the same 150x150 size the model expects
    path = fn
    img = image.load_img(path, target_size=(150,150))
    imgplot = plt.imshow(img)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    # model.predict returns one softmax row; argmax picks the class index
    prob = model.predict(images)
    classes = prob.argmax(axis=1)
    if classes == 0:
        print("Paper")
    elif classes == 1:
        print("Rock")
    elif classes == 2:
        print("Scissor")
| rockpaperscissors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # langages de script – Python
#
# ## Modules et packages
#
# ### M1 Ingénierie Multilingue – INaLCO
#
# <EMAIL>
# + [markdown] slideshow={"slide_type": "slide"}
# Les modules et les packages permettent d'ajouter des fonctionnalités à Python
#
# Un module est un fichier (```.py```) qui contient des fonctions et/ou des classes.
# <small>Et de la documentation bien sûr</small>
#
# Un package est un répertoire contenant des modules et des sous-répertoires.
#
# C'est aussi simple que ça. Évidemment en rentrant dans le détail c'est un peu plus compliqué.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Un module
# + slideshow={"slide_type": "-"}
# %%file operations.py
# -*- coding: utf-8 -*-
"""
Module for the lesson on modules.
Arithmetic operations.
"""
def addition(a, b):
    """ Well, an addition: a + b """
    return a + b
def soustraction(a, b):
    """ A subtraction: a - b """
    return a - b
# + [markdown] slideshow={"slide_type": "slide"}
# Pour l'utiliser on peut :
# * l'importer par son nom
# + slideshow={"slide_type": "-"}
import operations
operations.addition(2, 4)
# + [markdown] slideshow={"slide_type": "fragment"}
# * l'importer et modifier son nom
# + slideshow={"slide_type": "-"}
import operations as op
op.addition(2, 4)
# + [markdown] slideshow={"slide_type": "fragment"}
# * importer une partie du module
# + slideshow={"slide_type": "-"}
from operations import addition
addition(2, 4)
# + [markdown] slideshow={"slide_type": "fragment"}
# * importer l'intégralité du module
# + slideshow={"slide_type": "-"}
from operations import *
addition(2, 4)
soustraction(4, 2)
# + [markdown] slideshow={"slide_type": "slide"}
# En réalité seules les fonctions et/ou les classes ne commençant pas par '_' sont importées.
# + [markdown] slideshow={"slide_type": "fragment"}
# L'utilisation de `import *` n'est pas recommandée. Parce que, comme vous le savez « *explicit is better than implicit* ». Et en ajoutant les fonctions dans l'espace de nommage du script vous pouvez écraser des fonctions existantes.
# + [markdown] slideshow={"slide_type": "fragment"}
# Ajoutez une fonction `print` à votre module pour voir (attention un module n'est chargé qu'une fois, vous devrez relancer le kernel ou passer par la console).
# + [markdown] slideshow={"slide_type": "subslide"}
# Autre définition d'un module : c'est un objet de type ``module``.
# + slideshow={"slide_type": "-"}
import operations
type(operations)
# + [markdown] slideshow={"slide_type": "fragment"}
# ``import`` ajoute des attributs au module
# + slideshow={"slide_type": "-"}
import operations
print(f"name : {operations.__name__}")
print(f"file : {operations.__file__}")
print(f"doc : {operations.__doc__}")
# + [markdown] slideshow={"slide_type": "slide"}
# ## Un package
# + slideshow={"slide_type": "-"}
# ! tree operations_pack
# + [markdown] slideshow={"slide_type": "fragment"}
# Un package python peut contenir des modules, des répertoires et sous-répertoires, et bien souvent du non-python : de la doc html, des données pour les tests, etc…
# + [markdown] slideshow={"slide_type": "fragment"}
# Le répertoire principal et les répertoires contenant des modules python doivent contenir un fichier `__init__.py`
# + [markdown] slideshow={"slide_type": "fragment"}
# `__init__.py` peut être vide, contenir du code d'initialisation ou contenir la variable `__all__`
# + slideshow={"slide_type": "subslide"}
import operations_pack.simple
operations_pack.simple.addition(2, 4)
# + slideshow={"slide_type": "fragment"}
from operations_pack import simple
simple.soustraction(4, 2)
# + [markdown] slideshow={"slide_type": "fragment"}
# ``__all__`` dans ``__init__.py`` définit quels seront les modules qui seront importés avec ``import *``
#
# + slideshow={"slide_type": "-"}
from operations_pack.avance import *
multi.multiplication(2,4)
# + [markdown] slideshow={"slide_type": "slide"}
# # Pas de main en Python ?
#
# Vous trouverez fréquemment le test suivant dans les scripts Python :
#
# + slideshow={"slide_type": "-"}
if __name__ == '__main__':
    # Illustrative pseudocode: instruction1/instruction2 are placeholders,
    # not defined names -- this cell is not meant to be executed.
    instruction1
    instruction2
# + [markdown] slideshow={"slide_type": "-"}
# ou
# + slideshow={"slide_type": "-"}
def main():
    # Placeholder: the script's real work goes here (illustrative pseudocode).
    instruction
if __name__ == '__main__':
    main()
# + [markdown] slideshow={"slide_type": "subslide"}
# Cela évite que le code sous le test ne soit exécuté lors de l'import du script :
# __name__ est une variable créée automatiquement qui vaut __main__ si le script a été appelé en ligne de commande, le nom du script s'il a été importé.
# + [markdown] slideshow={"slide_type": "fragment"}
# Accessoirement cela permet d'organiser son code et de le rendre plus lisible
# Désormais je vous recommande vivement de l'inclure dans tous vos scripts
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Où sont les modules et les packages ?
# + [markdown] slideshow={"slide_type": "-"}
# Pour que ``import`` fonctionne il faut que les modules soient dans le PATH.
# + slideshow={"slide_type": "-"}
import sys
sys.path
# + [markdown] slideshow={"slide_type": "fragment"}
# ``sys.path`` est une liste, vous pouvez la modifier
# + slideshow={"slide_type": "-"}
sys.path.append("[...]") # le chemin vers le dossier operations_pack
sys.path
# + [markdown] slideshow={"slide_type": "slide"}
# ## Installer des modules et des packages
# + [markdown] slideshow={"slide_type": "-"}
# Dans les distributions Python récentes `pip` est installé, tant mieux.
# + [markdown] slideshow={"slide_type": "subslide"}
# Avec `pip` vous pouvez :
# * installer un module `pip install module` ou `pip install --user module`
# `pip` va trouver le module sur Pypi et l'installer au bon endroit s'il existe. Il installera les dépendances aussi.
# * désinstaller un module `pip uninstall module`
# * mettre à jour `pip install module --upgrade`
# * downgrader dans une version particulière `pip install module=0.9 --upgrade`
# * sauvegarder votre environnement de dév, la liste de vos modules `pip freeze > requirements.txt`
# Ce qui vous permettra de le réinstaller sur une autre machine `pip install -r requirements.txt`
# + [markdown] slideshow={"slide_type": "slide"}
# ## S'en sortir avec les versions
# + [markdown] slideshow={"slide_type": "-"}
# Python évolue au fil des versions, les packages aussi. Ça peut poser des problèmes quand vous voulez partager votre code ou même quand vous voulez utiliser un code qui a besoin d'une version particulière.
#
# Il existe un outil pour isoler les environnements de développement : ``virtualenv``
# ``virtualenv /path/mon_projet`` ou ``python3 -m venv /path/mon_projet`` va créer un dossier avec plein de trucs dedans, y compris un interpréteur python.
# Vous pouvez spécifier la version de python avec ``virtualenv /path/mon_projet -p /usr/bin/python3.6``
#
# + [markdown] slideshow={"slide_type": "slide"}
# Pour activer l'environnement : ``source /path/mon_projet/bin/activate`` (``/path/mon_projet/Scripts/activate.bat`` sous Windows (je crois))
# Pour en sortir : ``deactivate``
# + [markdown] slideshow={"slide_type": "fragment"}
# Quand vous travaillez dans un venv les modules que vous installerez avec pip seront isolés dans le venv et pas ailleurs.
# Si vous utilisez ``python`` ce sera la version de l'interpréteur du venv et les modules du venv.
# Avec cet outil on doit installer à chaque fois les modules désirés mais au moins on ne s'embrouille pas. Et vous pouvez communiquer un fichier ``requirements.txt`` à un collègue qui pourra reproduire le venv sur sa machine.
# + [markdown] slideshow={"slide_type": "fragment"}
# Il existe aussi ``pipenv``, un outil plus récent qui combine ``pip`` et ``virtualenv``.
| modules.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Задача 1
#
# Фильтр по квартирам. Есть информация о квартире, проверить следующие условия:
# * Цена меньше 3млн и 2 комнатная, но не в центре
# * В центре, не на первых этажах (1-3) и должен быть лифт
# * Без лифта и цена от 1млн до 2.5 млн
# +
# Sample flat used as input for the filtering exercise described above.
# Price of the flat (currency units)
price = 2_000_000
# Number of rooms
rooms = 1
# Location within the city ("center" or another district)
location = "center"
# Whether the building has an elevator
elevator = True
# Floor the flat is on
floor = 7
# +
# Пишите здесь свой код
# -
# # Задача 2
#
# * Вывести числа от 100 до 0, с шагом в 5
# +
# пишите здесь свой код
# -
# # Задача 3
# * Напечатать список простых чисел от 2 до 1000 - http://ru.math.wikia.com/wiki/%D0%9F%D1%80%D0%BE%D1%81%D1%82%D0%BE%D0%B5_%D1%87%D0%B8%D1%81%D0%BB%D0%BE
# +
# пишите здесь свой код
| module_001_python/lesson_002_operators/student_tasks/HomeWork.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 16204} colab_type="code" executionInfo={"elapsed": 7169, "status": "ok", "timestamp": 1547623008426, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-uEP16YICEZo/AAAAAAAAAAI/AAAAAAAAAd8/ugJ62_b60ic/s64/photo.jpg", "userId": "16313968429539863887"}, "user_tz": -330} id="3jgJRyqloroo" outputId="63d936b4-0862-4590-b57d-007ac645a925"
import requests
from pprint import pprint
locationUrlFromLatLong = "https://developers.zomato.com/api/v2.1/geocode?lat=28.5246&lon=77.2066"
header = {"User-agent": "curl/7.43.0", "Accept": "application/json", "user_key": "<KEY>"}
response = requests.get(locationUrlFromLatLong, headers=header)
pprint(response.json())
# + [markdown] colab_type="text" id="x1ptkDUCorov"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 936, "status": "ok", "timestamp": 1547623026836, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-uEP16YICEZo/AAAAAAAAAAI/AAAAAAAAAd8/ugJ62_b60ic/s64/photo.jpg", "userId": "16313968429539863887"}, "user_tz": -330} id="OoEi2K1dorox" outputId="40977276-0725-4c5d-f427-33de7f88e89f"
# Two sample (latitude, longitude) pairs in Delhi; print the Zomato geocode
# request URL for each one.
Delhi_geodict = {
    "28.7041": "77.1025",
    "28.5246": "77.2066",
}
for lat, lon in Delhi_geodict.items():
    url = "https://developers.zomato.com/api/v2.1/geocode?lat=" + lat + "&lon=" + lon
    print(url)
# + colab={} colab_type="code" id="B_UQaOrO6CEv"
# Test CODE
#response = requests.get("https://developers.zomato.com/api/v2.1/geocode?lat="+key+"&lon="+value, headers=header).json()
#normalized = json_normalize(response)
#df.append(response)
#pprint(response.json())
#headers = {'Accept': 'application/json', 'user-key': self.user_key}
# + colab={"base_uri": "https://localhost:8080/", "height": 3403} colab_type="code" executionInfo={"elapsed": 12078, "status": "ok", "timestamp": 1547631156109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-uEP16YICEZo/AAAAAAAAAAI/AAAAAAAAAd8/ugJ62_b60ic/s64/photo.jpg", "userId": "16313968429539863887"}, "user_tz": -330} id="pWJV3sVXoro1" outputId="2f2abdb3-1a0b-4daf-c0d0-35a8def82457"
# !pip install simplejson
import requests
import simplejson as json
header = {"User-agent": "curl/7.43.0", "Accept": "application/json", "user_key": "<KEY>"}
Delhi_geodict= {"28.7041":"77.1025","28.5246":"77.2066"}
for key,value in Delhi_geodict.items():
r = (requests.get("https://developers.zomato.com/api/v2.1/geocode?lat="+key+"&lon="+value, headers=header).content).decode("utf-8")
a = json.loads(r)
nearby_restaurants = []
for nearby_restaurant in a['nearby_restaurants']:
nearby_restaurants.append([nearby_restaurant['restaurant']['id'],
nearby_restaurant['restaurant']['name'],
nearby_restaurant['restaurant']['location']['country_id'],
nearby_restaurant['restaurant']['location']['city'],
nearby_restaurant['restaurant']['location']['address'],
nearby_restaurant['restaurant']['location']['locality'],
nearby_restaurant['restaurant']['location']['locality_verbose'],
nearby_restaurant['restaurant']['location']['longitude'],
nearby_restaurant['restaurant']['location']['latitude'],
nearby_restaurant['restaurant']['cuisines'],
nearby_restaurant['restaurant']['average_cost_for_two'],
nearby_restaurant['restaurant']['currency'],
nearby_restaurant['restaurant']['has_table_booking'],
nearby_restaurant['restaurant']['has_online_delivery'],
nearby_restaurant['restaurant']['is_delivering_now'],
nearby_restaurant['restaurant']['switch_to_order_menu'],
nearby_restaurant['restaurant']['price_range'],
nearby_restaurant['restaurant']['user_rating']['aggregate_rating'],
nearby_restaurant['restaurant']['user_rating']['rating_color'],
nearby_restaurant['restaurant']['user_rating']['rating_text'],
nearby_restaurant['restaurant']['user_rating']['votes']])
nearby_restaurants
# + colab={"base_uri": "https://localhost:8080/", "height": 1091} colab_type="code" executionInfo={"elapsed": 953, "status": "ok", "timestamp": 1547631163589, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-uEP16YICEZo/AAAAAAAAAAI/AAAAAAAAAd8/ugJ62_b60ic/s64/photo.jpg", "userId": "16313968429539863887"}, "user_tz": -330} id="KqYM-JQeoro6" outputId="0bcd6679-82c7-4a7a-af85-9af481c8b237"
import pandas as pd
dataset = pd.DataFrame(nearby_restaurants)
dataset.columns = ['Restaurant ID','Restaurant Name','Country Code','City','Address','Locality','Locality Verbose','Longitude','Latitude',
'Cuisines','Average Cost for two','Currency','Has Table booking','Has Online delivery','Is delivering now',
'Switch to order menu','Price range','Aggregate rating','Rating color','Rating text','Votes']
dataset
# + colab={} colab_type="code" id="tkhcUGXNvcEo"
base_url = "https://developers.zomato.com/api/v2.1/"
import ast, json
def get_nearby_restaurants(latitude, longitude):
    """Query the Zomato geocode endpoint for the given coordinates.

    Takes the latitude and longitude as inputs.
    Returns a dictionary mapping Restaurant IDs to their corresponding
    Zomato URLs.

    Raises ValueError('InvalidLatitudeOrLongitude') if either coordinate
    cannot be parsed as a number.
    """
    # Validate both coordinates are numeric before hitting the API.
    try:
        float(latitude)
        float(longitude)
    except ValueError:
        raise ValueError('InvalidLatitudeOrLongitude')

    headers = {'Accept': 'application/json', 'user-key': '874629c888aeff465f2b12f518379b20'}
    url = base_url + "geocode?lat=" + str(latitude) + "&lon=" + str(longitude)
    body = requests.get(url, headers=headers).content.decode("utf-8")
    payload = json.loads(body)

    # One entry per nearby restaurant: id -> Zomato page URL.
    return {entry['restaurant']['id']: entry['restaurant']['url']
            for entry in payload['nearby_restaurants']}
df1=get_nearby_restaurants(28.7041,77.1025)
# -
# + colab={"base_uri": "https://localhost:8080/", "height": 190} colab_type="code" executionInfo={"elapsed": 4907, "status": "ok", "timestamp": 1547609780405, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-uEP16YICEZo/AAAAAAAAAAI/AAAAAAAAAd8/ugJ62_b60ic/s64/photo.jpg", "userId": "16313968429539863887"}, "user_tz": -330} id="NGAqq58BxZyG" outputId="068ad197-abe4-44a1-8a13-a8fafff8cf24"
# !pip install zomatopy
# +
geodict1= {"18.5074":"73.8077","18.4669": "73.8265"}
#geodict2= {}
geodict3= {"18.4898":"73.8203","18.5156":"73.7819"}
#geodict4= {} #bavdhan
geodict5= {"18.5018":"73.8636"}#swargate
geoList=[geodict1,geodict3,geodict5]
# -
for i in geoList:
print(i)
# +
geodictPune={"18.5074" :"73.8077","18.4874" :"74.1334","18.4669" :"73.8265","18.4898" :"73.8203",
"18.4865" :"73.7968","18.5176" :"73.8417","18.4616" :"73.8505","18.5156" :"73.7819",
"18.5416" :"73.8024","18.5018" :"73.8636","18.5167" :"73.8562","18.5699" :"73.8506",
"18.5596" :"73.8171","18.4972" :"73.7960","18.6261" :"73.7390","18.5529" :"73.8796",
"18.4848" :"73.8860","18.5636" :"73.9326","18.5679" :"73.9143","18.6084" :"73.7856",
"18.5188" :"73.8303","18.5515" :"73.9348","18.5726" :"73.8782","18.5293" :"73.9100",
"18.5042" :"73.9014","18.5089" :"73.9260","18.4829" :"73.9017","18.5789" :"73.7707",
"18.5535" :"73.7547","19.1383" :"77.3210","18.4454" :"73.7801","18.4422" :"73.8096",
"18.7381" :"73.6389","18.4667" :"73.7804","18.5229" :"73.7610","18.4966" :"73.9416",
"18.4475" :"73.8232","18.4923" :"73.8547","18.5122" :"73.8860","18.5362" :"73.8940"}
geodictMumbai={"19.1136" :"72.8697","19.0607" :"72.8362","19.1465" :"72.9305","19.2372" :"72.8441",
"18.9718" :"72.8436","18.9067" :"72.8147","18.5255" :"73.8795","18.9477" :"72.8342",
"18.9703" :"72.8061","18.9127" :"72.8213","19.0328" :"72.8964","18.9322": "72.8264",
"19.0213" :"72.8424","19.4946" :"72.8604","19.0075": "72.8360","18.9572": "72.8197",
"19.1551" :"72.8679","18.9327" :"72.8316","19.1405": "72.8422","19.1106": "72.8326",
"19.1998" :"72.8426","19.0717" :"72.8341","19.0600": "72.8900","19.0269" :"72.8553",
"19.1802" :"72.8554","19.1726": "72.9425","15.9414": "77.4257","18.9256" :"72.8242",
"18.9561" :"72.8157","19.0022": "72.8416","19.0158": "72.8280","19.0949" :"72.8865",
"19.0843" :"72.8360","19.0390": "72.8619","19.0771": "72.9990","19.1351" :"72.8146",
"19.1031" :"72.8467","19.0800": "72.8988","19.0912": "72.9209","18.9746" :"72.8065",
"18.9872" :"72.8290"}
# Bangalore sample coordinates (lat -> lon). The original literal repeated
# several keys ("12.970900" twice, "13.184570" six times); Python keeps the
# first insertion position with the last value per key, so the deduplicated
# literal below evaluates to exactly the same 21-entry dict.
geodictBanglore = {
    "12.970900": "77.576300",
    "13.031359": "77.570240",
    "13.004200": "77.604600",
    "12.991170": "77.585500",
    "12.971270": "77.666940",
    "13.184570": "77.479280",
    "12.978680": "77.577130",
    "12.957980": "77.605600",
    "13.202540": "78.931260",
    "12.649710": "77.200370",
    "12.987940": "77.609590",
    "12.972180": "77.586730",
    "12.980940": "77.586110",
    "13.026440": "77.909550",
    "12.983590": "77.434850",
    "12.707950": "77.746020",
    "12.969290": "77.587760",
    "12.869560": "74.866860",
    "15.872160": "74.528570",
    "13.099670": "80.231290",
    "12.476620": "76.765110",
}
geodictDelhi={"28.8540":"77.0918","28.7535":"77.1948","28.7004":"77.2208","28.7193":"77.1736",
"28.7324":"77.1442","28.7192":"77.1007","28.8055":"77.0463","28.6823":"77.0349",
"28.6968":"77.0644","28.6959":"77.0805","28.6841":"77.0633","28.7495":"77.0565",
"28.7164":"77.1546","28.6818":"77.1285","28.6780":"77.1581","28.7002":"77.1638",
"28.7159":"77.1911","28.6570":"77.2122","28.6506":"77.2303","28.6486":"77.2340",
"28.6139":"77.2090","28.5821":"77.2485","28.5778":"77.2244","28.5335":"77.2109",
"28.5212":"77.1790","28.4959":"77.1848","28.4962":"77.2376","28.5374":"77.2597",
"28.5049":"77.2739","28.5040":"77.3018","28.5603":"77.2913","28.6046":"77.3068",
"28.6123":"77.3255","28.6200":"77.2924","28.6903":"77.2657","28.6890":"77.2815",
"28.7034":"77.2840","30.1994":"77.1456","28.6388":"77.0738","28.6213":"77.0613",
"28.5921":"77.0460","28.6094":"77.0543","28.6090":"76.9855","28.5349":"77.0558",
"28.5901":"77.0888","28.5961":"77.1587","28.6415":"77.1209","28.6249":"77.1109",
"28.6391":"77.0868","28.6219":"77.0878","28.6544":"77.1689","28.6623":"77.1411",
"28.6721":"77.1205"}
geodictGoa={"15.5994":"73.8390","15.496777":"73.827827","15.6002":"73.8125",
"15.5889":"73.9654","15.5959":"74.0594","15.7087":"73.8184",
"15.5723":"73.8184","15.3991":"74.0124","15.3874":"73.8154",
"15.2832":"73.9862","15.3841":"74.1181","14.9931":"74.0476",
"15.2302":"74.1504"}
geodictKolkata={"22.5477":"88.3553","22.5176":"88.3840","22.5159":"88.3651",
"22.4981":"88.3108","22.5867":"88.4171","22.6250":"88.4386",
"22.5577":"88.3867","22.5975":"88.3707","22.5609":"88.3541",
"22.5184":"88.3535","22.5170":"88.3658","22.5332":"88.3459",
"22.4940":"88.3707","22.6218":"88.4180","22.5437":"88.3549",
"22.5765":"88.4796"}
geodictIndore={"22.7533":"75.8937","22.7244":"75.8839","22.6928":"75.8684",
"22.6400":"75.8040","22.7182":"75.8749","22.7143":"75.8687",
"22.6709":"75.8275","22.7147":"75.8520","22.7198":"75.8571",
"22.7066":"75.8770","22.7217":"75.8628","22.6980":"75.8683",
"22.7368":"75.9086","22.6745":"75.8326"}
geodictHyderabad={"17.3984":"78.5583","17.4096":"78.5441","17.4265":"78.4511",
"17.4930":"78.4058","17.4615":"78.5004","17.5125":"78.3522",
"17.3807":"78.3245","17.542881":"78.481445","17.3930":"78.4730",
"17.3730":"78.5476","17.4375":"78.4483","17.4237":"78.4584",
"17.3990":"78.4153","17.4948":"78.3996","17.4447":"78.4664",
"17.4483":"78.3915","17.4622":"78.3568"}
geodictNorth={"29.3803":"79.4636","29.3780":"79.4662","29.3844":"79.4563",
"29.3919":"79.4605",
"27.0467":"88.2619","27.0615":"88.2765",
"32.732998":"74.864273"}
geodictVijaywada={"16.5028":"80.6396","16.5218":"80.6091","16.5424":"80.5800",
"16.5179":"80.6507","16.5179":"80.6507","16.5129":"80.7020",
"16.5140":"80.6285","16.5226":"80.6672","16.4779":"80.7020",
"16.5515":"80.6521","16.5209":"80.6829","16.5209":"80.6829"}
geodictVisakhapatnam={"17.690474":"83.231049","17.7107":"83.3135","17.7425":"83.3389",
"17.7307":"83.3087","17.6881":"83.2131","17.7262":"83.3155",
"17.7409":"83.2493","17.7409":"83.2493","17.7447":"83.2319",
"17.7358":"83.2733","17.7384":"83.3015","17.9075":"83.4270",
"17.7742":"83.2319","17.8059":"83.2131"}
geodictNashik={"20.0059":"73.7934","19.997454":"73.789803","19.9469":"73.7654"}
geodictNagpur={"21.1397":"79.0631","21.1477":"79.0843","21.1358":"79.0765",
"21.1313":"79.0800","21.1500":"79.1376","21.1491":"79.0550",
"21.1557":"79.0942","21.1821":"79.0860","21.1856":"79.0805",
"21.146633":"79.088860"}
geodictAurangabad={"19.8812":"75.3820","19.8757":"75.3442","19.901054":"75.352478",
"19.839911":"75.236237","19.9298":"75.3536"}
geoList=[geodictPune,geodictMumbai,geodictBanglore,geodictDelhi,geodictGoa,geodictKolkata,
geodictIndore,geodictHyderabad,geodictNorth,geodictVijaywada,geodictVisakhapatnam,
geodictNashik,geodictNagpur,geodictAurangabad]
# +
import requests
import simplejson as json

# Request headers for the Zomato geocode endpoint (includes the API key).
header = {"User-agent": "curl/7.43.0", "Accept": "application/json", "user_key": "5017afc6ac73011c60c8d23be5996a03"}

# Field names pulled from each restaurant record, in the exact order that the
# DataFrame column names are assigned further down.
_LOCATION_KEYS = ['country_id', 'city', 'address', 'locality', 'locality_verbose',
                  'longitude', 'latitude']
_TOP_LEVEL_KEYS = ['cuisines', 'average_cost_for_two', 'currency', 'has_table_booking',
                   'has_online_delivery', 'is_delivering_now', 'switch_to_order_menu',
                   'price_range']
_RATING_KEYS = ['aggregate_rating', 'rating_color', 'rating_text', 'votes']

nearby_restaurants = []
for geo in geoList:
    for lat, lon in geo.items():
        raw = (requests.get("https://developers.zomato.com/api/v2.1/geocode?lat=" + lat + "&lon=" + lon,
                            headers=header).content).decode("utf-8")
        payload = json.loads(raw)
        for entry in payload['nearby_restaurants']:
            rest = entry['restaurant']
            row = [rest['id'], rest['name']]
            row += [rest['location'][k] for k in _LOCATION_KEYS]
            row += [rest[k] for k in _TOP_LEVEL_KEYS]
            row += [rest['user_rating'][k] for k in _RATING_KEYS]
            nearby_restaurants.append(row)
nearby_restaurants
# -
import pandas as pd

# Column names for the 21 fields, in the order they were appended above.
_names = ['Restaurant ID', 'Restaurant Name', 'Country Code', 'City', 'Address',
          'Locality', 'Locality Verbose', 'Longitude', 'Latitude', 'Cuisines',
          'Average Cost for two', 'Currency', 'Has Table booking',
          'Has Online delivery', 'Is delivering now', 'Switch to order menu',
          'Price range', 'Aggregate rating', 'Rating color', 'Rating text', 'Votes']
dataset = pd.DataFrame(nearby_restaurants)
dataset.columns = _names
dataset
# +
#Combining dataframes
#data=[datasetSinhagad,datasetKothrud,datasetKarvenagar]
#r=pd.concat(data)
# -
dataset.to_csv("/home/pratik/Desktop/ZomatoDataSet.csv",index=False)
dataset.shape
# Total number of (lat, lon) entries across every city dict. The original
# accumulated into a variable literally named `sum`, shadowing the builtin;
# use the builtin sum() over dict lengths instead (len(d) counts its keys).
total_locations = sum(len(d) for d in geoList)
print(total_locations)
| 04-Zomato/Data_Ingestion_Generation1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.0 64-bit
# metadata:
# interpreter:
# hash: cf85b414d3663472de89104473c842eaab37d7b845999caf56a47ccda76ea2f8
# name: python3
# ---
# ### 1. Se pide:
#
# A partir del archivo "USA_Housing.csv", realizar la regresión no lineal entre cada columna numérica con la columna 'price' utilizando:
#
# - Regresión polinómica no lineal con grados del 2 al 10. ¿El score se estanca a partir de cierto grado?
# - SVR
# +
import pandas as pd
df = pd.read_csv("data1/USA_Housing.csv")
df
# -
df.info()
# ### Regresión polinómica no lineal
# +
# Import function to create training and test set splits
from sklearn.model_selection import train_test_split
# Import function to automatically create polynomial features!
from sklearn.preprocessing import PolynomialFeatures
# Import Linear Regression
from sklearn.linear_model import LinearRegression
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error
import numpy as np
# +
# For each numeric predictor column, sweep polynomial degrees 2..10, fit a
# linear regression on the expanded features and plot train/test R^2 scores.
y = df['Price'].values.reshape(-1,1)
degrees = list(range(2, 11))
for col in df.columns[:5]:
    X = df[col].values.reshape(-1,1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    train_score_l = []
    test_score_l = []
    for degree in degrees:
        poly = PolynomialFeatures(degree=degree)
        X_poly = poly.fit_transform(X_train)
        lin_reg_model = LinearRegression()
        lin_reg_model.fit(X_poly, y_train)
        train_score_l.append(lin_reg_model.score(X_poly, y_train))
        # BUG FIX: use transform() on the held-out split; calling
        # fit_transform() refits the expander on test data (same numbers for
        # PolynomialFeatures, but the wrong pattern and misleading).
        X_test_poly = poly.transform(X_test)
        test_score_l.append(lin_reg_model.score(X_test_poly, y_test))
    print(f'Column : {col}')
    # BUG FIX: list.index() is 0-based while degrees start at 2, so the
    # original printed a degree two lower than the real one; map back via
    # degrees[i].
    best_train = max(train_score_l)
    best_test = max(test_score_l)
    print('Highest training score was -->', best_train, 'for degree :', degrees[train_score_l.index(best_train)])
    print('Highest test score was -->', best_test, 'for degree :', degrees[test_score_l.index(best_test)])
    plt.plot(degrees, train_score_l, color='r')
    plt.plot(degrees, test_score_l, color='y')
    plt.legend(['Train line', 'Test line'])
    plt.title('{}'.format(col))
    plt.xlabel('Degree')  # typo fix: was 'Degreee'
    plt.ylabel('Score')
    plt.show()
    print('-------------------', '\n', '-------------------')
# -
# Dependiendo de las columnas el score se estanca o incluso decrece
# +
# Same sweep as above, but fitting one polynomial model on ALL numeric
# predictor columns at once instead of a single column.
y = df['Price'].values.reshape(-1,1)
X = df.loc[:,'Avg. Area Income': 'Area Population']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
degrees = list(range(2, 11))
train_score_l = []
test_score_l = []
for degree in degrees:
    poly = PolynomialFeatures(degree=degree)
    X_poly = poly.fit_transform(X_train)
    lin_reg_model = LinearRegression()
    lin_reg_model.fit(X_poly, y_train)
    train_score_l.append(lin_reg_model.score(X_poly, y_train))
    # BUG FIX: only transform() the held-out split; do not refit the expander.
    X_test_poly = poly.transform(X_test)
    test_score_l.append(lin_reg_model.score(X_test_poly, y_test))
# BUG FIX: list.index() is 0-based while degrees start at 2, so the original
# reported a degree two lower than the real one; map back via degrees[i].
best_train = max(train_score_l)
best_test = max(test_score_l)
print('Highest training score was -->', best_train, 'for degree :', degrees[train_score_l.index(best_train)])
print('Highest test score was -->', best_test, 'for degree :', degrees[test_score_l.index(best_test)])
plt.plot(degrees, train_score_l, color='r')
plt.plot(degrees, test_score_l, color='y')
plt.legend(['Train line', 'Test line'])
plt.title('All numeric Columns')
plt.xlabel('Degree')  # typo fix: was 'Degreee'
plt.ylabel('Score')
plt.show()
# -
# When considering all columns the score seems to be pretty much constant until it reaches a polynomial degree of 10, when it suddenly drops to negative numbers.
# +
# SVR
from sklearn.svm import SVR
y = df['Price'].values.reshape(-1,1)
train_score_d = {}
test_score_d = {}
for col in df.columns[:5]:
X = df[col].values.reshape(-1,1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
svr_poly = SVR(kernel='poly', C=100)
svr_poly.fit(X_train, y_train)
train_score_d[f'{col}'] = svr_poly.score(X_train, y_train)
test_score_d[f'{col}'] = svr_poly.score(X_test, y_test)
print('Train scores --->', train_score_d)
print('Test scores --->', test_score_d)
# +
y = df['Price'].values.reshape(-1,1)
X = df.loc[:,'Avg. Area Income': 'Area Population']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
svr_poly = SVR(kernel='poly', C=100)
svr_poly.fit(X_train, y_train)
print('Train score =', svr_poly.score(X_train, y_train))
print('Test score =', svr_poly.score(X_test, y_test))
# -
# ### 2. Se pide:
#
# En el anterior dataset, la columna "Avg. Area Number of Bedrooms" está en un rango numérico limitado:
#
# 1. Cambia todos los datos para que sean enteros redondeando hacia el entero más cercano. Si es .5 exacto, se redondea hacia abajo.
#
# 2. Cuando lo hayas hecho, tendrás una columna con números enteros en un rango limitado. Ahora trata a esa columna como una serie de valores discretos (classes, labels).
#
# 3. Usando las demás columnas numéricas y una a una, utiliza SVM (SVC) para clasificar esa columna modificada.
#
# 4. Usando todas las demás columnas a la vez, utiliza SVM para clasificar esa columna modificada. Es decir, "Avg. Area Income" con nuestro target, después "Avg. Area House Age", ... ¿hace aumentar el score? ¿por qué?
#
# 5. ¿Qué columna tiene mejor correlación con "Avg. Area Number of Bedrooms" una vez modificada? ¿qué columna da mejor score para nuestro target?
#
# --------------------------
#
# ACLARACIÓN del apartado 2 del ejercicio 2:
#
# - El punto 3, se espera que tu X sea una sola columna e y también, 'Avg. Area Number of Bedrooms'.
#
# - El punto 4, espera que tu X sean todas las columnas que cumplen con las características especificadas e y la columna 'Avg. Area Number of Bedrooms'
import math

# Round each bedroom count to the nearest integer. The exercise requires an
# exact .5 to round DOWN, but built-in round() uses banker's rounding
# (round(3.5) == 4 while round(2.5) == 2). ceil(x - 0.5) rounds halves down
# for the positive values in this column and matches the spec.
df['Rounded numb. bedrooms'] = np.array([math.ceil(x - 0.5) for x in df['Avg. Area Number of Bedrooms'].values])
df.columns
# +
# Classification by column
from sklearn.svm import SVC
Xcols = df[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms', 'Area Population', 'Price']]
y = df['Rounded numb. bedrooms'].values.reshape(-1,1)
train_score_d = {}
test_score_d = {}
for col in Xcols.columns[:5]:
X = df[col].values.reshape(-1,1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
svr_poly = SVC(C=1000, kernel='rbf', gamma=10)
svr_poly.fit(X_train, y_train)
train_score_d[f'{col}'] = svr_poly.score(X_train, y_train)
test_score_d[f'{col}'] = svr_poly.score(X_test, y_test)
print('Train scores --->', train_score_d)
print('Test scores --->', test_score_d)
# -
# No tiene sentido hablar de correlación cuando se tienen en cuenta valores categóricos. La columna que mejor clasificación ha hecho de Avg. Area Number of Bedrooms modificado es la Avg. Area House Age.
# +
# Classification with all numeric columns
# Classify the rounded bedroom count from all five numeric predictors at once
# with an RBF-kernel SVC and report train/test accuracy.
feature_cols = ['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms', 'Area Population', 'Price']
y = df['Rounded numb. bedrooms'].values.reshape(-1,1)
Xcols = df[feature_cols].values
X_train, X_test, y_train, y_test = train_test_split(Xcols, y, test_size=0.2)
svr_poly = SVC(C=1000, kernel='rbf', gamma=10)
svr_poly.fit(X_train, y_train)
print('Train score =', svr_poly.score(X_train, y_train))
print('Test score =', svr_poly.score(X_test, y_test))
# -
| 2_Ejercicios/Modulo2/4.svm_and_nonLinearRegression/2.houses_non_linear.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/concaption/PepsiCo-Lab-Potato-Quality-Control/blob/main/Potato_Starter_Code_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Yg0adGuaY-80"
import numpy as np
import datetime
import tensorflow as tf
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras import Model
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
# + id="0Oh3IXZwaC1y"
# -- Global Variables --
# Dataset locations on the mounted Google Drive.
TRAIN_PATH = '/content/drive/MyDrive/Pepsico RnD Potato Lab Dataset/Train'
TEST_PATH = '/content/drive/MyDrive/Pepsico RnD Potato Lab Dataset/Test'
BATCH_SIZE = 32           # images per generator batch
COLOR_MODE = 'rgb'        # generators yield 3-channel images
TARGET_SIZE = (255, 255)  # (height, width) every image is resized to
# Channel dimension appended to TARGET_SIZE. NOTE(review): the name suggests
# grayscale but the value 3 means RGB; kept as-is for compatibility.
GRAY_SCALL = (3,)
INPUT_SIZE = TARGET_SIZE + GRAY_SCALL  # (255, 255, 3), fed to VGG19 below
EPOCHS = 10
# Class names; assumed to match flow_from_directory's alphabetical class
# ordering — TODO confirm against the directory names.
CLASSES = ['Defective','Non-Defective']
# + id="VKeMH4hivKoj"
# -- Data Normalization --
data_generator = ImageDataGenerator(samplewise_center=True, #making sure that each image has a mean of 0
samplewise_std_normalization=True, #and standard deviation 1
horizontal_flip=True, #Randomly flip inputs horizontally
validation_split=0.3,
)
# + colab={"base_uri": "https://localhost:8080/"} id="qpKwI-gieHGs" outputId="035bc2b3-7008-4237-ce59-d97c36d55d93"
# -- Data iterators --
train_data = data_generator.flow_from_directory(directory=TRAIN_PATH,
target_size=TARGET_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
color_mode=COLOR_MODE,
subset='training',
shuffle=True)
validation_data = data_generator.flow_from_directory(directory=TRAIN_PATH,
target_size=TARGET_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
color_mode=COLOR_MODE,
subset='validation',
shuffle=True)
test_data = data_generator.flow_from_directory(directory=TEST_PATH,
target_size=TARGET_SIZE,
batch_size=BATCH_SIZE,
class_mode='categorical',
color_mode=COLOR_MODE,
shuffle=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="O8T1o3LFndP_" outputId="286cae72-ef0c-4fa1-a16b-cc55f331708c"
# -- plot random batch --
images, labels = train_data.next()
classes = np.asarray(CLASSES)
_, axs = plt.subplots(4, 8, figsize=(12,12))
axs = axs.flatten()
for img, label, ax in zip(images, labels, axs):
ax.imshow(img)
ax.axis('off')
label = label.astype(int)
ax.set_title(classes[label == 1])
plt.show()
# + id="tqj-MHY92Mtw"
def my_model():
    """Build a transfer-learning classifier: a frozen ImageNet VGG19 base
    topped with a new fully-connected head ending in a 2-way softmax
    (defective / non-defective).

    Prints the model summary as a side effect and returns the uncompiled
    Keras Model.
    """
    base = VGG19(weights='imagenet', include_top=False, input_shape=INPUT_SIZE)
    base.trainable = False  # keep ImageNet features fixed; train only the head

    # New classification head stacked on the last convolutional output.
    x = Flatten()(base.layers[-1].output)
    x = Dense(units=4096, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(units=1024, activation='relu')(x)
    predictions = Dense(2, activation='softmax')(x)

    model = Model(inputs=base.input, outputs=predictions)
    model.summary()
    return model
# + colab={"base_uri": "https://localhost:8080/"} id="gpCiq_PhA7cq" outputId="4f9333ab-4f07-4cce-b140-a6d430a5af30"
model = my_model()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="xKWixX7OWKT3" outputId="0a0860e7-bd69-4376-9a4a-7fbb07deced8"
tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=True, show_dtype=False,
show_layer_names=True, rankdir='T', expand_nested=False, dpi=96
)
# + id="uS8JGgTqDFBc"
# -- Define optimizer and loss --
opt = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
loss = tf.keras.losses.CategoricalCrossentropy()
# + id="J49bvHSBDLdx"
# -- Compile model --
model.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
# + id="aUJ4I3fNDNk_"
# -- Callbacks --
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath='my_model.h5',
monitor='accuracy', verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
save_freq='epoch')
earlystoping = tf.keras.callbacks.EarlyStopping(monitor='accuracy',
min_delta=0,
patience=5, #Number of epochs with no improvement after which training will be stopped.
verbose=1,
mode='auto')
log_dir = './logs/fit/' + datetime.datetime.now().strftime('%m.%d.%Y--%H-%M-%S')
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
histogram_freq=1,
write_graph=True,
write_images=False,
update_freq='epoch')
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="txuAFiFeDUWQ" outputId="2df849ad-2a10-4559-ae89-7df1115abbe9"
# -- Train model --
history = model.fit(x=train_data,
epochs=EPOCHS,
steps_per_epoch=len(train_data),
verbose=1,
validation_data=validation_data,
validation_steps=1,
callbacks=[checkpoint, earlystoping, tensorboard])
# -- Save model --
model.save('my_model.h5')
# + colab={"background_save": true} id="T6zfLD1hDWSS"
def learning_curves(history):
    """Plot training/validation accuracy (top panel) and loss (bottom panel)
    from a Keras History object."""
    metrics = history.history
    plt.figure(figsize=(10, 8))

    plt.subplot(2, 1, 1)
    plt.plot(metrics['accuracy'], label='Training Accuracy')
    plt.plot(metrics['val_accuracy'], label='Validation Accuracy')
    plt.legend(loc='lower right')
    plt.ylabel('Accuracy')
    plt.title('Training and Validation Accuracy')

    plt.subplot(2, 1, 2)
    plt.plot(metrics['loss'], label='Training Loss')
    plt.plot(metrics['val_loss'], label='Validation Loss')
    plt.legend(loc='upper right')
    plt.ylabel('Loss - Cross Entropy')
    plt.xlabel('epoch')
    plt.ylim([0,1.6])
    plt.title('Training and Validation Loss')
    plt.show()
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="POedw4t_Daup" outputId="ffc4be09-c38c-49ad-c92a-c37a4c6ea18b"
# -- Plot learning curves --
learning_curves(history)
# + id="teTuRZ3WDcZz"
# -- Evaluate the model on the test data --
loss, accuracy = model.evaluate(x=test_data)
print("test loss: ", loss, ", test acc: " , 100*accuracy, "%")
# + id="5ow6LebBDd-W"
def defective_or_not(img_path):
    """Classify the image at `img_path` and print the predicted class label.

    Loads the trained model from 'my_model.h5', resizes the image to the
    model's expected spatial input size, and prints the highest-scoring class.
    """
    # Bug fix: Keras `target_size` expects (height, width); the original
    # passed a 3-tuple (255, 255, 3) that only worked by accident.
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=(255, 255))
    img = np.asarray(img)
    img = np.expand_dims(img, axis=0)  # add batch dimension: (1, H, W, C)
    model = tf.keras.models.load_model('my_model.h5')
    output = model.predict(img)
    # Bug fix: model.predict returns float scores, so `output[0] == 1` almost
    # never matches. Select the class with the maximum predicted score instead.
    print(classes[np.argmax(output[0])])
# ----- source notebook boundary: Potato_Starter_Code_.ipynb -----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This script will compile shape/texture measure for later voxelwise analyses.
# +
import os
import pandas as pd
import numpy as np
import readline
import rpy2
import rpy2.robjects as robjects
r = robjects.r
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
from sklearn.preprocessing import scale
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from tqdm import tqdm
from menpo.shape import PointCloud
# -
# ## Load
#
# Load the preprocessing steps
# Preprocessed shape stimuli (npz archive of intermediate "stepNN_*" arrays).
idir = "/data1/famface01/analysis/encoding/12_Features/identity_shape_analysis"
ifile = '%s/a01_preproc_shape_stim.npz' % idir
data = np.load(ifile)
# Load the texture PCA comps
# +
# Texture PCA scores (raw and symmetrized) plus their eigenvalue spectra.
idir = "/data1/famface01/analysis/encoding/12_Features/identity_pca"
texture_scores = pd.read_csv(os.path.join(idir, "texture_pca_scores.csv"))
texture_sym_scores = pd.read_csv(os.path.join(idir, "texture_sym_pca_scores.csv"))
t_eigs = np.loadtxt(os.path.join(idir, "texture_pca_eigs.txt"))
ts_eigs = np.loadtxt(os.path.join(idir, "texture_sym_pca_eigs.txt"))
# -
# Load the shape PCA comps
# +
# Shape PCA scores (raw and symmetrized) plus their eigenvalue spectra.
idir = "/data1/famface01/analysis/encoding/12_Features/identity_pca"
shape_scores = pd.read_csv(os.path.join(idir, "shape_pca_scores.csv"))
shape_sym_scores = pd.read_csv(os.path.join(idir, "shape_sym_pca_scores.csv"))
s_eigs = np.loadtxt(os.path.join(idir, "shape_pca_eigs.txt"))
ss_eigs = np.loadtxt(os.path.join(idir, "shape_sym_pca_eigs.txt"))
# -
# -
# Remove the video names in the first column.
# Sanity-check that every score table is row-aligned with the video list
# before dropping the name column. (`.ix` is legacy pandas indexing — this
# notebook runs on an old Python 2 / pandas stack.)
vnames = data['vidnames']
print(np.alltrue(vnames == texture_sym_scores.ix[:,0]))
print(np.alltrue(vnames == shape_scores.ix[:,0]))
print(np.alltrue(vnames == shape_sym_scores.ix[:,0]))
# +
# Drop the leading name column, keeping only the numeric PCA scores.
texture_scores = texture_scores.ix[:,1:]
texture_sym_scores = texture_sym_scores.ix[:,1:]
shape_scores = shape_scores.ix[:,1:]
shape_sym_scores = shape_sym_scores.ix[:,1:]
# -
# Quick interactive peeks at the loaded data (notebook cell outputs).
pd.DataFrame(data['unfam_df']).head()
t_eigs[:,1].round(2)[:10]
sorted(data.keys())
# +
# Load one subject's ROI/timing data from an R .rda file via rpy2 and pull
# the trial timing variables into numpy/pandas.
subj = "sub02"
infile = "/data1/famface01/analysis/encoding/ShapeAnalysis/data/roi_n_more_%s.rda" % subj
r.load(infile)
# Variables
onsets = np.array(r.dat.rx2('basics').rx2('timing').rx2('onset'))
questions = np.array(r['as.character'](r.dat.rx2('basics').rx2('timing').rx2('question')))
runs = np.array(r.dat.rx2('basics').rx2('timing').rx2('run'))
uruns = np.unique(runs)
timing = pandas2ri.ri2py(r.dat.rx2('basics').rx2('timing'))
# -
print(r.names(r.dat.rx2('basics')))
#print(r.dat.rx2('basics').rx2('frame.timing'))
pandas2ri.ri2py_dataframe(r.dat.rx2('basics').rx2('frame.timing')).head()
# ## Measures
#
# 1. Scale
# 2. Pose Scores (4)
# 3. Mouth Scores (2)
#
sorted(data.keys())
# +
def eucdist(mat1, mat2):
    """Mean row-wise Euclidean distance between two point matrices."""
    diff = mat1 - mat2
    per_point = np.sqrt((diff ** 2).sum(axis=1))
    return per_point.mean()
def shape_framewise_diff(frame_dat):
    """Gets the framewise displacement for each frame to the next within a given video"""
    # frame_dat is (points, dims, frames); compute, for each consecutive
    # frame pair, the mean point-wise Euclidean distance. The per-pair
    # distance (formerly delegated to `eucdist`) is vectorized inline.
    n_frames = frame_dat.shape[2]
    deltas = frame_dat[:, :, 1:n_frames] - frame_dat[:, :, 0:n_frames - 1]
    return np.sqrt((deltas ** 2).sum(axis=1)).mean(axis=0)
# -
# ### Absolute Mean Measures
#
# We average the values across the time (2s) for each video.
# We loop through the starting indices to average
# Each video contributes 8 frames; these are the start index of each video's
# frame block (built with R's seq via rpy2). start_inds1 is offset by 1 —
# presumably because the frame-wise arrays drop the first frame; confirm.
start_inds0 = np.array(r.seq(0,data['step01_scale'].shape[0]-1,8))
start_inds1 = np.array(r.seq(1,data['step05_frame_shapes'].shape[2]-1,8))
# #### 1: Scale
# +
# Average the per-frame scale over each video's 8 frames.
print data['step01_scale'].shape
r01_scale = []
for si in tqdm(start_inds0):
    ret = data['step01_scale'][si:(si+8)].mean()
    r01_scale.append(ret)
r01_scale = np.array(r01_scale)
# -
r01_scale[:10]
# #### 2: Pose
# +
# Per-video mean of the 4 pose scores (averaged over the 8 frames).
print data['step02_pose_scores'].shape
r02_pose_scores = []
for si in tqdm(start_inds1):
    ret = data['step02_pose_scores'][si:(si+8)].mean(axis=0)
    r02_pose_scores.append(ret)
r02_pose_scores = np.array(r02_pose_scores)
# +
# Per-video mean of the pose reconstruction MSE.
print data['step02_pose_mse'].shape
r02_pose_mse = []
for si in tqdm(start_inds0):
    ret = data['step02_pose_mse'][si:(si+8)].mean(axis=0)
    r02_pose_mse.append(ret)
r02_pose_mse = np.array(r02_pose_mse)
# -
print r02_pose_scores[:10]
print r02_pose_mse[:10]
# #### 3: Mouth
# +
# Per-video mean of the 2 mouth scores.
print data['step04_mouth'].shape
r03_mouth_scores = []
for si in tqdm(start_inds1):
    ret = data['step04_mouth'][si:(si+8)].mean(axis=0)
    r03_mouth_scores.append(ret)
r03_mouth_scores = np.array(r03_mouth_scores)
# -
# #### 4: Left Over
#
# We take the difference of each frame from the mean frame. This reflects anything that we haven't captured with our pose or mouth removal business.
# +
# For each video: mean distance of its 8 frames to that video's mean shape.
print data['step05_frame_shapes'].shape
print data['step06_mean_shapes'].shape
r04_mean_fds = []
for i,si in tqdm(enumerate(start_inds1), total=len(start_inds1)):
    frame_shapes = data['step05_frame_shapes'][:,:,si:(si+8)]
    fds = []
    for j in range(8):
        ret = eucdist(data['step06_mean_shapes'][i], frame_shapes[:,:,j])
        fds.append(ret)
    r04_mean_fds.append(np.array(fds).mean())
r04_mean_fds = np.array(r04_mean_fds)
# -
r04_mean_fds[:10]
# #### 5: Face Asymmetry
#
# Compare the non-mirror to the mirror face.
# +
# 5: Face asymmetry — distance between each mean shape and its mirrored
# (symmetrized) counterpart.
print data['step06_mean_shapes'].shape
r05_asym = []
for i in tqdm(range(data['step06_mean_shapes'].shape[0])):
    asym = eucdist(data['step06_mean_shapes'][i], data['step07_mean_sym_shapes'][i])
    r05_asym.append(asym)
r05_asym = np.array(r05_asym)
# -
pd.Series(r05_asym).plot()
# #### 6: Mean Sym Face
#
# Compare to reference.
# 6: Distance of each symmetrized mean shape to the reference shape.
r06_mean_face = []
for i in tqdm(range(data['step06_mean_shapes'].shape[0])):
    d = eucdist(data['step07_mean_sym_shapes'][i], data['mirror_ref_shape'])
    r06_mean_face.append(d)
r06_mean_face = np.array(r06_mean_face)
pd.Series(r06_mean_face).plot()
# #### 7: PCA Comps
# 7: Distance from the PCA-space origin (first 200 texture / 50 shape comps).
from sklearn.metrics.pairwise import euclidean_distances
r07_pca_texture = euclidean_distances(texture_sym_scores.ix[:,:200], np.zeros((1,200)))[:,0]
r07_pca_shape = euclidean_distances(shape_sym_scores.ix[:,:50], np.zeros((1,50)))[:,0]
# Note that since pca shape is very related to the mean shape, we won't use it (or maybe could use that instead of mean face)
print np.corrcoef(r07_pca_shape, r06_mean_face)
print np.corrcoef(r07_pca_texture, r06_mean_face)
# #### Save Mean Measures
# Assemble all absolute per-video measures (13 columns) and save to csv.
# NOTE(review): repeated column labels ("pose_scores" x4, "mouth_scores" x2)
# produce duplicate DataFrame column names — kept as-is for downstream R code.
rs = np.hstack((r01_scale.reshape(-1,1), r02_pose_scores, r02_pose_mse.reshape(-1,1), r03_mouth_scores,
                r04_mean_fds.reshape(-1,1), r05_asym.reshape(-1,1), r06_mean_face.reshape(-1,1),
                r07_pca_texture.reshape(-1,1), r07_pca_shape.reshape(-1,1)))
cols = ["scale"] + ["pose_scores"]*4 + ["pose_mse"] + ["mouth_scores"]*2 + ["mean_fds"] + ["asym"] + ["mean_face"] + ["pca_texture"] + ["pca_shape"]
cols = np.array(cols).flatten()
rs = pd.DataFrame(rs, columns=cols)
rs['vids'] = vnames
odir = "/data1/famface01/command/misc/face_representations/300_task_activity/100_face_deviations_unfam"
rs.to_csv('%s/measures/z_mean_vid_vals.csv' % odir)
rs.ix[:,:].head()
# ### Relative Measures (within vid)
# #### 1: Scale
# +
# Relative (within-video) measures: mean frame-to-frame difference of each
# quantity over the 8 frames of every video.
print data['step01_scale'].shape
d01_scale = []
for si in tqdm(start_inds0):
    vdat = data['step01_scale'][si:(si+8)]
    ret = np.diff(vdat).mean()
    d01_scale.append(ret)
d01_scale = np.array(d01_scale)
# -
d01_scale[:10]
# #### 2: Pose
# +
print data['step02_pose_scores'].shape
d02_pose_scores = []
for si in tqdm(start_inds1):
    vdat = data['step02_pose_scores'][si:(si+8)]
    ret = np.diff(vdat, axis=0).mean(axis=0)
    d02_pose_scores.append(ret)
d02_pose_scores = np.array(d02_pose_scores)
# +
print data['step02_pose_mse'].shape
d02_pose_mse = []
for si in tqdm(start_inds0):
    vdat = data['step02_pose_mse'][si:(si+8)]
    ret = np.diff(vdat).mean()
    d02_pose_mse.append(ret)
d02_pose_mse = np.array(d02_pose_mse)
# -
# NOTE(review): these print the absolute (r02_*) measures, not the relative
# (d02_*) ones just computed — presumably a copy-paste slip; verify.
print r02_pose_scores[:10]
print r02_pose_mse[:10]
# #### 3: Mouth
# +
print data['step04_mouth'].shape
d03_mouth_scores = []
for si in tqdm(start_inds1):
    vdat = data['step04_mouth'][si:(si+8)]
    ret = np.diff(vdat, axis=0).mean(axis=0)
    d03_mouth_scores.append(ret)
d03_mouth_scores = np.array(d03_mouth_scores)
# -
# #### 4: Left Over
# +
# Mean framewise displacement within each video (uses shape_framewise_diff).
print data['step05_frame_shapes'].shape
print data['step06_mean_shapes'].shape
d04_mean_fds = []
for i,si in tqdm(enumerate(start_inds1), total=len(start_inds1)):
    frame_shapes = data['step05_frame_shapes'][:,:,si:(si+8)]
    fds = shape_framewise_diff(frame_shapes)
    d04_mean_fds.append(np.array(fds).mean())
d04_mean_fds = np.array(d04_mean_fds)
# -
# -
# ### Save Relative Measures
# Assemble the relative measures (9 columns) and save; then inspect the
# correlation structure (within and between absolute/relative tables) via R.
ds = np.hstack((d01_scale.reshape(-1,1), d02_pose_scores, d02_pose_mse.reshape(-1,1),
                d03_mouth_scores, d04_mean_fds.reshape(-1,1)))
cols = ["rel_scale"] + ["rel_pose_scores"]*4 + ["rel_pose_mse"] + ["rel_mouth_scores"]*2 + ["rel_mean_fds"]
cols = np.array(cols).flatten()
ds = pd.DataFrame(ds, columns=cols)
ds['vids'] = vnames
odir = "/data1/famface01/command/misc/face_representations/300_task_activity/100_face_deviations_unfam"
ds.to_csv('%s/measures/z_mean_rel_vid_vals.csv' % odir)
print r.round(r.cor(rs.ix[:,:-1]), 3)
print r.round(r.cor(ds.ix[:,:-1]), 3)
print r.round(r.cor(rs.ix[:,:-1], ds.ix[:,:-1]), 3) # note: correlation of rel.mean_fds is very high with the original one (only keep rel measure)
rs.ix[:,:-2].head()
# ----- source notebook boundary: 300_task_activity/100_face_deviations_unfam/00_generate_measures.ipynb -----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch-v1.2.0-gpu [conda env:root] *
# language: python
# name: conda-root-pytorch-v1.2.0-gpu
# ---
# +
# # %pip install --upgrade pip --user
# # %pip install zarr --user
# # %pip install tables --user
# # %pip install git+https://github.com/simpeg/simpeg.git@simulation-tdem --user
# # %pip install dask dask_jobqueue --user
# # %pip install git+https://github.com/simpeg-research/casingSimulations.git@simulation --user
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, SymLogNorm
import discretize
from scipy import sparse as sp
from scipy.constants import mu_0
import pandas as pd
from pymatsolver import Pardiso, SolverCG
import os
import json
import dask
import dask_jobqueue
from dask.distributed import Client
import casingSimulations as casing_sim
import torch
# we are in the midst of upgrading the API, so this is
# more closely in-line with the upcoming changes
from SimPEG.electromagnetics import time_domain as tdem
# %matplotlib inline
# -
# Direct solver + options used by all simulations below.
Solver = Pardiso
solver_opts = {} #{"maxiter": 10}
# Pre-generated trial metadata produced by the synthetic-data pipeline.
data_directory = "./experiment1"
df = pd.read_hdf(f"{data_directory}/trial_data.h5", "data")
# +
# Histogram each trial parameter to see what the experiment sampled.
fig, ax = plt.subplots(1,len(df.keys()), figsize=(20, 4))
for i, key in enumerate(df.keys()):
    ax[i].hist(df[key])
    ax[i].set_title(f"{key}".replace("_", " "))
plt.tight_layout()
# +
# pick a single model to try training on
trial_index = 5 # a 1200 m long well (relatively short --> fast simulations)
trial_directory = f"{data_directory}/trial_{trial_index}"
# +
# generate the 2D model
with open(f"{trial_directory}/approx_casing.json") as f:
    params = json.load(f)
model = casing_sim.model.CasingInHalfspace.deserialize(params, trusted=True)
with open(f"{trial_directory}/simulation_approx_casing.json") as f:
    simulation_params = json.load(f)
sim3D = tdem.Problem3D_j.deserialize(simulation_params, trusted=True)
mesh3D = sim3D.mesh
# -
# create a 2D simulation
# Collapse the 3D cylindrical mesh to a single azimuthal cell (2D cyl).
mesh = discretize.CylMesh([mesh3D.hx, 1, mesh3D.hz], x0=mesh3D.x0)
sim = tdem.Problem3D_j(mesh=mesh, time_steps=sim3D.time_steps, solver=Solver, solver_opts=solver_opts, sigma=model.sigma(mesh))
# Plot the conductivity model near the well.
fig, ax = plt.subplots(1, 1)
plt.colorbar(
    mesh.plotImage(
        sim.sigma, ax=ax, pcolorOpts={"norm":LogNorm()}, mirror=True
    )[0], ax=ax
)
ax.set_xlim([-1, 1])
ax.set_ylim([-2000, 10])
def getRHS(sim, src):
    """Right-hand side for the j-formulation time step.

    Only the magnetic source term s_m (`src`) contributes; the electric
    source s_e is taken to be zero, so rhs = C * MeMuI * s_m.
    """
    curl = sim.mesh.edgeCurl
    rhs = curl * (sim.MeMuI * src)
    # When the system is symmetrized the RHS is weighted by MfRho.T.
    return sim.MfRho.T * rhs if sim._makeASymmetric else rhs
def getRHS_deriv(sim, v=None, adjoint=False):
    """Derivative of getRHS with respect to the magnetic source term.

    With s_e = 0 the RHS is linear in s_m, so the derivative is the same
    linear operator C * MeMuI (transposed for the adjoint). When `v` is
    None the full sparse operator is returned instead of a product.
    """
    mesh = sim.mesh
    if adjoint:
        # Adjoint chain: (MfRho if symmetrized) -> curl.T -> MeMuI.T.
        if sim._makeASymmetric:
            rhs = sim.MfRho * v if v is not None else sim.MfRho
        else:
            rhs = sp.eye(mesh.nF) if v is None else v
        return sim.MeMuI.T * (mesh.edgeCurl.T * rhs)
    # Forward: curl * MeMuI (optionally symmetrized with MfRho.T).
    if v is not None:
        rhs = mesh.edgeCurl * (sim.MeMuI * v)
    else:
        rhs = mesh.edgeCurl * sim.MeMuI
    return sim.MfRho.T * rhs if sim._makeASymmetric else rhs
# solve the forward problem
def fields(sim, source):
    """Time-step the j-formulation forward problem for a full space-time source.

    source : (nE, nT+1) array of magnetic source values on the time-mesh nodes.
    Returns f : (nF, nT+1) array of face currents; column 0 is the zero
    initial condition.
    """
    f = np.zeros((sim.mesh.nF, sim.nT+1))
    # timestep to solve forward
    Ainv = None
    for tInd, dt in enumerate(sim.timeSteps):
        # keep factors if dt is the same as previous step b/c A will be the
        # same
        if Ainv is not None and (
            tInd > 0 and abs(dt-sim.timeSteps[tInd - 1]) >
            sim.dt_threshold
        ):
            Ainv.clean()
            Ainv = None
        if Ainv is None:
            A = sim.getAdiag(tInd)
            # NOTE(review): Pardiso is hard-coded here rather than using
            # sim.solver / the module-level `Solver` — intentional? confirm.
            Ainv = Pardiso(A)
        rhs = getRHS(sim, source[:, tInd+1]) # this is on the nodes of the time mesh
        Asubdiag = sim.getAsubdiag(tInd)
        # taking a step
        sol = Ainv * (rhs - Asubdiag * f[:, tInd])
        f[:, tInd+1] = sol
    # clean factors and return
    Ainv.clean()
    return f
def fields_deriv(sim, v=None, adjoint=False):
    """Sensitivity of `fields` w.r.t. the source, applied to `v`.

    Since the PDE is linear in the source, this is the same time stepping as
    `fields` with the RHS replaced by its derivative. Dispatches to
    `fields_deriv_adjoint` when adjoint=True.
    """
    if adjoint:
        return fields_deriv_adjoint(sim, v=v)
    df_dm_v = np.zeros((sim.mesh.nF, sim.nT+1))
    # timestep to solve forward
    Ainv = None
    for tInd, dt in enumerate(sim.timeSteps):
        # keep factors if dt is the same as previous step b/c A will be the
        # same
        if Ainv is not None and (
            tInd > 0 and abs(dt-sim.timeSteps[tInd - 1]) > sim.dt_threshold
        ):
            Ainv.clean()
            Ainv = None
        if Ainv is None:
            A = sim.getAdiag(tInd)
            Ainv = Pardiso(A)
        rhs_deriv = getRHS_deriv(sim, v[:, tInd+1]) # this is on the nodes of the time mesh
        Asubdiag = sim.getAsubdiag(tInd)
        # taking a step
        sol = Ainv * (rhs_deriv - Asubdiag * df_dm_v[:, tInd])
        df_dm_v[:, tInd+1] = sol
    # clean factors and return
    Ainv.clean()
    return df_dm_v
# +
def fields_deriv_adjoint(sim, v=None):
    """Adjoint sensitivity of `fields`: steps backwards in time.

    v : (nF, nT+1) array; returns (nE, nT+1) array on the source edges.
    """
    df_dmT_v = np.zeros((sim.mesh.nE, sim.nT+1)) # the source is defined on edges
    # timestep to solve forward
    ATinv = None
    for tInd in reversed(range(sim.nT)):
        dt = sim.time_steps[tInd]
        # keep factors if dt is the same as previous step b/c A will be the
        # same
        # NOTE(review): on the first (latest) iteration ATinv is None, so the
        # out-of-range timeSteps[tInd + 1] access is short-circuited.
        if ATinv is not None and (
            tInd <= sim.nT and abs(dt-sim.timeSteps[tInd + 1]) > sim.dt_threshold
        ):
            ATinv.clean()
            ATinv = None
        if ATinv is None:
            AT = sim.getAdiag(tInd).T
            ATinv = Pardiso(AT)
        # ATinv_v = ATinv * v[:, tInd+1]
        # `sol` from the previous (later) step feeds the subdiagonal term.
        if tInd < sim.nT - 1:
            AsubdiagT = sim.getAsubdiag(tInd+1).T
            sol = ATinv * (v[:, tInd+1] - AsubdiagT * sol)
        else:
            sol = ATinv * v[:, tInd+1]
        rhs_deriv = getRHS_deriv(sim, sol, adjoint=True) # this is on the nodes of the time mesh
        df_dmT_v[:, tInd+1] = rhs_deriv
    # clean factors and return
    ATinv.clean()
    return df_dmT_v
# -
def create_source(sim, model, s, trial_directory):
    """Interpolate well-current samples `s` onto the simulation's y-edges.

    s : (n_source_depths, nT+1) array of currents at the depths stored in
    z_currents.npy. Returns (src, grad) where src is (nEy, nT+1) and grad
    applies the (adjoint of the) interpolation to a perturbation.
    """
    # interpolate on to the spatial mesh (lets use exact time for now)
    z_source = np.load(f"{trial_directory}/z_currents.npy")
    mesh = sim.mesh
    src = np.zeros((mesh.nEy, sim.nT+1))
    # Select the y-edges within half a cell of the casing radius and inside
    # the casing's depth extent.
    csx = mesh.hx.min()
    xinds = (mesh.gridEy[:, 0] < model.casing_b + csx/2) & (mesh.gridEy[:, 0] > model.casing_b - csx/2)
    zinds = (mesh.gridEy[:, 2] >= model.casing_z.min()) & (mesh.gridEy[:, 2] <= model.casing_z.max())
    src_inds_bool = xinds & zinds
    src_inds = np.where(src_inds_bool)[0]
    P = discretize.utils.interpmat(mesh.gridEy[src_inds, 2], z_source)
    src[src_inds, :] = P * s
    def grad(dy, adjoint=True):
        # Derivative of the (linear) interpolation: P or P.T on src_inds.
        if adjoint:
            return P.T * dy[src_inds, :]
        grd = np.zeros((mesh.nEy, sim.nT+1))
        grd[src_inds, :] = P * dy
        return grd
    return src, grad
def load_trial(trial_directory):
    """Load a trial's casing model and build the collapsed 2D simulation.

    Returns (model, mesh, sim) — the 2D cyl mesh is derived from the stored
    3D simulation's mesh, reusing the module-level Solver/solver_opts.
    """
    # load up the data
    with open(f"{trial_directory}/approx_casing.json") as f:
        params = json.load(f)
    model = casing_sim.model.CasingInHalfspace.deserialize(params, trusted=True)
    with open(f"{trial_directory}/simulation_approx_casing.json") as f:
        simulation_params = json.load(f)
    sim3D = tdem.Problem3D_j.deserialize(simulation_params, trusted=True)
    mesh3D = sim3D.mesh
    # Collapse to a single azimuthal cell (2D cylindrically symmetric mesh).
    mesh = discretize.CylMesh([mesh3D.hx, 1, mesh3D.hz], x0=mesh3D.x0)
    sim = tdem.Problem3D_j(mesh=mesh, time_steps=sim3D.time_steps, solver=Solver, solver_opts=solver_opts, sigma=model.sigma(mesh))
    return model, mesh, sim
def get_j_interpolation_mat(
    trial_directory, mesh,
    log10tmin=-6, log10tmax=-2, ntimes=128 # hard-coded from "pipeline_synthetic_data"
):
    """Build the sparse matrix that samples (jx, jz) on the stored x/z grid
    and log-spaced times, stacked as [x-components; z-components].

    NOTE(review): uses the notebook-global `sim` for the time interpolation
    rather than a parameter — works only because callers use the same sim.
    """
    xsample = np.load(f"{trial_directory}/x.npy")
    zsample = np.load(f"{trial_directory}/z.npy")
    xz_grid = discretize.utils.ndgrid(xsample, np.r_[0], zsample)
    Px = mesh.getInterpolationMat(xz_grid, 'Fx')
    Pz = mesh.getInterpolationMat(xz_grid, 'Fz')
    tsample = np.logspace(log10tmin, log10tmax, ntimes)
    Pt = sim.time_mesh.getInterpolationMat(tsample, 'N')
    # construct full P
    Pxt = sp.kron(Pt, Px)
    Pzt = sp.kron(Pt, Pz)
    P = sp.vstack([Pxt, Pzt])
    return P
def run_forward(trial_ind, source_vec):
    """Run the full forward map for one trial: source -> sampled currents.

    Returns (j_compare, grad): the projected currents and a closure applying
    the Jacobian (or its adjoint) of the whole chain.
    """
    trial_directory = f"{data_directory}/trial_{trial_ind}"
    model, mesh, sim = load_trial(trial_directory)
    source, source_grad = create_source(sim, model, source_vec, trial_directory)
    f = fields(sim, source)
    P = get_j_interpolation_mat(trial_directory, mesh)
    j_compare = P * discretize.utils.mkvc(f)
    def grad(dy, adjoint=True):
        # Chain rule through P, the time stepping, and the source interp.
        if adjoint:
            v = P.T * dy
            v = v.reshape(mesh.nF, sim.nT+1, order="F")
            f_deriv = fields_deriv_adjoint(sim, v)
            return source_grad(f_deriv, adjoint=True)
        f_deriv = fields_deriv(sim, source_grad(dy, adjoint=False))
        return P * discretize.utils.mkvc(f_deriv)
    return j_compare, grad
# # set up a simple test example
# +
def waveform(t, t_peak=5e-3, width=10, amplitude=1):
    """Gaussian bump in log10(time), peaking at t_peak with log10(width) std."""
    log_t = np.log10(t)
    log_peak = np.log10(t_peak)
    log_width = np.log10(width)
    exponent = -((log_t - log_peak) ** 2) / (2 * log_width ** 2)
    return amplitude * np.exp(exponent)
def sigmoid(x, x0=0, slope=1):
    """Arctan-based sigmoid with range (0, 1), centered at x0."""
    shifted = slope * (x - x0)
    return np.arctan(shifted) / np.pi + 0.5
def depth_distribution(z, dz=200, slope=1e-1):
    # Smooth indicator: ~1 for depths inside the casing interval, tapering to
    # 0 within ~dz of either end.
    # NOTE(review): reads the notebook-global `model` for the casing extent
    # instead of taking it as an argument.
    return sigmoid(z, model.casing_z.min() + dz, slope) * sigmoid(-z, -(model.casing_z.max() - dz), slope)
def source_sm(mesh, t, z):
    """Build a space-time magnetic source: depth taper (outer) time waveform.

    Parameters
    ----------
    mesh : unused; kept for signature compatibility with existing callers.
    t : array of times passed to `waveform`.
    z : array of depths passed to `depth_distribution`.

    Returns
    -------
    (len(z), len(t)) array — the outer product of the two profiles.
    """
    # The original pre-allocated `sm = np.zeros(mesh.nE)` and immediately
    # overwrote it; that dead assignment is removed.
    return np.outer(depth_distribution(z), waveform(t))
# -
# Build and visualize the synthetic test source over depth and (log) time.
z = np.load(f"{trial_directory}/z_currents.npy")
src_vec = source_sm(mesh, sim.times, z)
fig, ax = plt.subplots(1, 1)
plt.colorbar(ax.pcolormesh(sim.times, z, src_vec), ax=ax)
ax.set_xscale("log")
ax.set_xlim(1e-6, sim.times.max())
ax.set_xlabel("time (s)")
ax.set_ylabel("z")
# +
# Derivative + adjoint tests for the source-interpolation step alone.
def test_source(source):
    source = source.reshape(128, 191, order="F")
    src, grad = create_source(sim, model, source, trial_directory)
    def src_deriv(dy, adjoint=False):
        if not adjoint:
            dy = dy.reshape(128, 191, order="F")
        else:
            dy = dy.reshape(mesh.nE, 191, order="F")
        return discretize.utils.mkvc(grad(dy, adjoint))
    return discretize.utils.mkvc(src), src_deriv
x0 = discretize.utils.mkvc(src_vec)
discretize.Tests.checkDerivative(
    test_source,
    x0=x0,
    num=4,
    plotIt=False,
)
# +
# adjoint test
# Verify w.T (J v) == v.T (J.T w) for random v, w.
src_vec = discretize.utils.mkvc(src_vec.reshape(128, 191, order="F"))
src, src_deriv = test_source(src_vec)
v = np.random.rand(128*191)
w = np.random.rand(mesh.nE*191)
a = w.T.dot(discretize.utils.mkvc(src_deriv(v.reshape(128, 191, order="F"), adjoint=False)))
b = v.T.dot(discretize.utils.mkvc(src_deriv(w, adjoint=True)))
print(f"{np.linalg.norm(a):1.3e}, {np.linalg.norm(b):1.3e}, {np.linalg.norm(a-b):1.3e}")
# +
# Derivative + adjoint tests for source interpolation composed with getRHS.
def test_rhs(source):
    source = source.reshape(128, 191, order="F")
    src, grad_src = create_source(sim, model, source, trial_directory)
    rhs = getRHS(sim, src)
    def src_deriv(dy, adjoint=False):
        if not adjoint:
            dy = dy.reshape(128, 191, order="F")
            return discretize.utils.mkvc(getRHS_deriv(sim, grad_src(dy, adjoint), adjoint))
        else:
            dy = dy.reshape(mesh.nF, 191, order="F")
            return grad_src(getRHS_deriv(sim, dy, adjoint), adjoint)
    return discretize.utils.mkvc(rhs), src_deriv
x0 = discretize.utils.mkvc(src_vec)
discretize.Tests.checkDerivative(
    test_rhs,
    x0=x0,
    # dx=1e-3*x0,
    num=4,
    plotIt=False,
    expectedOrder=1,
)
# +
# adjoint test
src_vec = discretize.utils.mkvc(src_vec.reshape(128, 191, order="F"))
rhs, rhs_deriv = test_rhs(src_vec)
v = np.random.rand(128*191)
w = np.random.rand(mesh.nF*191)
a = w.T.dot(discretize.utils.mkvc(rhs_deriv(v.reshape(128, 191, order="F"), adjoint=False)))
b = v.T.dot(discretize.utils.mkvc(rhs_deriv(w, adjoint=True)))
print(f"{np.linalg.norm(a):1.3e}, {np.linalg.norm(b):1.3e}, {np.linalg.norm(a-b):1.3e}")
# +
# Derivative + adjoint tests for the time stepping (fields) itself.
src_sm, _ = create_source(sim, model, src_vec.reshape(128, 191, order="F"), trial_directory)
def test_forward(src_sm):
    src_sm = src_sm.reshape(mesh.nEy, sim.nT+1, order="F")
    j = fields(sim, src_sm)
    def j_deriv(v, adjoint=False):
        if not adjoint:
            v = v.reshape(mesh.nEy, sim.nT+1, order="F")
            return discretize.utils.mkvc(fields_deriv(sim, v, adjoint))
        else:
            v = v.reshape(mesh.nF, sim.nT+1, order="F")
            return fields_deriv(sim, v, adjoint)
    return discretize.utils.mkvc(j), j_deriv
x0 = discretize.utils.mkvc(src_sm)
discretize.Tests.checkDerivative(
    test_forward,
    x0=x0,
    num=4,
    plotIt=False,
    expectedOrder=1,
)
# +
# adjoint test
j, j_deriv = test_forward(src_sm)
v = np.random.rand(np.prod(src_sm.shape))
w = np.random.rand(np.prod(j.shape))
a = w.T.dot(discretize.utils.mkvc(j_deriv(v, adjoint=False)))
b = v.T.dot(discretize.utils.mkvc(j_deriv(w, adjoint=True)))
print(f"{np.linalg.norm(a):1.3e}, {np.linalg.norm(b):1.3e}, {np.linalg.norm(a-b):1.3e}")
# +
# Derivative + adjoint tests for the complete chain (run_forward).
def test_forward_full(src_vec):
    src_vec = src_vec.reshape(128, 191, order="F")
    j, j_deriv = run_forward(5, src_vec)
    def grad(v):
        v = v.reshape(128, 191, order="F")
        return discretize.utils.mkvc(j_deriv(v, adjoint=False))
    return discretize.utils.mkvc(j), grad
x0 = discretize.utils.mkvc(src_vec)
discretize.Tests.checkDerivative(
    test_forward_full,
    x0=x0,
    num=5,
    plotIt=False,
    expectedOrder=1,
)
# +
# adjoint test
src_vec = src_vec.reshape(128, 191, order="F")
j, j_deriv = run_forward(5, src_vec)
v = np.random.rand(128*191)
w = np.random.rand(np.prod(j.shape))
a = w.T.dot(discretize.utils.mkvc(j_deriv(v.reshape(128, 191, order="F"), adjoint=False)))
b = v.T.dot(discretize.utils.mkvc(j_deriv(w, adjoint=True)))
err = a-b
# Pass if the inner products agree to a relative tolerance of 1e-10.
if np.linalg.norm(err)/np.linalg.norm(a) < 1e-10:
    passing = True
else:
    passing = False
print(
    f"{np.linalg.norm(a):1.3e}, "
    f"{np.linalg.norm(b):1.3e}, "
    f"{np.linalg.norm(err):1.3e}, "
    f"{'passing :)' if passing is True else 'failing :('}"
)
# +
# Sanity check of checkDerivative with a simple block-diagonal linear map.
# NOTE(review): A is re-randomized on every call, so the "function" changes
# between checkDerivative evaluations — confirm this is intended.
def test_sanity_check(src_vec):
    # src_vec = src_vec.reshape(128, 2, order="F")
    A = np.random.randn(128, 128)
    bigA = sp.block_diag([A, A])
    j = bigA.dot(src_vec)
    def grad(v):
        # v = v.reshape(128, 2, order="F")
        return discretize.utils.mkvc(bigA.dot(v))
    return discretize.utils.mkvc(j), grad
x0 = discretize.utils.mkvc(src_vec[:, 0:2])
discretize.Tests.checkDerivative(
    test_sanity_check,
    x0=x0,
    num=10,
    plotIt=False,
    expectedOrder=1,
)
# -
# Run the forward simulation for the test source, then plot the current
# density (vector view) and the raw source term near the casing.
src_sm, _ = create_source(sim, model, src_vec, trial_directory)
src_sm = src_sm.reshape(mesh.nEy, sim.nT+1, order="F")
j = fields(sim, src_sm)
# +
tind = 30
fig, ax = plt.subplots(1, 1)
out = mesh.plotImage(
    mesh.aveF2CCV * j[:, tind],
    view="vec",
    vType="CCv",
    ax=ax, mirror=True,
    range_x=np.r_[-1000, 1000],
    range_y=np.r_[-1500, 100],
    sample_grid = np.r_[5., 5.],
    pcolorOpts={"norm":LogNorm()},
    clim = np.r_[1e-10, 1e-2],
    stream_threshold = 1e-10
)
ax.set_aspect(1)
plt.colorbar(out[0])
ax.set_title(f"current density, t={sim.times[tind]*1e3:1.1e}ms")
# +
tind = 10
fig, ax = plt.subplots(1, 1)
out = mesh.plotImage(
    mesh.aveE2CC * src_sm[:, tind],
    # view="vec",
    # vType="CCv",
    ax=ax, mirror=True,
    range_x=0.15*np.r_[-1, 1],
    range_y=np.r_[-1210, -1190], #10*np.r_[-1, 1],
    # sample_grid = np.r_[5., 5.],
    pcolorOpts={"norm":LogNorm()},
    clim = np.r_[1e-13, 1e-2],
    # stream_threshold = 1e-13
)
mesh.plotGrid(ax=ax)
# ax.set_aspect(1)
plt.colorbar(out[0])
ax.set_title(f"source term, t={sim.times[tind]*1e3:1.1e}ms")
# -
# # Set up ML pipeline
# Global configuration for the inversion/ML section.
dtype = torch.float64
device = torch.device("cpu")
nspatial = 128  # matches the reshape(128, 191, ...) calls below (source depths)
ntimes = 191    # time-mesh nodes (nT + 1)
nsrcz = 128     # presumably the same depth count — verify usage
# +
class ForwardSimulation(torch.autograd.Function):
    """Autograd wrapper around the TDEM forward map: source tensor -> sampled
    current-density tensor, with the adjoint chain as the backward pass.
    """
    @staticmethod
    def forward(ctx, source_vec): #, trial_ind):
        # trial_ind = tri
        # NOTE(review): `trial_ind` is read from the notebook's global scope
        # (the parameter was commented out); so are data_directory and dtype.
        trial_directory = f"{data_directory}/trial_{trial_ind}"
        # load up objects
        model, mesh, sim = load_trial(trial_directory)
        ctx.model = model
        ctx.mesh = mesh
        ctx.sim = sim
        # create source
        source, source_grad = create_source(sim, model, source_vec.data.numpy(), trial_directory)
        # NOTE(review): rhs is computed but never used here.
        rhs = getRHS(sim, source)
        ctx.source_grad = source_grad
        # compute fields
        f = fields(sim, source)
        # NOTE(review): ctx is fresh on every call, so P is rebuilt each
        # forward pass rather than cached across calls.
        if getattr(ctx, 'P', None) is None:
            P = get_j_interpolation_mat(trial_directory, mesh)
            ctx.P = P
        # project data
        j_compare = (P * discretize.utils.mkvc(f))
        if dtype == torch.float32:
            return torch.from_numpy(j_compare).float()
        return torch.from_numpy(j_compare).double()
    @staticmethod
    def backward(ctx, dy):
        # Adjoint chain: P.T, backward time stepping, then the adjoint of
        # the source interpolation.
        P = ctx.P
        v = P.T * dy.data.numpy()
        v = v.reshape(ctx.mesh.nF, ctx.sim.nT+1, order="F")
        f_deriv = fields_deriv_adjoint(ctx.sim, v)
        grad = ctx.source_grad(f_deriv, adjoint=True)
        if dtype == torch.float32:
            return torch.from_numpy(grad).float()
        return torch.from_numpy(grad).double()
# +
# class CasingData(torch.utils.data.Dataset):
#     def __init__(self, directory, trial_indices):
#         self.directory = directory
#         self.trial_indices = trial_indices
#     def __len__(self):
#         return len(self.trial_indices)
#     def __getitem__(self, idx):
#         if torch.is_tensor(idx):
#             idx = idx.tolist()
# +
# source, source_deriv = create_source(sim, model, src_vec, trial_directory)
# rhs = getRHS(sim, source)
# NOTE(review): trial_ind is set to 10 here, but `trial_directory` (used for
# loading j_difference.npy just below) still points at trial_5 from earlier —
# verify the intended trial.
trial_ind = 10
trials = [trial_ind]
jd_numpy = np.load(f"{trial_directory}/j_difference.npy")
# -
plt.hist(np.log10(np.abs(jd_numpy)), 20);
floor = 1e-12
print((np.abs(jd_numpy)>floor).sum() / len(jd_numpy))
# +
# Target data (jd), uncertainty weights (w), and optimization constants.
jd = torch.from_numpy(jd_numpy)
std = 0.02
w = torch.from_numpy(1./(std * np.abs(jd_numpy) + floor))
forward = ForwardSimulation.apply
if dtype == torch.float64:
    jd = jd.double()
    w = w.double()
else:
    jd = jd.float()
    w = w.float()
s0_scaling = 1
learning_rate = 1
# optimizer = torch.optim.SGD(s0, lr=learning_rate)
# -
plt.hist(1./(std * np.abs(jd_numpy) + floor))
def convert_to_torch_sparse(mat):
    """Convert a scipy sparse matrix to a torch sparse tensor.

    The float precision follows the notebook-global `dtype`.
    """
    coo = mat.tocoo()
    idx = torch.LongTensor(np.vstack((coo.row, coo.col)))
    size = torch.Size(coo.shape)
    if dtype == torch.float32:
        vals = torch.FloatTensor(coo.data)
        return torch.sparse.FloatTensor(idx, vals, size)
    vals = torch.DoubleTensor(coo.data)
    return torch.sparse.DoubleTensor(idx, vals, size)
# +
# First-difference operators in time and depth (scaled by cell widths),
# converted to torch sparse for use in the regularization terms.
Dtime = discretize.utils.sdiag(1./sim.time_mesh.hx) * discretize.utils.ddx(sim.nT)
Dtime_torch = convert_to_torch_sparse(Dtime)
z_currents = np.load(f"{trial_directory}/z_currents.npy")
Dz = discretize.utils.sdiag(1./np.diff(z_currents)) * discretize.utils.ddx(len(z_currents)-1)
Dz_torch = convert_to_torch_sparse(Dz)
# +
# # %%time
# Gradient-descent inversion: data misfit + beta * (smallness + smoothness).
max_iter = 1
beta = None
beta_factor = 0 #100
# beta_cooling = 0.5
alpha_s = 1e-4
alpha_t = sim.time_mesh.hx.min()**2
alpha_z = 1
s0 = torch.zeros(nspatial, ntimes, dtype=dtype, device=device, requires_grad=True)
# Estimate beta from the misfit/regularization ratio at a random model.
if beta is None:
    s0_tmp = torch.randn(nspatial, ntimes, dtype=dtype, device=device, requires_grad=True)
    j_pred = forward(s0_scaling * s0_tmp)
    # dmisfit = 1/len(jd) * (w*(j_pred - jd)).pow(2).sum()
    dmisfit = ((j_pred - jd)).pow(2).sum()
    regularization = (
        alpha_s * s0_tmp.pow(2).sum() +
        alpha_t * Dtime_torch.mm(s0_tmp.T).pow(2).sum() +
        alpha_z * Dz_torch.mm(s0_tmp).pow(2).sum()
    )
    beta = beta_factor * dmisfit.item() / regularization.item()
for i in range(max_iter):
    s_iter = s0_scaling * s0
    j_pred = forward(s_iter)
    # dmisfit = 1/len(jd) * (w*(j_pred - jd)).pow(2).sum()
    dmisfit = ((j_pred - jd)).pow(2).sum()
    smallness = alpha_s * s0.pow(2).sum()
    smooth_time = alpha_t * Dtime_torch.mm(s0.T).pow(2).sum()
    smooth_depth = alpha_z * Dz_torch.mm(s0).pow(2).sum()
    regularization = (
        smallness +
        smooth_time +
        smooth_depth
    )
    loss = dmisfit + beta * regularization
    print(
        f"iter {i}, "
        f"dmisfit: {dmisfit.item():1.4e}, "
        f"reg: {regularization.item():1.4e}, "
        f"beta * reg: {beta * regularization.item():1.4e}, "
        f"loss: {loss.item():1.4e}"
    )
    print(
        f"  small: {smallness.item():1.4e}, "
        f"smooth time: {smooth_time.item():1.4e}, "
        f"smooth depth: {smooth_depth.item():1.4e}\n"
    )
    # optimizer.zero_grad()
    loss.backward()
    # optimizer.step()
    # Manual SGD step (the torch optimizer is commented out above).
    with torch.no_grad():
        s0 -= learning_rate * s0.grad
        s0.grad.zero_()
    # beta = beta_cooling * beta
# +
# -- Plot the recovered source s0 over depth and (log) time --
fig, ax = plt.subplots(1, 1)
z = np.load(f"{trial_directory}/z_currents.npy")
plotme = s0_scaling * s0.data.numpy()
clim = np.r_[1e-4, 1] * np.max(np.abs(plotme))
# Bug fix: `Normalize` was never imported (only LogNorm/SymLogNorm are) and
# the positional linthresh argument only makes sense for SymLogNorm — the
# same pattern used for the analogous source plot later in the notebook.
norm = SymLogNorm(
    clim[0] if clim is not None else
    np.max([1e-20, np.min(np.absolute(plotme))]),
    vmin = -clim[1], vmax=clim[1]
)
plt.colorbar(ax.pcolormesh(sim.times, z, plotme, cmap="BrBG_r", norm=norm), ax=ax)
ax.set_xscale("log")
ax.set_xlim(1e-7, sim.times.max())
ax.set_xlabel("time (s)")
ax.set_ylabel("z")
# -
# load up objects
# Re-load the trial and interpolate the recovered source onto the mesh edges.
model, mesh, sim = load_trial(trial_directory)
src, _ = create_source(sim, model, s0.data.numpy(), trial_directory)
# +
# Plot the recovered source term on the mesh (symmetric-log color scale).
tind = 10
plotme = mesh.aveE2CC * src[:, tind]
clim = np.r_[1e-4, 1] * np.max(np.abs(plotme))
norm = SymLogNorm(
    clim[0] if clim is not None else
    np.max([1e-20, np.min(np.absolute(plotme))]),
    vmin = -clim[1], vmax=clim[1]
)
fig, ax = plt.subplots(1, 1)
plt.colorbar(mesh.plotImage(
    plotme,
    mirror=True,
    mirror_data=-1*plotme,
    pcolorOpts={"norm": norm, "cmap": "BrBG_r"},
    ax=ax
)[0], ax=ax)
ax.set_xlim(0.25*np.r_[-1, 1])
ax.set_ylim(np.r_[-2000, 50])
# +
# Sample grid and times on which the comparison data were stored.
x = np.load(f"{trial_directory}/x.npy")
z = np.load(f"{trial_directory}/z.npy")
t = np.logspace(-6, -2, 128)
nj = len(x) * len(z)
# +
# Unpack the stacked [jx; jz] vectors at one time index and compare the
# predicted currents against the stored "true" difference data.
tind = 90
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
j_plot = j_pred.data.numpy()
jx = j_plot[:nj*len(t)].reshape(len(x)*len(z), len(t), order="F")[:, tind]
jz = j_plot[nj*len(t):].reshape(len(x)*len(z), len(t), order="F")[:, tind]
jx = jx.reshape(len(x), len(z), order="F")
jz = jz.reshape(len(x), len(z), order="F")
j_amplitude = np.sqrt(jx**2 + jz**2)
jtrue_x = jd_numpy[:nj*len(t)].reshape(len(x)*len(z), len(t), order="F")[:, tind]
jtrue_z = jd_numpy[nj*len(t):].reshape(len(x)*len(z), len(t), order="F")[:, tind]
jtrue_x = jtrue_x.reshape(len(x), len(z), order="F")
jtrue_z = jtrue_z.reshape(len(x), len(z), order="F")
jtrue_amplitude = np.sqrt(jtrue_x**2 + jtrue_z**2)
# plot the true
plt.colorbar(ax[0].pcolormesh(x, z, np.log10(jtrue_amplitude).T), ax=ax[0])
ax[0].streamplot(x, z, jtrue_x.T, jtrue_z.T, color="k")
# # plot the estimated
# plt.colorbar(ax[1].pcolormesh(x, z, np.log10(j_amplitude).T), ax=ax[1])
# ax[1].streamplot(x, z, jx.T, jz.T, color="k")
# plot the difference
# +
# Vector plot of the simulated current density at the selected time index.
fig, ax = plt.subplots(1, 1)
out = mesh.plotImage(
    mesh.aveF2CCV * j[:, tind],
    view="vec",
    vType="CCv",
    ax=ax, mirror=True,
    range_x=np.r_[-1000, 1000],
    range_y=np.r_[-1500, 100],
    sample_grid = np.r_[5., 5.],
    pcolorOpts={"norm":LogNorm()},
    clim = np.r_[1e-10, 1e-2],
    stream_threshold = 1e-10
)
ax.set_aspect(1)
plt.colorbar(out[0])
ax.set_title(f"current density, t={sim.times[tind]*1e3:1.1e}ms")
# -
class CasingNet(torch.nn.Module):
    """Skeleton network for estimating casing sources (work in progress).

    Intended design per the original comments: a convolutional branch for
    the currents in the casing plus a dense branch for the casing-model
    parameters, later combined.
    """
    def __init__(self):
        # Bug fix: the original called super(Net, self) — `Net` is undefined
        # here — and left `self.conv1 =` with no right-hand side (a syntax
        # error). Placeholders keep the class importable until the layers
        # are defined.
        super(CasingNet, self).__init__()
        # convolutional component (for currents in casing)
        self.conv1 = None  # TODO: define the conv layers
        # dense component (for parameters of casing model)
        # combine the two
    def forward(self, x):
        # TODO: implement; currently a no-op returning None.
        pass
# ----- source notebook boundary: notebooks/pytorch_pipeline.ipynb -----