code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# title: "Scatter Plot"
# author: "Sanjay"
# date: 2020-09-05
# description: "-"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: kagglevil
# language: python
# name: kagglevil
# ---
# Minimal Matplotlib scatter-plot demo.
from matplotlib import pyplot as plt

# Sample data: horizontal and vertical coordinates of four points.
x = [15, 20, 39, 44]
y = [10, 35, 58, 64]

# Draw the points and open the figure window.
plt.scatter(x, y)
plt.show()
| docs/python/matplotlib/Scatter-Plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7f4vpK1iy8lv" colab_type="text"
# # Question Generator command line example
# + [markdown] id="wk_40UVVr-8s" colab_type="text"
# First we need to install HuggingFace's transformers library.
# + id="vwUQSv8xtow5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 588} executionInfo={"status": "ok", "timestamp": 1596018453705, "user_tz": -540, "elapsed": 8858, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08195438125971362273"}} outputId="6dcae178-e9ed-425c-e91b-f7c24e1c4480"
# !pip install transformers
# + [markdown] id="TgpWgCxvzMgV" colab_type="text"
# Next we're going to clone the github repo.
# + id="JrmBJenTxjmF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} executionInfo={"status": "ok", "timestamp": 1596018456450, "user_tz": -540, "elapsed": 11590, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08195438125971362273"}} outputId="61dae871-99f9-446d-dca0-2a1d6252760f"
# !git clone https://github.com/amontgomerie/question_generator
# %cd question_generator/
# + [markdown] id="FErCFnQPI_6g" colab_type="text"
# Make sure that we're using the GPU:
# + id="uObsDWn5I917" colab_type="code" colab={}
# Select the GPU when CUDA is available, otherwise fall back to the CPU.
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Fail fast if no GPU was assigned -- the generation script below is far too slow on CPU.
assert device == torch.device('cuda'), "Not using CUDA. Set: Runtime > Change runtime type > Hardware Accelerator: GPU"
# + [markdown] id="BSnZw4uMyf2G" colab_type="text"
# Finally we can run question generation script using the following arguments:
# ```
# --text_dir # the directory of the artice to use as context
# --num_questions # the number of questions to be generated (default 10)
# --answer_style # can be 'all', 'sentences', or 'multiple_choice' (default 'all')
# --show_answers # set to 'False' to hide answers (default 'True')
# --use_qa_eval # use to turn off the question filtering 'True'/'False' (default True)
# ```
#
# + id="qM2OZ7v0s6nU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 910} executionInfo={"status": "ok", "timestamp": 1596018570183, "user_tz": -540, "elapsed": 125308, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08195438125971362273"}} outputId="43892161-0891-4998-d0a4-8e3a3e18f57f"
# !python 'run_qg.py' --text_dir 'articles/twitter_hack.txt'
| examples/qg_commandline_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env
# language: python
# name: env
# ---
# +
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from os import listdir
from os.path import isfile, join
import numpy as np
import soundfile as sf
from scipy import io
import scipy.signal as sp
from src.features import gtgram
import simpleaudio as sa
import matplotlib as mpl
import matplotlib.pyplot as plt
import src.features.filters as filters
import src.features.helpers_vis as hp_vis
import src.features.helpers as hp
import src.data.generateData as generate_data
# Project root: one directory above the current working directory (notebook lives in notebooks/).
ROOT = Path('.').resolve().parents[0]
# set the path to the sound files
SOUND_FILES = ROOT / 'data/raw/sound_samples/'
# create a list of the sound files (recursively collects every .wav under the folder)
SOUND_FILES = list(SOUND_FILES.glob('**/*.wav'))
# +
def create_spectrum(freq_bands=24, snr=0.2, normalize=False, azimuth=12, time_window=0.1, max_freq=20000):
    """Create (or load from cache) gammatone spectra for all sound samples.

    Parameters
    ----------
    freq_bands : int
        Number of gammatone frequency channels.
    snr : float
        Noise mixing ratio in [0, 1]; this fraction of the signal is replaced
        by uniform noise scaled to the signal's maximum.
    normalize : bool
        Only encoded in the cache file name (not applied here).
    azimuth : int
        Azimuth index; only encoded in the cache file name.
    time_window : float
        Gammatone spectrogram window length in seconds.
    max_freq : int
        Upper frequency limit of the filter bank in Hz.

    Returns
    -------
    tuple of np.ndarray
        Two arrays of shape (n_sounds, 25 elevations, freq_bands), in dB.
    """
    # Shared file-name suffix; the right/left paths only differ in the channel tag.
    suffix = ('_gammatone_' + str(time_window) + '_window_' + str(int(snr * 100)) + '_srn_' +
              str(freq_bands) + '_channels_' + str((azimuth - 12) * 10) + '_azi_' +
              str(normalize) + '_norm_no_hrtf.npy')
    str_r = 'data/processed_' + str(max_freq) + 'Hz/binaural_right_0' + suffix
    str_l = 'data/processed_' + str(max_freq) + 'Hz/binaural_left_0' + suffix
    path_data_r = ROOT / str_r
    path_data_l = ROOT / str_l
    print(path_data_r.as_posix())
    # check if we can load the data from a file
    if path_data_r.is_file() and path_data_l.is_file():
        print('Data set found. Loading from file : ' + str_r)
        print(path_data_l)
        return np.load(path_data_r), np.load(path_data_l)
    else:
        print('Creating data set : ' + str_l)
        # use always all elevations -> 25 per sound
        psd_all_i = np.zeros((len(SOUND_FILES), 25, freq_bands))
        psd_all_c = np.zeros((len(SOUND_FILES), 25, freq_bands))
        # Gammatone spectrogram parameters (invariant over sounds/elevations).
        # BUGFIX: the original re-assigned time_window = 0.1 inside the loop,
        # shadowing the parameter; the parameter itself is used now.
        twin = time_window
        thop = twin / 2
        fmin = 100
        fs = 44100
        for i in range(psd_all_i.shape[0]):
            print("Creating dataset for sound: " + SOUND_FILES[i].name)
            # load the sound sample once per file (identical for every elevation)
            signal = sf.read(SOUND_FILES[i].as_posix())[0]
            for i_elevs in range(psd_all_i.shape[1]):
                # "filter" the signal -- [1, 0] / 1 is an identity filter;
                # presumably a placeholder for an HRTF filter. TODO confirm.
                signal_elevs = sp.filtfilt([1, 0], 1, signal)
                # add noise to the signal
                signal_elevs = (1 - snr) * signal_elevs + snr * np.random.random(signal_elevs.shape[0]) * signal.max()
                # contralateral side: same identity filter, independent noise realisation
                signal_elevs_c = sp.filtfilt([1, 0], 1, signal)
                signal_elevs_c = (1 - snr) * signal_elevs_c + snr * np.random.random(signal_elevs_c.shape[0]) * signal.max()
                ###### Apply Gammatone Filter Bank ##############
                # ipsi side: filter bank -> time average -> dB
                y = gtgram.gtgram(signal_elevs, fs, twin,
                                  thop, freq_bands, fmin, max_freq)
                y = np.mean(y, axis=1)
                psd_all_i[i, i_elevs, :] = 20 * np.log10(y + np.finfo(np.float32).eps)
                # contralateral side
                y = gtgram.gtgram(signal_elevs_c, fs,
                                  twin, thop, freq_bands, fmin, max_freq)
                y = np.mean(y, axis=1)
                psd_all_c[i, i_elevs, :] = 20 * np.log10(y + np.finfo(np.float32).eps)
                #################################################
        np.save(path_data_r.absolute(), psd_all_c)
        np.save(path_data_l.absolute(), psd_all_i)
        return psd_all_c, psd_all_i
# +
########################################################################
######################## Set parameters ################################
########################################################################
normalize = False # parameter is not considered (only encoded in file names)
time_window = 0.1 # time window for spectrogram in sec
# Parameter to test
snr = 0.2 # Signal to noise ratio
freq_bands = 128 # Frequency bands in resulting data
azimuth = 12 # which azimuths to create
max_freq = 20000 # define max frequency for gammatone filter bank
# Figure/experiment bookkeeping
save_figs=True
save_type='svg'
model_name='elevation_spectra_maps'
exp_name='figures_paper'
elevations=25
clean=True
participant_number = 9
logger = logging.getLogger(__name__)
logger.info('Plotting elevation spectra map for different sounds')
# Replace the elevation count with the actual elevation indices [0, 25)
elevations = np.arange(0, elevations, 1)
# make sure save type is given
if not save_type or len(save_type) == 0:
    save_type = 'svg'
# The experiment name encodes all parameters so runs do not overwrite each other
exp_name_str = hp.create_exp_name([exp_name, time_window, int(snr * 100), freq_bands, max_freq,
                                   participant_number, (azimuth - 12) * 10, normalize, len(elevations)])
exp_path = ROOT / 'models' / model_name
exp_file = exp_path / exp_name_str
########################################################################
########################################################################
# create the spectrum data
spec_c, spec_i = create_spectrum(freq_bands, snr, normalize, azimuth, time_window, max_freq=max_freq)
# create the filtered HRTF data
psd_all_c, psd_all_i = generate_data.create_data(freq_bands, participant_number, snr, normalize, azimuth, time_window, max_freq=max_freq)
# Shared colour scale in dB for all maps
vmin= -80
vmax= -40
fig_size = (7, 12)
# fig_size = (20, 14)
# X axis is drawn on an ERB scale between 20 Hz and max_freq
formatter = hp_vis.ERBFormatter(20, max_freq, unit='', places=0)
for i_sound, sound in enumerate(SOUND_FILES):
    sound = sound.name.split('.')[0]
    # IPSI -- upper panel: raw sound spectrum (no HRTF filtering)
    fig = plt.figure(figsize=fig_size)
    ax = fig.add_subplot(2, 1, 1)
    ax.set_title(sound)
    data = np.squeeze(spec_i[i_sound])
    c = ax.pcolormesh(np.linspace(0, 1, data.shape[1]), np.linspace(-45, 90, data.shape[0]),
                      data, shading='gouraud', linewidth=0, rasterized=True, vmin=vmin, vmax=vmax)
    plt.colorbar(c)
    ax.xaxis.set_major_formatter(formatter)
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel('Elevations [deg]')
    # ax.set_yticklabels(t[1:-1])
    # Lower panel: data from generate_data.create_data -- presumably the
    # HRTF-filtered contralateral spectrum; confirm against generateData
    ax = fig.add_subplot(2, 1, 2)
    ax.set_title(sound)
    data = np.squeeze(psd_all_c[i_sound])
    c = ax.pcolormesh(np.linspace(0, 1, data.shape[1]), np.linspace(-45, 90, data.shape[0]),
                      data, shading='gouraud', linewidth=0, rasterized=True, vmin=vmin, vmax=vmax)
    plt.colorbar(c)
    ax.xaxis.set_major_formatter(formatter)
    ax.set_xlabel('Frequency [Hz]')
    ax.set_ylabel('Elevations [deg]')
    if save_figs:
        fig_save_path = ROOT / 'reports' / 'figures' / exp_name_str / model_name / ('participant_' + str(participant_number))
        if not fig_save_path.exists():
            fig_save_path.mkdir(parents=True, exist_ok=True)
        path_final = (fig_save_path / (model_name + '_' + exp_name + '_raw_maps_ipsi_' + str(sound) + '.' + save_type)).as_posix()
        plt.savefig(path_final, dpi=300, transparent=True)
        print('Writing File :' + path_final)
        plt.close()
    else:
        plt.show()
# -
| notebooks/Display sound sample spectrum (no HRTF filtering).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.2 64-bit
# language: python
# name: python3
# ---
# +
from array import array
import reprlib
import math
class Vector:
    """An n-dimensional vector backed by a compact array of doubles.

    Supports iteration, len(), indexing, equality, abs() (Euclidean norm),
    truthiness, and bytes round-tripping via ``bytes(v)`` / ``frombytes``.
    """
    typecode = 'd'  # array typecode: C double

    def __init__(self, components):
        # Accepts any iterable of numbers; stored as an array('d').
        self._components = array(self.typecode, components)

    def __iter__(self):
        return iter(self._components)

    def __repr__(self):
        # reprlib limits output for long vectors, e.g.
        # "array('d', [3.0, 4.0])" -> "Vector([3.0, 4.0])"
        component = reprlib.repr(self._components)
        component = component[component.find('['):-1]
        return f'Vector({component})'

    def __str__(self):
        return str(tuple(self))

    def __bytes__(self):
        # BUGFIX: the special method is __bytes__, not __byte__; the
        # misspelled name was never invoked by bytes(v), breaking the
        # round trip with frombytes().
        return (bytes([ord(self.typecode)]) + bytes(self._components))

    def __eq__(self, other):
        return tuple(self) == tuple(other)

    def __abs__(self):
        return math.hypot(*self)

    def __bool__(self):
        return bool(abs(self))

    @classmethod
    def frombytes(cls, octets):
        """Alternate constructor: the inverse of bytes(v)."""
        typecode = chr(octets[0])
        memv = memoryview(octets[1:]).cast(typecode)
        return cls(memv)

    def __len__(self):
        return len(self._components)

    def __getitem__(self, index):
        # Note: a slice returns an array slice, not a Vector (Take #1).
        return self._components[index]
# -
# Demonstrations of the Vector protocol methods
Vector([3, 4])
len(Vector([2, 3, 4, 5]))
# Calling the unbound special method directly also works with a plain list
Vector.__abs__([3,4])
# ## Vector Take #2: A Sliceable Sequence
v7 = Vector(range(8))
# Slicing returns an array('d') slice, not a Vector, in this Take #1 class
v7[1:3]
| chapter 12- Special Methods for Sequences/example 12-1-Vector Take #1: Vector2d Compatible.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Sample design: simple random sample
import pandas as pd
from pathlib import Path
import numpy as np
import matplotlib as matplotlib
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
import os
import datetime as dt
from shapely import wkt
from shapely.geometry import Point, Polygon, MultiPoint
import geopandas as gpd
import xarray as xr
# +
# Global plot styling for this notebook
plt.rcParams.update({'font.size': 18})
SMALL_SIZE = 10
MEDIUM_SIZE = 14
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
# -
# Fix the RNG seed so the random samples below are reproducible
np.random.seed(0)
repo_path = Path('/Users/etriesch/dev/ocean-carbon-sampling/')
data_clean_path = repo_path / 'data/clean/'
data_raw_path = repo_path / 'data/raw/'
# Geographic (lat/lon) and equal-area (cylindrical) coordinate reference systems
geo_crs = 'epsg:4326'
proj_crs = '+proj=cea'
# load coastlines (saved locally)
boundary_fp = data_raw_path / 'stanford-vg541kt0643-shapefile.zip'
boundary = gpd.read_file(boundary_fp).to_crs(geo_crs)
# Monterrey desal mask: center point [lon, lat] plus lat/lon window
ca_cent = [-121.788649, 36.802834]
ca_lats = [33.48, 39.48]
ca_lons = [-125.48, -119.48]
# Texas desal mask
tx_cent = [-95.311296, 28.927239]
tx_lats = [25.57, 31.57]
tx_lons = [-98.21, -92.21]
# NH desal mask
nh_cent = [-70.799678, 42.563588]
nh_lats = [39.38, 45.38]
nh_lons = [-73.50, -67.50]
# # Create ocean boundaries
# +
# make disks around each plant location
# NOTE(review): the series is built with the projected CRS but then relabelled
# to the geographic CRS with set_crs (no reprojection), so the 1.5 buffer is
# effectively 1.5 degrees -- confirm this is intended.
ca_disc = gpd.GeoSeries(Point(ca_cent), crs=proj_crs).buffer(1.5).set_crs(geo_crs, allow_override=True)
ca_disc = gpd.GeoDataFrame(geometry=ca_disc)
tx_disc = gpd.GeoSeries(Point(tx_cent), crs=proj_crs).buffer(1.5).set_crs(geo_crs, allow_override=True)
tx_disc = gpd.GeoDataFrame(geometry=tx_disc)
nh_disc = gpd.GeoSeries(Point(nh_cent), crs=proj_crs).buffer(1.5).set_crs(geo_crs, allow_override=True)
nh_disc = gpd.GeoDataFrame(geometry=nh_disc)
# cut discs at coastal boundary so only ocean area remains
ca = ca_disc.overlay(boundary, how='difference')
tx = tx_disc.overlay(boundary, how='difference')
nh = nh_disc.overlay(boundary, how='difference')
# -
# make rectangles (not used)
def get_bounding_box(lats, lons):
    """Return a one-row GeoDataFrame holding the axis-aligned box spanning lats x lons."""
    # Corner points in (lon, lat) order; envelope of the polygon is the box.
    corners = [Point(lon, lat) for lon in lons for lat in lats]
    envelope = Polygon(corners).envelope
    return gpd.GeoDataFrame(geometry=gpd.GeoSeries(envelope, crs=geo_crs))
# Rectangular masks (not used further below)
ca_box = get_bounding_box(ca_lats, ca_lons)
tx_box = get_bounding_box(tx_lats, tx_lons)
nh_box = get_bounding_box(nh_lats, nh_lons)
# plot desal plants on map
fig, ax = plt.subplots(figsize=(9, 9))
boundary.plot(ax=ax, color='darkgreen', alpha=0.2)
boundary.boundary.plot(ax=ax, color='darkgreen', alpha=0.7, linewidth=0.1)
# california
ca.plot(ax=ax, color='darkblue', alpha=0.5, label='Sample region')
gpd.GeoSeries(Point(ca_cent)).plot(ax=ax, color='darkred', markersize=50, marker='*', label='Desal. plant')
# texas
tx.plot(ax=ax, color='darkblue', alpha=0.5, label='Sample region')
gpd.GeoSeries(Point(tx_cent)).plot(ax=ax, color='darkred', markersize=50, marker='*')
# new hampshire
nh.plot(ax=ax, color='darkblue', alpha=0.5, label='Sample region')
gpd.GeoSeries(Point(nh_cent)).plot(ax=ax, color='darkred', markersize=50, marker='*')
# set limits to roughly the continental US
ax.set_xlim(-127, -66)
ax.set_ylim(24, 50)
plt.title('Selected sample regions')
ax.legend()
plt.show()
# ## Read in temp and color data
# read data: sea-surface temperature and chlorophyll concentration grids
t_raw = pd.read_csv(data_clean_path / 'sst.csv')
c_raw = pd.read_csv(data_clean_path / 'chlor_a.csv')
# c_ann_raw = pd.read_csv(data_clean_path / 'chlor_a_annual.csv')
# + tags=[]
# merge on x/y values; suffixes mark chlorophyll (_c) vs temperature (_t) columns
m = pd.merge(left=c_raw, right=t_raw, how='inner', on=['x', 'y'], suffixes=('_c', '_t'))
# + tags=[]
# make geodataframe (shapely Points are (x=lon, y=lat))
geo = [Point(lon, lat) for lat, lon in zip(m.lat_c, m.lon_c)]
geo_m = gpd.GeoDataFrame(m, geometry=geo, crs=geo_crs)
# -
# ## Subset to sample zones
# +
# make sample zones
# first convert points to convex hulls, then resnip them to the coastlines
pac_sample_zone = MultiPoint((geo_m.overlay(ca, how='intersection').geometry.values)).convex_hull
pac_sample_zone = gpd.GeoSeries(pac_sample_zone, crs=geo_crs)
pac_sample_zone = gpd.GeoDataFrame(geometry=pac_sample_zone).overlay(ca, how='intersection')
atl_sample_zone = MultiPoint((geo_m.overlay(nh, how='intersection').geometry.values)).convex_hull
atl_sample_zone = gpd.GeoSeries(atl_sample_zone, crs=geo_crs)
atl_sample_zone = gpd.GeoDataFrame(geometry=atl_sample_zone).overlay(nh, how='intersection')
gul_sample_zone = MultiPoint((geo_m.overlay(tx, how='intersection').geometry.values)).convex_hull
gul_sample_zone = gpd.GeoSeries(gul_sample_zone, crs=geo_crs)
gul_sample_zone = gpd.GeoDataFrame(geometry=gul_sample_zone).overlay(tx, how='intersection')
# -
# -
# # Simple random sampling
# Using rejection sampling. Here we scale up the number of target samples relative to the bounding box containing the sampling zone, then sample the entire bounding box, and reject any samples not in the sampling zone. It can be shown that the sample points are uniformly randomly distributed within our target sampling zone
def rejection_sample(n, region):
    """Draw approximately n points uniformly at random from `region`.

    Uniformly samples the region's bounding box (oversampled by the inverse
    of the region/box area ratio) and rejects points outside the region.
    """
    bounds = region.bounds
    # Area fraction of the bounding box actually covered by the region.
    box = Polygon([Point([bounds.minx, bounds.miny]), Point([bounds.minx, bounds.maxy]),
                   Point([bounds.maxx, bounds.miny]), Point([bounds.maxx, bounds.maxy])])
    box_area = (gpd.GeoDataFrame(
        geometry=gpd.GeoSeries(box, crs=geo_crs).envelope).to_crs(proj_crs).area)
    pct_sample_area = region.to_crs(proj_crs).area / box_area
    # Oversample the box so that roughly n points survive the rejection step.
    n_scale = int(np.ceil(n / pct_sample_area))
    # Uniform candidates over the box; shapely Points take (x=lon, y=lat).
    xs = np.random.uniform(bounds.minx, bounds.maxx, n_scale)
    ys = np.random.uniform(bounds.miny, bounds.maxy, n_scale)
    candidates = [Point(x, y) for x, y in zip(xs, ys)]
    accepted = [pt for pt in candidates if region.contains(pt).values]
    print(f'Targeted {n} samples, {len(accepted)} returned ({len(accepted)-n})')
    return gpd.GeoSeries(accepted, crs=region.crs)
# Target number of sample points per region
SAMPLES = 165
ca_samples = rejection_sample(SAMPLES, pac_sample_zone)
tx_samples = rejection_sample(SAMPLES, gul_sample_zone)
nh_samples = rejection_sample(SAMPLES, atl_sample_zone)
# make tuples of sample zones, discs, and desalination plant locations
PAC = [ca_samples, ca, ca_cent] # pacific
ATL = [nh_samples, nh, nh_cent] # atlantic
GUL = [tx_samples, tx, tx_cent] # gulf
# +
# Plot each region's samples, disc boundary and plant location side by side
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(20,20))
# pacific
PAC[1].boundary.plot(ax=ax1, alpha=0.8, color='gray')
PAC[0].plot(ax=ax1, markersize=10, label='sample')
gpd.GeoSeries(Point(PAC[2])).plot(ax=ax1, color='darkred', markersize=100, marker='*', label='desal. plant')
ax1.set_title('Pacific: Monterrey, CA')
# gulf
GUL[1].boundary.plot(ax=ax2, alpha=0.8, color='gray')
GUL[0].plot(ax=ax2, markersize=10, label='sample')
gpd.GeoSeries(Point(GUL[2])).plot(ax=ax2, color='darkred', markersize=100, marker='*', label='desal. plant')
ax2.set_title('Gulf: Freetown, TX')
# atlantic
ATL[1].boundary.plot(ax=ax3, alpha=0.8, color='gray')
ATL[0].plot(ax=ax3, markersize=10, label='sample')
gpd.GeoSeries(Point(ATL[2])).plot(ax=ax3, color='darkred', markersize=100, marker='*', label='desal. plant')
ax3.set_title('Atlantic: Hamilton, MA')
ax1.legend()
ax2.legend()
ax3.legend()
plt.show()
# -
| notebooks/sd_rs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Ref : https://musicinformationretrieval.com/novelty_functions.html
import os
import sys
from os import listdir
from os.path import isfile, join
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
from PIL import Image
import librosa
import librosa.display
import IPython.display as ipd
# %matplotlib inline
# Print the environment versions for reproducibility
print(sys.version)
print('librosa : ', librosa.__version__)
print('numpy : ', np.__version__)
print('scipy : ', sp.__version__)
print('matplotlib : ', mpl.__version__)
# +
# Collect every regular file in the sample_audio directory
files = [os.path.join('./sample_audio', f) for f in os.listdir('./sample_audio') if os.path.isfile(os.path.join('./sample_audio', f))]
print(files)
#files = "./sample_audio/10sec.wav"
# +
def zerolistmaker(n):
    """Return a list containing n zeros."""
    return [0] * n
def kl(p, q):
    """Kullback-Leibler divergence D(P || Q) for discrete distributions

    Parameters
    ----------
    p, q : array-like, dtype=float, shape=n
        Discrete probability distributions.

    Returns
    -------
    float
        Sum over p != 0 of p * log(p / q); terms with p == 0 contribute 0.
        (Terms where q == 0 and p != 0 yield inf, as in the original.)
    """
    # BUGFIX: the deprecated np.float alias was removed in NumPy 1.24;
    # the builtin float is the documented replacement.
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
# Sanity checks: KL divergence is 0 for identical distributions, grows with mismatch
print(kl([0,1,0],[0,1,0]))
print(kl([0,1,0],[0,0.5,0.5]))
print(kl([0,1,0],[0.3,0.3,0.3]))
'''Chord
A : A C# E
Bm : B D F#
C#m : C# E G#
D : B F# A
E : E G# B
F#m : F# A C#
G# : G# B F#
A : A C# E
'''
# Pitch-class indices for the note names used in the triads below
c_ = 1
d_ = 2
e_ = 4
f_ = 6
g_ = 8
a_ = 9
b_ = 11
# Triads (pitch-class lists) for the chords listed above
a__=[a_,c_,e_]
b__=[b_,d_,f_]
c__=[c_,e_,g_]
d__=[b_,f_,a_]
e__=[e_,g_,b_]
f__=[f_,a_,c_]
g__=[g_,b_,f_]
chords__ = [a__,b__,c__,d__,e__,f__,g__]
chords = []
# Build a normalized 12-bin chroma template for every chord
for chord in chords__:
    temp = zerolistmaker(12)
    for note in chord:
        temp[note] = 1
    #A = [0,1,0,0,1,0,0,0,0,1,0,0]
    print(temp)
    #normalize
    # NOTE(review): list / numpy scalar promotes temp to an ndarray here
    temp /= np.sum(temp)
    chords.append(temp)
# +
duration = 5
madi_time = 1.184
# one madi (bar) lasts 1.184 s
# the piece starts with a pickup (incomplete) bar, hence the 1.184/6 offset
y, sr = librosa.load(files[2],offset = 1.184/6, duration = duration)
onset_env = librosa.onset.onset_strength(y, sr=sr)
# NOTE(review): librosa.beat.estimate_tempo was removed in later librosa
# releases; modern code uses librosa.beat.tempo -- confirm pinned version
tempo = librosa.beat.estimate_tempo(onset_env, sr=sr)
print( "tempo : ", tempo) #(beats per minute)
print("How many Madi?: ",duration/madi_time)
half_measure = 60 / tempo
# Recompute the actual loaded duration from the sample count
duration = len(y)/sr
'''
About STFT function
(y, n_fft=2048, hop_length=512, win_length=2048, window='hann', center=True, dtype=<class 'numpy.complex64'>, pad_mode='reflect
[shape=(1 + n_fft/2, t = 1 + sr*time/hop_length), dtype=dtype]
'''
plt.figure(figsize=(10,4))
librosa.display.waveplot(y,sr)
plt.title(files[1] + "_waveplot")
# Magnitude spectrogram -> 12-bin chromagram (frames along axis 1)
S = np.abs(librosa.stft(y))
chroma = librosa.feature.chroma_stft(S=S, sr=sr)
print(chroma.shape)
pointer = 0
def chordname(idx):
    """Map a chord index (0-6: diatonic chords of A major) to its display name."""
    names = {0: "A", 1: "Bm", 2: "C#m", 3: "D", 4: "E", 5: "F#m", 6: "G#o"}
    # Any index outside the table is reported as unknown.
    return names.get(idx, "?")
'''
#Print
print(chroma.shape)
print(chroma)
print(chroma[0].shape)
print(chroma.T.shape)
print(chroma.T[0:50])
'''
result = []
# For every bar: sum the chroma frames in that bar and pick the chord
# template with the smallest KL divergence to the normalized sum
for i in range(int(duration/madi_time)):
    temp = pointer
    pointer += sr*madi_time/512#hop_length
    #print(pointer)
    # MW: square-like weighting -- suppress weak bins, cap saturated ones
    chroma.T[chroma.T<0.5] = 0.01
    chroma.T[chroma.T==1] = 0.8
    sum_ = np.sum(chroma.T[:][int(temp):int(pointer)], axis = 0)
    print(sum_)
    #sum_[sum_<np.max(sum_)/4] = 0.001
    sum_ /= np.sum(sum_)
    print(sum_)
    score = 100
    score_idx = 0
    # NOTE(review): the inner loop variable `i` shadows the outer bar index
    for i in range(len(chords)):
        new_score = kl(chords[i], sum_)
        print(chordname(i),": ", new_score)
        if score >new_score:
            score = new_score
            score_idx = i
    #print score
    print("*** Final:",chordname(score_idx)," - ", score)
    result.append(chordname(score_idx))
print(result)
#print(chordname(score_idx))
#Plotting
plt.figure(figsize=(10, 4))
librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
plt.colorbar()
plt.title('Chromagram')
plt.tight_layout()
# +
# Tempo estimate for the full file (no offset/duration limits this time)
y, sr = librosa.load(files[2])
onset_env = librosa.onset.onset_strength(y, sr=sr)
# NOTE(review): librosa.beat.estimate_tempo was removed in later librosa
# releases; modern code uses librosa.beat.tempo -- confirm pinned version
tempo = librosa.beat.estimate_tempo(onset_env, sr=sr)
print( "tempo : ", tempo) #(beats per minute)
half_measure = 60 / tempo
print(half_measure)
# +
#0.394
# -
| chord_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# READING THE FILES
data = pd.read_csv('D:\\Udemy projects,\\Eda\\height-weight-gender-Kung-San.csv')
data.head()
data.tail()
# NOTE(review): this evaluates type() of a string literal and always yields
# <class 'str'> -- probably meant type(data)
type('height-weight-gender-Kung-San')
data.shape
# We take heights and weights in two separate numpy arrays
heights = data['height'].values
weights = data['weight'].values
# Plotting a scatter plot
plt.figure(figsize = (10,8))
plt.scatter(heights, weights)
plt.grid()
plt.xlabel('Heights')
plt.ylabel('Weights')
plt.title('Weight vs Heights')
# Boxplot of heights and weights
plt.figure(figsize = (10,6))
plt.subplot(121)
plt.boxplot(heights)
plt.subplot(122)
plt.boxplot(weights)
# Plotting a histogram
plt.figure(figsize = (20,6))
plt.subplot(121)
plt.hist(heights)
plt.title("Histogram of heights")
plt.subplot(122)
plt.hist(weights)
plt.title("Histogram of weights")
# Mean, median, standard deviation of heights
mean_height = np.mean(heights)
median_height = np.median(heights)
standard_deviation_height = np.std(heights)
print('mean_height = {}, median_height= {}, std.dev of height = {}'.format(mean_height, median_height,
                                                                           standard_deviation_height))
# Inter Quartile Range
Q1_height, Q3_height = np.percentile(heights, [25,75])
IQR_height = Q3_height - Q1_height
print("For heights, Q1 = {}, Q3 = {}, IQR = {}".format(Q1_height, Q3_height, IQR_height))
# + active=""
# Similarly we can find the mean, median, Q1, Q3, IQR for the weights function
# -
# Pearson correlation between height and weight
np.corrcoef(heights,weights)[0,1]
# # Boxplot of height and weight grouped by male/female
#
data.boxplot(['height', 'weight'],by= 'male')
# # taking heights of male and female separately
heights_male = data.query('male == 1')['height']
heights_female = data.query('male == 0')['height']
print("No of males = {}, No of females = {}".format(heights_male.shape[0], heights_female.shape[0]))
# # Taking weights of male and female separately
weights_male = data.query('male == 1')['weight']
weights_female = data.query('male == 0')['weight']
# # Plotting the scatter plot of heights and weights of male and female
plt.figure(figsize = (10,8))
plt.scatter(heights_male, weights_male, color = 'b', label = 'male')
plt.scatter(heights_female, weights_female, color = 'r', label = 'female')
plt.grid()
plt.xlabel('height')
plt.ylabel('weights')
plt.title('Height vs weight')
plt.legend()
plt.show()
| Udemy eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset (one row per mouse per timepoint)
data_combined = pd.merge(mouse_metadata, study_results, on = "Mouse ID", how = "outer" )
# -
# #Reading the DataFrames
#
mouse_metadata.head()
study_results.head()
data_combined.head()
# Checking the number of mice in the DataFrame.
nmice = len(data_combined["Mouse ID"].unique())
print(f"There are {nmice} mouse in the DataFrame")
# +
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
#data_combined.groupby(["Mouse ID", "Timepoint"]).count()
# NOTE(review): this removes every Timepoint-0 row, not duplicates -- confirm intent
data_combined = data_combined[data_combined.Timepoint !=0]
#drop_duplicate = data_combined.loc[data_combined.duplicated(subset=["Mouse ID", ]), "Mouse ID"].unique()
#clean_combined = data_combined[data_combined["Mouse ID"].isin(drop_duplicate) ==False]
data_combined
# -
# +
# Optional: Get all the data for the duplicate mouse ID.
#List of all the duplicates
duplicate_row =data_combined[data_combined.duplicated(["Mouse ID"])]
#print("Duplicate Rows based on a single column are: ", duplicate_row, sep='\n')
duplicate_row2= pd.DataFrame(duplicate_row)
print("Below is the DataFrame containing solo duplicated Mouse ID")
duplicate_row2.head(50)
# -
print("Below is the list of all duplicate Mouse ID")
unique_duplicate_id = duplicate_row2["Mouse ID"].unique()
unique_duplicate_id
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
print(" Here I have my original data, but I am only keeping every first instance of all duplicates")
# NOTE(review): this keeps only ONE row (one timepoint) per mouse, discarding
# the rest of each mouse's time series -- verify downstream cells expect that
data_combined.drop_duplicates(subset= "Mouse ID", keep = 'first', inplace = True)
data_combined
# Checking the number of mice in the clean DataFrame.
Nmice = len(data_combined)
print(f"After cleaning the DataFrame, there are {Nmice} mouse left")
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method is the most straightforward, creating multiple series and putting them all together at the end.
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method produces everything in a single groupby function.
Summary_df = data_combined[["Drug Regimen", "Tumor Volume (mm3)"]].copy()
# Per-regimen aggregates, each renamed to a descriptive column name.
# NOTE(review): "Standar Deviation" is a misspelled column label that the
# Variance computation below relies on -- it is a runtime string, left as-is.
Summary_count_df = Summary_df.groupby(["Drug Regimen"]).count()
Summary_count_df = Summary_count_df.rename(columns={"Tumor Volume (mm3)": "Count"})
Summary_sum_df = Summary_df.groupby(["Drug Regimen"]).sum()
Summary_sum_df = Summary_sum_df.rename(columns={"Tumor Volume (mm3)": "Sum"})
Summary_median_df = Summary_df.groupby(["Drug Regimen"]).median()
Summary_median_df = Summary_median_df.rename(columns={"Tumor Volume (mm3)": "Median"})
Summary_stdev_df = Summary_df.groupby(["Drug Regimen"]).std()
Summary_stdev_df = Summary_stdev_df.rename(columns={"Tumor Volume (mm3)": "Standar Deviation"})
Summary_sem_df = Summary_df.groupby(["Drug Regimen"]).sem()
Summary_sem_df = Summary_sem_df.rename(columns={"Tumor Volume (mm3)": "SEM"})
Summary = pd.concat([Summary_count_df, Summary_sum_df, Summary_median_df, Summary_stdev_df, Summary_sem_df], axis=1, sort=False)
Summary
# +
# Merge-based variant of the same summary table, adding Mean and Variance
FINAL_pd = pd.merge(Summary_count_df, Summary_sum_df, on = "Drug Regimen", how = "outer")
FINAL_pd = pd.merge(FINAL_pd, Summary_median_df, on = "Drug Regimen", how = "outer")
FINAL_pd = FINAL_pd.rename(columns={"Tumor Volume (mm3)_x":"Count", "Tumor Volume (mm3)_y":"Sum", "Tumor Volume (mm3)": "Median"})
#Summary_df=Summary_df.describe()
FINAL_pd["Mean"]=FINAL_pd.Sum/FINAL_pd.Count
FINAL_pd = pd.merge(FINAL_pd, Summary_stdev_df, on = "Drug Regimen", how = "outer")
FINAL_pd = FINAL_pd.rename(columns={"Tumor Volume (mm3)":"Standard Deviation"})
FINAL_pd["Variance"]=FINAL_pd["Standar Deviation"]**2
FINAL_pd = pd.merge(FINAL_pd, Summary_sem_df, on = "Drug Regimen", how = "outer")
FINAL_pd = FINAL_pd.rename(columns={"Tumor Volume (mm3)":"SEM"})
FINAL_pd
# -
# ## Bar Plots
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pandas.
bar_df = data_combined[["Drug Regimen", "Tumor Volume (mm3)","Timepoint"]].copy()
bar_df=bar_df.groupby(["Drug Regimen", "Timepoint"]).count()
bar_df.plot(kind="bar", figsize=(20,5))
# +
# Generate a bar plot showing the number of mice per time point for each treatment throughout the course of the study using pyplot.
bar_df = data_combined[["Drug Regimen", "Tumor Volume (mm3)","Timepoint"]].copy()
bar_df=bar_df.groupby(["Drug Regimen", "Timepoint"]).count()
x = np.arange(len(bar_df))
plt.bar(x, bar_df["Tumor Volume (mm3)"], color='r', alpha=0.7, align="edge")
plt.title(" Number of mice per time point for each treatment")
# -
# ## Pie Plots
# Generate a pie plot showing the distribution of female versus male mice using pandas
sex_pd = data_combined[["Sex", "Mouse ID"]].copy()
# Count mice per sex; groupby sorts alphabetically (Female, Male)
sex_pd= sex_pd.groupby(["Sex"]).count()
sex_pd.plot(kind="pie", subplots=True)
plt.title("Pie Plot showing the distribution of female versus male mice using pandas")
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pyplot
sex_pd = data_combined[["Sex", "Mouse ID"]].copy()
sex_pd.head()
sex_pd= sex_pd.groupby(["Sex"]).count()
sex_pd
# groupby(["Sex"]) yields alphabetical order, so the slices are Female then Male
labels = 'Female', 'Male'
plt.title("Pie Plot showing the distribution of female versus male mice using matplotlib")
explode = (0, 0.05) # only "explode" the 2nd slice (Male)
plt.pie(sex_pd["Mouse ID"], explode=explode, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
# +
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# Filter original data for just the Capomulin Drug Regime
Capomulin_df = duplicate_row2.loc[(duplicate_row2["Drug Regimen"] == "Capomulin"),:]
# Set variables to hold relevant data
timepoint = Capomulin_df["Timepoint"]
tumor_volume = Capomulin_df["Tumor Volume (mm3)"]
# Plot the tumor volume for various mice
# NOTE(review): this draws ALL Capomulin mice as one connected series; the
# prompt asks for a single mouse — confirm whether one mouse should be selected.
tumor_volume_line, = plt.plot(timepoint, tumor_volume)
# Show the chart, add labels
plt.xlabel('Timepoint')
plt.ylabel('Tumor Volume')
plt.title('Tumor Volume over Time for Capomulin Mice')
plt.xlim(5, 45)
plt.ylim(20, 50)
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
filtered_df = data_combined.loc[(data_combined["Drug Regimen"] == "Capomulin") | (data_combined["Drug Regimen"] == "Ramicane") | (data_combined["Drug Regimen"] == "Ceftamin") | (data_combined["Drug Regimen"] == "Propriva"), :]
# Sort by Timepoint descending so each mouse's latest measurement comes first
filtered_df = filtered_df.sort_values("Timepoint", ascending = False)
# Dropping duplicates, keeping first value, should be the latest timepoint per mouse
filtered_df = filtered_df.drop_duplicates(subset="Mouse ID", keep='first')
# Determine quartiles
# NOTE(review): quartiles/IQR are computed over all four regimens pooled
# together, not per regimen — confirm that is the intended analysis.
quartiles = filtered_df['Tumor Volume (mm3)'].quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
# Determine upper and lower bounds (standard 1.5 * IQR fences)
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
# Print a filtered dataframe of any outliers
outliers_df = filtered_df.loc[(filtered_df['Tumor Volume (mm3)'] > upper_bound) | (filtered_df['Tumor Volume (mm3)'] < lower_bound), :]
outliers_df
# -
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# NOTE(review): a single pooled box across all four regimens is drawn —
# confirm whether one box per regimen was intended.
Tumor_Volume = filtered_df['Tumor Volume (mm3)']
fig1, ax1 = plt.subplots()
ax1.set_title('Tumor Volume of Mice')
ax1.set_ylabel('Tumor Volume')
ax1.boxplot(Tumor_Volume)
plt.show()
# ## Line and Scatter Plots
# +
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# Per-mouse means: tumor volume is averaged over timepoints (weight presumably
# constant per mouse, so its mean just collapses duplicates — confirm)
mouse_weight = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Weight (g)"].mean()
tumor_volume = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Tumor Volume (mm3)"].mean()
# Create Scatter Plot with values calculated above
plt.scatter(mouse_weight,tumor_volume)
plt.xlabel("Weight of Mouse")
plt.ylabel("Tumor Volume")
plt.show()
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
mouse_weight = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Weight (g)"].mean()
tumor_volume = Capomulin_df.groupby(Capomulin_df["Mouse ID"])["Tumor Volume (mm3)"].mean()
# Perform a linear regression of mouse weight versus average tumor volume.
# Fix: the intercept was previously unpacked into `int`, shadowing the builtin.
slope, intercept, r, p, std_err = st.linregress(mouse_weight, tumor_volume)
# Equation of the fitted line: predicted tumor volume for each weight
fit = slope * mouse_weight + intercept
# Plot the linear model on top of scatter plot
plt.scatter(mouse_weight,tumor_volume)
plt.xlabel("Weight of Mouse")
plt.ylabel("Tumor Volume")
plt.plot(mouse_weight,fit,"--")
plt.xticks(mouse_weight, rotation=90)
plt.show()
# Calculate correlation coefficient
corr = round(st.pearsonr(mouse_weight,tumor_volume)[0],2)
print(f'The correlation between weight and tumor value is {corr}')
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Alternating Least Square (ALS)
import pandas as pd
import numpy as np
from tqdm import tqdm
from interaction_table import orders_weigher, InteractionTable
from process_data import preprocess_orders_and_clicks, additional_filtration_orders_and_clicks
from h3_index import H3Index
# #!pip install fastparquet
h3index = H3Index('../data/h3_to_chains.pkl')
# +
# # !mkdir -p ../data/moscow_slice
# preprocess_orders_and_clicks(
# path_to_orders="../data/orders",
# path_to_clicks="../data/clicks",
# save_path="../data/moscow_slice",
# )
# +
def get_clicks():
    """Load the raw click events from the local parquet dump."""
    clicks_path = '../data/clicks/'
    return pd.read_parquet(f'{clicks_path}/clicks.parquet')
def get_orders():
    """Load Moscow orders, normalize the user-id column, and apply region filtering.

    NOTE(review): clicks are read from the moscow_slice directory here while
    get_clicks reads from ../data/clicks — confirm which dump is intended.
    """
    path = '../data/moscow_slice/'
    orders = pd.read_parquet(f'{path}/orders.parquet')
    orders = orders.rename(columns={"customer_id": "user_id"})
    clicks = pd.read_parquet(f'{path}/clicks.parquet')
    #regs = pd.read_pickle('../data/CITIES_MAPPING.pkl')
    #regs = [v for k, v in regs.items() if v > 2]
    regs = [1] # moscow
    # Keep only Moscow (region 1) orders; clicks are passed for the joint filtering step
    orders, _ = additional_filtration_orders_and_clicks(orders, clicks, regs_to_filter=regs)
    return orders
# -
# Build the user x chain interaction table from Moscow orders
interactions = InteractionTable(get_orders(), None)
interactions.sparse_interaction_matrix
# +
# Drop users with too little signal (total interaction weight <= 2)
test = interactions.interaction_df[['user_id', 'weight']]
test = test.groupby('user_id').sum()
test = test.reset_index()[['user_id', 'weight']]
user_with_few_interactions = set(test[test['weight'] <= 2].user_id.unique())
ncf_interactions = interactions.interaction_df
ncf_interactions = ncf_interactions.query('user_id not in @user_with_few_interactions')
# Persist the filtered interactions for the NCF experiments
ncf_interactions.to_parquet('../data/moscow_slice/ncf_orders.parquet')
print("ncf_interactions:", len(ncf_interactions))
ncf_valid_users = set(ncf_interactions['user_id'].unique())
print("ncf_uniq_users:", len(ncf_valid_users))
ncf_valid_chains = set(ncf_interactions['chain_id'].unique())
print("ncf_uniq_chains:", len(ncf_valid_chains))
# +
val_df = pd.read_pickle('../data/test_VALID.pkl')
val_df = val_df[['customer_id', 'h3', 'chain_id']]
val_df = val_df.rename(columns={"customer_id": "user_id"})
val_df.user_id = val_df.user_id.astype(int)
print("initial:")
print("df, uniq_users, uniq_chains:", len(val_df), len(val_df.user_id.unique()), len(val_df.chain_id.unique()))
val_df = val_df.query('h3 in @h3index.valid')
print()
print("after invalid h3 filtering:")
print("df, uniq_users, uniq_chains:", len(val_df), len(val_df.user_id.unique()), len(val_df.chain_id.unique()))
val_df = val_df.query('user_id in @ncf_valid_users')
print()
print("after invalid users filtering:")
print("df, uniq_users, uniq_chains:", len(val_df), len(val_df.user_id.unique()), len(val_df.chain_id.unique()))
val_df = val_df.query('chain_id in @ncf_valid_chains')
print()
print("after invalid chains filtering:")
print("df, uniq_users, uniq_chains:", len(val_df), len(val_df.user_id.unique()), len(val_df.chain_id.unique()))
val_df = val_df.drop_duplicates()
print()
print("after dropping duplicates:")
print("df, uniq_users, uniq_chains:", len(val_df), len(val_df.user_id.unique()), len(val_df.chain_id.unique()))
# -
# %%time
val_df["valid_chain"] = val_df["h3"].map(h3index.h3_to_chains)
val_df = val_df.explode("valid_chain")
val_df = val_df.query('valid_chain in @ncf_valid_chains')
val_df["h3"] = val_df["h3"].map(h3index.h3_to_index)
val_df = val_df.rename(columns={"chain_id": "test_chain_id"})
val_df = val_df.rename(columns={"valid_chain": "chain_id"})
val_df.head()
print("df, uniq_users, uniq_test_chains, uniq_chains:",
len(val_df), len(val_df.user_id.unique()),
len(val_df.test_chain_id.unique()), len(val_df.chain_id.unique()))
# %%time
path = '../data/moscow_slice/'
val_df.to_parquet(f'{path}/ncf_val_df.parquet')
# ## Сколько данных тестовой выборки отсеивается в зависимости от interactions
# ### Clicks + orders: full
#
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 1987082 1044374 19453
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 1984220 1043382 19118
#
# ### Orders: full
#
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 1860055 952741 19285
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 1856314 951570 18664
# ### Clicks + orders: processed full
#
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 851440 415267 17272
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 692249 369776 16446
# ### Orders: processed moscow
#
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 483559 212160 11404
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 341373 172590 5297
# ### Orders: processed saint-peterburg
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 164699 62373 5801
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 87146 46513 1384
# ### Orders: processed other regions
#
# initial:
# df, uniq_users, uniq_chains: 2300001 1253198 19810
#
# after invalid h3 filtering:
# df, uniq_users, uniq_chains: 2293762 1249258 19788
#
# after invalid users filtering:
# df, uniq_users, uniq_chains: 376063 178911 13700
#
# after invalid chains filtering:
# df, uniq_users, uniq_chains: 240902 136697 9381
# # Выводы:
# * 20% тестовых юзеров нет ни в clicks, ни в orders (cold start);
# * 75% тестовых юзеров есть в orders, т.е clicks можно не рассматривать (всего 5%);
# * только 30% (!!!!) тестовых юзеров остается после вызова processed_data;
# * 48% -- москва, 39% -- регионы, 13% -- спб в orders после вызова processed_data;
# * также если сделать val.drop_duplicates, то отсеется порядка 30% строк!
# ### Если h3 пользователя неизвестен, то можно брать следующий в иерархии h3 (более крупный)
val_df = pd.pivot_table(val_df,
values=['chain_id'],
index=['user_id', 'h3'],
aggfunc={'chain_id': set})
val_df = val_df.reset_index()
# +
def predict(model, user_id, h3, thr=0.9, top_k=10, filter_already_liked_items=True):
    """Recommend up to top_k chains for a user, restricted to chains available in the h3 cell.

    Only recommendations with score > thr are kept; returns a list of chain ids.
    """
    user_index = interactions.user_index[user_id]
    valid_chains = h3index.h3_to_chains[h3]
    # Ask the model to exclude every chain that is not available in this h3 cell
    filter_items = [v for k, v in interactions.chain_index.items() if k not in valid_chains]
    top = model.recommend(user_index,
                          interactions.sparse_interaction_matrix.T,
                          N=top_k,
                          filter_already_liked_items=filter_already_liked_items,
                          filter_items=filter_items)
    # Map internal indices back to chain ids, keeping only confident predictions
    top = [interactions.r_chain_index[x] for x, score in top if score > thr]
    return top
def old_items(interactions_df, user_id):
    """Return the set of chain ids this user has already interacted with."""
    user_mask = interactions_df['user_id'] == user_id
    return set(interactions_df.loc[user_mask, 'chain_id'].unique())
# -
def metric(y_true, y_pred, y_old, at1=10, at2=30, average=True):
    """Score recommendations per (user, h3) pair.

    Computes precision@at1 and precision@at2 against the true chains, plus
    "new" precision variants that only count recommended chains absent from
    the user's history (``y_old``).  Per pair:
        total = new_prec@at1 + new_prec@at2 + 0.5 * (prec@at1 + prec@at2)

    Returns a 3-tuple (total, new, all); each element is averaged over pairs
    when ``average`` is True, otherwise a per-pair list.
    """
    totals, news, alls = [], [], []
    for true_raw, pred_raw, old_raw in zip(y_true, y_pred, y_old):
        true_list = list(true_raw)
        pred_list = list(pred_raw)
        # Non-set/list history values (e.g. NaN for cold-start users) mean "no history"
        history = set(old_raw) if isinstance(old_raw, (set, list)) else set()
        true1, true2 = set(true_list[:at1]), set(true_list[:at2])
        pred1, pred2 = set(pred_list[:at1]), set(pred_list[:at2])
        prec1 = len(true1 & pred1) / at1
        prec2 = len(true2 & pred2) / at2
        new_prec1 = len((pred1 - history) & true1) / at1
        new_prec2 = len((pred2 - history) & true2) / at2
        totals.append(new_prec1 + new_prec2 + 0.5 * (prec1 + prec2))
        news.append(new_prec1 + new_prec2)
        alls.append(prec1 + prec2)
    if average:
        return np.mean(totals), np.mean(news), np.mean(alls)
    return totals, news, alls
# +
# # !pip install implicit
import implicit
def hyper_params(val_df, factors=60, thr=0.7, top_k=30, filter_liked=True):
    """Train an ALS model with the given hyper-parameters and print validation scores."""
    print('factors: ', factors, ', thr: ', thr, ', top_k: ', top_k, ', filter_liked: ', filter_liked)
    model = implicit.als.AlternatingLeastSquares(factors=factors)
    model.fit(interactions.sparse_interaction_matrix)
    # NOTE(review): `val` aliases (does not copy) val_df, so the added columns
    # mutate the caller's dataframe — confirm that is intended.
    val = val_df
    val['pred_chains'] = val.apply(lambda x: predict(model, x.user_id, x.h3, thr, top_k, filter_liked), axis=1)
    val['old_chains'] = val.apply(lambda x: old_items(interactions.interaction_df, x.user_id), axis=1)
    scores = metric(val['chain_id'], val['pred_chains'], val['old_chains'])
    print('total, new, all = ', scores)
    print()
# -
hyper_params(val_df, factors=60, thr=0.7, top_k=30, filter_liked=True)
# factors: 60 , thr: 0.7 , top_k: 30 , filter_liked: True
#
# total, new, all = (0.02605082142811052, 0.00023745918670228555, 0.05162672448281647)
for factors in [30, 40, 50, 60, 70]:
for thr in [0.7, 0.75, 0.8, 0.85, 0.9]:
for top_k in [5, 10, 20, 30]:
for filter_liked in [True, False]:
hyper_params(val_df, factors, thr, top_k, filter_liked)
path = '../data/moscow_slice/'
mp_chain_to_index = pd.read_pickle(f'{path}/chain_to_index.pkl')
mp_user_to_index = pd.read_pickle(f'{path}/user_to_index.pkl')
mp_index_to_chain = {v:k for k, v in mp_chain_to_index.items()}
mp_index_to_user = {v:k for k, v in mp_user_to_index.items()}
mp_index_to_h3 = h3index.r_h3_to_index
path = '../data/moscow_slice/'
p_val_df = pd.read_parquet(f'{path}/processed_val_df.parquet')
# p_val_df = p_val_df.rename(columns={"h3_id": "chain_id"})
# p_val_df = p_val_df.rename(columns={"test_chain_id": "h3_id"})
# p_val_df = p_val_df.rename(columns={"chain_id": "test_chain_id"})
print("df, uniq_users, uniq_pred_chains, uniq_test_chains:",
len(p_val_df), len(p_val_df.user_id.unique()), len(p_val_df.h3_id.unique()),
len(p_val_df.pred_chain_id.unique()), len(p_val_df.test_chain_id.unique()))
p_val_df.head()
# %%time
p_val_df["user_id"] = p_val_df["user_id"].map(mp_index_to_user)
p_val_df["h3_id"] = p_val_df["h3_id"].map(mp_index_to_h3)
p_val_df["pred_chain_id"] = p_val_df["pred_chain_id"].map(mp_index_to_chain)
p_val_df["test_chain_id"] = p_val_df["test_chain_id"].map(mp_index_to_chain)
p_val_df = p_val_df.rename(columns={"test_chain_id": "chain_id", "h3_id": "h3"})
print("df, uniq_users, uniq_pred_chains, uniq_test_chains:",
len(p_val_df), len(p_val_df.user_id.unique()),
len(p_val_df.pred_chain_id.unique()), len(p_val_df.chain_id.unique()))
p_val_df.head()
# %%time
p_val_df = pd.pivot_table(p_val_df,
values=['chain_id', 'pred_chain_id'],
index=['user_id', 'h3'],
aggfunc={'chain_id': set, 'pred_chain_id': set})
p_val_df = p_val_df.reset_index()
p_val_df = p_val_df.rename(columns={"pred_chain_id": "pred_chains", "chain_id": "chains"})
p_val_df.head()
# %%time
p_val_df['old_chains'] = p_val_df.apply(lambda x: old_items(ncf_interactions, x.user_id), axis=1)
p_val_df.head()
scores = metric(p_val_df['chains'], p_val_df['pred_chains'], p_val_df['old_chains'])
print('total, new, all = ', scores)
# total, new, all = (0.05414295764939652, 0.018852887627577093, 0.07058014004363886)
p_val_df.head(10000)
p_val_df[['h3', 'pred_chains']].head()
# %%time
p_val_df.apply(lambda x: sum([t for t in x.pred_chains if t not in h3index.h3_to_chains[x.h3]]), axis=1).sum()
| utils/ALS-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Синтаксис
#
# Наша цель -- представить предложение естественного языка в виде дерева, так, чтобы оно отражало синтаксические зависимости между словами.
#
# ## Зачем это нужно
#
# * «Банкомат съел карту» vs «карта съела банкомат»;
# * Определение правильности грамматики фразы (при порождении речи);
# * (правиловый) Машинный перевод;
# * Information extraction;
# * Синтаксическая роль токена как метрика его важности (подлежащее важнее определения), использование весов в классификаторе.
#
# Это можно сделать разными способами.
#
#
# ## Constituency parsing
#
# (парсинг составляющих)
#
# ### Что это?
#
# * слова предложения -- листья (самые нижние вершины)
# * они объединяются в более крупные вершины в логичном для синтаксиса порядке
#
# 
#
# ## Dependency parsing
# (парсинг зависимостей)
#
# ### Что это?
#
# * слова предложения -- вершины; *зависимости (dependencies)* между ними -- рёбра
# * зависимости могут быть разными: например, субъект глагола, объект глагола, прилагательное-модификатор, и так далее
#
# ### Формат
#
# Существует несколько форматов записи деревьев зависимостей, но самый популярный и общеиспользуемый -- [CoNLL-U](http://universaldependencies.org/format.html).<br/>
# Как это выглядит (пример из [русского Universal Dependency трибанка](https://github.com/UniversalDependencies/UD_Russian-SynTagRus)):
my_example = """
# sent_id = 2003Armeniya.xml_138
# text = Перспективы развития сферы высоких технологий.
1 Перспективы перспектива NOUN _ Animacy=Inan|Case=Nom|Gender=Fem|Number=Plur 0 ROOT 0:root _
2 развития развитие NOUN _ Animacy=Inan|Case=Gen|Gender=Neut|Number=Sing 1 nmod 1:nmod _
3 сферы сфера NOUN _ Animacy=Inan|Case=Gen|Gender=Fem|Number=Sing 2 nmod 2:nmod _
4 высоких высокий ADJ _ Case=Gen|Degree=Pos|Number=Plur 5 amod 5:amod _
5 технологий технология NOUN _ Animacy=Inan|Case=Gen|Gender=Fem|Number=Plur 3 nmod 3:nmod SpaceAfter=No
6 . . PUNCT _ _ 1 punct 1:punct _
"""
# Комментарии + таблица c 9 колонками (разделители табы):
# * ID
# * FORM: токен
# * LEMMA: начальная форма
# * UPOS: универсальная часть речи
# * XPOS: лингво-специфичная часть речи
# * FEATS: морфологическая информация: падеж, род, число etc
# * HEAD: id родителя
# * DEPREL: тип зависимости, то есть отношение к токену-родителю
# * DEPS: альтернативный подграф (не будем углубляться :))
# * MISC: всё остальное
#
# Отсутствующие данные представляются с помощью `_`. Больше подробностей про формат -- в [официальной документаци](http://universaldependencies.org/format.html).<br>
# User-friendly визуализация: 
#
# Открытый инструмент для визуализации, ручной разметки и конвертации в другие форматы: UD Annotatrix. [Online-интерфейс](https://universal-dependencies.linghub.net/annotatrix), [репозиторий](https://github.com/jonorthwash/ud-annotatrix).
#
# Трибанк -- много таких предложений. Обычно они разделяются двумя переносами строки.
# ### Как считывать данные в питоне
#
# Используем библиотеку [conllu](https://github.com/EmilStenstrom/conllu).
# !pip3 install conllu
from conllu import parse
help(parse)
sentences = parse(my_example)
sentence = sentences[0]
sentence[0]
sentence[-1]
# ## Визуализация
#
# В nltk есть DependencyGraph, который умеет рисовать деревья (и ещё многое другое). Для того, чтобы визуализация работала корректно, ему нужна зависимость: graphviz.
# !apt-get install graphviz
# !pip3 install graphviz
from nltk import DependencyGraph
# В отличие от `conllu`, `DependencyGraph` не справляется с комментариями, поэтому придётся их убрать. Кроме того ему обязательно нужен `deprel` *ROOT* в верхнем регистре, иначе он не находит корень.
sents = []
for sent in my_example.split('\n\n'):
# убираем коменты
sent = '\n'.join([line for line in sent.split('\n') if not line.startswith('#')])
# заменяем deprel для root
sent = sent.replace('\troot\t', '\tROOT\t')
sents.append(sent)
graph = DependencyGraph(tree_str=sents[0])
graph
tree = graph.tree()
print(tree.pretty_print())
# ## UDPipe
#
# Есть разные инструменты для парсинга зависимостей. Сегодня мы посмотрим на [UDPipe](http://ufal.mff.cuni.cz/udpipe). UDPipe умеет парсить текст с помощью готовых моделей (которые можно скачать [здесь](https://github.com/jwijffels/udpipe.models.ud.2.0/tree/master/inst/udpipe-ud-2.0-170801)) и обучать модели на своих трибанках.
#
# Собственно, в UDPipe есть три вида моделей:
# * токенизатор (разделить текст на предложения, предложения на токены, сделать заготовку для CoNLL-U)
# * тэггер (лемматизировать, разметить части речи)
# * сам парсер (проставить каждому токену `head` и `deprel`)
#
# Мы сегодня не будем обучать новых моделей (это слишком долго), а используем готовую модель для русского.
# ### The Python binding
#
# У udpipe есть питоновская обвязка. Она довольно [плохо задокументирована](https://pypi.org/project/ufal.udpipe/), но зато можно использовать прямо в питоне :)
# !pip install ufal.udpipe
from ufal.udpipe import Model, Pipeline
# !wget https://github.com/jwijffels/udpipe.models.ud.2.0/raw/master/inst/udpipe-ud-2.0-170801/russian-ud-2.0-170801.udpipe
model = Model.load("russian-ud-2.0-170801.udpipe") # path to the model
# если успех, должно быть так (model != None)
model
pipeline = Pipeline(model, 'generic_tokenizer', '', '', '')
example = "Если бы мне платили каждый раз. Каждый раз, когда я думаю о тебе."
parsed = pipeline.process(example)
print(parsed)
# Как видим, UDPipe и токенизировал, и лематизировал текст, сделал POS-tagging и, собственно, синтаксический парсинг.
# ### Command line interface
#
# Но с обвязкой бывают проблемы, и вообще довольно удобно пользоваться прекомпилированной утилитой `udpipe` из шелла.
# !wget https://github.com/ufal/udpipe/releases/download/v1.2.0/udpipe-1.2.0-bin.zip
# +
# # !unzip udpipe-1.2.0-bin.zip
# -
# !ls udpipe-1.2.0-bin/
# Внутри бинарники для всех популярных ОС, выбираем свою. У меня путь к бинарнику такой: `udpipe-1.2.0-bin/bin-linux64`.
#
# Синтаксис:
# ! udpipe-1.2.0-bin/bin-linux64/udpipe --help
# Типичная команда для парсинга будет выглядеть так:
# +
with open('example.txt', 'w') as f:
f.write(example)
# ! udpipe-1.2.0-bin/bin-linux64/udpipe --tokenize --tag --parse\
# russian-ud-2.0-170801.udpipe example.txt > parsed_example.conllu
# ! cat parsed_example.conllu
# -
# Если нас интересует только тэггинг:
# +
with open('example.txt', 'w') as f:
f.write(example)
# ! udpipe-1.2.0-bin/bin-linux64/udpipe --tokenize --tag\
# russian-ud-2.0-170801.udpipe example.txt > tagged_example.conllu
# ! cat tagged_example.conllu
# -
# (Ну а потом снова считываем проанализированные предложения питоном).
#
# Вот два способа работать с UDPipe. Choose your fighter!
# #### Задание
#
# Напишите функцию, которая проверяет, не состоит ли предложение из большого числа однородных предложений.
# ## SVO-triples
#
# С помощью синтекстического парсинга можно извлекать из предложений тройки субъект-объект-глагол, которые можно использовать для извлечения информации из текста.
sent = """1 Собянин _ NOUN _ Animacy=Anim|Case=Nom|Gender=Masc|Number=Sing|fPOS=NOUN++ 2 nsubj _ _
2 открыл _ VERB _ Aspect=Perf|Gender=Masc|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act|fPOS=VERB++ 0 ROOT _ _
3 новый _ ADJ _ Animacy=Inan|Case=Acc|Degree=Pos|Gender=Masc|Number=Sing|fPOS=ADJ++ 4 amod _ _
4 парк _ NOUN _ Animacy=Inan|Case=Acc|Gender=Masc|Number=Sing|fPOS=NOUN++ 2 dobj _ _
5 и _ CONJ _ fPOS=CONJ++ 4 cc _ _
6 детскую _ ADJ _ Case=Acc|Degree=Pos|Gender=Fem|Number=Sing|fPOS=ADJ++ 7 amod _ _
7 площадку _ NOUN _ Animacy=Inan|Case=Acc|Gender=Fem|Number=Sing|fPOS=NOUN++ 4 conj _ _
8 . _ PUNCT . fPOS=PUNCT++. 2 punct _ _"""
# Тройки слово-слово-связь:
graph = DependencyGraph(tree_str=sent)
list(graph.triples())
# Тройки субьект-объект-глагол:
# +
def get_sov(sent):
    """Extract subject/direct-object pairs for every verb in a CoNLL sentence.

    Returns a dict mapping each verb token to {'subj': ..., 'obj': ...},
    with empty strings where no nsubj/dobj dependent was found.
    """
    graph = DependencyGraph(tree_str=sent)
    triples = [t for t in graph.triples() if t]
    # Seed one entry per verb head
    sov = {head[0]: {'subj': '', 'obj': ''}
           for head, _, _ in triples if head[1] == 'VERB'}
    # Fill in the nsubj/dobj dependents of each verb
    for head, rel, dep in triples:
        if head[1] != 'VERB':
            continue
        if rel == 'nsubj':
            sov[head[0]]['subj'] = dep[0]
        elif rel == 'dobj':
            sov[head[0]]['obj'] = dep[0]
    return sov
sov = get_sov(sent)
print(sov)
# -
# #### Задание
#
# Измените код выше так, чтобы учитывались:
# 1. Однородные члены предложения
# * (парк, площадка), (Германия, Щвейцария)
# 2. Сложные сказуемые
# * (начнет продавать), (запретил провозить)
# 3. Непрямые объекты
# * (едет, Польшу), (спел, скандале)
# # Sentiment Analysis with Recursive Neural Network
#
# (если на паре осталось время)
#
# * [источник туториала](https://medium.com/@keisukeumezawa/chainer-tutorial-sentiment-analysis-with-recursive-neural-network-180ddde892a2)
# * [статья](https://nlp.stanford.edu/~socherr/EMNLP2013_RNTN.pdf); архитектура описана в 4 секции
# * [демо с кликабельными картинками](http://nlp.stanford.edu:8080/sentiment/rntnDemo.html)
# * [jupyter notebook](https://chainer-colab-notebook.readthedocs.io/en/latest/notebook/official_example/sentiment.html), [репозиторий](https://github.com/chainer/chainer/tree/master/examples/sentiment).
# До сих пор мы смотрели на парсинг зависимостей, но для анализа тональности в этой части используется другой подход, *парсинг составляющих*, или *constituency parsing*.
# 
# ### Идея
#
# Сентимент предложения складывается из сентимента его составляющих, а тех -- в свою очередь, из их составляющих.
#
# 
#
# (в датасете 5 классов тональности: --, -, 0, +, ++)
# ### Recursive Neural Network
#
# Это нейросети, которые работают с данными переменной длины, используя иерархические структуры (деревья).
# Скрытое состояние i-той вершины дерева вычисляются из скрытых состояний её левого и правого ребёнка:
#
# 
# 
#
# Векторные представления фраз (узлов дерева) подаются на вход слою-классификатору тональности и слою softmax (в обучающем датасете все составляющие размечены по тональности).
| Lectures notebooks/(Lectures notebooks) netology Machine learning/21. Syntactic analysis and keyword selection/syntax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns
from functools import partial
import pysam
from tqdm.notebook import trange
from tqdm.autonotebook import tqdm
import scipy.stats as st
import functools
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action='ignore', category=SettingWithCopyWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
# -
# Import data, custom figure-making functions
sys.path.append('../figures')
from figure_constants import *
from figure_functions import *
sys.path.append(installDir+'scripts')
from chartannotator import add_stat_annotation as si
multiple_annotation_method=None
# +
real_transmissionPairs = transmissionPairs.loc[transmissionPairs.kind=='transmission']
NUMBER_OF_H3N2_PAIRS = len(real_transmissionPairs.loc[real_transmissionPairs.subtype=='H3N2'])
NUMBER_OF_H1N1_PAIRS = len(real_transmissionPairs.loc[real_transmissionPairs.subtype=='H1N1'])
NUMBER_OF_FLUB_PAIRS = len(real_transmissionPairs.loc[real_transmissionPairs.subtype=='Influenza B'])
subtype = 'H3N2'
# -
import functools
def memoize(obj):
    """Memoize *obj* keyed on the string form of its positional and keyword args.

    The cache dict is exposed as an attribute (``.cache``) on the function.
    """
    obj.cache = {}
    cache = obj.cache

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        try:
            return cache[key]
        except KeyError:
            result = obj(*args, **kwargs)
            cache[key] = result
            return result

    return memoizer
# +
@memoize
def getReadDepth(sample, segment, pos, alt):
    """Return (frequency, alt_read_count, depth) for `alt` at `pos` of `segment` in `sample`.

    Looks up the sample's reference file in the global SNPs table, derives the
    contig name and BAM path from the reference path, then pileups the single
    position with a min base quality of 30.  Returns (0, 0, 0) when the
    position has no pileup column.

    NOTE(review): contig/BAM naming assumes a fixed directory layout for the
    reference paths (with special cases for Hong_Kong and Michigan
    references) — confirm against the pipeline that produced them.
    """
    reffile = SNPs.loc[SNPs['sampleID']==sample, 'referenceFile'].iloc[0]
    ref = reffile.split('/')[5]
    refbase = reffile.split('/')[-1].split('_')
    # Derive the contig name for this segment from the reference file name
    if 'Hong_Kong' in reffile:
        chrom = hongkongContigs[segment]
    elif 'Michigan' in reffile:
        chrom = '_'.join(refbase[:-4])+'_'+segment
    elif refbase[-3] in ['17','18','19']:
        chrom = '_'.join(refbase[:-3])+'_'+segment
    else:
        chrom = '_'.join(refbase[:-2])+'_'+segment
    bamfile = '/'.join(reffile.split('/')[0:6])+'/'+'_'.join(reffile.split('/')[-1].split('_')[:-2])+'/map_to_consensus/'+sample+'.bam'
    pos = int(pos)
    sam = pysam.AlignmentFile(bamfile, "rb")
    try:
        # truncate=True restricts the pileup to exactly [pos-1, pos)
        pileup = sam.pileup(contig=chrom, start=pos-1, end=pos, truncate=True, stepper="nofilter")
        column = next(pileup)
    except StopIteration:
        # No reads cover this position: report zero frequency/reads/depth
        print (chrom, pos)
        print (pileup)
        print (bamfile)
        return (0,0,0)
    except:
        # Unexpected failure: dump context before re-raising
        print (sam.references)
        print (chrom)
        print (reffile)
        print (ref)
        raise
    column.set_min_base_quality(30)
    try:
        bases = column.get_query_sequences(mark_matches=True)
        # Count the alt base on both strands (lower/upper case)
        altreads = bases.count(alt.lower()) + bases.count(alt.upper())
    except:
        altreads = 0
    depth = column.get_num_aligned()
    if depth > 0:
        frequency = round(altreads/column.get_num_aligned(),4)
    else:
        frequency = 0
    return frequency, altreads, depth
def checkForDuplicateColumnsPostMerge(df, suffixes=('_x','_y'), verbose=False):
    """Collapse suffixed column pairs left by a merge when their values match.

    For every base name that appears with both suffixes, the pair is unified
    into a single unsuffixed column if all values are equal (numeric pairs
    allow a small absolute tolerance).  NaN values are NOT treated as equal;
    pairs that differ anywhere are kept as-is.
    """
    left_sfx = suffixes[0]
    right_sfx = suffixes[1]
    base_names = [c[:-len(left_sfx)] for c in df.columns if c[-len(left_sfx):] == left_sfx]
    merged = []
    kept = []
    for base in base_names:
        left_col = base + left_sfx
        right_col = base + right_sfx
        left_vals = df[left_col].values
        right_vals = df[right_col].values
        both_numeric = (df[left_col].dtype.kind in 'biufc') and (df[right_col].dtype.kind in 'biufc')
        if both_numeric:
            # isclose is applied both ways because its tolerance is asymmetric
            equal_mask = ((left_vals == right_vals)
                          | np.isclose(left_vals, right_vals, atol=1E-4)
                          | np.isclose(right_vals, left_vals, atol=1E-4))
        else:
            equal_mask = (left_vals == right_vals)
        if equal_mask.all():
            df = df.rename(columns={left_col: base}).drop(columns=[right_col])
            merged.append(base)
        else:
            kept.append(base)
    if verbose:
        print('merged:')
        print(merged)
        print('kept:')
        print(kept)
    return df
def updateDuplicateColumnsPostMerge(df, exclude=[], suffixes=('_x','_y'), verbose=False):
    '''Unify suffixed column pairs that agree everywhere except for NaNs.

    Like checkForDuplicateColumnsPostMerge, but treats NaN as a merge artifact:
    if a pair only differs where one side is NaN, values from the right column
    fill the gaps and the pair collapses into one unsuffixed column.

    NOTE(review): the `exclude` parameter is never used (and has a mutable
    default) — confirm whether exclusion support was meant to be implemented.
    '''
    # Base names that occur with the first suffix
    columns = [column[:-len(suffixes[0])] for column in df.columns if column[-len(suffixes[0]):]==suffixes[0]]
    merged=[]
    kept = []
    for column in columns:
        columna = column+suffixes[0]
        columnb = column+suffixes[1]
        a=df[columna].values
        b=df[columnb].values
        # Numeric pairs tolerate tiny float differences; NaN on either side counts as "equal"
        if (df[columna].dtype.kind in 'biufc') and (df[columnb].dtype.kind in 'biufc'):
            theyAreEqual = ((a==b)|pd.isna(a)|pd.isna(b)|np.isclose(a,b,atol=1E-4)|np.isclose(b,a,atol=1E-4))
        else:
            theyAreEqual = ((a==b)|pd.isna(a)|pd.isna(b))
        # Extra debugging output for amino-acid-string columns
        if 'AAstr' in column:
            if verbose:
                print (((a==b)|pd.isna(a)|pd.isna(b)).all())
                print (df[((a!=b)&pd.notna(a)&pd.notna(b))])
        if theyAreEqual.all():
            # Fill NaNs in the left column from the right, then keep only the left
            df[columna].update(df[columnb])
            df = df.rename(columns={columna:column}).drop(columns=[columnb])
            merged.append(column)
        else:
            kept.append(column)
    if verbose:
        print('updated:')
        print (merged)
        print('untouched:')
        print(kept)
    return df
@memoize
def getReadDepthWrapper(row):
    """Return (freq, AD, depth) for both index and contact samples of a SNP row.

    If either side's SNP frequency is missing, its triple is recomputed from
    the BAM pileup via getReadDepth; otherwise the precomputed row values are
    passed through.  Returns a 6-tuple:
    (freq_index, AD_index, depth_index, freq_contact, AD_contact, depth_contact).

    NOTE(review): @memoize keys on str(row); a pandas Series repr can be
    truncated by display options, which could collide cache keys — verify.
    """
    if pd.isna(row.SNP_frequency_index):
        try:
            result = getReadDepth(row['index'], row.segment,row.pos,row.alt_nuc)+(row.SNP_frequency_contact,row.AD_contact,row.depth_contact)
        except:
            print (row[['index','contact','segment','pos','SNP_frequency_index','SNP_frequency_contact']])
            raise
    elif pd.isna(row.SNP_frequency_contact):
        try:
            result = (row.SNP_frequency_index,row.AD_index,row.depth_index)+getReadDepth(row.contact, row.segment,row.pos,row.alt_nuc)
        except:
            print (row)
            raise
    else:
        # Both sides already measured: pass the stored values through unchanged
        result = (row.SNP_frequency_index,row.AD_index,row.depth_index,row.SNP_frequency_contact,row.AD_contact,row.depth_contact)
    return result
# +
def draw_rand_pairing(n):
    """Draw two distinct random indices from range(n).

    NOTE(review): loops forever when n < 2 — confirm callers guarantee n >= 2.
    """
    first = np.random.randint(0, n)
    second = np.random.randint(0, n)
    while second == first:
        second = np.random.randint(0, n)
    return (first, second)
def draw_rand_pairings(n, p_candidates):
    '''Draw random candidate pairings compatible with a plausible transmission.

    A drawn pair is kept only when symptom onsets are within 7 days of each
    other and both candidates share a subclade; each kept candidate is then
    represented by its day-0 sample, falling back to the day-7 sample.

    NOTE(review): the loop bound ``< n + 1`` yields n+1 pairings, not n —
    confirm whether that off-by-one is intentional.
    '''
    def sample_id(candidate):
        # Prefer the day-0 sample; fall back to day 7 when day 0 is missing.
        if pd.notna(candidate.day0_sample):
            return candidate.day0_sample
        return candidate.day7_sample

    max_onset_gap = pd.Timedelta(7, 'd')
    pairings = []
    while len(pairings) < n + 1:
        i, j = draw_rand_pairing(len(p_candidates))
        first = p_candidates.iloc[i]
        second = p_candidates.iloc[j]
        if abs(first.time_of_symptom_onset - second.time_of_symptom_onset) > max_onset_gap:
            continue
        if first.subclade != second.subclade:
            continue
        pairings.append((sample_id(first), sample_id(second)))
    return pairings
# +
def add_antigenic_product(transmissionSNPs):
    '''Duplicate HA_antigenic/HA_nonantigenic SNP rows under a combined 'HA' product.

    The duplicated rows let downstream group-bys report whole-HA statistics
    alongside the antigenic/non-antigenic split. Returns a new frame; the
    input frame is not modified.
    '''
    # .copy() so relabelling 'product' below edits an independent frame rather
    # than a slice of the input (avoids chained-assignment pitfalls).
    HA_add_on = transmissionSNPs.loc[transmissionSNPs['product'].isin(['HA_antigenic', 'HA_nonantigenic'])].copy()
    HA_add_on.loc[:, 'product'] = 'HA'
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported spelling.
    transmissionSNPs = pd.concat([transmissionSNPs, HA_add_on])
    return transmissionSNPs
def make_all_changes_minor_to_major(transmissionSNPs):
    '''Re-orient every SNP so the frequency columns describe the index case's *minor* allele.

    Seeds minorAlleleFreq_* / minor_*_nuc columns on the input frame (mutating
    it, as the original pipeline does), then returns a copy in which sites
    whose index frequency exceeds 0.5 have ref/alt swapped and their
    frequencies complemented; directional and absolute frequency changes are
    appended.
    '''
    # Seed the minor-allele columns with the as-called values.
    transmissionSNPs['minorAlleleFreq_index'] = transmissionSNPs.SNP_frequency_index
    transmissionSNPs['minorAlleleFreq_contact'] = transmissionSNPs.SNP_frequency_contact
    transmissionSNPs['minor_alt_nuc'] = transmissionSNPs.alt_nuc
    transmissionSNPs['minor_ref_nuc'] = transmissionSNPs.ref_nuc
    print(transmissionSNPs.SNP_frequency_index.max())
    oriented = transmissionSNPs.copy()
    # Sites where the "alt" allele is actually the majority allele in the index case.
    needs_flip = oriented.SNP_frequency_index > 0.5
    # Swap ref/alt for those sites (stash the alts before overwriting them)...
    original_alts = oriented.loc[needs_flip, 'alt_nuc']
    oriented.loc[needs_flip, 'minor_alt_nuc'] = oriented.loc[needs_flip, 'ref_nuc']
    oriented.loc[needs_flip, 'minor_ref_nuc'] = original_alts
    # ...and complement their frequencies.
    oriented.loc[needs_flip, 'minorAlleleFreq_index'] = np.abs(1 - oriented.loc[needs_flip, 'SNP_frequency_index'].values)
    oriented.loc[needs_flip, 'minorAlleleFreq_contact'] = np.abs(1 - oriented.loc[needs_flip, 'SNP_frequency_contact'].values)
    oriented['SNP_frequency_directional_change'] = oriented.SNP_frequency_contact - oriented.SNP_frequency_index
    oriented['abs_SNP_frequency_difference'] = np.abs(oriented.SNP_frequency_directional_change)
    return oriented
def calc_changes_in_SNP_frequency(df):
    '''Annotate df with directional, absolute and log10 SNP-frequency changes.

    Mutates and returns df. log10 of a zero difference (-inf) and of NaN are
    both coerced to 0, matching the original behavior.
    '''
    delta = df.SNP_frequency_contact - df.SNP_frequency_index
    df['abs_SNP_frequency_difference'] = np.abs(delta)
    df['SNP_frequency_directional_change'] = delta
    log_delta = np.log10(df.abs_SNP_frequency_difference)
    df['log_abs_SNP_frequency_difference'] = log_delta.fillna(0).replace((np.inf), 0).replace((-np.inf), 0)
    return df
def apply_depth_filter(transmissionSNPs, min_depth=100):
    '''Drop SNPs whose index *or* contact read depth is below min_depth.

    NaN depths are deliberately kept (NaN < min_depth evaluates False), which
    matches the original negated-OR filter.
    '''
    too_shallow = (transmissionSNPs.depth_contact < min_depth) | (transmissionSNPs.depth_index < min_depth)
    return transmissionSNPs.loc[~too_shallow]
# -
# minimum allele frequency is used as a pseudocount here when synon/nonsynon divergence in a gene pairing is 0
def calc_pairing_divergences(transmittedSNPs, pairings, subtype='H3N2', freq_cutoff=0.01):
    '''Compute per-pairing, per-gene, per-AAtype divergence, normalized and log10-transformed.

    Parameters
    ----------
    transmittedSNPs : SNP-level frame with abs_SNP_frequency_difference, product, AAtype,
        subtype, and 'index'/'contact' sample-ID columns.
    pairings : frame with 'index' and 'contact' columns; defines the full pairing set,
        including pairings with zero observed differences.
    subtype : subtype whose SNPs define the universe of gene products.
    freq_cutoff : minimum |frequency change| for a SNP to count; also reused as the
        pseudocount added to every summed divergence before taking logs.

    NOTE(review): relies on the module-level `genes` dataframe for per-sample
    N_sites_gene / S_sites_gene counts — confirm it is populated for every index sample.
    '''
    # First, calculate divergences only using SNPs that are above the freq cutoff
    transmittedSNPs = transmittedSNPs.loc[transmittedSNPs.abs_SNP_frequency_difference >= freq_cutoff]
    # Next, in order to account for gene pairings with no differences between them
    # (and thus aren't represented in the transmittedSNPs dataframe),
    # I will make a separate dataframe of id columns that contain all possible pairing/gene/AAtype combinations.
    all_possible_products = pd.DataFrame(transmittedSNPs.loc[transmittedSNPs.subtype == subtype,'product'].dropna().unique())
    AAtypes = pd.DataFrame(['Nonsynonymous', 'Synonymous'])
    # in case a pairing has no differences at all, I will use a separate list of pairings
    transmittedSNPs['pairing_id'] = transmittedSNPs['index'] + '|' + transmittedSNPs.contact
    pairing_ids = pairings['index']+ '|' + pairings.contact
    # actually create the dataframe of possible combinations
    pairing_divergence = all_possible_products.merge(AAtypes, how='cross').merge(pd.DataFrame(pairing_ids).reset_index(drop=True), how='cross')
    pairing_divergence = pairing_divergence.rename(columns={'0_x':'product','0_y':'AAtype',0:'pairing_id'})
    # calc the sum of absolute SNP frequency changes (aka divergence)
    # and merge with all possible combinations of pairings/genes/AAtypes.
    # Combinations of id variables that have no changes will be nan, and so I will set those at 0.
    between_pairing_sum_of_frequencies = transmittedSNPs.groupby(['pairing_id', 'product', 'AAtype']).sum()['abs_SNP_frequency_difference'].reset_index().rename(columns={'abs_SNP_frequency_difference':'sum_divergence'})
    pairing_divergence = pairing_divergence.merge(between_pairing_sum_of_frequencies, on=['pairing_id','product', 'AAtype'], how='left')
    pairing_divergence.sum_divergence = pairing_divergence.sum_divergence.fillna(0)
    # To finish off calculating the total divergence, because I will be taking the log of these data, which are counts,
    # I have to deal with missing data (0s that are due to under-counting. Presumably 0s just represent regions that
    # rarely acquire mutations. It is difficult to observe rate of mutation below a hard cutoff of 1/len of gene.
    # One common way to deal with this is to add a pseudocount of the smallest possible observation to all observations.
    # For this experiment, the smallest possible observation is one mutation between pairs of frequency "freq_cutoff".
    # So I add that pseudocount here.
    pseudocount = freq_cutoff
    pairing_divergence.sum_divergence += pseudocount
    # I will normalize divergences to the number of synon/nonsynon sites in the *index* case. So first, identify index:
    pairing_divergence['sampleID'] = pairing_divergence.pairing_id.str.split('|').str[0]
    # And merge with the genes dataframe to add the number of synon/nonsynon sites per gene in the index cases.
    # There should not be any missing data here; I should previously have calculated this for all samples collected.
    pairing_divergence = pairing_divergence.merge(genes[['sampleID','product','N_sites_gene','S_sites_gene']], on=['sampleID','product'], how='left').rename(columns={'sampleID':'index'})
    # Now reorganize the synon/nonsynon sites data so that each row is either synon or nonsynon only
    pairing_divergence['sites'] = pairing_divergence.N_sites_gene
    pairing_divergence.loc[pairing_divergence.AAtype == 'Synonymous', 'sites'] = pairing_divergence.loc[pairing_divergence.AAtype == 'Synonymous', 'S_sites_gene'].values
    # Finally, I can now normalize total divergence by the number of possible sites
    pairing_divergence['normalized_divergence'] = pairing_divergence.sum_divergence/pairing_divergence.sites
    # And I can calculate the log of the normalized divergence.
    pairing_divergence['log_divergence'] = np.log10(pairing_divergence.normalized_divergence)
    return pairing_divergence
from tqdm.notebook import tqdm
def get_all_pairing_divergences(pairings):
    '''Compute per-gene, per-AAtype normalized divergences for every supplied sample pairing.

    Parameters
    ----------
    pairings : DataFrame with 'index' and 'contact' sample-ID columns.

    Returns
    -------
    DataFrame with pairing_id, product, AAtype and normalized_divergence columns.

    NOTE(review): depends on the module-level `SNPs` frame plus the helpers
    getReadDepthWrapper / updateDuplicateColumnsPostMerge / apply_depth_filter /
    calc_changes_in_SNP_frequency / add_antigenic_product / calc_pairing_divergences,
    and caches the slow read-depth lookups in 'tSNPs_w_depth.csv' in the working
    directory (delete that file to force a recompute).
    '''
    # It is simpler to calculate the per-gene, per-AAtype normalized divergence for all possible pairings
    # (there are ~2600 of them) than to do this for 10000 random pairs. This function does that.
    print('making pairings')
    transmissionPairs = pd.DataFrame(pairings, columns=['index','contact']).dropna()
    transmissionPairs['kind'] = 'transmission'
    divergence_relevent_columns = ['sampleID',
                                   'SNP_frequency',
                                   'alt_nuc',
                                   'ref_nuc',
                                   'depth',
                                   'RD',
                                   'AD',
                                   'subtype',
                                   'AAtype',
                                   'segment',
                                   'pos',
                                   'product']
    print('obtaining transmitted SNPs for all pairings')
    index_SNPs = transmissionPairs.merge(SNPs[divergence_relevent_columns].rename(columns={'sampleID':'index'}),on='index', how='left')
    contact_SNPs = transmissionPairs.merge(SNPs[divergence_relevent_columns].rename(columns={'sampleID':'contact'}),on='contact', how='left')
    print ('making SNP keys')
    # A SNPkey uniquely identifies a site within a pairing: index:contact:segment:pos:alt:product.
    index_SNPs['SNPkey'] = index_SNPs['index'] + ':' + index_SNPs['contact'] + ':'+index_SNPs.segment+':'+index_SNPs.pos.astype(str)+':'+index_SNPs.alt_nuc+':'+index_SNPs['product'].fillna('OORF')
    contact_SNPs['SNPkey'] = contact_SNPs['index'] + ':' + contact_SNPs['contact'] + ':'+contact_SNPs.segment+':'+contact_SNPs.pos.astype(str)+':'+contact_SNPs.alt_nuc+':'+contact_SNPs['product'].fillna('OORF')
    print ('merging index and contact SNPs')
    transmissionSNPs = index_SNPs.merge(contact_SNPs, on='SNPkey', how='outer', suffixes=('_index','_contact'))
    transmissionSNPs = updateDuplicateColumnsPostMerge(transmissionSNPs, suffixes=('_index','_contact'))
    transmissionSNPs = transmissionSNPs.drop_duplicates()
    # Its important to have depths for both the index and contact for all single nucleotide differences between the two.
    # I don't have this information for SNPs which arose de novo in the contact or reverted to reference in the contact,
    # Because I don't have depth data for sites that are 100% reference.
    # So I will applying a function to all transmission SNPs that:
    # a) determines whether the index or contact frequency/AD/depth info contains nans
    # b) calls getReadDepth on the appropriate information to fill in the nans
    # c) returns the original data with getReadDepth's results filling in the nans
    columnsToUpdate = ['SNP_frequency_index','AD_index','depth_index','SNP_frequency_contact','AD_contact','depth_contact']
    print ('getting depths')
    tqdm.pandas()
    if os.path.exists('tSNPs_w_depth.csv'):
        tmp=pd.read_csv('tSNPs_w_depth.csv').drop('Unnamed: 0', axis=1)
    else:
        tmp = pd.DataFrame(transmissionSNPs.progress_apply(getReadDepthWrapper,axis=1).to_list())
        tmp.to_csv('tSNPs_w_depth.csv')
    #It makes me nervous that I'm applying a function to all my values which in theory could change all my SNP values.
    #So I'm going to do this carefully. I will apply the function and create a separate data frame, preserving my original data.
    #I then assert that the data that I am about to change is either a) identical to the new data, or b) nan
    print('checking results')
    # NOTE(review): `a` is retained for the identical-or-nan assertion described
    # above, which is currently not present in the code — confirm intended.
    a = transmissionSNPs[columnsToUpdate].to_numpy()
    b = tmp.to_numpy()
    # I replace my original data with my updated data
    transmissionSNPs[columnsToUpdate] = b
    # To save time, I do not get reference SNP depth, just total depth and alt depth.
    # Both are calculated w/ quality minimums, so ref_depth is just total depth - alt depth
    # I'm only changing values that are nan, otherwise I will use the info previously gathered
    transmissionSNPs.loc[transmissionSNPs.RD_index.isna(), 'RD_index'] = transmissionSNPs.loc[transmissionSNPs.RD_index.isna(), 'depth_index']-transmissionSNPs.loc[transmissionSNPs.RD_index.isna(), 'AD_index']
    transmissionSNPs.loc[transmissionSNPs.RD_contact.isna(), 'RD_contact'] = transmissionSNPs.loc[transmissionSNPs.RD_contact.isna(), 'depth_contact']-transmissionSNPs.loc[transmissionSNPs.RD_contact.isna(), 'AD_contact']
    assert(len(transmissionSNPs.loc[transmissionSNPs.RD_index.isna()])==0)
    assert(len(transmissionSNPs.loc[transmissionSNPs.RD_contact.isna()])==0)
    assert(len(transmissionSNPs.loc[transmissionSNPs.AD_index.isna()])==0)
    assert(len(transmissionSNPs.loc[transmissionSNPs.AD_contact.isna()])==0)
    # And now that my nans are filled in, I calculate the differences in snp frequency
    print('calculating divergences')
    # First filter out any transmission SNPs where the index or contact was not sequenced at
    # sufficient depth to be confident of the within-host frequency of that site
    transmissionSNPs = apply_depth_filter(transmissionSNPs, min_depth=100)
    transmissionSNPs = calc_changes_in_SNP_frequency(transmissionSNPs)
    # BUG FIX: the original condition was `len(transmissionSNPs['product'] == 'HA') == 0`,
    # i.e. the *length* of the boolean Series (the row count), not the number of HA rows —
    # so the combined-HA rows could only ever be added to an empty frame. Count matches instead.
    if (transmissionSNPs['product'] == 'HA').sum() == 0:
        print ('acutally using this clause')
        transmissionSNPs = add_antigenic_product(transmissionSNPs)
    divergences = calc_pairing_divergences(transmissionSNPs, pairings=pairings)
    divergences = divergences[['pairing_id','product','AAtype','normalized_divergence']]
    return divergences
# +
# First, get the normalized divergences of all combinations of samples that could plausibly be the result of
# a transmission (ie., sx onset in contact occured within 10 days after sx onset of index)
a = allvsall.loc[allvsall.subtype_index==subtype]
# All vs all is a df of all potential pairings w/ distances pre-calculated.
# It should already be limited to plausible transmissions.
# BUG FIX: the sanity check previously subtracted time_of_symptom_onset_index from *itself*
# (always zero), so it could never fail; compare index onset against contact onset instead.
assert len(a.loc[np.abs(pd.to_datetime(a.time_of_symptom_onset_index)-pd.to_datetime(a.time_of_symptom_onset_contact))<=(pd.to_timedelta('10D'))]) == len(a)
all_plausible_pairing_divergences = get_all_pairing_divergences(a[['index','contact']])
# Now that I have a df with the divergences of each AA type of each product in every plausible sample combination,
# I need to calculate the stat I'm actually interested in: the log of the ratio of Nonsynon to Synon divergences
# for each gene product in each plausible sample combo.
# First, take the log of the normalized divergence
all_plausible_pairing_divergences['log_normalized_divergence'] = np.log(all_plausible_pairing_divergences.normalized_divergence)
# Then, do this sort of odd code that is very fast. Sort by pairing_id, product, and AA type.
all_plausible_pairing_divergences = all_plausible_pairing_divergences.sort_values(['pairing_id','product', 'AAtype']).reset_index(drop=True)
# Because I sort by AA type last, every even row is Nonsynonymous, and every odd row is Synonymous.
# So make a data frame that is just id values (ie, pairings and products):
pairing_divergences = all_plausible_pairing_divergences.groupby(['pairing_id','product']).first().reset_index()[['pairing_id','product']]
# and our log divergence ratio will be log evens - log odds. Having previously sorted by pairing_id and product,
# the resulting numbers should be in the right order.
pairing_divergences['log_divergence_ratio'] = all_plausible_pairing_divergences.loc[all_plausible_pairing_divergences.index%2==0, 'log_normalized_divergence'].values - all_plausible_pairing_divergences.loc[all_plausible_pairing_divergences.index%2==1, 'log_normalized_divergence'].values
# +
###Create random pairs with distances that are in *same distribution* as household pairs
# add information about pairing genetic distances back to the all_plausible_pairing_divergences df
# FIX: take an explicit copy — the column subset is a slice of allvsall, and assigning
# 'pairing_id' into it triggers SettingWithCopyWarning (and can be a silent no-op under
# pandas copy-on-write) without the .copy().
distances = allvsall[['index','contact','distance']].copy()
distances['pairing_id'] = allvsall['index']+'|'+allvsall.contact
all_plausible_pairing_divergences = all_plausible_pairing_divergences.merge(distances[['pairing_id','index','contact','distance']], on='pairing_id',how='left')
# bin those pairings by distance (one row per pairing, 0.01-wide bins up to 500)
subtype_ava = all_plausible_pairing_divergences.groupby(['pairing_id','distance']).first().reset_index()[['pairing_id','distance']]
subtype_ava['quantiles'] = pd.cut(subtype_ava.distance,bins=np.linspace(0, 500, 50001), labels=np.linspace(0.01, 500, 50000))
subtype_transPairs = transmissionPairs.loc[(transmissionPairs.subtype==subtype) & (transmissionPairs.kind=='transmission')].reset_index(drop=True)
bootstrap_size = 10000
bootstrap_stat = 'distance'
# then fit a log-norm distribution to our actual transmissionPairs to match distances to
mu, sigma = np.log(subtype_transPairs.distance).mean(), np.log(subtype_transPairs.distance).std()
# -
def draw_n_random_pairings_from_lognormal_distribution(potential_pairings, n, mu, sigma):
    '''Draw n pairing_ids whose genetic distances follow a log-normal(mu, sigma) distribution.

    Each drawn distance is snapped to the nearest distance bin actually present in
    `potential_pairings.quantiles`, and one pairing is sampled uniformly from that bin.

    Parameters
    ----------
    potential_pairings : DataFrame with a 'pairing_id' column and a categorical
        'quantiles' column of distance-bin labels (as produced by pd.cut).
    n : number of pairings to draw.
    mu, sigma : log-normal parameters on the natural-log scale.
    '''
    print('taking distance distribution')
    samples = np.round(np.random.lognormal(mean=mu, sigma=sigma, size=n), 2)
    print('finding samples w/ distances nearest to drawn distances')
    # Distance-bin label of every candidate row, as a plain numeric array.
    potentials = potential_pairings.quantiles.cat.categories[potential_pairings.quantiles.cat.codes].values
    # For each drawn distance, the closest bin value that actually has candidates.
    adjusted_samples = potentials[np.abs(samples[:,np.newaxis]-potentials[np.newaxis,:]).argmin(axis=1)]
    print('drawing bootstrapped sample pairs')
    # (Removed a dead `random_pairings = list()` that was immediately overwritten,
    # and fixed the 'darwing' typo in the progress message above.)
    random_pairings = [potential_pairings.loc[potential_pairings.quantiles==x].sample(1)['pairing_id'].values[0] for x in adjusted_samples]
    return random_pairings
# its faster to draw bootstrapSize*numOfBootstraps random pairs from the lognormal distribution
# and later reshape into a [bootstrapSize, numOfBootstraps] shaped dataframe/array
# NOTE: NUMBER_OF_H3N2_PAIRS and bootstrap_size are defined earlier in the notebook, so this
# single call draws every pair needed for all bootstrap replicates at once.
drawings = draw_n_random_pairings_from_lognormal_distribution(potential_pairings = subtype_ava,
                                                              n = NUMBER_OF_H3N2_PAIRS*bootstrap_size,
                                                              mu=mu,
                                                              sigma=sigma)
# +
random_drawings_df = pd.DataFrame(drawings, columns=['pairing_id'])
# Assign bootstrap replicate ids cyclically. CONSISTENCY FIX: use the named
# bootstrap_size constant (defined above) rather than repeating the magic number 10000.
random_drawings_df['bootstrap_id'] = random_drawings_df.index % bootstrap_size
# Now that I have 10000 bootstraps of n pairs,
# I can merge with pairing_divergences to get the log divergence ratios of each product for all my randomly drawn pairs
random_drawings_df = random_drawings_df.merge(pairing_divergences, on='pairing_id', how='left')
# Calc the average log divergence ratio for each product per bootstrap
random_drawings_df = random_drawings_df.groupby(['bootstrap_id', 'product']).mean().reset_index()
# And save the product. The rest of the work will be done in the notebook that actually makes the figure.
# NOTE(review): hard-coded absolute output path — confirm it exists on the machine running this.
random_drawings_df.to_csv('/mnt/d/orchards/10000_random_H3N2_log_ratios.tsv', sep='\t')
| scripts/secondary_analysis/pre_processing_scripts/Bootstrap transmission divergence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
## Simply supported beam with a distributed load and a roller support
## (original: VIGA BI-APOIADA COM CARGA DISTRIBUIDA E APOIO MOVEL)
import numpy as np
import matplotlib.pyplot as plt
# Input data
L = 5 # meters
q0 = 2000 # Newton
a = 0.2*L
x = np.linspace(0,L,1000)
# Calculations — Macaulay/singularity-function formulation; the (x>=a) factors
# switch the support-reaction terms on only past x = a.
qx = -q0*(x)**0 + ((q0*L**2)/(2*a))*(x-a)**(-1)*(x>=a)
Vx = -q0*(x)**1 + ((q0*L**2)/(2*a))*(x-a)**(0)*(x>=a) - q0*((L**2)/(2*a) - L)
Mx = (-q0/2)*(x)**2 + ((q0*L**2)/(2*a))*(x-a)**(1)*(x>=a) - q0*((L**2)/(2*a) - L)*x
print(max(Mx))
# outputs, graphics
plt.figure()
plt.plot(x,qx,'k')
# BUG FIX: this figure plots the distributed load qx, but was labelled
# 'Esforco Cortante' (shear force) — a copy-paste from the Vx figure below.
plt.ylabel('Carga Distribuida')
plt.xlabel('Comprimento L')
plt.grid('on')
plt.show()
plt.figure()
plt.plot(x,Vx,'r')
plt.ylabel('Esforco Cortante')
plt.xlabel('Comprimento L')
plt.grid('on')
plt.show()
plt.figure()
plt.plot(x,Mx,'c')
plt.ylabel('Momento Fletor')
plt.xlabel('Comprimento L')
plt.grid('on')
plt.show()
# -
| test_IPython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
# NOTE(review): changes the working directory for the whole kernel session so the
# OUTPUT/ paths below resolve from the project root; re-running this cell walks
# further up the tree — confirm intended.
os.chdir("..")
import pickle
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import colors
import imgkit
# +
# Load the optimiser settings (s) and back-test results (r) produced upstream,
# keeping only the columns that will be rendered into the report tables.
s = pd.read_pickle("OUTPUT/opt_settings.pkl").loc[:,['transaction_cost', 'lag', 'resample',
       'rounding', 'windowZ', 'windowOLS', 'entryZ', 'exitZ','entryMult', 'exitMult']]
r = pd.read_pickle("OUTPUT/results.pkl").loc[:,['sharpe_ratio', 'total_return',
       'max_drawdown', 'mean_hold_time', 'n_transactions',
       'long_ret', 'short_ret', 'spread_ret', 'trans_ret']]
# Recompute total return from its four gross-return components (subtract the 4 unit bases).
r['total_return']= r.long_ret + r.short_ret + r.spread_ret + r.trans_ret - 4
#ts = ['T1','T2','T4']
ts = ['T0','T1','T2','T3','T4','T5','T6']
# Restrict both frames to the selected treatments.
s = s.droplevel("Sample")[np.in1d(s.index.get_level_values('Treatment'),ts)]
r = r[np.in1d(r.index.get_level_values('Treatment'),ts)]
# NOTE(review): reorder_levels returns a new frame and is not assigned back, so this
# line is a no-op as written — confirm whether `r = r.reorder_levels(...)` was intended.
r.reorder_levels(['Data','Model','Sample','Treatment'])
r = r.sort_index(level = ['Sample'])
s = s.sort_index(level = ['Data','Model'])
# Boolean masks over the index levels, reused by the table-rendering loops below.
skal = np.in1d(s.index.get_level_values('Model'),"Kalman")
rkal = np.in1d(r.index.get_level_values('Model'),"Kalman")
srya = np.in1d(s.index.get_level_values('Data'),"rya_ryaay")
rrya = np.in1d(r.index.get_level_values('Data'),"rya_ryaay")
shsbc= np.in1d(s.index.get_level_values('Data'),"hsbcln_hsbc")
rhsbc= np.in1d(r.index.get_level_values('Data'),"hsbcln_hsbc")
# Exact-zero component returns mark "no position"; treat them as a gross return of 1 (0%).
r[r[['long_ret','short_ret','spread_ret','trans_ret']] ==0] = 1
# Keep numeric copies for the colour-scaling helper before the columns are
# converted into display strings below.
s2 = s.copy()
r2 = r.copy()
# Format the result columns as display strings (fixed decimals / percentages).
r.sharpe_ratio = ["0.00" if math.isnan(val) else "{0:.2f}".format(val) for val in r.sharpe_ratio]
r.total_return = ["0.00%" if math.isnan(val) else "{0:.2f}%".format(val*100) for val in r.total_return]
r.max_drawdown = ["0.00%" if math.isnan(val) else "{0:.2f}%".format(val*100) for val in r.max_drawdown]
r.mean_hold_time = r.mean_hold_time.astype(int)
r.n_transactions = r.n_transactions.astype(int)
r.long_ret =["0.00%" if math.isnan(val) else "{0:.2f}%".format((val-1)*100) for val in r.long_ret]
r.short_ret =["0.00%" if math.isnan(val) else "{0:.2f}%".format((val-1)*100) for val in r.short_ret]
r.spread_ret =["0.00%" if math.isnan(val) else "{0:.2f}%".format((val-1)*100) for val in r.spread_ret]
r.trans_ret =["0.00%" if math.isnan(val) else "{0:.2f}%".format((val-1)*100) for val in r.trans_ret]
# Format the settings columns.
s.transaction_cost = ["{0:.1f} bps".format(val*10000) for val in s.transaction_cost]
s.lag = s.lag.astype(int)
s['resample'] = s['resample'].astype(int)
s.rounding = s.rounding.astype(int)
s.exitMult = ["0" if math.isnan(val) else "{0:.1f}".format(val) for val in s.exitMult]
s.entryMult = ["0" if math.isnan(val) else "{0:.1f}".format(val) for val in s.entryMult]
s.exitZ = ["0" if math.isnan(val) else "{0:.1f}".format(val) for val in s.exitZ]
s.entryZ = ["0" if math.isnan(val) else "{0:.1f}".format(val) for val in s.entryZ]
s.windowZ = ["0" if math.isnan(val) else int(val) for val in s.windowZ]
s.windowOLS = ["0" if math.isnan(val) else int(val) for val in s.windowOLS]
# -
def b_g(x, filt=None, r = True, cmap='RdYlBu', low=0, high=0):
    '''Styler helper: return one 'background-color: #rrggbb' CSS string per row of *x*.

    Colours are scaled from the *numeric* pre-formatting frames (module-level r2/s2),
    because by the time styling runs the displayed columns hold formatted strings.

    Parameters
    ----------
    x : the column Series handed in by Styler.apply (only x.name is used).
    filt : boolean row mask aligned with r2/s2; defaults to all rows.
    r : take values from the results frame r2 when True, else the settings frame s2.
    cmap : matplotlib colormap name.
    low, high : fractional padding applied below/above the value range.
    '''
    if filt is None:
        filt = [True] * len(x)
    # Pass the columns from Dataframe A
    if r:
        a = r2.loc[filt, x.name].copy()
    else:
        a = s2.loc[filt, x.name].copy()
    rng = a.max() - a.min()
    norm = colors.Normalize(a.min() - (rng * low),
                            a.max() + (rng * high))
    normed = norm(a.values)
    # READABILITY FIX: the comprehension previously reused `x` as its loop variable,
    # shadowing the column argument inside the comprehension; renamed to `rgba`.
    # NOTE(review): plt.cm.get_cmap is deprecated/removed in recent matplotlib
    # (use matplotlib.colormaps[cmap]); left unchanged to match the pinned version.
    c = [colors.rgb2hex(rgba) for rgba in plt.cm.get_cmap(cmap)(normed)]
    return ['background-color: %s' % color for color in c]
# Column groups for the two result-table variants (risk stats vs return decomposition).
c1 = ['sharpe_ratio', 'max_drawdown', 'mean_hold_time','n_transactions']
c2 = ['total_return','long_ret', 'short_ret', 'spread_ret', 'trans_ret']
# CSS injected into the rendered Styler HTML: table typography/borders, and fixed column widths.
stylestr = "* {font-family: \"Times New Roman\", Times, serif;} table, td, tr, th {text-align: center;border-color: black;border-width:thin;border-style:solid; border-width: 2px;border-collapse:collapse}"
widthstr = "table {table-layout: fixed; width: 200px;} th,td {width: 120px;overflow: hidden;}"
# Render the result tables (per sample split, per model, per column group) to PNG files.
# NOTE(review): Styler.render() was deprecated and later removed in pandas (use
# Styler.to_html()); imgkit additionally requires wkhtmltoimage to be installed.
for t in ["test","train"]:
    for m in ["Benchmark","Kalman"]:
        for c in [1,2]:
            cols = c1 if c ==1 else c2
            # Row filter: this model within this sample split.
            f = np.in1d(r.index.get_level_values('Model'),m) & np.in1d(r.index.get_level_values('Sample'),t)
            e= r.droplevel("Model").droplevel("Sample").loc[f,cols].style.apply(b_g, filt = f)
            html = e.render()
            # Inject the CSS rules right after the document's opening tag (first 27 chars).
            html = html[0:27] + stylestr + widthstr + html[27:len(html)]
            path = "OUTPUT/Tables/r_" + m + "_" + t + "_" + str(c) + ".png"
            imgkit.from_string(html, path)
# +
# Render the full settings tables, one per model; Kalman and Benchmark use different columns.
f = np.in1d(s.index.get_level_values('Model'),"Kalman")
html = s.loc[f,['transaction_cost', 'lag', 'resample', 'rounding', 'entryMult', 'exitMult']]\
    .droplevel("Model").style.apply(b_g, r=False, filt = f).render()
html = html[0:27] + stylestr + html[27:len(html)]
path = "OUTPUT/Tables/s_all_Kalman.png"
imgkit.from_string(html, path)
# Benchmark rows are the complement of the Kalman mask.
html = s.loc[~f,['transaction_cost', 'lag', 'resample', 'rounding', 'windowZ','windowOLS', 'entryZ', 'exitZ']]\
    .droplevel("Model").style.apply(b_g, r=False, filt = ~f).render()
html = html[0:27] + stylestr + html[27:len(html)]
path = "OUTPUT/Tables/s_all_Benchmark.png"
imgkit.from_string(html, path)
# -
# Settings tables split by model (Kalman vs Benchmark) and data set (Ryanair vs HSBC).
for f1 in [True,False]:
    for f2 in [True,False]:
        filt1 = skal if f1 else ~skal
        filt2 = srya if f2 else shsbc
        filt = filt1 & filt2
        # Kalman and Benchmark strategies expose different parameter columns.
        cols = (['transaction_cost', 'lag', 'resample', 'rounding', 'entryMult', 'exitMult'] if f1 else
                ['transaction_cost', 'lag', 'resample', 'rounding', 'windowZ','windowOLS', 'entryZ', 'exitZ'])
        html = s.loc[filt,cols].droplevel("Data").droplevel("Model").dropna('columns').style.apply(b_g, filt=filt, r=False).render()
        html = html[0:27] + stylestr + html[27:len(html)]
        d1 = "Kalman" if f1 else "Benchmark"
        d2 = "Ryanair" if f2 else "HSBC"
        path = "OUTPUT/Tables/s_"+d1+"_"+d2+".png"
        imgkit.from_string(html, path)
# Result tables split by model, data set and column group, rendered to PNG.
for f1 in [True,False]:
    for f2 in [True,False]:
        for c in [1,2]:
            cols = c1 if c ==1 else c2
            filt1 = rkal if f1 else ~rkal
            filt2 = rrya if f2 else rhsbc
            filt = filt1 & filt2
            html = r.droplevel("Data").droplevel("Model").loc[filt,cols].style.apply(b_g, filt=filt).render()
            # Inject CSS (typography/borders plus fixed widths) after the opening tag.
            html = html[0:27] + stylestr+widthstr + html[27:len(html)]
            d1 = "Kalman" if f1 else "Benchmark"
            d2 = "Ryanair" if f2 else "HSBC"
            path = "OUTPUT/Tables/r_"+d1+"_"+d2+"_"+str(c)+".png"
            imgkit.from_string(html, path)
| NOTEBOOKS/Display Results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kaggle Seattle Airbnb
#
# ## Project Info
#
# ### Author Info
#
# - Author: [<NAME>](https://www.linkedin.com/in/zacks-shen/)
# - Contributor: [<NAME>](https://www.linkedin.com/in/yen-duo-chu/)
#
# ---
#
# ### GitHub
#
# - [Kaggle-Seattle-Airbnb](https://github.com/ZacksAmber/Kaggle-Seattle-Airbnb)
#
# ---
#
# ### Kaggle
#
# - [Kaggle-Seattle-Airbnb](https://www.kaggle.com/zacksshen/kaggle-seattle-airbnb)
#
# ---
#
# ### Reference
#
# > [Kaggle](https://www.kaggle.com/airbnb/seattle)<br>
# > [Data Source](http://insideairbnb.com/seattle)<br>
# > [Map](http://insideairbnb.com/get-the-data.html)
#
#
# ---
#
# ### An important clarification
#
# The price was mainly decided by the location, room type, amenities and etc.<br>
# But the number of reviews, the quality and reviews, and other features are helpful for ranking on the search engine and bringing a good impression to the users.
#
# ---
#
# ### Pros
#
# This project explored the relationships between the features and the nightly price set by the hosts.<br>
# It also exposed how Airbnb decide your listing's value.<br>
# Machine Learning models for price prediction can intensify competition among hosts on Airbnb, leading to the following benefits:
# - Hosts have an intuitive opportunity to compare their services and amenities with their competitors. As competition intensifies, the overall service quality and market size in the rental housing market and the online travel booking market will be improved.
# - Hosts can set a competitive price to improve the chance of getting booked; Renters can find better listings at a lower price.
# - Hosts can increase their rent when demand is in short supply.
# - Airbnb can grow faster, attracting more users and hosts, which reduces costs such as operational and data-center costs.
# - Airbnb has more opportunities to explore related businesses such as collaboration with car rental companies (e.g, AVIS), online travel companies (e.g, Tripadvisor), and social networks (e.g, Instagram).
#
# ---
#
# ### Cons
#
# The model accuracy will be lower than expected due to missing some essential features.
# - Transaction data: The datasets do not contain transaction data, such as the historical transaction data or if the listings have been booked.
# - We don't know if the listings are booked or just unavailable.
# - We will explore how to find the confirmed orders under the Exploratory Data Analysis part.
# - The search engine data: The datasets do not contain searching histories.
#
# ---
#
# ## Conclusion
#
# Hosts could adjust the following features to improve their competitiveness.
# - Decrease
# - `cleaning_fee`
# - `security_deposit`
#
# - Change
# - `cancellation_policy`
# - `bed_type`
# - `beds`
# - `guests_included`
# - `extra_people`
# - `accommodates`
#
# - Amenities:
# - `Indoor Fireplace`
# - `Cable TV`
# - `TV`
# - `Doorman`
# - `Dryer`
# - `Air Conditioning`
# - `Gym`
# - `Family/Kid Friendly`
# - `Kitchen`
# - `Washer`
#
# - Hard to change:
# - `neighbourhood_group_cleansed`
# - `room_type`
# - `bathrooms`
# - `bedrooms`
#
#
# Although we don't have the transaction data, and the number of reviews did not play an important role in price prediction, I'm sure such features influence the ranking on the search engine. In other words, an Airbnb host could initially lower the price and set a short minimum-night requirement to gain more reviews quickly. Then the listing has a much higher chance of being discovered by potential renters.
# ---
#
# # Airbnb Exploration
#
# ## Who needs the prediction? The host.
#
# <details>
# <summary>Click to see ML sample in Airbnb</summary>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919210229.png' alt='After 10 Steps, you can set a price per night'>
#
# <center>The price is suggested by Airbnb</center>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919210423.png'>
#
# <center>Suggested prices are based on your listing’s location and amenities, as well as nearby prices and demand. You choose your price, and you can adjust it at any time.</center>
#
# </details>
#
# ---
#
# ## What prediction does Airbnb use? Tip and Smart Pricing.
#
# <details>
# <summary>Click to see <b>Tip</b></summary>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211009230743.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211009230813.png'>
#
# </details>
#
# <details>
# <summary>Click to see <b>Smart Pricing</b></summary>
#
# Smart Pricing lets you set your prices to automatically match demand. It’s controlled by the pricing settings you choose, and you can adjust nightly prices anytime.
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919213149.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919213824.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919214931.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919214948.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919215026.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919215039.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20210919215201.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211003122330.png'>
#
# </details>
#
# ---
#
# ## How does the prediction work? Price suggestion based on features.
#
# The following features are defined by Airbnb:
#
# - How many people are searching for listings like yours
# - The dates they’re looking at
# - Whether other listings are getting booked
# - Your listing’s best qualities
# - Your neighborhood: To calculate pricing based on location, Smart Pricing looks at whether your listing is in a city neighborhood, a suburb, or a more spread-out area.
# - Review rate: The number and quality of your reviews is another key factor in Smart Pricing.
# - Completed trips: If you honor most confirmed reservations, your prices can go higher within the minimum and maximum range you set.
# - Your listing' amenities: Wi-fi, washer/dryer, and air conditioning are especially important, but Smart Pricing looks at all your amenities.
#
# <details>
# <summary>Click to see <b>Calendar</b></summary>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211003123104.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211003123145.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211003123708.png'>
#
# <img src='https://raw.githubusercontent.com/ZacksAmber/PicGo/master/img/20211003123609.png'>
#
# </details>
# ---
#
# # Dependencies
# + _kg_hide-input=false tags=[]
# Statistics
import pandas as pd
import numpy as np
import math as mt
# Data Visualization
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
# Default plotly figure size (pixels).
px.defaults.width = 1200
px.defaults.height = 800
# plotly.io Settings for both plotly.graph_objects and plotly.express
pio.templates.default = "plotly_white" # "plotly", "plotly_white", "plotly_dark", "ggplot2", "seaborn", "simple_white", "none"
pio.kaleido.scope.default_format = 'svg'
pio.kaleido.scope.default_scale = 1
# Data Preprocessing - Standardization, Encoding, Imputation
from sklearn.preprocessing import StandardScaler # Standardization
from sklearn.preprocessing import Normalizer # Normalization
from sklearn.preprocessing import OneHotEncoder # One-hot Encoding
from sklearn.preprocessing import OrdinalEncoder # Ordinal Encoding
from category_encoders import MEstimateEncoder # Target Encoding
from sklearn.preprocessing import PolynomialFeatures # Create Polynomial Features
from sklearn.impute import SimpleImputer # Imputation
# Exploratory Data Analysis - Feature Engineering
from sklearn.preprocessing import PolynomialFeatures
from sklearn.feature_selection import mutual_info_regression
from sklearn.decomposition import PCA
# Modeling - ML Pipelines
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
# Modeling - Algorithms
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
#from catboost import CatBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
# ML - Evaluation
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
# ML - Tuning
import optuna
#from sklearn.model_selection import GridSearchCV
# Settings
# Settings for Seaborn
sns.set_theme(context='notebook', style='ticks', palette="bwr_r", font_scale=0.7, rc={"figure.dpi":240, 'savefig.dpi':240})
# -
# ---
#
# ## Load Datasets
#
# - listings: The listings' information including nightly price (base price).
# - reviews: All of the past reviews for each listing.
# - calendar: The availability for each listing with the base price, special price, or smart price.
# + tags=[]
import os

kaggle_project = 'seattle'
# Import dataset from local directory './data' or from Kaggle.
# NOTE(review): the existence check is on 'data' but the path read is
# './data/202107' — confirm that subfolder always exists when './data' does.
data_dir = ('./data/202107' if os.path.exists('data') else f'/kaggle/input/{kaggle_project}')
# print all files in data_dir (sanity check of the available inputs)
for dirname, _, filenames in os.walk(data_dir):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# Import the three datasets; prefer listings_kfold.csv (listings with a
# precomputed 'kfold' column) when it has already been generated.
reviews = pd.read_csv(f'{data_dir}/reviews.csv')
calendar = pd.read_csv(f'{data_dir}/calendar.csv')
listings = pd.read_csv(f'{data_dir}/listings_kfold.csv') if os.path.exists(f'{data_dir}/listings_kfold.csv') else pd.read_csv(f'{data_dir}/listings.csv')
# -
# ---
#
# ## Cross-Validation KFold
# + tags=[]
def generate_listings_kfold():
    """Assign a 5-fold cross-validation id to every listing and persist it.

    Reads the raw listings.csv from ``data_dir`` and writes
    listings_kfold.csv with an extra 'kfold' column (values 0-4),
    overwriting any previous copy.
    """
    # Mark the train dataset with kfold = 5
    listings = pd.read_csv(f'{data_dir}/listings.csv')
    # Remove a stale output first so the write below is a clean regeneration.
    if os.path.exists(f'{data_dir}/listings_kfold.csv'):
        os.remove(f'{data_dir}/listings_kfold.csv')
    # shuffle + fixed seed -> reproducible fold assignment
    kf = KFold(n_splits=5, shuffle=True, random_state=42)
    for fold, (train_idx, valid_idx) in enumerate(kf.split(X=listings)):
        listings.loc[valid_idx, "kfold"] = fold
    listings.to_csv(f'{data_dir}/listings_kfold.csv', index=False)

generate_listings_kfold()
# NOTE(review): on a first run the module-level `listings` loaded above may
# lack the new 'kfold' column — re-load listings_kfold.csv if so.
#listings = pd.read_csv(f'listings_kfold.csv')
# + tags=[]
# After assigning kfold
# If error, run the above function then re-load listings_kfold.csv
listings.loc[:, ['id', 'kfold']].head()
# -
# ---
#
# ## Load Metadata
#
# The [metadata](https://docs.google.com/spreadsheets/d/1M_qah-ym6O8vDcSmoKAP-lbZRPHUey83R_DJaW3LXfs/edit?usp=sharing) was analyzed and made by [<NAME>](https://www.linkedin.com/in/zacks-shen/) and [<NAME>](https://www.linkedin.com/in/yen-duo-chu/).
#
# The metadata includes `Label`, `Data Type`, `Description`, `ML`, `Reason`:
# - `Label`: the column name
# - `Data Type`: the data type of the column
# - `Description`: the label usage based on our observation from Airbnb
# - `ML`: is it useful for Machine Learning
# - `0`: cannot be used for ML since it is meaningless, or it's hard to measure (e.g. listing photos)
# - `1`: must be used for ML due to the official description by Airbnb
# - `2`: could possibly be used for ML based on our assessment
# - `Reason`: why the label can or cannot be used for ML (may empty)
# + tags=[]
# Define sheet id and base url for the shared Google Sheets metadata document.
sheet_id = "1M_qah-ym6O8vDcSmoKAP-lbZRPHUey83R_DJaW3LXfs"
# The gviz/tq endpoint with tqx=out:csv exports a single named sheet as CSV.
base_url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet="
# Load the per-table metadata (Label, Data Type, Description, ML, Reason).
listings_metadata = pd.read_csv(base_url+"listings")
calendar_metadata = pd.read_csv(base_url+"calendar")
reviews_metadata = pd.read_csv(base_url+"reviews")
# -
# ---
#
# # ETL Pipeline
#
# The ETL pipeline provides data transformation and formatting. Thus, we can calculate the data and perform machine learning with the correct data format.
# + tags=[]
class ETL_pipeline:
    """Coerce the raw Airbnb CSV columns into numeric/datetime types.

    One parser per table: ``parse_listings``, ``parse_reviews`` and
    ``parse_calendar``.  ``parse_calender`` is kept as a backward-compatible
    alias of the original, misspelled method name.

    Args:
        data_frame (pd.DataFrame): one of the raw listings / reviews /
            calendar tables; the parsers modify it in place and return it.
    """

    def __init__(self, data_frame):
        self.df = data_frame

    # Data type transformation shared by the listings parser.
    def _transformation(self, data_frame):
        df = data_frame
        # Convert dollar columns from object to float by removing '$' and ','.
        # Raw string: '\$' is an invalid escape sequence in a plain literal.
        dollar_cols = ['price']
        for dollar_col in dollar_cols:
            df[dollar_col] = df[dollar_col].replace(r'[\$,]', '', regex=True).astype(float)
        # Convert percentage columns from object to float by removing '%'.
        percent_cols = ['host_response_rate', 'host_acceptance_rate']
        for percent_col in percent_cols:
            df[percent_col] = df[percent_col].replace('%', '', regex=True).astype(float)
        # Collapse rare property types into "Unique space" (small sample sizes).
        unique_space = ["Barn", "Boat", "Bus", "Camper/RV", "Treehouse",
                        "Campsite", "Castle", "Cave", "Dome House",
                        "Earth house", "Farm stay", "Holiday park",
                        "Houseboat", "Hut", "Igloo", "Island", "Lighthouse",
                        "Plane", "Ranch", "Religious building",
                        "Shepherd’s hut", "Shipping container", "Tent",
                        "Tiny house", "Tipi", "Tower", "Train", "Windmill",
                        "Yurt", "Riad", "Pension", "Dorm", "Chalet"]
        df.property_type = df.property_type.replace(unique_space, "Unique space", regex=True)
        # Convert 't'/'f' flag columns to 1/0.
        tf_cols = ['host_is_superhost', 'instant_bookable', 'require_guest_profile_picture', 'require_guest_phone_verification']
        for tf_col in tf_cols:
            df[tf_col] = df[tf_col].replace('f', 0, regex=True)
            df[tf_col] = df[tf_col].replace('t', 1, regex=True)
        return df

    def parse_listings(self):
        """Parse listings: dollar/percent columns to float, 't'/'f' flags to
        1/0, rare property types collapsed into "Unique space"."""
        df = self.df
        df = self._transformation(df)
        return df

    def parse_reviews(self):
        """Parse reviews: convert the date column to datetime."""
        df = self.df
        df.date = pd.to_datetime(df.date)
        return df

    def parse_calendar(self):
        """Parse calendar: date to datetime, price to float, available to 1/0."""
        df = self.df
        # Convert date from object to datetime
        df.date = pd.to_datetime(df.date)
        # Strip '$' and ',' before the float cast (price is NaN for some rows).
        df.price = df.price.replace(r'[\$,]', '', regex=True).astype(float)
        # Convert 't', 'f' to 1, 0
        df['available'] = df['available'].replace('f', 0, regex=True)
        df['available'] = df['available'].replace('t', 1, regex=True)
        return df

    # Backward-compatible alias for the original misspelled method name.
    parse_calender = parse_calendar
# + tags=[]
# e.g. Before parsing: price is still a '$1,234.00'-style string.
listings.loc[:4, ['id', 'price']]
# -
# bathrooms_text
# bedrooms 4
# Quick format checks before running the ETL pipeline.
listings.host_acceptance_rate.unique()
listings.columns.sort_values()
# + tags=[]
# Run the ETL pipeline over all three tables (the parsers also mutate
# the frames in place; re-binding keeps the names consistent).
listings = ETL_pipeline(listings).parse_listings()
reviews = ETL_pipeline(reviews).parse_reviews()
calendar = ETL_pipeline(calendar).parse_calender()
# -
# e.g. After parsing: price is numeric.
listings.loc[:4, ['id', 'price']]
# ---
#
# # ML Pipeline
#
# `EDA_demand` calculates the demand for each listing from csv reviews.<br>
# `ML Pipeline` imputes and transforms data for Machine Learning.
class EDA_demand:
    """Estimate per-listing demand from the reviews and calendar tables.

    Relies on the module-level ``reviews``, ``listings`` and ``calendar``
    DataFrames (already parsed by ``ETL_pipeline``).

    An unavailable calendar day can mean either "booked" (real demand) or
    "blocked by the host" (no demand).  Listings with a low review rate but
    very high unavailability are treated as host-blocked outliers and their
    demand is inverted (period minus unavailable days).
    """

    def __init__(self):
        pass

    def reviews_rate_vs_unavailability(self, period=30):
        """Compute reviews-per-day and unavailability per listing, then derive demand.

        Args:
            period (int): Number of days after the scrape date to inspect,
                1..365 inclusive. Default is 30.

        Returns:
            pd.DataFrame indexed by listing_id with columns
            unavailability_{period}_unscaled, unavailability_{period},
            first_day, last_day, datediff, reviews_per_day and demand.
        """
        assert (0 < period <= 365) & isinstance(period, int), "period must be an integer in 1..365"
        self.period = period
        # --- review rate: COUNT(reviews) / days listed ---------------------
        # SQL sketch:
        #   SELECT listing_id,
        #          COUNT(listing_id) / DATEDIFF(20160104+1, MIN(date)) AS reviews_per_day
        #   FROM reviews GROUP BY listing_id
        # Extract the first review date for each listing.
        first_day_func = lambda df: pd.Series({'first_day': df.date.min()})
        df_reviews_per_day = pd.DataFrame(reviews.groupby('listing_id').apply(first_day_func))
        # All rows share one scrape date; use it as the observation end point.
        last_scraped = listings.last_scraped.unique()[0]
        last_scraped = pd.Timestamp(last_scraped)
        df_reviews_per_day['last_day'] = last_scraped + pd.DateOffset(days=1)
        # Days each listing has been reviewed over.
        df_reviews_per_day['datediff'] = df_reviews_per_day.last_day - df_reviews_per_day.first_day
        df_reviews_per_day['datediff'] = df_reviews_per_day['datediff'].dt.days
        df_reviews_per_day['reviews_per_day'] = reviews.groupby('listing_id').size() / df_reviews_per_day['datediff']
        # --- unavailability within the window ------------------------------
        # SQL sketch:
        #   SELECT listing_id, SUM(IF(available = 0, 1, 0)) FROM calendar
        #   WHERE DATEDIFF(date, 20160104) <= period GROUP BY listing_id
        last_day = last_scraped + pd.DateOffset(days=period-1)
        # Renamed from `filter`, which shadowed the builtin.
        date_mask = calendar.date <= (last_day)
        unavail_func = lambda df: pd.Series({f'unavailability_{period}_unscaled': sum(df.available == 0)})
        df_unavailability = pd.DataFrame(calendar[date_mask].groupby('listing_id').apply(unavail_func))
        # Scale the raw day count to a 0..1 rate.
        df_unavailability[f'unavailability_{period}'] = df_unavailability[f'unavailability_{period}_unscaled'] / period
        self.df_unavailability = df_unavailability
        # Join the two per-listing tables; listings with no reviews get rate 0.
        df_unavailability_reviews = df_unavailability.join(df_reviews_per_day, how='left')
        df_unavailability_reviews.reviews_per_day.fillna(value=0, inplace=True)
        # --- outliers: unavailable rather than booked ----------------------
        # Extract quantiles.
        reviews_rate_25 = df_unavailability_reviews.reviews_per_day.quantile(q=0.25, interpolation='higher')
        unavailability_75 = df_unavailability_reviews[f'unavailability_{period}'].quantile(q=0.75, interpolation='higher')
        # Low review rate AND high unavailability -> probably host-blocked.
        low_review_rate = df_unavailability_reviews.reviews_per_day < reviews_rate_25
        high_unavailability = df_unavailability_reviews[f'unavailability_{period}'] > unavailability_75
        outliers = df_unavailability_reviews[low_review_rate & high_unavailability]
        # Demand = unavailable days, except for outliers where it is inverted.
        df_unavailability_reviews['demand'] = df_unavailability_reviews[f'unavailability_{period}_unscaled']
        df_unavailability_reviews.loc[outliers.index, 'demand'] = period - df_unavailability_reviews.loc[outliers.index, 'demand']
        self.outliers = outliers
        self.df_unavailability_reviews = df_unavailability_reviews
        return self.df_unavailability_reviews

    def plot(self, outliers=True):
        """Plot reviews-per-day vs. unavailability for the chosen subset.

        Must be called after ``reviews_rate_vs_unavailability``.

        Args:
            outliers (bool): Plot the outlier subset when True, the remaining
                listings otherwise. Default is True.

        Returns:
            Plotly figure.
        """
        period = self.period
        if outliers is True:
            idx = self.outliers.index
        else:
            idx = self.df_unavailability_reviews.index.drop(self.outliers.index)
        df = self.df_unavailability_reviews.loc[idx, :]
        assert df.shape[0] > 0, "No records"
        fig = px.line(df,
                      x=df.index,
                      y=[f'unavailability_{period}', 'reviews_per_day'],
                      color_discrete_sequence=['rgb(71, 92, 118, 0.9)', 'rgb(250, 211, 102, 0.9)']
                      )
        # Bug fix: the title previously said "Outliers" for both subsets.
        subset = 'Outliers' if outliers else 'Non-outliers'
        fig.update_layout(title=f'Unavailability per day vs. reviews per day<br>{subset}', xaxis_title='index', yaxis_title='Rate')
        return fig
class ML_pipeline:
    """ML data-preparation pipeline for listings.

    Joins the per-listing demand estimate from ``EDA_demand``, expands the
    packed amenities string into boolean columns, and serves imputed +
    encoded train/valid splits per cross-validation fold via ``getData``.
    """

    # Categorical columns encoded before modeling (shared by both encoders).
    _CATEGORICAL = ['cancellation_policy', 'require_guest_profile_picture', 'require_guest_phone_verification',
                    'neighbourhood_group_cleansed', 'property_type', 'instant_bookable', 'room_type', 'bed_type']

    def __init__(self, data_frame, features, target, days=365):
        """
        Args:
            data_frame (pd.DataFrame): listings (must contain an 'id' column).
            features (list): The Machine Learning feature names.  The list is
                copied, so the caller's list is no longer mutated (the
                original implementation appended ``target`` in place).
            target (str): Name of the target column, e.g. 'price'.
            days (int): Days after the scrape date used to compute demand.
        """
        import warnings
        warnings.filterwarnings("ignore")  # ignore target encoding warnings
        self.target = target
        # Per-listing demand derived from reviews + calendar availability.
        demand = EDA_demand().reviews_rate_vs_unavailability(days)
        # The inner join re-indexes the frame by listing id.
        data_frame = data_frame.set_index('id').join(demand['demand'], how='inner')
        # Copy the feature list instead of mutating the caller's argument.
        data_frame = data_frame[list(features) + [target]]
        # Expand amenities into boolean columns, then drop the source string.
        data_frame = self._encode_amenities(data_frame)
        data_frame.pop('amenities')
        self.data_frame = data_frame

    def _encode_amenities(self, data_frame):
        """Expand the '{"a","b",...}' amenities string into amenity_* booleans."""
        # Strip the surrounding braces and quote characters.
        data_frame.amenities.replace('[{}"]', '', regex=True, inplace=True)
        # Split amenities on ',' into one column per position.
        exploded = data_frame.amenities.str.split(',', expand=True)
        # Collect the distinct amenity names across all split columns.
        amenity_names = set()
        for col in exploded.columns:
            amenity_names.update(exploded[col].unique())
        # discard() is a no-op when absent (the original remove() would raise).
        amenity_names.discard('')
        amenity_names.discard(None)
        # Only two rows have 'Washer / Dryer' and both also list Washer and Dryer.
        amenity_names.discard('Washer / Dryer')
        # When 'Pets live on this property' is True, one or more of
        # 'Cat(s)', 'Dog(s)', 'Other pet(s)' will appear.
        amenities_enc = pd.DataFrame()
        for amenity in amenity_names:
            # regex=False: amenity names contain metacharacters such as '(s)'.
            amenities_enc[amenity] = data_frame.amenities.str.contains(amenity, regex=False)
        # Rename the columns with prefix amenity_.
        amenities_enc.columns = [f"amenity_{col}" for col in amenities_enc.columns]
        # Concat encoded amenities and data_frame.
        return pd.concat([data_frame, amenities_enc], axis=1)

    # Backward-compatible alias for the original (misspelled) method name.
    _encode_amentities = _encode_amenities

    def _impute_block(self, imputer, cols, X_train, X_valid):
        """Fit *imputer* on X_train[cols] and transform both splits,
        restoring the original column names and row indexes."""
        imp_train = pd.DataFrame(imputer.fit_transform(X_train[cols]),
                                 columns=cols, index=X_train.index)
        imp_valid = pd.DataFrame(imputer.transform(X_valid[cols]),
                                 columns=cols, index=X_valid.index)
        return imp_train, imp_valid

    def _imputation(self, X_train, X_valid, y_train, y_valid):
        """Impute missing values, fitting on the train split only.

        Strategy per column group:
        - zero: engagement/fee columns where NaN means "none recorded"
        - mean: review-score columns
        - mode: room counts and property_type
        """
        X_train, X_valid, y_train, y_valid = X_train.copy(), X_valid.copy(), y_train.copy(), y_valid.copy()
        # Zero imputation
        zero_features = ['reviews_per_month', 'host_response_rate', 'host_is_superhost', 'security_deposit', 'cleaning_fee']
        zero_imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)
        zero_train, zero_valid = self._impute_block(zero_imp, zero_features, X_train, X_valid)
        X_train[zero_features] = zero_train.astype(float)
        X_valid[zero_features] = zero_valid.astype(float)
        # Mean imputation
        mean_features = ['host_acceptance_rate', 'review_scores_accuracy', 'review_scores_checkin',
                         'review_scores_value', 'review_scores_location', 'review_scores_cleanliness',
                         'review_scores_communication', 'review_scores_rating']
        mean_imp = SimpleImputer(missing_values=np.nan, strategy='mean')
        mean_train, mean_valid = self._impute_block(mean_imp, mean_features, X_train, X_valid)
        X_train[mean_features] = mean_train.astype(float)
        X_valid[mean_features] = mean_valid.astype(float)
        # Mode imputation
        mode_features = ['bathrooms', 'bedrooms', 'beds', 'property_type']
        mode_imp = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
        mode_train, mode_valid = self._impute_block(mode_imp, mode_features, X_train, X_valid)
        count_cols = ['bathrooms', 'bedrooms', 'beds']
        # NOTE(review): astype(int) truncates fractional bathrooms (e.g. 1.5);
        # kept for behavioral parity with the original implementation.
        mode_train[count_cols] = mode_train[count_cols].astype(int)
        mode_valid[count_cols] = mode_valid[count_cols].astype(int)
        X_train[mode_features] = mode_train
        X_valid[mode_features] = mode_valid
        return X_train, X_valid, y_train, y_valid

    def _ordinal_encoding(self, X_train, X_valid, y_train, y_valid):
        """Ordinal-encode the categorical columns (fit on train only).

        NOTE: originally named _one_hot_encoding, but it has always used
        OrdinalEncoder; the name is corrected and the old one kept as alias.
        """
        X_train, X_valid, y_train, y_valid = X_train.copy(), X_valid.copy(), y_train.copy(), y_valid.copy()
        oe = OrdinalEncoder()
        X_train[self._CATEGORICAL] = oe.fit_transform(X_train[self._CATEGORICAL])
        X_valid[self._CATEGORICAL] = oe.transform(X_valid[self._CATEGORICAL])
        return X_train, X_valid, y_train, y_valid

    # Backward-compatible alias for the original (misleading) method name.
    _one_hot_encoding = _ordinal_encoding

    def _target_encoding(self, X_train, X_valid, y_train, y_valid):
        """M-estimate target-encode the categorical columns (fit on train only)."""
        X_train, X_valid, y_train, y_valid = X_train.copy(), X_valid.copy(), y_train.copy(), y_valid.copy()
        # Create the encoder instance; m controls smoothing toward the mean.
        target_enc = MEstimateEncoder(cols=self._CATEGORICAL, m=5.0)
        X_train = target_enc.fit_transform(X_train, y_train)
        X_valid = target_enc.transform(X_valid)
        return X_train, X_valid, y_train, y_valid

    def getData(self, kfold, target_encoding=True):
        """Return the imputed, encoded split for one cross-validation fold.

        Args:
            kfold (int): Fold id held out as the validation set.
            target_encoding (bool): Target-encode categoricals when True,
                ordinal-encode otherwise.

        Returns:
            tuple: (X_train, X_valid, y_train, y_valid)
        """
        data_frame = self.data_frame.copy()
        # Split train and valid by the precomputed fold column.
        X_train = data_frame[data_frame.kfold != kfold]
        X_valid = data_frame[data_frame.kfold == kfold]
        # pop() removes the target column and returns it as the label series.
        # (generalized: the original popped the hard-coded 'price')
        y_train = X_train.pop(self.target)
        y_valid = X_valid.pop(self.target)
        # Imputation
        X_train, X_valid, y_train, y_valid = self._imputation(X_train, X_valid, y_train, y_valid)
        # Categorical encoding
        if target_encoding:
            X_train, X_valid, y_train, y_valid = self._target_encoding(X_train, X_valid, y_train, y_valid)
        else:
            X_train, X_valid, y_train, y_valid = self._ordinal_encoding(X_train, X_valid, y_train, y_valid)
        return X_train, X_valid, y_train, y_valid
# e.g. Before ML pipeline: raw columns, including the packed amenities string.
listings.loc[:2, ['id', 'neighbourhood_group_cleansed', 'property_type', 'amenities', 'price']]
# +
# e.g. After ML pipeline
# Feature list chosen from the metadata sheet plus the fold id.
features = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
            'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
            'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
            'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
            'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
            'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
            'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
ml_pipeline = ML_pipeline(data_frame=listings, features=features, target='price')
X_train, X_valid, y_train, y_valid = ml_pipeline.getData(kfold=0, target_encoding=True) # perform target encoding
# Re-assemble the full matrix (all folds) with the target for the EDA below.
X = pd.concat([X_train, X_valid], axis=0)
y = pd.concat([y_train, y_valid])
X['price'] = y
# -
# Spot-check three listing ids: categorical columns are now numeric codes.
X.loc[[241032, 953595, 3308979], ['neighbourhood_group_cleansed', 'property_type', 'price']]
# e.g. After ML pipeline: one boolean amenity_* column per amenity.
X.loc[[241032, 953595, 3308979], 'amenity_Elevator in Building':]
# ---
#
# # EDA and Feature Engineering
# ## Heatmap 1.0
#
# Pandas `.corr()` method calculates the Pearson correlation coefficient between each pair of features.<br>
# And heatmap shows the relationship more clearly.<br>
# Here, `accommodates`, `bathrooms`, `bedrooms`, `beds`, `security_deposit`, `cleaning_fee`, `guests_included`, `extra_people` have relatively higher correlation coefficient than other features with `price`.
features = listings_metadata[(listings_metadata.ML == 1) | (listings_metadata.ML == 2)].Label.to_list() # Official & Possible ML features
features.append('price') # Add target
plt.figure(dpi=800)
sns.heatmap(listings[features].corr(), cmap="rocket", annot=True, annot_kws={"fontsize": 3});
# ---
#
# ## Heatmap 2.0
#
# Since Pandas `.corr()` only calculates the numeric data. I performed target encoding then draw the heatmap again.
#
# `room_type`, `neighbourhood_group_cleansed`, `bed_type`, `cancellation_policy` were categorical data that cannot be calculated the correlation coefficient. But after target encoding, we can clearly see they have a very high impact on price.
# For the amenities, `TV`, `Hot Tub`, `Kitchen`, `Indoor Fireplace`, `Dryer`, `Family/Kid Friendly`, `Doorman`, `Gym`, `Cable TV`, `Washer`, `Air Conditioning` are the more important than other amenities.
#
#
# All available amenities:
# - 24-Hour Check-in
# - Air Conditioning
# - Breakfast
# - Buzzer/Wireless Intercom
# - Cable TV
# - Carbon Monoxide Detector
# - Cat(s)
# - Dog(s)
# - Doorman
# - Dryer
# - Elevator in Building
# - Essentials
# - Family/Kid Friendly
# - Fire Extinguisher
# - First Aid Kit
# - Free Parking on Premises
# - Gym
# - Hair Dryer
# - Hangers
# - Heating
# - Hot Tub
# - Indoor Fireplace
# - Internet
# - Iron
# - Kitchen
# - Laptop Friendly Workspace
# - Lock on Bedroom Door
# - Other pet(s)
# - Pets Allowed
# - Pets live on this property
# - Pool
# - Safety Card
# - Shampoo
# - Smoke Detector
# - Smoking Allowed
# - Suitable for Events
# - TV
# - Washer
# - Washer / Dryer
# - Wheelchair Accessible
# - Wireless Internet
def clean_corr(df, target, threshold):
    """Return the correlation matrix restricted to features whose absolute
    correlation with *target* is at least *threshold*.

    Args:
        df (DataFrame): Input data; ``df.corr()`` is computed internally.
        target (str): Name of the target column.  Its self-correlation is
            1.0, so it always survives the filter (for threshold <= 1).
        threshold (float): The minimum required absolute correlation
            coefficient.

    Returns:
        DataFrame: the filtered, still-square ``df.corr()`` matrix.
    """
    corr = df.corr()
    # Keep a column unless its |corr| with target is provably below the
    # threshold.  NaN correlations (e.g. constant columns) compare False
    # and are therefore kept, matching the original drop-based loop.
    keep = ~(corr[target].abs() < threshold)
    return corr.loc[keep, keep]
# Keep only features whose |corr| with price is at least 0.1, then plot.
X_corr = clean_corr(X, 'price', 0.1)
plt.figure(dpi=800)
sns.heatmap(X_corr, cmap="rocket", annot=True, annot_kws={"fontsize": 3});
# ---
#
# ## Map
#
# For more ideas, visualizations of all Seattle datasets can be found [here](http://insideairbnb.com/seattle/).
#
# > Reference: [Scatter Plots on Mapbox in Python](https://plotly.com/python/scattermapbox/#multiple-markers)
# + tags=[]
#px.set_mapbox_access_token(open(".mapbox_galaxy").read())
# NOTE(review): '<KEY>' is a redacted placeholder — supply a real Mapbox
# token (e.g. read it from a local file as in the commented line above).
px.set_mapbox_access_token('<KEY>')
# One marker per listing, colored by cleansed neighbourhood group.
fig = px.scatter_mapbox(listings,
                        lat='latitude',
                        lon='longitude',
                        color='neighbourhood_group_cleansed',
                        #size='price',
                        color_continuous_scale=px.colors.cyclical.IceFire,
                        hover_name='id',
                        hover_data=['listing_url', 'property_type', 'room_type'],
                        size_max=15,
                        zoom=10,
                        title='Map of price group by neighbourhood_group_cleansed')
fig.show()
# -
# ---
#
# ## Histogram
#
# Take a glance at the following high correlation coefficient features.
#
# `accommodates`, `bathrooms`, `bedrooms`, `beds`, `security_deposit`, `cleaning_fee`, `guests_included`, `extra_people`, `room_type`, `neighbourhood_group_cleansed`, `bed_type`, `cancellation_policy`
# + tags=[]
# Price distribution (percent-normalized) split by property type.
# NOTE(review): 'propery_type' in the title is a typo in the displayed label.
fig = px.histogram(listings.dropna(subset=['property_type'], axis=0),
                   x='price',
                   histnorm='percent',
                   color='property_type',
                   title='Histogram of price vs. propery_type')
fig.show()
# -
# NOTE(review): probes for a suspicious room_type value of '1125' — looks
# like a data-quality check; confirm whether such rows exist upstream.
listings[listings.room_type=='1125']
# + tags=[]
# Price distribution split by room type.
fig = px.histogram(listings.dropna(subset=['room_type'], axis=0),
                   x='price',
                   histnorm='percent',
                   color='room_type',
                   title='Histogram of price vs. room_type')
fig.show()
# + tags=[]
# Price distribution split by the number of beds.
fig = px.histogram(listings.dropna(subset=['beds'], axis=0),
                   x='price',
                   histnorm='percent',
                   color='beds',
                   title='Histogram of price vs. (number of) beds')
fig.show()
# -
listings.columns.sort_values()
# Price distribution split by superhost status (1/0 after ETL parsing).
fig = px.histogram(listings.dropna(subset=['host_is_superhost'], axis=0),
                   x='price',
                   histnorm='percent',
                   color='host_is_superhost',
                   title='Histogram of price vs. superhost')
fig.show()
# ---
#
# ## Parallel Coordinates Plot
#
# Reference: [Parallel Coordinates Plot in Python](https://plotly.com/python/parallel-coordinates-plot/)
# High-correlation features selected from the heatmaps above.
features = ['accommodates', 'bathrooms', 'bedrooms', 'beds', 'security_deposit', 'cleaning_fee',
            'guests_included', 'extra_people', 'room_type', 'neighbourhood_group_cleansed',
            'bed_type', 'cancellation_policy']
# NOTE(review): color_continuous_midpoint=2 is far below typical nightly
# prices — confirm the intended midpoint for the price color scale.
fig = px.parallel_coordinates(listings[features + ['price']].dropna(), color = "price",
                              color_continuous_scale = px.colors.diverging.Tealrose, color_continuous_midpoint = 2)
fig.show()
# ---
#
# ## Mutual Information
def make_mi_scores(X, y):
    """Score every feature of X by mutual information with the target y.

    Object/category columns are integer-factorized first, because
    mutual_info_regression only accepts numeric input; all integer-dtype
    columns are then flagged as discrete features.

    Returns:
        pd.Series: MI scores named "MI Scores", indexed by feature name
        and sorted in descending order.
    """
    frame = X.copy()
    # Factorize the non-numeric columns in place.
    for name in frame.select_dtypes(["object", "category"]).columns:
        codes, _uniques = frame[name].factorize()
        frame[name] = codes
    # Integer dtype marks a feature as discrete for the MI estimator.
    is_discrete = [pd.api.types.is_integer_dtype(dtype) for dtype in frame.dtypes]
    scores = mutual_info_regression(frame, y, discrete_features=is_discrete, random_state=0)
    return pd.Series(scores, name="MI Scores", index=frame.columns).sort_values(ascending=False)
# +
# %%time
# NOTE(review): this `features` list is not used below (X already holds the
# pipeline output); it appears to be left over from the earlier cell.
features = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
            'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
            'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
            'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
            'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
            'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
            'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
# Drop the fold id and the target before scoring the features.
X.drop(columns=['kfold', 'price'], inplace=True)
# Review the MI score from all data, rendered as an inline bar-styled table.
all_mi_scores = pd.DataFrame(make_mi_scores(X, y))
all_mi_scores.style.bar(align='mid', color=['#d65f5f', '#5fba7d'])
# -
# -
# ---
#
# ## Outliers and Real Demand
#
# > Reference: [Supply and demand](https://en.wikipedia.org/wiki/Supply_and_demand)
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/7/7a/Supply-and-demand.svg/1920px-Supply-and-demand.svg.png" width=50%>
#
# In [microeconomics](https://en.wikipedia.org/wiki/Microeconomics), **supply and demand** is an economic model of price determination in a market. The price _P_ of a product is determined by a balance between production at each price (supply _S_) and the desires of those with purchasing power at each price (demand _D_). The diagram shows a positive shift in demand from $D_1$ to $D_2$, resulting in an increase in price (_P_) and quantity sold (_Q_) of the product.
# However, the booked listings and unavailable listings are both `available = f` in the dataset **calendar**. As I mentioned above, we don't have the transaction data. Therefore, I designed a simple but effective model to filter the booked listings.
#
# For example:
#
# [listing 3402376](https://www.airbnb.com/rooms/3402376) has 5 reviews in total (1st review was in Sep 2014) but `available = 'f'` for 365 days. As you can see, the listing was not booked by someone for a whole year. Instead, the host set the listing as unavailable for 365 days.
# Raw calendar table: one row per listing per day.
calendar
# The calendar recorded the availability of each listing in the next 365 days
calendar.groupby('listing_id').size()
# When the available is False, the price is NaN (Not a Number)
print(calendar.isna().sum())
print(calendar[calendar.available == 1].price.isna().sum())
print(calendar[calendar.available == 0].price.isna().sum())
# The possible outliers
# Compute demand over the full 365-day window and inspect its distribution.
eda_demand = EDA_demand()
demand = eda_demand.reviews_rate_vs_unavailability(365)
demand.describe()
# Listings kept as genuine demand vs. the host-blocked outlier subset.
eda_demand.plot(outliers=False)
eda_demand.plot(outliers=True)
# ---
#
# # Machine Learning
#
# The models in this project are [XGBoost](https://xgboost.readthedocs.io/en/latest/) and [LightGBM](https://lightgbm.readthedocs.io/en/latest/).<br>
# ## ML Baseline
#
# The baseline can provide an insight into the performance of different data-preprocessing strategies, such as encoding methods.<br>
# Here, I chose target encoding. First, it had a better performance than ordinal encoding. Second, we already knew the categorical data have potential levels for different price.<br>
# e.g. different `room_type` values have different `price` histograms.
# +
"""
One Hot Encoding
ML1, kfold: 0. RMSE: 56.481995126446456
ML1, kfold: 1. RMSE: 66.83960978199953
ML1, kfold: 2. RMSE: 61.957734603524976
ML1, kfold: 3. RMSE: 62.69133725976135
ML1, kfold: 4. RMSE: 55.715497896362415
ML1. Average RMSE: 60.73723493361895
ML2, kfold: 0. RMSE: 52.568454955844246
ML2, kfold: 1. RMSE: 63.234791588163155
ML2, kfold: 2. RMSE: 58.68112865265134
ML2, kfold: 3. RMSE: 60.09474908722824
ML2, kfold: 4. RMSE: 47.693034296085685
ML2. Average RMSE: 56.45443171599453
Target Encoding
ML1, kfold: 0. RMSE: 56.64589093002433
ML1, kfold: 1. RMSE: 62.44468185143068
ML1, kfold: 2. RMSE: 60.40781093438012
ML1, kfold: 3. RMSE: 63.666798642194124
ML1, kfold: 4. RMSE: 52.226979216000906
ML1. Average RMSE: 59.07843231480604
ML2, kfold: 0. RMSE: 52.92355341945994
ML2, kfold: 1. RMSE: 65.04777557551235
ML2, kfold: 2. RMSE: 58.69704656344895
ML2, kfold: 3. RMSE: 55.149794448218394
ML2, kfold: 4. RMSE: 49.509631025616585
ML2. Average RMSE: 56.26556020645124
"""
def baseline(target_encoding=True):
    """Train default XGBoost baselines over 5 folds and print per-fold RMSE.

    Two feature sets are evaluated:
      * ML1: metadata features flagged ML == 1 (plus 'demand' and the one-hot
        amenity indicator columns).
      * ML2: ML1 plus the metadata features flagged ML == 2.

    Args:
        target_encoding (bool): Passed through to ML_pipeline.getData;
            selects target encoding (True) or the alternative encoding (False).

    Returns:
        None. Per-fold and average RMSEs are printed.
    """
    features = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
                'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
                'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
                'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
                'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
    ml_pipeline = ML_pipeline(data_frame=listings, features=features, target='price')
    amenities = ['amenity_Washer', 'amenity_Air Conditioning', 'amenity_TV',
                 'amenity_Kitchen', 'amenity_Wheelchair Accessible',
                 'amenity_Free Parking on Premises', 'amenity_Doorman',
                 'amenity_Cable TV', 'amenity_Smoke Detector',
                 'amenity_Pets live on this property', 'amenity_Internet',
                 'amenity_Hangers', 'amenity_Family/Kid Friendly',
                 'amenity_First Aid Kit', 'amenity_Indoor Fireplace', 'amenity_Gym',
                 'amenity_Suitable for Events', 'amenity_Breakfast', 'amenity_Cat(s)',
                 'amenity_Lock on Bedroom Door', 'amenity_Smoking Allowed',
                 'amenity_Dog(s)', 'amenity_Shampoo', 'amenity_Hair Dryer',
                 'amenity_Carbon Monoxide Detector', 'amenity_Wireless Internet',
                 'amenity_Hot Tub', 'amenity_Safety Card',
                 'amenity_Buzzer/Wireless Intercom', 'amenity_Pool',
                 'amenity_Elevator in Building', 'amenity_Pets Allowed',
                 'amenity_Fire Extinguisher', 'amenity_Other pet(s)',
                 'amenity_Laptop Friendly Workspace', 'amenity_Essentials',
                 'amenity_Iron', 'amenity_Dryer', 'amenity_24-Hour Check-in',
                 'amenity_Heating']
    # Feature metadata lives in a public Google Sheet. Only the listings sheet
    # is needed here (the original also fetched the calendar/reviews sheets,
    # which were never used in this function).
    sheet_id = "1M_qah-ym6O8vDcSmoKAP-lbZRPHUey83R_DJaW3LXfs"
    base_url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet="
    listings_metadata = pd.read_csv(base_url + "listings")
    # Columns the notebook flags as not useful for this baseline.
    useless_features = ['availability_30', 'availability_60', 'availability_90',
                        'availability_365', 'first_review', 'last_review', 'amenities']

    def _ml1_features():
        """Metadata features flagged ML == 1, minus the 'useless' columns."""
        cols = listings_metadata[listings_metadata.ML == 1].Label.to_list()
        for useless_feature in useless_features:
            cols.remove(useless_feature)
        return cols

    def _evaluate(name, cols):
        """5-fold CV with a default XGBRegressor; print per-fold and mean RMSE."""
        AVG_RMSE = []
        for kfold in range(5):
            X_train, X_test, y_train, y_test = ml_pipeline.getData(
                kfold=kfold, target_encoding=target_encoding)
            model = XGBRegressor(random_state=kfold, n_jobs=-1)
            model.fit(X_train[cols], y_train)
            test_preds = model.predict(X_test[cols])
            RMSE = mean_squared_error(y_test, test_preds, squared=False)
            print(f"{name}, kfold: {kfold}. RMSE: {RMSE}")
            AVG_RMSE.append(RMSE)
        print(f"{name}. Average RMSE: {np.mean(AVG_RMSE)}\n")

    # ML1
    ml1 = _ml1_features()
    ml1.append('demand')
    ml1 += amenities
    _evaluate("ML1", ml1)
    # ML1 + ML2
    ml1 = _ml1_features()
    ml2 = listings_metadata[listings_metadata.ML == 2].Label.to_list()
    ml2.append('demand')
    ml2 = ml1 + ml2 + amenities
    _evaluate("ML2", ml2)
baseline(target_encoding=True)
# -
# ---
#
# ## Model Tuning
#
# The Hyperparameter tuning platform I used is [Optuna](https://optuna.org/).<br>
# I implemented a logger to write the tuning results in the local log file.<br>
# After all tunings are finished, the program will send an email to my mailbox with the best hyperparameters.
# - **To enable this feature**, go to [configure your gmail first](#gmail-configuration).
#
# P.S: If your computer does not support GPU acceleration, uncomment code `For CPU` and comment code `For GPU`.
#
# **If you want to train your model, DO NOT RUN the following code in this notebook.**<br>
# Instead, make another notebook for model tuning. Please follow this [link](https://github.com/ZacksAmber/Kaggle-Seattle-Airbnb/blob/main/ML_Tuning.ipynb).
# ---
#
# ### Define Logger
# +
import logging

# Logger used by the tuning cells: the logger itself accepts everything
# (DEBUG); the attached file handler filters to INFO and above.
logger = logging.getLogger('ML')
logger.setLevel(logging.DEBUG)

# Records at INFO or above are appended to ML.log with timestamped lines.
log_file = 'ML'
fileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler = logging.FileHandler(f'{log_file}.log')
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(fileFormatter)
logger.addHandler(fileHandler)
# -
# ---
#
# ### Define Features for ML
# +
# Define sheet id and base url
# Feature metadata (which columns feed ML1/ML2) lives in a public Google Sheet.
sheet_id = "1M_qah-ym6O8vDcSmoKAP-lbZRPHUey83R_DJaW3LXfs"
base_url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet="
# Load metadata for three datasets
# NOTE(review): only listings_metadata is used below in this cell; the
# calendar/reviews sheets are fetched but not referenced here.
listings_metadata = pd.read_csv(base_url+"listings")
calendar_metadata = pd.read_csv(base_url+"calendar")
reviews_metadata = pd.read_csv(base_url+"reviews")
# One-hot amenity indicator columns created earlier in the notebook.
amenities = ['amenity_Washer', 'amenity_Air Conditioning', 'amenity_TV',
             'amenity_Kitchen', 'amenity_Wheelchair Accessible',
             'amenity_Free Parking on Premises', 'amenity_Doorman',
             'amenity_Cable TV', 'amenity_Smoke Detector',
             'amenity_Pets live on this property', 'amenity_Internet',
             'amenity_Hangers', 'amenity_Family/Kid Friendly',
             'amenity_First Aid Kit', 'amenity_Indoor Fireplace', 'amenity_Gym',
             'amenity_Suitable for Events', 'amenity_Breakfast', 'amenity_Cat(s)',
             'amenity_Lock on Bedroom Door', 'amenity_Smoking Allowed',
             'amenity_Dog(s)', 'amenity_Shampoo', 'amenity_Hair Dryer',
             'amenity_Carbon Monoxide Detector', 'amenity_Wireless Internet',
             'amenity_Hot Tub', 'amenity_Safety Card',
             'amenity_Buzzer/Wireless Intercom', 'amenity_Pool',
             'amenity_Elevator in Building', 'amenity_Pets Allowed',
             'amenity_Fire Extinguisher', 'amenity_Other pet(s)',
             'amenity_Laptop Friendly Workspace', 'amenity_Essentials',
             'amenity_Iron', 'amenity_Dryer', 'amenity_24-Hour Check-in',
             'amenity_Heating']
# ML1 + ML2
# ml1: metadata features flagged ML == 1, minus columns deemed not useful.
ml1 = listings_metadata[listings_metadata.ML == 1].Label.to_list()
useless_features = ['availability_30', 'availability_60', 'availability_90', 'availability_365', 'first_review', 'last_review', 'amenities']
for useless_feature in useless_features:
    ml1.remove(useless_feature)
# ml2: ml1 plus the ML == 2 features, the engineered 'demand', and amenities.
ml2 = listings_metadata[listings_metadata.ML == 2].Label.to_list()
ml2.append('demand')
ml2 = ml1 + ml2 + amenities
# -
# ---
#
# ### Tuning Configurations
# Silence Optuna's per-trial log lines; only warnings and errors are shown.
optuna.logging.set_verbosity(optuna.logging.WARNING)
# Number of trials per study (kept tiny here; raise it for a real search).
n_trials = 2
# ---
#
# ### Model Tuning: XGBoost
def objective(trial):
    """Optuna objective for XGBoost: mean 5-fold CV RMSE with target encoding.

    Args:
        trial: optuna.trial.Trial used to sample hyperparameters.

    Returns:
        float: Mean validation RMSE across the 5 folds (lower is better).
    """
    features = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
                'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
                'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
                'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
                'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
    ml_pipeline = ML_pipeline(data_frame=listings, features=features, target='price')
    RMSE_AVG = []
    for kfold in range(5):
        X_train, X_valid, y_train, y_valid = ml_pipeline.getData(kfold=kfold, target_encoding=True)
        X_train, X_valid = X_train[ml2], X_valid[ml2]
        # Hyperparameters for XGBoost.
        # Fix: 'lambda'/'alpha' are XGBoost aliases of 'reg_lambda'/'reg_alpha',
        # so the original sampled the same two parameters twice with one value
        # silently overriding the other; only the canonical names are kept.
        # suggest_loguniform is deprecated in favour of suggest_float(log=True).
        xgb_params = {
            'reg_lambda': trial.suggest_float("reg_lambda", 1e-8, 100.0, log=True),
            'reg_alpha': trial.suggest_float("reg_alpha", 1e-8, 100.0, log=True),
            'colsample_bytree': trial.suggest_float("colsample_bytree", 0.1, 1.0),
            'subsample': trial.suggest_float("subsample", 0.1, 1.0),
            'learning_rate': trial.suggest_float("learning_rate", 1e-2, 0.3, log=True),
            'n_estimators': trial.suggest_int('n_estimators', 100, 10000),
            'max_depth': trial.suggest_int("max_depth", 1, 7),
            'random_state': trial.suggest_categorical('random_state', [0, 42, 2021]),
            'min_child_weight': trial.suggest_int('min_child_weight', 1, 300)
        }
        # For GPU
        model = XGBRegressor(
            tree_method='gpu_hist',
            gpu_id=0,
            predictor='gpu_predictor',
            **xgb_params)
        '''
        # For CPU
        model = XGBRegressor(**xgb_params)
        '''
        # Early stopping against the fold's validation split.
        model.fit(
            X_train, y_train,
            early_stopping_rounds=300,
            eval_set=[(X_valid, y_valid)],
            verbose=5000
        )
        valid_preds = model.predict(X_valid)
        RMSE_AVG.append(mean_squared_error(y_valid, valid_preds, squared=False))
    return np.mean(RMSE_AVG)
# The actual Optuna run is deliberately wrapped in a bare string literal so
# that executing this cell does nothing; copy the contents into the dedicated
# model-tuning notebook to run the study.
'''
%%time
study = optuna.create_study(direction='minimize', study_name=f'XGBoost {n_trials} trails')
study.optimize(objective, n_trials=n_trials, show_progress_bar=False) # set n_triasl
logger.info(f"Study name: {study.study_name}")
logger.info(f"Best value: {study.best_value}")
logger.info(f"Best paras: {study.best_params}")
logger.info("Mission Complete! --------------")
'''
# ---
#
# ### Model Tuning: LightGBM
def objective(trial):
    """Optuna objective for LightGBM: mean 5-fold CV RMSE with target encoding.

    Args:
        trial: optuna.trial.Trial used to sample hyperparameters.

    Returns:
        float: Mean validation RMSE across the 5 folds (lower is better).
    """
    features = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
                'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
                'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
                'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
                'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
    ml_pipeline = ML_pipeline(data_frame=listings, features=features, target='price')
    RMSE_AVG = []
    for kfold in range(5):
        X_train, X_valid, y_train, y_valid = ml_pipeline.getData(kfold=kfold, target_encoding=True)
        X_train, X_valid = X_train[ml2], X_valid[ml2]
        # Hyperparameters for LightGBM.
        # Fix: 'min_child_samples' is a LightGBM alias of 'min_data_in_leaf';
        # the original tuned both, sampling the same underlying parameter
        # twice with conflicting ranges. Only 'min_data_in_leaf' is kept.
        # suggest_loguniform/suggest_uniform are deprecated in favour of
        # suggest_float(..., log=True) / suggest_float(...).
        lgb_params = {
            'random_state': trial.suggest_categorical('random_state', [0, 42, 2021]),
            'num_iterations': trial.suggest_int('num_iterations', 100, 10000),
            'learning_rate': trial.suggest_float("learning_rate", 1e-2, 0.3, log=True),
            'max_depth': trial.suggest_int('max_depth', 1, 7),
            'num_leaves': trial.suggest_int('num_leaves', 2, 100),
            'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 100, 2000),
            'lambda_l1': trial.suggest_float('lambda_l1', 1e-8, 10.0, log=True),
            'lambda_l2': trial.suggest_float('lambda_l2', 1e-8, 10.0, log=True),
            'feature_fraction': trial.suggest_float('feature_fraction', 0.01, 0.99),
            'bagging_fraction': trial.suggest_float('bagging_fraction', 0.01, 0.99),
            'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
        }
        # For GPU
        model = LGBMRegressor(
            device='gpu',
            gpu_platform_id=0,
            gpu_device_id=0,
            n_jobs=-1,
            metric='rmse',
            **lgb_params
        )
        '''
        # For CPU
        model = LGBMRegressor(**lgb_params)
        '''
        # Early stopping against the fold's validation split.
        model.fit(
            X_train, y_train,
            early_stopping_rounds=300,
            eval_set=[(X_valid, y_valid)],
            verbose=5000
        )
        valid_preds = model.predict(X_valid)
        RMSE_AVG.append(mean_squared_error(y_valid, valid_preds, squared=False))
    return np.mean(RMSE_AVG)
# As above, the LightGBM study is wrapped in a bare string literal so this
# cell is inert here; run it from the dedicated model-tuning notebook.
'''
%%time
study = optuna.create_study(direction='minimize', study_name=f'LGBoost {n_trials} trails')
study.optimize(objective, n_trials=n_trials, show_progress_bar=False) # set n_triasl
logger.info(f"Study name: {study.study_name}")
logger.info(f"Best value: {study.best_value}")
logger.info(f"Best paras: {study.best_params}")
logger.info("Mission Complete! --------------")
'''
# + [markdown] tags=[]
# ---
#
# ### Gmail Configuration<a id='gmail-configuration'></a>
#
# > [How to Send Emails with Gmail using Python](https://stackabuse.com/how-to-send-emails-with-gmail-using-python/)
# + tags=["hide-input"]
def gmail(YOUR_GMAIL, YOUR_APP_PASSWORD, SEND_TO):
    """Send the ML tuning result (with ML.log attached) to one or more emails.

    Args:
        YOUR_GMAIL (str): Your gmail address.
        YOUR_APP_PASSWORD (str): Your App Password for gmail.
        SEND_TO (str or list): The target emails.
    """
    import smtplib
    from email.message import EmailMessage

    gmail_user = YOUR_GMAIL
    # Fix: the original line read `gmail_password = <PASSWORD>`, a leftover
    # redaction placeholder that is a syntax error and left the parameter
    # unused; the App Password comes in as the function argument.
    gmail_password = YOUR_APP_PASSWORD  # Google App Password

    msg = EmailMessage()
    msg["From"] = YOUR_GMAIL
    msg["Subject"] = "Seattle Airbnb ML Tuning"
    msg["To"] = SEND_TO
    msg.set_content(f"""\
{n_trials} Trials are done.
Mission Complete!""")

    # Attach the tuning log produced by the logger defined above.
    with open('ML.log', 'rb') as f:
        content = f.read()
    msg.add_attachment(content, maintype='application', subtype='log', filename='ML.log')

    # Implicit-TLS SMTP; close the connection even if login/send fails.
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        server.login(gmail_user, gmail_password)
        server.send_message(msg)
    finally:
        server.close()
#gmail(YOUR_GMAIL, YOUR_APP_PASSWORD, SEND_TO)
# + [markdown] tags=[]
# ---
#
# ## Model Blending
#
# > Reference: [Ensemble Learning: Stacking, Blending & Voting](https://towardsdatascience.com/ensemble-learning-stacking-blending-voting-b37737c4f483)
#
# After hyperparameter tuning, we have better hyperparameters for XGBoost and LightGBM. Then I performed a model blending for better ML performance.
#
# My 200 Trials ($200 \times 5$ in total) hyperparameters:
#
# - XGBoost
#
# {'lambda': 0.029949323233957558, 'alpha': 0.47821306780284645, 'reg_lambda': 0.03007272817610808, 'reg_alpha': 5.7650942972599255e-05, 'colsample_bytree': 0.32733907049678806, 'subsample': 0.9397958925107069, 'learning_rate': 0.016087339011505105, 'n_estimators': 4117, 'max_depth': 6, 'random_state': 42, 'min_child_weight': 5}
#
# - LightGBM
#
# {'random_state': 42, 'num_iterations': 5549, 'learning_rate': 0.07313607774375752, 'max_depth': 5, 'num_leaves': 75, 'min_data_in_leaf': 100, 'lambda_l1': 1.3379869858112054e-06, 'lambda_l2': 0.00025091437242776726, 'feature_fraction': 0.5910800704597817, 'bagging_fraction': 0.9553891294481797, 'bagging_freq': 6, 'min_child_samples': 23}
# -
class Model_Blending:
    """Blend out-of-fold predictions from tuned XGBoost and LightGBM models.

    For each model, `blending` trains one regressor per fold, collects the
    out-of-fold predictions, and writes them to output/<model>_valid_pred.csv.
    `predict` then averages the saved prediction columns and scores the
    average against the true price.
    """

    def __init__(self, data_frame, features_etl, features_ml):
        """Build the shared ML_pipeline.

        Args:
            data_frame: Listings DataFrame (copied; the caller's frame is untouched).
            features_etl: Columns handed to the ML_pipeline ETL/encoding step.
            features_ml: Final feature columns used to fit the models.
        """
        data_frame = data_frame.copy()
        self.ml_pipeline = ML_pipeline(data_frame=data_frame, features=features_etl, target='price')
        self.features_ml = features_ml

    def _xgboost_reg(self, xgb_params):
        """Return an XGBRegressor configured with *xgb_params* (CPU variant)."""
        """
        # For GPU
        model = XGBRegressor(
            tree_method='gpu_hist',
            gpu_id=0,
            predictor='gpu_predictor',
            n_jobs=-1,
            **xgb_params
        )
        """
        # For CPU
        model = XGBRegressor(**xgb_params)
        return model

    def _lightgbm_reg(self, lgb_params):
        """Return an LGBMRegressor configured with *lgb_params* (CPU variant)."""
        """
        # For GPU
        model = LGBMRegressor(
            device='gpu',
            gpu_platform_id=0,
            gpu_device_id=0,
            n_jobs=-1,
            metric='rmse',
            **lgb_params
        )
        """
        # For CPU
        model = LGBMRegressor(**lgb_params)
        return model

    def blending(self, model: str, params: dict):
        '''Train *model* per fold and save its out-of-fold predictions.

        Args:
            model: One of 'xgboost' or 'lightgbm'.
            params: Hyperparameters for XGBoost or LightGBM.

        Returns:
            None. Predictions go to output/<model>_valid_pred.csv and
            per-fold RMSEs are printed.
        '''
        assert model in ['xgboost', 'lightgbm'], "ValueError: model must be one of ['xgboost', 'lightgbm']!"
        final_valid_predictions = {}
        scores = []
        for fold in range(5):
            X_train, X_valid, y_train, y_valid = self.ml_pipeline.getData(kfold=fold, target_encoding=True)
            X_train, X_valid = X_train[self.features_ml], X_valid[self.features_ml]
            # Remember validation row ids so predictions can be re-joined later.
            X_valid_ids = list(X_valid.index)
            print("Training ...")
            # Pick the regressor implementation (the assert above guarantees
            # one of the two branches is taken).
            if model == 'xgboost':
                reg = self._xgboost_reg(params)
            else:
                reg = self._lightgbm_reg(params)
            # Train with early stopping against the fold's validation split.
            reg.fit(
                X_train, y_train,
                early_stopping_rounds=300,
                eval_set=[(X_valid, y_valid)],
                verbose=False
            )
            valid_preds = reg.predict(X_valid)
            # Across the 5 folds this covers every validation id once.
            final_valid_predictions.update(dict(zip(X_valid_ids, valid_preds)))
            rmse = mean_squared_error(y_valid, valid_preds, squared=False)
            scores.append(rmse)
            print(f'Fold: {fold}, RMSE: {rmse}')
        # Persist the out-of-fold predictions for the later blending step.
        if not os.path.exists('output'):
            os.mkdir('output')
        final_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient="index").reset_index()
        final_valid_predictions.columns = ["id", f"{model}_pred"]
        final_valid_predictions.to_csv(f"output/{model}_valid_pred.csv", index=False)
        print('-----------------------------------------------------------------')
        print(f'Average RMSE: {np.mean(scores)}, STD of RMSE: {np.std(scores)}')

    def predict(self, models: list):
        """Average the saved out-of-fold predictions and score them.

        Args:
            models: Model names whose output/<name>_valid_pred.csv files exist.

        Returns:
            float: RMSE of the averaged predictions against the listings price.
        """
        # Fix: iterate over a slice instead of mutating the caller's list
        # (the original removed elements from *models* in place).
        df_valids = pd.read_csv(f'output/{models[0]}_valid_pred.csv').set_index('id')
        for model in models[1:]:
            df = pd.read_csv(f'output/{model}_valid_pred.csv')
            df_valids = df_valids.join(df.set_index('id'), how='inner')
        # Average the prediction columns (price is joined only afterwards so
        # it never enters the mean).
        df_valids['mean_valids'] = df_valids.mean(axis=1)
        # Join the true price by listing id.
        df_valids['price'] = listings.set_index('id')['price']
        # Score the averaged predictions against the target.
        return mean_squared_error(df_valids.price, df_valids['mean_valids'], squared=False)
# +
# Feature list for the ML_pipeline ETL step ('kfold' carries the CV fold ids).
features_etl = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
                'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
                'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
                'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
                'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
# Final model features: ml1 + ml2 + amenity indicators (built earlier).
features_ml = ml2
# Best hyperparameters found by the 200-trial Optuna studies described above.
xgb_params = {'lambda': 0.029949323233957558, 'alpha': 0.47821306780284645, 'reg_lambda': 0.03007272817610808,
              'reg_alpha': 5.7650942972599255e-05, 'colsample_bytree': 0.32733907049678806, 'subsample': 0.9397958925107069,
              'learning_rate': 0.016087339011505105, 'n_estimators': 4117, 'max_depth': 6, 'random_state': 42, 'min_child_weight': 5}
lgb_params = {'random_state': 42, 'num_iterations': 5549, 'learning_rate': 0.07313607774375752, 'max_depth': 5, 'num_leaves': 75,
              'min_data_in_leaf': 100, 'lambda_l1': 1.3379869858112054e-06, 'lambda_l2': 0.00025091437242776726,
              'feature_fraction': 0.5910800704597817, 'bagging_fraction': 0.9553891294481797, 'bagging_freq': 6, 'min_child_samples': 23}
model_blending = Model_Blending(listings, features_etl, features_ml)
# -
# Train per-fold models and write out-of-fold predictions to output/.
model_blending.blending(model='xgboost', params=xgb_params)
model_blending.blending(model='lightgbm', params=lgb_params)
# Average the saved predictions and report the blended RMSE.
model_blending.predict(models=['xgboost', 'lightgbm'])
# ---
#
# ## Model Stacking
#
# > Reference: [Ensemble Learning: Stacking, Blending & Voting](https://towardsdatascience.com/ensemble-learning-stacking-blending-voting-b37737c4f483)<br>
# > Reference: [How To Use “Model Stacking” To Improve Machine Learning Predictions](https://medium.com/geekculture/how-to-use-model-stacking-to-improve-machine-learning-predictions-d113278612d4)
#
#
# Model Stacking is a way to improve model predictions by combining the outputs of multiple models and running them through another machine learning model called a meta-learner.
#
# After hyperparameter tuning, we have better hyperparameters for XGBoost and LightGBM. Then I performed model stacking for better ML performance.
#
# My 200 Trials ($200 \times 5$ in total) hyperparameters:
#
# - XGBoost
#
# {'lambda': 0.029949323233957558, 'alpha': 0.47821306780284645, 'reg_lambda': 0.03007272817610808, 'reg_alpha': 5.7650942972599255e-05, 'colsample_bytree': 0.32733907049678806, 'subsample': 0.9397958925107069, 'learning_rate': 0.016087339011505105, 'n_estimators': 4117, 'max_depth': 6, 'random_state': 42, 'min_child_weight': 5}
#
# - LightGBM
#
# {'random_state': 42, 'num_iterations': 5549, 'learning_rate': 0.07313607774375752, 'max_depth': 5, 'num_leaves': 75, 'min_data_in_leaf': 100, 'lambda_l1': 1.3379869858112054e-06, 'lambda_l2': 0.00025091437242776726, 'feature_fraction': 0.5910800704597817, 'bagging_fraction': 0.9553891294481797, 'bagging_freq': 6, 'min_child_samples': 23}
class Model_Stacking:
    """Stack out-of-fold predictions from tuned XGBoost and LightGBM models.

    For each model, `stacking` trains one regressor per fold and saves the
    out-of-fold predictions to output/<model>_valid_pred.csv. `predict` then
    feeds every model's prediction column to a linear meta-learner and
    reports its cross-validated RMSE.
    """

    def __init__(self, data_frame, features_etl, features_ml):
        """Build the shared ML_pipeline.

        Args:
            data_frame: Listings DataFrame (copied; the caller's frame is untouched).
            features_etl: Columns handed to the ML_pipeline ETL/encoding step.
            features_ml: Final feature columns used to fit the models.
        """
        data_frame = data_frame.copy()
        self.ml_pipeline = ML_pipeline(data_frame=data_frame, features=features_etl, target='price')
        self.features_ml = features_ml

    def _xgboost_reg(self, xgb_params):
        """Return an XGBRegressor configured with *xgb_params* (CPU variant)."""
        """
        # For GPU
        model = XGBRegressor(
            tree_method='gpu_hist',
            gpu_id=0,
            predictor='gpu_predictor',
            n_jobs=-1,
            **xgb_params
        )
        """
        # For CPU
        model = XGBRegressor(**xgb_params)
        return model

    def _lightgbm_reg(self, lgb_params):
        """Return an LGBMRegressor configured with *lgb_params* (CPU variant)."""
        """
        # For GPU
        model = LGBMRegressor(
            device='gpu',
            gpu_platform_id=0,
            gpu_device_id=0,
            n_jobs=-1,
            metric='rmse',
            **lgb_params
        )
        """
        # For CPU
        model = LGBMRegressor(**lgb_params)
        return model

    def stacking(self, model: str, params: dict):
        '''Train *model* per fold and save its out-of-fold predictions.

        Args:
            model: One of 'xgboost' or 'lightgbm'.
            params: Hyperparameters for XGBoost or LightGBM.

        Returns:
            None. Predictions go to output/<model>_valid_pred.csv and
            per-fold RMSEs are printed.
        '''
        assert model in ['xgboost', 'lightgbm'], "ValueError: model must be one of ['xgboost', 'lightgbm']!"
        final_valid_predictions = {}
        scores = []
        for fold in range(5):
            X_train, X_valid, y_train, y_valid = self.ml_pipeline.getData(kfold=fold, target_encoding=True)
            X_train, X_valid = X_train[self.features_ml], X_valid[self.features_ml]
            # Remember validation row ids so predictions can be re-joined later.
            X_valid_ids = list(X_valid.index)
            print("Training ...")
            # Pick the regressor implementation (the assert above guarantees
            # one of the two branches is taken).
            if model == 'xgboost':
                reg = self._xgboost_reg(params)
            else:
                reg = self._lightgbm_reg(params)
            # Train with early stopping against the fold's validation split.
            reg.fit(
                X_train, y_train,
                early_stopping_rounds=300,
                eval_set=[(X_valid, y_valid)],
                verbose=False
            )
            valid_preds = reg.predict(X_valid)
            # Across the 5 folds this covers every validation id once.
            final_valid_predictions.update(dict(zip(X_valid_ids, valid_preds)))
            rmse = mean_squared_error(y_valid, valid_preds, squared=False)
            scores.append(rmse)
            print(f'Fold: {fold}, RMSE: {rmse}')
        # Persist the out-of-fold predictions for the later stacking step.
        if not os.path.exists('output'):
            os.mkdir('output')
        final_valid_predictions = pd.DataFrame.from_dict(final_valid_predictions, orient="index").reset_index()
        final_valid_predictions.columns = ["id", f"{model}_pred"]
        final_valid_predictions.to_csv(f"output/{model}_valid_pred.csv", index=False)
        print('-----------------------------------------------------------------')
        print(f'Average RMSE: {np.mean(scores)}, STD of RMSE: {np.std(scores)}')

    def predict(self, models: list):
        """Fit a linear meta-learner on the saved predictions and score it.

        Args:
            models: Model names whose output/<name>_valid_pred.csv files exist.

        Returns:
            float: Mean 5-fold CV RMSE of the linear meta-learner.
        """
        # Fix: do not mutate the caller's list, and select the prediction
        # columns explicitly by name. The original removed models[0] in place
        # and then sliced `iloc[:, :len(models)]`, so with two models only the
        # FIRST model's predictions ever reached the meta-learner.
        df_valids = pd.read_csv(f'output/{models[0]}_valid_pred.csv').set_index('id')
        for model in models[1:]:
            df = pd.read_csv(f'output/{model}_valid_pred.csv')
            df_valids = df_valids.join(df.set_index('id'), how='inner')
        # Join the true price by listing id.
        df_valids['price'] = listings.set_index('id')['price']
        # Simple linear meta-learner over all models' out-of-fold predictions.
        linear_reg = LinearRegression()
        X = df_valids[[f"{m}_pred" for m in models]]
        y = df_valids.price
        scores = cross_val_score(linear_reg, X, y, cv=5, scoring='neg_root_mean_squared_error')
        # sklearn returns negated RMSE; flip the sign before averaging.
        return np.mean(-scores)
# +
# Feature list for the ML_pipeline ETL step ('kfold' carries the CV fold ids).
features_etl = ['host_acceptance_rate', 'neighbourhood_group_cleansed', 'property_type', 'room_type',
                'bathrooms', 'bedrooms', 'beds', 'bed_type', 'number_of_reviews', 'review_scores_rating',
                'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
                'review_scores_location', 'review_scores_value', 'reviews_per_month', 'host_response_rate', 'host_is_superhost',
                'accommodates', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights',
                'maximum_nights', 'instant_bookable', 'cancellation_policy', 'require_guest_profile_picture',
                'require_guest_phone_verification', 'amenities', 'demand', 'kfold']
# Final model features: ml1 + ml2 + amenity indicators (built earlier).
features_ml = ml2
# Best hyperparameters found by the 200-trial Optuna studies described above.
xgb_params = {'lambda': 0.029949323233957558, 'alpha': 0.47821306780284645, 'reg_lambda': 0.03007272817610808,
              'reg_alpha': 5.7650942972599255e-05, 'colsample_bytree': 0.32733907049678806, 'subsample': 0.9397958925107069,
              'learning_rate': 0.016087339011505105, 'n_estimators': 4117, 'max_depth': 6, 'random_state': 42, 'min_child_weight': 5}
lgb_params = {'random_state': 42, 'num_iterations': 5549, 'learning_rate': 0.07313607774375752, 'max_depth': 5, 'num_leaves': 75,
              'min_data_in_leaf': 100, 'lambda_l1': 1.3379869858112054e-06, 'lambda_l2': 0.00025091437242776726,
              'feature_fraction': 0.5910800704597817, 'bagging_fraction': 0.9553891294481797, 'bagging_freq': 6, 'min_child_samples': 23}
model_stacking = Model_Stacking(listings, features_etl, features_ml)
# -
# Train per-fold models and write out-of-fold predictions to output/.
model_stacking.stacking('xgboost', xgb_params)
model_stacking.stacking('lightgbm', lgb_params)
# Fit the linear meta-learner on the saved predictions and report its RMSE.
model_stacking.predict(models=['xgboost', 'lightgbm'])
| Seattle-2021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Jwsyra88s_4J" colab_type="code" colab={}
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
from datetime import datetime
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import uuid
import os
class GoogleDriveDatabase:
    """Thin wrapper treating a Google Drive folder as a flat file database.

    The root Drive folder (DATABASE_GID) is expected to contain one
    sub-folder per category (e.g. per sign-language character); files are
    uploaded into and downloaded from those sub-folders.
    """

    def __init__(self, drive, DATABASE_GID: str):
        """Index the sub-folders of the Drive folder *DATABASE_GID*.

        Args:
            drive: An authenticated pydrive GoogleDrive instance.
            DATABASE_GID: Drive id of the root database folder.
        """
        assert isinstance(DATABASE_GID, str)
        self.folders = {}  # sub-folder title -> Drive folder id
        self.drive = drive
        folder_list = drive.ListFile({'q': "'{}' in parents and trashed=false".format(DATABASE_GID)}).GetList()
        for file in folder_list:
            if file['mimeType'] == "application/vnd.google-apps.folder":
                self.folders[file['title']] = file['id']
        print("{} folders loaded".format(len(self.folders.keys())))

    def upload(self, filename, character, fileType):
        """Upload *filename* into the *character* sub-folder, then delete it locally.

        Args:
            filename: Path of the local file to upload (removed on success).
            character: Name of the target sub-folder (must exist).
            fileType: One of 'jpeg', 'json', 'zip' (selects the MIME type).
        """
        assert isinstance(filename, str)
        assert isinstance(character, str)
        assert isinstance(fileType, str)
        FILETYPE_MIME_MAP = {
            "jpeg": "image/jpeg",
            "json": "application/json",
            "zip": "application/zip"
        }
        # Fix: the original error message formatted the rejected value where
        # the list of valid options was clearly intended.
        assert fileType in FILETYPE_MIME_MAP.keys(), \
            "fileType must be one of the following: {}".format(tuple(FILETYPE_MIME_MAP.keys()))
        assert os.path.isfile(filename), "{} does not exist as a file".format(filename)
        assert self.checkFolder(character), "{} is not a valid character. Pick from list: \n{}".format(character, tuple(self.folders.keys()))
        file = self.drive.CreateFile({
            "title": os.path.split(filename)[1],
            "mimeType": FILETYPE_MIME_MAP[fileType],
            "parents": [{"id": self.folders[character]}]
        })
        file.SetContentFile(filename)
        file.Upload()
        # The local copy is intentionally removed once it lives on Drive.
        os.remove(filename)
        print("uploaded and deleted {}".format(filename))

    @staticmethod
    def FILE_EXTENSIONS() -> list:
        """File extensions the database knows how to name/download."""
        return [".jpg", ".jpeg", ".png", ".zip", ".json"]

    def getFiles(self, character) -> list:
        """Return the non-folder Drive files inside the *character* sub-folder."""
        return [
            x for x in self.drive.ListFile({'q': "'{}' in parents and trashed=false".format(self.folders[character])}).GetList()
            if x['mimeType'] != "application/vnd.google-apps.folder"
        ]

    def download_file_name(self, file_name):
        """Return *file_name*, appending '.jpg' when it lacks a known extension.

        Fix: the original used substring containment (`ext in file_name`),
        which wrongly matched names that merely contain an extension
        mid-string; endswith checks the actual suffix.
        """
        if not file_name.endswith(tuple(GoogleDriveDatabase.FILE_EXTENSIONS())):
            file_name += ".jpg"
        return file_name

    def download_file(self, file, folder: str, **kwargs) -> str:
        """Download one Drive *file* into *folder*; return its local path.

        Keyword Args:
            check_local (bool): When True, skip the download if a file with
                the same resolved name already exists in *folder*.
        """
        check_already_exist = kwargs.get("check_local", False)
        file_name = os.path.join(folder, file['title'])
        file_name = self.download_file_name(file_name)
        if check_already_exist:
            local_files = [os.path.join(folder, name) for name in os.listdir(folder)]
            if file_name in local_files:
                return file_name
        file.GetContentFile(file_name)
        print("downloaded", file_name)
        return file_name

    def download(self, character: str, folder: str):
        """Download every file of *character* into *folder*; return local paths."""
        file_list = self.getFiles(character)
        os.makedirs(folder, exist_ok=True)
        returnlist = []
        for file in file_list:
            returnlist.append(self.download_file(file, folder, check_local=True))
        return tuple(returnlist)

    def checkFolder(self, name):
        """Return True when *name* is a known sub-folder (category)."""
        assert isinstance(name, str)
        return name in self.folders.keys()
# Google authentication: sign in with your UCSB account when prompted.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
# Authenticated Drive client shared by the GoogleDriveDatabase helpers below.
drive = GoogleDrive(gauth)
# + id="W17gb1VDIqbK" colab_type="code" outputId="a0146f64-cb73-4793-c8ba-8a70f64497b3" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#Openpose block, Run this before making any calls to Openpose
import os
import json
from os.path import exists, join, basename, splitext
def zipOpenpose():
    """Archive the compiled openpose build for caching on Drive.

    NOTE(review): the body is a jupytext shell magic, so this function only
    works inside the notebook. Also, `os.path.isfile("openpose")` tests for a
    *file* while openpose is cloned as a directory — presumably os.path.isdir
    (or a check for openpose.zip) was intended; confirm.
    """
    if not os.path.isfile("openpose"):
        # !zip -r openpose.zip openpose
def unzipOpenpose():
    """Extract a cached openpose.zip and report success (notebook-only magics)."""
    # !unzip -o openpose.zip
    # !clear
    print("Openpose installed!")
def getOpenpose():
    """Install CMU Openpose (notebook-only: the heavy work is in shell magics).

    Looks up the Drive 'Storage' folder that may cache a pre-built archive;
    when the local openpose directory is absent, installs CMake and system
    dependencies, clones the repo, and builds it from source.
    """
    git_repo_url = 'https://github.com/CMU-Perceptual-Computing-Lab/openpose.git'
    project_name = splitext(basename(git_repo_url))[0]
    # Drive folder caching a compiled openpose.zip (see GoogleDriveDatabase).
    OPENPOSE_DATABASE = "1LE7a-eAJ5zwtmht4_ItT5bIzmnikNK5Q"
    openpose_database = GoogleDriveDatabase(drive,OPENPOSE_DATABASE)
    storage_reference = openpose_database.getFiles("Storage")
    if not exists(project_name):
        # see: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949
        # install new CMake because of CUDA10
        # !wget -q https://cmake.org/files/v3.13/cmake-3.13.0-Linux-x86_64.tar.gz
        # !tar xfz cmake-3.13.0-Linux-x86_64.tar.gz --strip-components=1 -C /usr/local
        # clone openpose
        # !git clone -q --depth 1 $git_repo_url
        # !sed -i 's/execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/execute_process(COMMAND git checkout f019d0dfe86f49d1140961f8c7dec22130c83154 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/g' openpose/CMakeLists.txt
        # install system dependencies
        # !apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev
        # build openpose
        # !cd openpose && rm -rf build || true && mkdir build && cd build && cmake .. && make -j`nproc`
    # Disabled alternative flow: reuse/refresh the cached build on Drive.
    # if len(storage_reference) == 1:
    #     print("Will download already compiled openpose")
    #     downloaded = openpose_database.download_file(storage_reference[0], "")
    #     unzipOpenpose()
    #     return
    # elif len(storage_reference) > 1:
    #     print("More than 1 storage reference found! Will delete them and reupload")
    #     [x.Delete() for x in storage_reference]
    #     # !cd openpose && rm -rf build || true && mkdir build && cd build && cmake .. && make -j`nproc`
    #     zipOpenpose()
    #     openpose_database.upload("openpose.zip", "Storage", "zip")
    # else:
    #     if len(storage_reference) == 0:
    #         print("Will upload openpose")
    #         zipOpenpose()
    #         openpose_database.upload("openpose.zip", "Storage", "zip")
getOpenpose()
# + id="7h2ONljK_1CX" colab_type="code" outputId="85b4edca-ef58-441c-a9a1-035b99228d51" colab={"base_uri": "https://localhost:8080/", "height": 563}
import os
import json
import subprocess
def imageToJson(image_directory, json_directory):
    """Run Openpose over *image_directory*, writing keypoint JSON files to
    *json_directory*.

    The actual inference is the jupytext shell-magic line below, which only
    executes inside the notebook; as plain Python this function just ensures
    both directories exist.
    """
    os.makedirs(image_directory, exist_ok=True)
    os.makedirs(json_directory, exist_ok=True)
    # !cd openpose && ./build/examples/openpose/openpose.bin --image_dir {image_directory} --hand --write_json {json_directory} --display 0 --render_pose 0
#Run this on any json file generated by Openpose to get a list of the coordinates of the right hand
def getHandCoords(directory) -> list:
    """Extract right-hand keypoint coordinates from an Openpose JSON file.

    Args:
        directory: Path to a single Openpose output JSON file.

    Returns:
        list: [x0, y0, x1, y1, ..., x20, y20] for the 21 right-hand keypoints
        of the first detected person (per-point confidence values are
        dropped), or an empty list when no person/hand data is present.
    """
    # Fix: the original opened the file without ever closing it (handle leak);
    # a context manager guarantees the close.
    with open(directory) as input_file:
        json_array = json.load(input_file)
    people = json_array['people']
    if len(people) != 0 and len(people[0]) != 0:
        # Keypoints arrive as a flat [x, y, confidence] triple per point.
        triples = people[0]['hand_right_keypoints_2d']
        xAx = triples[::3]
        yAx = triples[1::3]
        return [coord for i in range(21) for coord in (xAx[i], yAx[i])]
    return []
#Run this over the folder where the results from imageToJson were stored
#Ex. if you ran imageToJson over the folder A, then run folderRun over A_json
def folderRun(directory):
    """Normalise OpenPose output names in *directory* and rewrite each JSON
    file as the flat right-hand coordinate list from getHandCoords.

    Returns the list of file paths that were processed.
    """
    removed_count = 0
    processed = []
    for file in os.listdir(directory):
        if file.split(".")[-1] == "json":
            # OpenPose names outputs "<image>_keypoints.json"; strip the suffix
            # so the JSON name matches the source image name.
            new_name = file.replace("_keypoints", "")
            if file != new_name:
                os.rename(os.path.join(directory, file), os.path.join(directory, new_name))
                file = new_name
            full_file_directory = os.path.join(directory, file)
            hand = getHandCoords(full_file_directory)
            # While we normally don't want to include files with bad or no data, it may be best to remove this type of data on the stage of download
            # if (not hand) or all(not x for x in hand):
            #     removed_count += 1
            #     os.remove(full_file_directory)
            #     continue
            # jsonStore = {"image":hand}
            # Overwrite the OpenPose JSON with just the coordinate list; the
            # context manager closes the handle (the original leaked it).
            with open(full_file_directory, "w") as fileStore:
                json.dump(hand, fileStore)
            processed.append(full_file_directory)
    # print("{} files did not contain any hands".format(removed_count))
    print("{} files were processed".format(len(processed)))
    return processed
def Openpose(input_directory, output_directory):
    """Full pipeline for one folder: wipe stale output, extract keypoints
    with OpenPose, then rewrite the JSONs to flat hand-coordinate lists."""
    # Notebook shell escape: clear any previous results first.
    # !rm -rf {output_directory}
    imageToJson(input_directory, output_directory)
    return folderRun(output_directory)
# Google Drive folder ids: one folder holds the raw images, the other the
# generated keypoint JSON files.
DOWNLOAD_DATABASE = "1RqiJwO6i1KJx54tRC3QJcHWjiooCLJNC"
UPLOAD_DATABASE = "1kLMivfz2q7DFwa57ddy-rCJmsYFTWhMW"
# GoogleDriveDatabase and drive are defined in earlier cells of this notebook.
download_database = GoogleDriveDatabase(drive, DOWNLOAD_DATABASE)
upload_database = GoogleDriveDatabase(drive,UPLOAD_DATABASE)
# Letters A-Y without J and Z — presumably the signed alphabet minus the
# two motion-based letters, which a still image cannot capture (confirm).
CHARACTERS = ("A", "B", "C", "D", "E", "F", "G", "H", "I", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y")
def download_characters():
    """Download every character's image folder; return {character: files}."""
    base_dir = "images/"
    downloaded = {}
    for character in CHARACTERS:
        target = os.path.join(base_dir, character + "/")
        files = download_database.download(character, target)
        downloaded[character] = files
        print("{} pictures downloaded for {}".format(len(files), character))
    return downloaded
def DownloadCharacterAndUploadJson(character):
    """Sync one character end-to-end: delete orphaned Drive JSONs, download
    pictures that have no keypoint JSON yet, run OpenPose on them, and
    upload the resulting JSON files back to Drive.

    Side effects: Drive deletions/uploads, local downloads and deletions.
    """
    strip_name = lambda x: x.split("/")[-1].split(".")[0] # Function to remove extensions (i.e. .json .jpg) from filenames
    character_jsons_original = upload_database.getFiles(character) # All .json files created for this character
    character_jsons_names = [strip_name(x['title']) for x in character_jsons_original] # Name of all .json files already created for this character
    picture_references = download_database.getFiles(character) # All current pictures for this character
    picture_reference_names = [strip_name(x['title']) for x in picture_references] # Name of all pictures
    already_analyzed_names = set() # Names of all already analyzed items
    pictures_to_analyze = [] # Picture that yet do not have a json file
    BASE_FOLDER = "images/" # Directory where all images are
    character_folder = os.path.join(BASE_FOLDER, character + "/") # Folder in which the images for this character are in (i.e. images/A)
    os.makedirs(character_folder, exist_ok=True) # Make the character folder if it doesn't exist
    print("There are {} images for character {}".format(len(picture_references), character))
    print("There are {} json files already made for character {}".format(len(character_jsons_names), character))
    for json_original in character_jsons_original: # For every json file on the google drive...
        if strip_name(json_original['title']) not in picture_reference_names: # ...if the json file is not in the list of pictures...
            print("{} deleted because no image was found to relate to it".format(json_original['title']))
            json_original.Delete() # ...then delete the file
    for picture in picture_references: # For every picture on google drive...
        if strip_name(picture['title']) in character_jsons_names: # ...if that picture already has a json uploaded...
            already_analyzed_names.add(download_database.download_file_name(picture['title'])) # ...add its name to the list of already analyzed pictures
            continue
        else:
            pictures_to_analyze.append(download_database.download_file(picture, character_folder, check_local=True)) # ...else download it
    local_duplicates = [os.remove(os.path.join(character_folder, x)) for x in os.listdir(character_folder) if x in already_analyzed_names] # if a picture is in the already analyzed list and exists in the local directory, delete it so that openpose doesn't waste time running it
    print("{} local duplicates deleted".format(len(local_duplicates)))
    if len(os.listdir(character_folder)) == 0:
        print("{} has no images to process".format(character))
        return
    BASE_IMAGES_DIR = "/content/images/"
    BASE_JSON_DIR = "/content/json/"
    json_created = Openpose(os.path.join(BASE_IMAGES_DIR, character), os.path.join(BASE_JSON_DIR, character)) # runs openpose on all images in a given path and outputs each image to an output path
    for json_file in json_created: # For every local json file created...
        upload_database.upload(json_file, character, "json") #...upload it to google drive
# Process every character end-to-end (delete orphans, download, OpenPose, upload).
for c in CHARACTERS:
    DownloadCharacterAndUploadJson(c)
# download_characters()
| scripts/Picture_Download_and_OpenPose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recognising handwritten digits on MNIST dataset using KNN
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Each CSV row: digit label in column 0, then 784 pixel values (28x28 image).
df=pd.read_csv('Data/mnist_train.csv')
print(df.shape)
print(df.columns)
df.head(n=5)
data=df.values
print(data.shape)
print(type(data))
# Split columns: pixel features (x) vs digit label (y).
x=data[:,1:]
y=data[:,0]
print(x.shape, y.shape)
# +
# 80/20 train/test split by row position (no shuffling — relies on CSV order).
split=int(0.8*x.shape[0])
print(split)
x_train=x[:split,:]
y_train=y[:split]
x_test=x[split:,:]
y_test=y[split:]
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
# -
def drawimg(sample):
    """Render one flattened 784-pixel sample as its 28x28 image."""
    plt.imshow(sample.reshape((28, 28)))
    plt.show()
# Sanity check: show one training image alongside its label.
drawimg(x_train[88])
print(y_train[88])
# +
def dist(x1, x2):
    """Euclidean distance between two equal-length vectors."""
    diff = x1 - x2
    return np.sqrt(sum(diff ** 2))
def knn(x, y, querypoint, k=5):
    """Classify *querypoint* by majority vote among its k nearest neighbours.

    x: (m, d) training samples; y: (m,) labels.  Returns the winning label
    (as a float, since labels pass through a numpy distance array).
    """
    neighbours = sorted((dist(querypoint, x[i]), y[i]) for i in range(x.shape[0]))
    nearest = np.array(neighbours[:k])
    labels, counts = np.unique(nearest[:, 1], return_counts=True)
    return labels[counts.argmax()]
# -
# Predict one held-out digit and compare against its true label.
pred=knn(x_train,y_train,x_test[999])
print(int(pred))
drawimg(x_test[999])
print(y_test[999])
| Recognising handwritten digits on MNIST dataset using KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# List every input file Kaggle mounted under /kaggle/input.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import os
TRAIN_DIR = "/kaggle/input/jigsaw-toxic-comment-classification-challenge/train.csv.zip"
TEST_DIR = "/kaggle/input/jigsaw-toxic-comment-classification-challenge/test.csv.zip"
df = pd.read_csv(TRAIN_DIR)
df = df.drop("id", axis=1)
comments = df.comment_text.values
# Every column from "toxic" onwards is a binary label (multi-label target).
rating = df.loc[:, "toxic":].values
# -
# Number of label columns per sample (6 for this challenge).
print(len(rating[0]))
# Fit a word-level tokenizer on the corpus, then pad every comment to the
# length of the longest one so the model sees fixed-size input.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(comments)
encoded_comments = tokenizer.texts_to_sequences(comments)
max_input_length = max(list(map(len, encoded_comments)))
input_data = np.array(pad_sequences(encoded_comments, maxlen=max_input_length, padding="post"))
# +1 because the tokenizer's word indices start at 1; index 0 is padding.
num_words = len(tokenizer.word_index) + 1
# +
from keras import Input, Model
from keras.layers import Embedding, Dropout, Conv1D, GlobalMaxPooling1D, Dense
# Text CNN: embedding -> two strided 1-D convolutions -> global max pool ->
# dense head with 6 sigmoid outputs (one per toxicity label).
inputs = Input(shape=(None,), dtype="int64")
x = Embedding(num_words, 128)(inputs)
x = Dropout(0.5)(x)
x = Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(128, activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(6, activation="sigmoid", name="predictions")(x)
model = Model(inputs, predictions)
# binary_crossentropy per output suits independent multi-label targets.
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# -
print(input_data.shape, rating.shape)
epochs = 3
model.fit(input_data, rating, epochs=epochs)
def str_to_tokens(sentence):
    """Encode a raw sentence into a padded index sequence for the model.

    Words the tokenizer has never seen map to index 0 (the padding index).
    """
    word_index = tokenizer.word_index
    tokens = [word_index.get(word, 0) for word in sentence.lower().split()]
    return pad_sequences([tokens], maxlen=max_input_length, padding="post")
# Round-trip check: re-predict the first training comment.
string = comments[0]
translated = str_to_tokens(string)
prediction = model.predict(translated)[0]
print(prediction)
# Label indices ordered from most to least probable.
np.argsort(prediction)[::-1]
# Persist the model and tokenizer for the serving side.
model.save("model.h5")
import pickle
with open("tokenizer.pkl", "wb") as file:
    pickle.dump(tokenizer, file, protocol=pickle.HIGHEST_PROTOCOL)
| model/Toxicity Detector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp test.export
# -
#hide
from nbdev.showdoc import *
# # Test nbdev export flags
#
# > Some functions are exported to different modules.
# ## Functions that belong in core module
#export core
# (nbdev flag above: this cell is exported to the `core` module rather than
# this notebook's default module; the next two cells test the other flags.)
def export_from_data_to_core():
    "export to a different module"
    pass
#exports core
def exports_from_data_to_core():
    "exports to a different module"
    pass
#exporti core
def exporti_from_data_to_core():
    "internal export to a different module"
    pass
# ## Functions that belong to this module
#expORT
# (Flag capitalisation varies across these cells — presumably to test that
# nbdev parses export flags case-insensitively; confirm before "fixing".)
from decision_tree.core import *
#EXPort
def export_no_module():
    """Doesn't really do anything."""
    pass
#exporti
#Note: tiny bug in 03_export2html.ipynb means we can only use lower case exporti
def exporti_no_module():
    """Doesn't really do anything."""
    pass
#exportS
def exports_no_module():
    """Doesn't really do anything."""
    pass
#hide
# Regenerate the library modules from this notebook's export flags.
from nbdev.export import *
notebook2script()
| 40_test_export.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import Libraries
# Dependencies include python time library, numpy, matplotlib, scipy, pandas and sklearn
# +
# import libraries
from time import time
import numpy as np
import matplotlib.pyplot as plt
import scipy
import pandas as pd
import sklearn
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
from sklearn.model_selection import cross_val_score
# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
# Fix the RNG so splits and models are reproducible across runs.
np.random.seed(1)
# -
# ## Help Functions for Classification
def read_subject_sleep(id):
    """Load the labelled-sleep CSV for one subject id into a DataFrame."""
    fname = "../sleepLabelled/sleepLabelled_{}.csv".format(id)
    return pd.read_csv(fname)
def read_all_subjects_sleep():
    """Load labelled-sleep data for subjects CRP001..CRP060 (CRP013 excluded)."""
    study_ids = ["CRP{0:03d}".format(i + 1) for i in range(60)]
    study_ids.remove("CRP013")  # no data for this subject — confirm
    return [read_subject_sleep(study_id) for study_id in study_ids]
def plot_roc_curve(clf, X_test, y_test):
    """Plot the ROC curve of a fitted binary classifier against a no-skill baseline."""
    # No-skill baseline: a constant 0 score for every sample.
    ns_probs = [0 for _ in range(len(y_test))]
    lr_probs = clf.predict_proba(X_test)
    # keep probabilities for the positive outcome only
    lr_probs = lr_probs[:, 1]
    # calculate scores
    ns_auc = roc_auc_score(y_test, ns_probs)
    lr_auc = roc_auc_score(y_test, lr_probs)
    # calculate roc curves
    ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
    lr_fpr, lr_tpr, _ = roc_curve(y_test, lr_probs)
    # plot the roc curve for the model
    plt.plot(ns_fpr, ns_tpr, linestyle='--')
    plt.plot(lr_fpr, lr_tpr, marker='.', label= str(type(clf).__name__)+' (area = {0:0.2f})'.format(lr_auc ) )
    # axis labels
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic Curve', pad= 20.0, fontdict= {'fontsize':15})
    # show the legend
    plt.legend()
    plt.show()
def plot_precision_recall_curve(clf, X_test, y_test):
    """Plot the precision-recall curve for a fitted binary classifier.

    Fixes over the original: uses the positive-class probability column
    (predict_proba returns one column per class), computes y_pred here
    instead of relying on a notebook-global, and uses the `plt` alias
    (`pyplot` was never imported under that name, causing a NameError).
    """
    # keep probabilities for the positive outcome only
    lr_probs = clf.predict_proba(X_test)[:, 1]
    y_pred = clf.predict(X_test)
    lr_precision, lr_recall, _ = precision_recall_curve(y_test, lr_probs)
    lr_f1, lr_auc = f1_score(y_test, y_pred), auc(lr_recall, lr_precision)
    # summarize scores
    print('f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
    # Baseline: a no-skill classifier's precision equals the positive rate.
    no_skill = len(y_test[y_test==1]) / len(y_test)
    plt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Effect')
    plt.plot(lr_recall, lr_precision, marker='.', label= str(type(clf).__name__) + '(area = {0:0.2f})'.format(lr_auc ))
    # axis labels
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall Curve', pad= 20.0, fontdict= {'fontsize':15})
    # show the legend
    plt.legend()
    # show the plot
    plt.show()
# ### Read all subject files
# Perform test and training split
# +
dir_sleep_labelled = "../sleepLabelled/"
dir_aggregate = "../aggregateData/"
# read in all subjects labelled sleep csv files
all_sleeps = read_all_subjects_sleep()
# concatenate all sleep periods
sleep_dataset = pd.concat(all_sleeps, ignore_index=True, sort=False)
# feature transform
colnames = list(sleep_dataset.columns.values)
y = sleep_dataset["sleepLabel"].copy()
# Drop the first 60 raw columns plus label/metadata columns, keeping only
# the engineered features.
X = sleep_dataset.drop(colnames[0:60]+['sleepLabel',
                                       'steps',
                                       'distanceInMeters',
                                       'studyId',
                                       'startDateTime'],axis = 1).copy()
X['HRstd'] = X['HRstd']**2  # squared std == variance-scale feature
X = sklearn.preprocessing.normalize(X, axis = 0)  # per-column L2 normalisation
y[y<0]=0  # collapse negative labels into the 0 class -> binary target
y = y.to_numpy()
y=y.astype('int')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)
# -
# ### Trying models
#
from sklearn.linear_model import LogisticRegressionCV
# Logistic regression with built-in 5-fold CV over its regularisation strength.
lg = LogisticRegressionCV(cv=5, random_state=0).fit(X_train, y_train)
print("Training Accuracy")
print(lg.score(X_train, y_train))
y_pred = lg.predict(X_test)
print("Testing Accuracy")
print(lg.score(X_test, y_test))
print(classification_report(y_test, y_pred))
plot_roc_curve(lg, X_test, y_test)
# ridge regression
from sklearn.linear_model import RidgeClassifierCV
rgcv = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1, 10]).fit(X_train, y_train)
rgcv.score(X, y)  # NOTE(review): result discarded, and scored on the full data set
print("Training Accuracy")
print(rgcv.score(X_train, y_train))
y_pred = rgcv.predict(X_test)
print("Testing Accuracy")
print(rgcv.score(X_test, y_test))
print(classification_report(y_test, y_pred))
roc_auc_score(y_test, y_pred)
# random forest
from sklearn.ensemble import RandomForestClassifier
# NOTE(review): fitted on the FULL data (X, y), so the "Testing Accuracy"
# below is measured on samples the model has already seen — confirm whether
# fitting on X_train was intended.
rf = RandomForestClassifier(max_depth=6, random_state=0).fit(X,y)
rf.score(X, y)
print("Training Accuracy")
print(rf.score(X_train, y_train))
y_pred = rf.predict(X_test)
print("Testing Accuracy")
print(rf.score(X_test, y_test))
print(classification_report(y_test, y_pred))
scores = cross_val_score(rf, X, y, cv=5)
print("CV Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
def random_forest_nested_CV(clf, X_train, y_train):
    """Compare nested vs non-nested cross-validation over a small RF grid.

    clf: the base estimator to tune.  The original referenced an undefined
    name `rfc` inside the loop (NameError) and clobbered the `clf`
    parameter; the passed-in estimator is now used and the search object
    gets its own name.
    """
    # Number of random trials
    NUM_TRIALS = 1
    # Set up possible values of parameters to optimize over
    p_grid = {"max_depth": [3, 5, 7],
              "n_estimators": [10, 100]}
    # Arrays to store scores
    non_nested_scores = np.zeros(NUM_TRIALS)
    nested_scores = np.zeros(NUM_TRIALS)
    # Loop for each trial
    for i in range(NUM_TRIALS):
        # Inner CV selects hyper-parameters; outer CV estimates generalisation.
        # E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
        inner_cv = KFold(n_splits=5, shuffle=True, random_state=i)
        outer_cv = KFold(n_splits=5, shuffle=True, random_state=i)
        # Non_nested parameter search and scoring
        search = GridSearchCV(estimator=clf, param_grid=p_grid, cv=inner_cv)
        search.fit(X_train, y_train)
        non_nested_scores[i] = search.best_score_
        # Nested CV with parameter optimization
        nested_score = cross_val_score(search, X=X_train, y=y_train, cv=outer_cv)
        nested_scores[i] = nested_score.mean()
    print("None nested scores:")
    print(non_nested_scores)
    print("Nested scores:")
    print(nested_scores)
# adaboost
from sklearn.ensemble import AdaBoostClassifier
# AdaBoost on the held-out split, plus 5-fold CV on the full data.
adab = AdaBoostClassifier(n_estimators=25, random_state=0).fit(X_train, y_train)
print("Training Accuracy")
print(adab.score(X_train, y_train))
y_pred = adab.predict(X_test)
print("Testing Accuracy")
print(adab.score(X_test, y_test))
print(classification_report(y_test, y_pred))
scores = cross_val_score(adab, X, y, cv=5)
print("CV Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
from sklearn import svm
# RBF-kernel SVM on the held-out split, plus 5-fold CV on the full data.
svc = svm.SVC(kernel='rbf').fit(X_train, y_train)
print("Training Accuracy")
print(svc.score(X_train, y_train))
y_pred = svc.predict(X_test)
print("Testing Accuracy")
print(svc.score(X_test, y_test))
print(classification_report(y_test, y_pred))
scores = cross_val_score(svc, X, y, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# # Predict on all other epochs based on feature engineering
def read_all_aggregate():
    """Load the feature-engineered aggregate CSV for every subject
    (CRP001..CRP060, with CRP013 excluded)."""
    study_ids = ["CRP{0:03d}".format(i + 1) for i in range(60)]
    study_ids.remove("CRP013")
    return [pd.read_csv("../aggregateData/aggregate_fe_{}.csv".format(study_id))
            for study_id in study_ids]
def feature_transform(aggre):
    """Apply the same feature pipeline used for training to an aggregate frame:
    drop raw/metadata columns, square HRstd, L2-normalise each column."""
    colnames = list(aggre.columns.values)
    dropped = colnames[0:60] + ['sleepLabel', 'steps', 'distanceInMeters', 'studyId', 'startDateTime']
    X = aggre.copy().drop(dropped, axis=1)
    X['HRstd'] = X['HRstd'] ** 2
    return sklearn.preprocessing.normalize(X, axis=0)
def predict_sleep(clf, aggre):
    """Predict sleep labels for one aggregate frame and persist it to CSV.

    Mutates *aggre* by adding a 'predictedSleep' column.
    """
    features = feature_transform(aggre)
    aggre['predictedSleep'] = clf.predict(features)
    study_id = aggre['studyId'][0]
    aggre.to_csv("../aggregateData/aggregate_fe_predlabel_{}.csv".format(study_id))
all_aggregates = read_all_aggregate()
for aggregate in all_aggregates:
    # NOTE(review): `clf` is not defined at notebook scope here (it only
    # exists as a parameter of random_forest_nested_CV) — this likely should
    # be one of the fitted models above (e.g. rf or adab). Confirm.
    predict_sleep(clf, aggregate)
| DigitalBiomarkers-Sleep/sleep_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Below is the training settings used for ImageWoof 5 epoch, 128px. You can kick off a run in the very next cell.
#There are some other sample runs showing how some of the settings were adjusted...if you find better settings please let us know!
# -
#To install maxblurpool from kornia
# !pip install git+https://github.com/kornia/kornia.git@maxblurpool-ceil
#Run this to reproduce the results of Dmytro modification: mxresnet + MaxPoolBlur2d:
# %run train.py --run 5 --woof 1 --size 128 --bs 64 --mixup 0 --epoch 5 --lr 4e-3 --gpu 1 --opt ranger --mom .95 --sched_type flat_and_anneal --ann_start 0.72
#Run this to reproduce the results of Dmytro modification: mxresnet + MaxPoolBlur2d:
# Imagenette
# %run train.py --run 5 --woof 0 --size 128 --bs 64 --mixup 0 --epoch 5 --lr 4e-3 --gpu 0 --opt ranger --mom .95 --sched_type flat_and_anneal --ann_start 0.72
| Ranger-Mish-ImageWoof-Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Experience
#
# This experience shows an approach used to perform Time-Series data analysis involving Machine Learning.
#
# Considerations:
# - Multiple features to be analyzed;
# - Possible correlation between features;
# - Unlabeled data;
# - (Un)supervised learning;
import pandas as pd
# The code bellow was extracted from a module, opentsdb client, presented in Graphy.
# This function retrieves data from OpenTSDB using the REST API defined by the database developers. To retrieve data, the metric name, start timestamp and end timestamp must be provided.
# + slideshow={"slide_type": "-"}
import requests
import json
opentsdb_address = 'http://127.0.0.1:4242/'
api_query = 'api/query'
def get_metrics(name: str, start_timestamp: int, end_timestamp: int) -> dict:
    """
    Gets the metrics from OpenTSDB.
    :param name: The name of the metrics.
    :param start_timestamp: The start unix timestamp of the metric.
    :param end_timestamp: The end unix timestamp of the metric.
    :return: The metrics as a dictionary if success, None otherwise.
    """
    json_body = {
        "start": start_timestamp,
        "end": end_timestamp,
        "queries": [{"aggregator": "sum", "metric": name},
                    {"aggregator": "sum", "tsuids": ["000001000002000042", "000001000002000043"]}]
    }
    data = None
    try:
        response = requests.post(opentsdb_address + api_query, data=json.dumps(json_body),
                                 headers={'content-type': 'application/json'})
        if response.status_code == 200:
            response_text = json.loads(response.text)
            if response_text:
                # 'dps' holds the {timestamp: value} points of the first query.
                data = response_text[0].get('dps', None)
        return data
    # requests raises its own ConnectionError, which is NOT a subclass of the
    # builtin one the original caught — catch both.
    except (ConnectionError, requests.exceptions.ConnectionError) as ex:
        # The original called an undefined `logger` and sys.exit(status=1)
        # (sys was never imported, and exit() takes a positional argument).
        import sys
        print('{}: {}'.format(type(ex), ex), file=sys.stderr)
        sys.exit(1)
# -
# # Data to be analyzed
#
# The data is gathered from a Time-Series database (OpenTSDB). In this case we get some metrics from this kind of database to perform the analysis.
#
# We are using the OUTER JOIN method to merge data from multiple features. This merge method preserves the data points and fills the missing values with NaN (Missing values).
# +
# Three days of data (end - start = 259200 s).
start_timestamp = 1530057600
end_timestamp = 1530316800
service_name = 'nova-api-cascading'
metric_name = 'huawei.call_count.{}'.format(service_name)
metric_name_2 = 'huawei.status_code.{}.2XX'.format(service_name)
metric_name_3 = 'huawei.status_code.{}.4XX'.format(service_name)
metric_name_4 = 'huawei.status_code.{}.5XX'.format(service_name)
metrics = get_metrics(metric_name, start_timestamp, end_timestamp)
metrics_2 = get_metrics(metric_name_2, start_timestamp, end_timestamp)
metrics_3 = get_metrics(metric_name_3, start_timestamp, end_timestamp)
metrics_4 = get_metrics(metric_name_4, start_timestamp, end_timestamp)
# print(len(metrics.items()))
# print(len(metrics_2.items()))
# print(len(metrics_3.items()))
# print(len(metrics_4.items()))
# One frame per metric: columns are the epoch timestamp and the value.
df_1 = pd.DataFrame(metrics.items(), columns=['time', 'call_count'])
df_2 = pd.DataFrame(metrics_2.items(), columns=['time', 'status_code_2xx'])
df_3 = pd.DataFrame(metrics_3.items(), columns=['time', 'status_code_4xx'])
df_4 = pd.DataFrame(metrics_4.items(), columns=['time', 'status_code_5xx'])
# Outer joins keep every timestamp; metrics absent at a timestamp become NaN.
df = pd.merge(df_1, df_2, how='outer')
df = pd.merge(df, df_3, how='outer')
df = pd.merge(df, df_4, how='outer')
print('\nData info:\n{}'.format(df.info()))
print('\nData:\n{}'.format(df))
print('\nMissing values counting:\n{}'.format(df.isna().sum()))
# -
# # Plotting
#
# In this code block, for visualization purposes, we plot some data retrieved from the time-series database.
#
# As we can see, there are missing points in some charts. To perform a good analysis, this problem must be solved.
# +
from matplotlib import pyplot as plt
# One time-series chart per metric; visible gaps are the missing data points.
df.plot(x='time', y='call_count', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('Call count')
plt.title('Time Series of Call Count')
df.plot(x='time', y='status_code_2xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_2xx')
plt.title('Time Series of status_code_2xx')
df.plot(x='time', y='status_code_4xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_4xx')
plt.title('Time Series of status_code_4xx')
df.plot(x='time', y='status_code_5xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_5xx')
plt.title('Time Series of status_code_5xx')
# -
# # Missing Values -- Pain :'(
#
# Missing values are a pain in data analysis because we cannot perform a good analysis of data that contains them, and it is often unclear what the best way to fill them in is. These values are represented by NaN in the dataframes. We cannot run any clustering on dataframes that contain such values.
#
# Approaches:
# - Remove rows with missing values (Degrades the overall data and may result in insufficient data);
# - Impute missing values (May be dangerous because it introduces "wrong" observations);
#
# For this example, and because we have a lot of missing values, the best option is to impute missing values, otherwise, if we remove the rows with the missing values it would cause insufficiency in the data which in turn results in inefficient training of the machine learning model.
#
# Now, there are several ways we can perform the imputation:
# - A constant value that has meaning within the domain, such as 0, distinct from all other values;
# - A value from another randomly selected record;
# - A mean, median or mode value for the column;
# - Linear interpolation;
# - A value estimated by another machine learning model.
#
# [How to handle missing data in Time-Series](https://towardsdatascience.com/how-to-handle-missing-data-8646b18db0d4)
#
# To choose the best method for filling in the missing values, we decompose the data to inspect the Trend, Seasonality and Residual components.
#
# From the charts displayed below, we can see that our data exhibits only trends. Following the path "Handling Missing Data --> Imputation --> Time-Series Problem --> Data with Trend and without Seasonality" from the link provided above, we select "Linear Interpolation" as the best method for filling in our data.
# Keep a pristine copy (with NaNs) to compare before/after interpolation.
df_copy = df.copy()
# +
from random import randrange
from pandas import Series
from matplotlib import pyplot
from statsmodels.tsa.seasonal import seasonal_decompose
# Additive decomposition of each metric into trend + seasonal + residual
# (NaNs dropped first; freq=1 since no seasonal period is assumed).
result = seasonal_decompose(df['call_count'].dropna(), model='additive', freq=1)
result.plot()
pyplot.show()
result = seasonal_decompose(df['status_code_2xx'].dropna(), model='additive', freq=1)
result.plot()
pyplot.show()
result = seasonal_decompose(df['status_code_4xx'].dropna(), model='additive', freq=1)
result.plot()
pyplot.show()
result = seasonal_decompose(df['status_code_5xx'].dropna(), model='additive', freq=1)
result.plot()
pyplot.show()
# -
# # Missing values fulfill
#
# As explained before, since the decomposed time-series data shows only signs of Trend without Seasonality, the best option for filling in the data is linear interpolation. Accordingly, below is a piece of code that applies linear interpolation to the dataframe.
# +
df = df_copy.copy()
# Linear interpolation fills each NaN from its neighbouring points
# (chosen because the decomposition showed trend without seasonality).
df = df.interpolate(method ='linear')
# Before/after comparison for 2xx, then the other filled series.
df_copy.plot(x='time', y='status_code_2xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_2xx')
plt.title('Time Series of status_code_2xx (MISSING_VALUES)')
df.plot(x='time', y='status_code_2xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_2xx')
plt.title('Time Series of status_code_2xx (COMPLETE)')
df.plot(x='time', y='status_code_4xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_4xx')
plt.title('Time Series of status_code_4xx (COMPLETE)')
df.plot(x='time', y='status_code_5xx', figsize=(12,6))
plt.xlabel('Timestamps')
plt.ylabel('status_code_5xx')
plt.title('Time Series of status_code_5xx (COMPLETE)')
# +
import numpy as np
# Binary health flag: 1 when successful (2xx) responses outnumber errors (4xx + 5xx).
df['status'] = np.where(df['status_code_2xx'] > df['status_code_4xx'] + df['status_code_5xx'], 1, 0)
df['status'].value_counts()
# +
from sklearn.cluster import KMeans
# Elbow sweep (k = 1..9) on the 2-feature view to pick a cluster count.
data = df[['call_count', 'status']]
n_cluster = range(1, 10)
kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster]
scores = [kmeans[i].score(data) for i in range(len(kmeans))]
fig, ax = plt.subplots(figsize=(10,6))
ax.plot(n_cluster, scores)
plt.xlabel('Number of Clusters')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show();
# +
from sklearn.cluster import KMeans
# Same elbow sweep on the full 4-feature view.
# NOTE(review): this rebinds `data` and `kmeans`, overwriting the
# 2-feature models from the previous cell.
data = df[['call_count', 'status_code_2xx', 'status_code_4xx', 'status_code_5xx']]
n_cluster = range(1, 10)
kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster]
scores = [kmeans[i].score(data) for i in range(len(kmeans))]
fig, ax = plt.subplots(figsize=(10,6))
ax.plot(n_cluster, scores)
plt.xlabel('Number of Clusters')
plt.ylabel('Score')
plt.title('Elbow Curve')
plt.show();
# +
df.info()
X = df[['call_count', 'status_code_2xx', 'status_code_4xx', 'status_code_5xx']]
X = X.reset_index(drop=True)
# n_clusters must be an integer; the original passed 3.5, which KMeans rejects.
km = KMeans(n_clusters=3)
km.fit(X)
km.predict(X)
labels = km.labels_
# Plotting (the original used Axes3D without importing it; labels.astype used
# np.float, which has been removed from numpy).
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1, figsize=(7,7))
ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)
ax.scatter(X.iloc[:,0], X.iloc[:,1], X.iloc[:,2],
           c=labels.astype(float), edgecolor="k")
# Label axes with the features actually plotted (the original labels were
# leftovers from an unrelated hotel-booking example).
ax.set_xlabel("call_count")
ax.set_ylabel("status_code_2xx")
ax.set_zlabel("status_code_4xx")
plt.title("K Means", fontsize=14);
# +
from sklearn.preprocessing import StandardScaler
# Manual PCA on the 2-feature view: eigen-decompose the covariance matrix of
# the standardised data and plot individual vs cumulative explained variance.
data = df[['call_count', 'status']]
X = data.values
X_std = StandardScaler().fit_transform(X)
mean_vec = np.mean(X_std, axis=0)
cov_mat = np.cov(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
eig_pairs = [ (np.abs(eig_vals[i]),eig_vecs[:,i]) for i in range(len(eig_vals))]
eig_pairs.sort(key = lambda x: x[0], reverse= True)
tot = sum(eig_vals)
var_exp = [(i/tot)*100 for i in sorted(eig_vals, reverse=True)] # Individual explained variance
cum_var_exp = np.cumsum(var_exp) # Cumulative explained variance
plt.figure(figsize=(10, 5))
plt.bar(range(len(var_exp)), var_exp, alpha=0.3, align='center', label='individual explained variance', color = 'g')
plt.step(range(len(cum_var_exp)), cum_var_exp, where='mid',label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show();
# +
from sklearn.preprocessing import StandardScaler
# Same manual PCA on the full 4-feature view (rebinds data/X/X_std).
data = df[['call_count', 'status_code_2xx', 'status_code_4xx', 'status_code_5xx']]
X = data.values
X_std = StandardScaler().fit_transform(X)
mean_vec = np.mean(X_std, axis=0)
cov_mat = np.cov(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
eig_pairs = [ (np.abs(eig_vals[i]),eig_vecs[:,i]) for i in range(len(eig_vals))]
eig_pairs.sort(key = lambda x: x[0], reverse= True)
tot = sum(eig_vals)
var_exp = [(i/tot)*100 for i in sorted(eig_vals, reverse=True)] # Individual explained variance
cum_var_exp = np.cumsum(var_exp) # Cumulative explained variance
plt.figure(figsize=(10, 5))
plt.bar(range(len(var_exp)), var_exp, alpha=0.3, align='center', label='individual explained variance', color = 'g')
plt.step(range(len(cum_var_exp)), cum_var_exp, where='mid',label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show();
# +
def getDistanceByPoint(data, model):
    """Return a Series with the distance of each row of *data* to the
    centroid of the cluster the model assigned it to.

    Parameters
    ----------
    data : pandas.DataFrame
        Feature matrix; rows are assumed to be positionally indexed
        0..len(data)-1 in the same order as ``model.labels_``.
    model : fitted KMeans-like estimator
        Must expose ``cluster_centers_`` and ``labels_``.
    """
    distances = []
    for i in range(len(data)):
        point = np.array(data.loc[i])
        # Index centroids by the raw label: the original ``labels_[i] - 1``
        # mapped label 0 onto the *last* centroid and skewed its distances.
        centroid = model.cluster_centers_[model.labels_[i]]
        distances.append(np.linalg.norm(point - centroid))
    # Build the Series in one go; Series.set_value() was removed in pandas 1.0
    # and pd.Series() with no data/dtype warns on modern pandas.
    return pd.Series(distances, dtype=float)
# Flag the top 1% of points (largest distance to their centroid) as anomalies.
outliers_fraction = 0.01
# get the distance between each point and its nearest centroid. The biggest distances are considered as anomaly
# NOTE(review): assumes ``kmeans`` is a sequence of fitted KMeans models and
# that index 8 selects the chosen cluster count — confirm against the fitting cell.
distance = getDistanceByPoint(data, kmeans[8])
number_of_outliers = int(outliers_fraction*len(distance))
# Smallest distance among the top-k largest = the anomaly threshold.
threshold = distance.nlargest(number_of_outliers).min()
# anomaly1 contain the anomaly result of the above method Cluster (0:normal, 1:anomaly)
df['anomaly1'] = (distance >= threshold).astype(int)
# visualisation of anomaly with cluster view
fig, ax = plt.subplots(figsize=(10,6))
colors = {0:'blue', 1:'red'}
ax.scatter(df['call_count'], df['status'], c=df["anomaly1"].apply(lambda x: colors[x]))
# NOTE(review): labels say "principal feature" but raw features are plotted.
plt.xlabel('principal feature1')
plt.ylabel('principal feature2')
plt.show();
# +
# Time-series views of the flagged anomalies: call_count and status over time,
# with anomalous samples overlaid as red points.
df = df.sort_values('time')
fig, ax = plt.subplots(figsize=(10,6))
a = df.loc[df['anomaly1'] == 1, ['time', 'call_count']] #anomaly
ax.plot(df['time'], df['call_count'], color='blue', label='Normal')
ax.scatter(a['time'],a['call_count'], color='red', label='Anomaly')
plt.xlabel('Timestamp')
plt.ylabel('Call count')
plt.legend()
plt.show();
fig, ax = plt.subplots(figsize=(10,6))
a = df.loc[df['anomaly1'] == 1, ['time', 'status']] #anomaly
ax.plot(df['time'], df['status'], color='blue', label='Normal')
ax.scatter(a['time'],a['status'], color='red', label='Anomaly')
plt.xlabel('Timestamp')
plt.ylabel('Status')
plt.legend()
plt.show();
| notebooks/Data Analysis [nova-api-cascading] Call Count -- Status Codes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Requirements Documentation and Notes
#
# # SQL Samples
#
#
# 2. Total monthly commits
# ```sql
# SELECT
# date_trunc( 'month', commits.cmt_author_timestamp AT TIME ZONE'America/Chicago' ) AS DATE,
# repo_name,
# rg_name,
# cmt_author_name,
# cmt_author_email,
# COUNT ( cmt_author_email ) AS author_count
# FROM
# commits,
# repo,
# repo_groups
# WHERE
# commits.repo_id = repo.repo_id
# AND repo.repo_group_id = repo_groups.repo_group_id
# AND commits.cmt_author_timestamp AT TIME ZONE'America/Chicago' BETWEEN '2019-11-01'
# AND '2019-11-30'
# GROUP BY
# DATE,
# repo_name,
# rg_name,
# cmt_author_name,
# cmt_author_email
# ORDER BY
# DATE,
# cmt_author_name,
# cmt_author_email;
#
# ```
#
# ### Metrics: Lines of Code and Commit Summaries by Week, Month and Year
# There are six summary tables:
# 1. dm_repo_annual
# 2. dm_repo_monthly
# 3. dm_repo_weekly
# 4. dm_repo_group_annual
# 5. dm_repo_group_monthly
# 6. dm_repo_group_weekly
#
# ```sql
# SELECT
# repo.repo_id,
# repo.repo_name,
# repo_groups.rg_name,
# dm_repo_annual.YEAR,
# SUM ( dm_repo_annual.added ) AS lines_added,
# SUM ( dm_repo_annual.whitespace ) AS whitespace_added,
# SUM ( dm_repo_annual.removed ) AS lines_removed,
# SUM ( dm_repo_annual.files ) AS files,
# SUM ( dm_repo_annual.patches ) AS commits
# FROM
# dm_repo_annual,
# repo,
# repo_groups
# WHERE
# dm_repo_annual.repo_id = repo.repo_id
# AND repo.repo_group_id = repo_groups.repo_group_id
# GROUP BY
# repo.repo_id,
# repo.repo_name,
# repo_groups.rg_name,
# YEAR
# ORDER BY
# YEAR,
# rg_name,
# repo_name
#
# ```
#
#
# ### Metrics: Value / Labor / Lines of Code (Total, NOT Commits)
# 1. Total lines in a repository by language and line type. This treats software as an asset: its lines of code at a point in time.
# ```sql
# SELECT
# repo.repo_id,
# repo.repo_name,
# programming_language,
# SUM ( total_lines ) AS repo_total_lines,
# SUM ( code_lines ) AS repo_code_lines,
# SUM ( comment_lines ) AS repo_comment_lines,
# SUM ( blank_lines ) AS repo_blank_lines,
# AVG ( code_complexity ) AS repo_lang_avg_code_complexity
# FROM
# repo_labor,
# repo,
# repo_groups
# WHERE
# repo.repo_group_id = repo_groups.repo_group_id
# and
# repo.repo_id = repo_labor.repo_id
# GROUP BY
# repo.repo_id,
# programming_language
# ORDER BY
# repo_id
#
#
# --
#
# ```
#
# #### Estimated Labor Hours by Repository
# ```sql
# SELECT C
# .repo_id,
# C.repo_name,
# SUM ( estimated_labor_hours )
# FROM
# (
# SELECT A
# .repo_id,
# b.repo_name,
# programming_language,
# SUM ( total_lines ) AS repo_total_lines,
# SUM ( code_lines ) AS repo_code_lines,
# SUM ( comment_lines ) AS repo_comment_lines,
# SUM ( blank_lines ) AS repo_blank_lines,
# AVG ( code_complexity ) AS repo_lang_avg_code_complexity,
# AVG ( code_complexity ) * SUM ( code_lines ) + 20 AS estimated_labor_hours
# FROM
# repo_labor A,
# repo b
# WHERE
# A.repo_id = b.repo_id
# GROUP BY
# A.repo_id,
# programming_language,
# repo_name
# ORDER BY
# repo_name,
# A.repo_id,
# programming_language
# ) C
# GROUP BY
# repo_id,
# repo_name;
# ```
#
# #### Estimated Labor Hours by Language
# ```sql
# SELECT C
# .repo_id,
# C.repo_name,
# programming_language,
# SUM ( estimated_labor_hours )
# FROM
# (
# SELECT A
# .repo_id,
# b.repo_name,
# programming_language,
# SUM ( total_lines ) AS repo_total_lines,
# SUM ( code_lines ) AS repo_code_lines,
# SUM ( comment_lines ) AS repo_comment_lines,
# SUM ( blank_lines ) AS repo_blank_lines,
# AVG ( code_complexity ) AS repo_lang_avg_code_complexity,
# AVG ( code_complexity ) * SUM ( code_lines ) + 20 AS estimated_labor_hours
# FROM
# repo_labor A,
# repo b
# WHERE
# A.repo_id = b.repo_id
# GROUP BY
# A.repo_id,
# programming_language,
# repo_name
# ORDER BY
# repo_name,
# A.repo_id,
# programming_language
# ) C
# GROUP BY
# repo_id,
# repo_name,
# programming_language
# ORDER BY
# programming_language;
# ```
#
#
#
# ## Issues
# ### Issue Collection Status
# 1. Currently 100% Complete
# ```sql
# SELECT a.repo_id, a.repo_name, a.repo_git,
# b.issues_count,
# d.repo_id AS issue_repo_id,
# e.last_collected,
# COUNT ( * ) AS issues_collected_count,
# (
# b.issues_count - COUNT ( * )) AS issues_missing,
# ABS (
# CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count AS DOUBLE PRECISION )) AS ratio_abs,
# (
# CAST (( COUNT ( * )) AS DOUBLE PRECISION ) / CAST ( b.issues_count AS DOUBLE PRECISION )) AS ratio_issues
# FROM
# augur_data.repo a,
# augur_data.issues d,
# augur_data.repo_info b,
# ( SELECT repo_id, MAX ( data_collection_date ) AS last_collected FROM augur_data.repo_info GROUP BY repo_id ORDER BY repo_id ) e
# WHERE
# a.repo_id = b.repo_id
# AND a.repo_id = d.repo_id
# AND b.repo_id = d.repo_id
# AND e.repo_id = a.repo_id
# AND b.data_collection_date = e.last_collected
# AND d.pull_request_id IS NULL
# GROUP BY
# a.repo_id,
# d.repo_id,
# b.issues_count,
# e.last_collected,
# a.repo_git
# ORDER BY
# repo_name, repo_id, ratio_abs;
# ```
#
# ### Repositories with GitHub Issue Tracking
# ```sql
#
# select repo_id, count(*) from repo_info where issues_count > 0
# group by repo_id;
# ```
| code/pivotal-samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import altair as alt
import anthro.io
# Build one Altair line+point chart per pesticide item and save each as JSON.
data = pd.read_csv('../processed/FAOSTAT_pesticide.csv')
data['year'] = pd.to_datetime(data['year'], format='%Y')
# Group by the bare column name: grouping by a one-element *list* makes the
# group key a 1-tuple in pandas >= 2.0, which would corrupt the y-axis title
# and produce filenames like "('DDT',).json" below.
for g, d in data.groupby('item'):
    chart = alt.Chart(d).encode(
        x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
        y=alt.Y(field='tonnes', type='quantitative', title=f'{g} [tonnes]'),
        tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
                 alt.Tooltip(field='tonnes', type='nominal', title='tonnes')]
    ).properties(width="container", height=300)
    l = chart.mark_line(color='dodgerblue')
    p = chart.mark_point(color='dodgerblue', filled=True)
    layer = alt.layer(l, p)
    layer.save(f'{g}.json')
| data/flora_fauna/FAOSTAT_pesticide/viz/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ancka019/ComputationsMethods6sem/blob/main/method8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="vbciPKEo8Gzv"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import eval_jacobi
from scipy.misc import derivative
from scipy.integrate import quad
from numpy.linalg import solve, det
from math import exp, sin, log
from copy import copy
# + id="GtLpkU8a8LbB"
def p_n_k(n, k):  # Jacobi-type basis function
    """Return phi(t) = (1 - t**2) * P_n^{(k,k)}(t), a Jacobi polynomial
    weighted so that the basis vanishes at t = +/-1."""
    return lambda t: (1-t**2)*eval_jacobi(n,k,k,t)
def _central_diff(f, dx=1.0):
    # Central difference reproducing scipy.misc.derivative's defaults
    # (dx=1.0, order=3): (f(t+dx) - f(t-dx)) / (2*dx).  scipy.misc was
    # removed in SciPy 1.12, so we no longer call it here (the top-of-file
    # ``from scipy.misc import derivative`` can also be dropped).
    return lambda t: (f(t + dx) - f(t - dx)) / (2.0 * dx)
def p_n_k_first_der(n, k):  # first derivative of the Jacobi basis function
    """Numerical first derivative of p_n_k(n, k)."""
    return _central_diff(p_n_k(n, k))
def p_n_k_second_der(n, k):  # second derivative
    """Numerical second derivative of p_n_k(n, k) (difference of the
    first-derivative approximation, matching the original behaviour)."""
    return _central_diff(p_n_k_first_der(n, k))
# + [markdown] id="gL_jAyOZ8Wqb"
# функции A_i из метода Галеркина
# + id="rq1ce8gD8R5I"
def A_i(funs, phi, dphi, ddphi, i):
    """Apply the differential operator to basis function ``i``:
    A[phi_i](x) = k(x)*phi_i''(x) + p(x)*phi_i'(x) + q(x)*phi_i(x)."""
    k, p, q, f = funs
    def operator(x):
        second = k(x) * ddphi[i](x)
        first = p(x) * dphi[i](x)
        zeroth = q(x) * phi[i](x)
        return second + first + zeroth
    return operator
# + [markdown] id="rouHNIki8d4s"
# #метод Галеркина
#
# + id="H5d1ccC68aNU"
def galerkin_method(segment, funs, N):
    """Solve the boundary-value problem on ``segment`` by Galerkin's method
    with ``N`` Jacobi-type basis functions; return the approximation u(t)."""
    a, b = segment
    k, p, q, f = funs
    # Basis functions and their first/second derivatives.
    phi = [p_n_k(i, 1) for i in range(N)]
    dphi = [p_n_k_first_der(i, 1) for i in range(N)]
    ddphi = [p_n_k_second_der(i, 1) for i in range(N)]
    # Differential operator applied to each basis function.
    A = np.array([A_i(funs, phi, dphi, ddphi, i) for i in range(N)])
    # Right-hand side: projections of f onto the basis.
    C = np.array([quad(lambda t: f(t) * phi[i](t), a, b)[0] for i in range(N)])
    # Galerkin matrix B[i, j] = <phi_i, A[phi_j]>.
    B = np.array([[quad(lambda t: phi[i](t) * A[j](t), a, b)[0]
                   for j in range(N)]
                  for i in range(N)])
    alpha = solve(B, C)
    return lambda t: sum(c * basis(t) for c, basis in zip(alpha, phi))
# + [markdown] id="b1sNjdXU8j99"
# из методички <NAME>:
# + id="KUeQ-1fX8fXv"
# Three boundary-value test problems, each a (k, p, q, f) coefficient/source
# tuple in the form expected by galerkin_method.
funs = [[lambda x: -(4-x)/(5-2*x),
        lambda x: (1-x)/2,
        lambda x: 0.5*log(x+3),
        lambda x: 1+x/3],
       [lambda x: (x-2)/(x+2),
        lambda x: x,
        lambda x: 1-sin(x),
        lambda x: x**2],
       [lambda x: -(7-x)/(8+3*x),
        lambda x: (1+x/3),
        lambda x: (1-exp(x/2)/2),
        lambda x: 1/2-x/3]]
# All three problems are posed on the interval [-1, 1].
segment = [[-1,1],[-1,1],[-1,1]]
# + colab={"base_uri": "https://localhost:8080/", "height": 879} id="p2hhIFMl8o40" outputId="e7c51daa-c5fc-43ae-a659-15c844844ec6"
# 3x2 grid of plots: each of the 3 test problems solved at two resolutions
# (N=4 basis functions on an h=0.05 grid, and N=9 on h=0.01); the Galerkin
# approximation is evaluated on a uniform grid and plotted.
fig, axes = plt.subplots(3, 2, figsize=(20, 15))
for i in range(3):
    for j in range(2):
        if j == 0:
            N, h = 4, 0.05
        else:
            N, h = 9, 0.01
        u = galerkin_method(segment[i],funs[i],N)
        a,b = segment[i]
        n = round((b - a) / h)
        x1 = np.zeros(n + 1)
        y = np.zeros(n + 1)
        # Sample the approximate solution at the n+1 grid points.
        for t in range(n + 1):
            x1[t] = a + t* h
            y[t] = u(x1[t])
        axes[i,j].plot(x1, y, marker='.', color='blue', mec='black', ms=10)
        axes[i,j].set_title("Задача {}, N = {}".format(i+1,N-1))
| method8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # [모듈 3.5] 엔드포인트 배포 스텝 개발 (SageMaker Model Building Pipeline 배포 스텝)
#
# 이 노트북은 아래와 같은 목차로 진행 됩니다. 전체를 모두 실행시에 완료 시간은 약 10분 소요 됩니다.
#
# - 1. 모델 엔드포인트 배포 개요
# - 2. 기본 라이브러리 로딩
# - 3. 모델 빌딩 파이프라인 의 스텝(Step) 생성
# - 4. 파리마터, 단계, 조건을 조합하여 최종 파이프라인 정의 및 실행
# - 5. 세이지 메이커 스튜디오에서 확인하기
# - 6. 앤드포인트 추론 테스트
# - 7. 엔드포인트 삭제
#
# # 1. 모델 엔드포인트 배포 개요
# ---
# ## 1.1. SageMaker 호스팅 아키텍쳐
# - 일반적인 아키텍쳐의 그림이고, 오토 스케일링이 적용이 되어 있습니다.
#
# 
# ## 1.2 프로세싱 스텝
# - 엔드포인트 배포는 "deploy_model.py" 의 스크립트를 실행하여 배포를 합니다. 그래서 여기서는 "프로세싱 스텝" 을 사용합니다.
# - 프로세싱 단계의 개발자 가이드
# - [프로세싱 스텝](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/build-and-manage-steps.html#step-type-processing)
#
#
# # 2. 기본 라이브러리 로딩
#
# 세이지 메이커 관련 라이브러리를 로딩 합니다.
# +
import boto3
import sagemaker
import pandas as pd
# Resolve the current AWS region, the notebook execution role, and a
# SageMaker session used by the pipeline/deployment calls below.
region = boto3.Session().region_name
role = sagemaker.get_execution_role()
sagemaker_session = sagemaker.session.Session()
# Restore variables persisted by earlier notebooks with %store
# (bucket, project_prefix, sagemaker_model, test_preproc_dir_artifact, ...).
# %store -r
# %store
# -
# # 3. 모델 빌딩 파이프라인 의 스텝(Step) 생성
#
# ## 3.1 모델 빌딩 파이프라인 변수 생성
#
# +
from sagemaker.workflow.parameters import (
    ParameterInteger,
    ParameterString,
)
# Pipeline parameters — overridable per execution at pipeline start time.
# Instance count/type for the deployment processing job:
processing_instance_count = ParameterInteger(
    name="ProcessingInstanceCount",
    default_value=1
)
processing_instance_type = ParameterString(
    name="ProcessingInstanceType",
    default_value="ml.m5.xlarge"
)
# Instance type for the real-time inference endpoint itself:
endpoint_instance_type = ParameterString(
    name="EndpointInstanceType",
    default_value="ml.m5.xlarge"
)
# -
# ## 3.2 배포에 사용할 스크립트 코드 S3 업로딩
#
# +
from datetime import datetime
# Current microsecond serves as a cheap uniqueness suffix for the endpoint name.
suffix = datetime.now().microsecond
print("suffix: ", suffix)
# Upload the deployment script to S3 so the processing job can run it.
# NOTE(review): ``bucket`` and ``project_prefix`` are expected to come from the
# %store -r restore above — confirm they were stored by an earlier notebook.
local_deploy_code_path = 'src/deploy_model.py'
s3_deploy_code_path = f"s3://{bucket}/{project_prefix}/code"
s3_deploy_code_uri = sagemaker.s3.S3Uploader.upload(
    local_path=local_deploy_code_path,
    desired_s3_uri=s3_deploy_code_path,
)
print("s3_deploy_code_uri: \n", s3_deploy_code_uri)
pipeline_endpoint_name = 'pipeline-endpoint-' + str(suffix)
# -
# ## 3.3. 배포에 사용할 프로세서 정의
# +
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.workflow.steps import ProcessingStep
# Processor that runs the deployment script: the endpoint is created from a
# processing job because deploy_model.py calls the SageMaker APIs itself.
deploy_model_processor = SKLearnProcessor(
    framework_version='0.23-1',
    role= role,
    instance_type= processing_instance_type,
    instance_count= processing_instance_count,
    base_job_name='fraud-scratch-deploy-model',
    sagemaker_session=sagemaker_session)
# -
# ## 3.4 모델 엔트포인트 생성 스탭 생성
#
# Pipeline step that executes deploy_model.py to create the endpoint.
# NOTE(review): ``sagemaker_model`` is expected from the %store restore above.
deploy_step = ProcessingStep(
    name='Fraud-Basic-Endpoint',
    processor=deploy_model_processor,
    job_arguments=[
        "--model_name", sagemaker_model,
        "--region", region,
        "--endpoint_instance_type", endpoint_instance_type,
        "--endpoint_name", pipeline_endpoint_name
    ],
    code=s3_deploy_code_uri)
# # 4. 파리마터, 단계, 조건을 조합하여 최종 파이프라인 정의 및 실행
#
#
# ## 4.1 파이프라인 정의
#
# [중요] `pipeline_endpoint_name` 에 '_' 언데바를 넣으면 에러가 납니다. '-' 대시는 가능합니다.
# +
from sagemaker.workflow.pipeline import Pipeline
# Single-step pipeline: only the endpoint deployment step defined above.
pipeline_name = project_prefix
pipeline = Pipeline(
    name=pipeline_name,
    parameters=[
        processing_instance_type,
        processing_instance_count,
        endpoint_instance_type
    ],
    steps=[deploy_step],
)
# -
# ## 4.2 파이프라인 정의 확인
# 위에서 정의한 파이프라인 정의는 Json 형식으로 정의 되어 있습니다.
# +
import json
# The pipeline definition is JSON; parse it for inspection in the notebook.
definition = json.loads(pipeline.definition())
definition
# -
# ## 4.3 파이프라인 정의를 제출하고 실행하기
#
# 파이프라인 정의를 파이프라인 서비스에 제출합니다. 함께 전달되는 역할(role)을 이용하여 AWS에서 파이프라인을 생성하고 작업의 각 단계를 실행할 것입니다.
# Register (or update) the pipeline, start a run, and show its metadata.
pipeline.upsert(role_arn=role)
execution = pipeline.start()
execution.describe()
# ## 4.4 Wait for the pipeline run to finish
execution.wait()
# ## 4.5 List the executed pipeline steps
execution.list_steps()
# # 5. 세이지 메이커 스튜디오에서 확인하기
# - 아래의 그림 처럼 SageMaker Studio에 로긴후에 따라하시면, SageMaker Studio 에서도 실행 내역을 확인할 수 있습니다.
# - SageMaker Studio 개발자 가이드 --> [SageMaker Studio](https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/studio.html)
#
#
# 
# # 6. 앤드포인트 추론 테스트
# ### 추론에 사용할 Payload 생성 및 추론
# - 테스트 데이터를 통해서 엔드포인트에 전달할 CSV 형태의 String을 생성합니다. (payload).
# - payload 를 엔드포인트에 제공하면, 확률값을 0.072 을 리턴합니다.
# - 보통 확률값이 0.5 보다 작으면 0 (Non-Fruad), 0.5 보다 크면 1 (Fruad) 로 변환해서 예측값으로 사용합니다.
#
# ```
# payload:
# 6038.102399076349,15838.10239907635,39,64,0,1,750,2750,95660,2012,1,0,0,9800,1,9,2,16,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,1,0
# Reponse:
# [['0.0726071447134018']]
#
#
# ```
# ## 6.1. 테스트 데이터 준비
# +
import boto3
sagemaker_boto_client = boto3.client('sagemaker')
from src.p_utils import get_predictor, get_payload, predict
from sagemaker.deserializers import CSVDeserializer
csv_deserializer = CSVDeserializer(accept='text/csv') # 'text/csv' is already the default accept; stated explicitly for clarity.
# -
# Load the preprocessed test split saved by the earlier pipeline notebooks.
# NOTE(review): ``test_preproc_dir_artifact`` comes from the %store restore.
test_preproc_path = f"{test_preproc_dir_artifact}/test.csv"
test_df = pd.read_csv(test_preproc_path)
test_df.head(1)
# ## 6.2. 엔드포인트에 입력이 될 데이터 (payload) 생성
# +
# Build a CSV payload from the first record of test_df (label column dropped).
payload = get_payload(test_df, label_col = 'fraud', verbose=False)
print("payload: \n", payload)
# -
# ## 6.3. Run inference to obtain the predicted fraud probability
# +
# Query the deployed endpoint; the CSV deserializer parses the response.
predictor = get_predictor(pipeline_endpoint_name, sagemaker_session, csv_deserializer)
pred_prob = predict(predictor, payload)
print("prediction_prob: \n", pred_prob)
# -
# # 7. 엔드포인트 삭제
#
# 파이프라인 실행을 통하여 생성된 앤드포인트 및 관련 오브젝트 (세이지 메이커 모델, 엔드포인트 컨피그) 를 삭제 합니다.
# +
from src.p_utils import delete_endpoint
# Tear down the endpoint plus its endpoint-config and (is_del_model) the model.
delete_endpoint(sagemaker_boto_client, pipeline_endpoint_name, is_del_model = True )
# -
| sagemaker/sm-special-webinar/lab_3_pipeline/3.5.Deploy-Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Brownian Motion
# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
import mpl_toolkits.mplot3d.axes3d as p3
fig = plt.figure()
# Axes3D(fig) stopped auto-attaching the axes to the figure in Matplotlib 3.4
# (and the auto_add_to_figure fallback was removed in 3.7), which leaves a
# blank figure; add_subplot with a 3d projection is the supported way to
# create a 3-D axes.
ax = fig.add_subplot(projection="3d")
N = 1000 # Number of points
T = 1.0
dt = T/(N-1)
# Brownian increments are Normal(0, dt); cumulative sums trace the path.
dX = np.sqrt(dt) * np.random.randn(1, N)
X = np.cumsum(dX) # Cumulatively add these values to get the x positions of the particle
dY = np.sqrt(dt) * np.random.randn(1, N)
Y = np.cumsum(dY) # Cumulatively add these values to get the y positions of the particle
dZ = np.sqrt(dt) * np.random.randn(1, N)
Z = np.cumsum(dZ) # Cumulatively add these values to get the z positions of the particle
line, = ax.plot(X, Y, Z, color='blue', lw=1)
def animate(i):
    '''
    Frame callback for FuncAnimation: reveals the first ``i`` points of the
    pre-computed random walk stored in the module-level X, Y, Z arrays.
    Parameters:
    -----------
    i : int
        Frame index; used to slice the coordinate arrays
    Returns:
    --------
    line : mpl_toolkits.mplot3d.art3d.Line3D
        The updated line artist (the same object on every call)
    '''
    line.set_data(X[:i], Y[:i])
    # z-coordinates of a 3-D line must be set separately from x/y.
    line.set_3d_properties(Z[:i])
    return line
# Build the animation (one frame per path point, 10 ms apart) and render it
# inline as an HTML5 video.
anim = FuncAnimation(fig, animate, interval=10, frames=N, repeat=False)
HTML(anim.to_html5_video()) # This is just for the Jupyter notebook which will not show the animation when using plt.show()
# if running locally, uncomment plt.show() below and remove the HTML command above.
#plt.show()
| _posts/BrownianMotionCode/BrownianMotion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2D and 3D Face Alignment Network
# **2D FAN:**
#
# 
#
# **3D FAN:**
# 
#
# Here we'll show you a facial landmark detection example using [FAN (Face Alignment Network)](https://arxiv.org/pdf/1703.07332.pdf).
# # Preparation
#
# First, we need to have [nnabla-examples repository](https://github.com/sony/nnabla-examples) and install nnabla. The following cell does both. Also, when you're running on Colab, make sure that your Runtime setting is set as GPU, which can be set up from the top menu (Runtime → change runtime type), and make sure to click **Connect** on the top right-hand side of the screen before you start.
# !pip install nnabla-ext-cuda100
# !git clone https://github.com/sony/nnabla-examples.git
# %run nnabla-examples/interactive-demos/colab_utils.py
# %cd nnabla-examples/facial-keypoint-detection/face-alignment
# Then, we need to download the pretrained weights and face detection model.
# +
# get 2D FAN pretrained weights.
# !wget https://nnabla.org/pretrained-models/nnabla-examples/face-alignment/2DFAN4_NNabla_model.h5
# to run the 3D facial landmark detection, we need to have additional weights as well.
# !wget https://nnabla.org/pretrained-models/nnabla-examples/face-alignment/3DFAN4_NNabla_model.h5
# !wget https://nnabla.org/pretrained-models/nnabla-examples/face-alignment/Resnet_Depth_NNabla_model.h5
# get dlib's face detection model.
# !wget http://dlib.net/files/mmod_human_face_detector.dat.bz2
# -
# # Upload an image
#
# Run the below cell to upload an image to use FAN. Make sure to select **just 1 image** (if you upload multiple images, all the images but the last one will be ignored) and that image must contain at least one face.
# +
from google.colab import files
# Prompt the user to upload image(s); ``img`` maps filename -> file contents.
img = files.upload()
# -
# For convenience, rename the image file (only the last upload is used).
# +
import os
# Preserve the original extension while renaming to a fixed base name.
ext = os.path.splitext(list(img.keys())[-1])[-1]
os.rename(list(img.keys())[-1], "input_image{}".format(ext))
input_img = "input_image" + ext
# -
# # Run 2D FAN & Visualize the result
#
# Now that we have the image to use, let's run 2D FAN and see the result. The following cell executes FAN network and generates the output image named "output_image.png".
# +
# !python model_inference.py --model 2DFAN4_NNabla_model.h5 --test-image $input_img --output output_image.png
from IPython.display import Image,display
display(Image('output_image.png'))
# -
# # Run 3D FAN & Visualize the result
#
# Next, let's see what happens if we use 3D landmark detection. The following cell executes 3D FAN and generate the output image named "output_image_3D.png".
# +
# !python model_inference.py --landmarks-type-3D --model 3DFAN4_NNabla_model.h5 --resnet-depth-model Resnet_Depth_NNabla_model.h5 --test-image $input_img --output output_image_3D.png
display(Image('output_image_3D.png'))
# -
# # Visualize the 3D FAN result with 3D plot
#
# The image above shows the keypoints, but since it is rendered on 2D image it's hard to tell the difference from the 2D result. So next we use mplot3d to see how the detected keypoints are represented in the 3D space. Note that the script below is a partially modified version of model_inference.py [as of ver 1.9.0](https://github.com/sony/nnabla-examples/blob/release/v1.9.0-branch/facial-keypoint-detection/face-alignment/model_inference.py), so there might be some difference in the later version.
#
# + cellView="form"
#@title First let's start by importing dependencies. (double-click to see the codes)
import cv2
import dlib
import nnabla as nn
import nnabla.functions as F
from skimage import io, color
from model import fan, resnet_depth
from external_utils import *
import numpy as np
# -
# This is the main part of the keypoint detection. Using the uploaded images above, we first detect the face region with dlib, then crop that area to extract the facial image, and finally run the FAN to get the predicted keypoints.
# + cellView="form"
#@title Execute Face Detection and FAN. (double-click to see the codes)
from nnabla.ext_utils import get_extension_context
# Run on GPU via cuDNN with eager (auto-forward) execution.
ctx = get_extension_context("cudnn")
nn.set_default_context(ctx)
nn.set_auto_forward(True)
# Normalise the input image to 3-channel RGB (grayscale -> RGB, drop alpha).
image = io.imread(input_img)
if image.ndim == 2:
    image = color.gray2rgb(image)
elif image.ndim == 4:
    image = image[..., :3]
# Detect faces with dlib's CNN detector; keep boxes as [left, top, right, bottom].
face_detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
detected_faces = face_detector(cv2.cvtColor(image[..., ::-1].copy(), cv2.COLOR_BGR2GRAY))
detected_faces = [[d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()] for d in detected_faces]
if len(detected_faces) == 0:
    print("Warning: No faces were detected.")
    # NOTE(review): ``sys`` is not imported in this cell — presumably provided
    # by ``from external_utils import *``; confirm.
    sys.exit()
# Load FAN weights
with nn.parameter_scope("FAN"):
    print("Loading FAN weights...")
    nn.load_parameters("3DFAN4_NNabla_model.h5")
# Load ResNetDepth weights
with nn.parameter_scope("ResNetDepth"):
    print("Loading ResNetDepth weights...")
    nn.load_parameters("Resnet_Depth_NNabla_model.h5")
landmarks = []  # NOTE(review): never appended to in this cell
for i, d in enumerate(detected_faces):
    # Face-box centre (shifted slightly upward) and scale used for cropping.
    center = [d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0]
    center[1] = center[1] - (d[3] - d[1]) * 0.12
    scale = (d[2] - d[0] + d[3] - d[1]) / 195
    inp = crop(image, center, scale)
    # HWC -> CHW, scale to [0, 1], add a batch dimension.
    inp = nn.Variable.from_numpy_array(inp.transpose((2, 0, 1)))
    inp = F.reshape(F.mul_scalar(inp, 1 / 255.0), (1,) + inp.shape)
    with nn.parameter_scope("FAN"):
        out = fan(inp, 4)[-1]
    # 68 landmark predictions in heatmap and image coordinates.
    pts, pts_img = get_preds_fromhm(out, center, scale)
    pts, pts_img = F.reshape(pts, (68, 2)) * \
        4, F.reshape(pts_img, (68, 2))
    # One Gaussian heatmap per detected landmark.
    heatmaps = np.zeros((68, 256, 256), dtype=np.float32)
    # NOTE(review): the inner loop variable shadows the outer face index ``i``.
    for i in range(68):
        if pts.d[i, 0] > 0:
            heatmaps[i] = draw_gaussian(
                heatmaps[i], pts.d[i], 2)
    heatmaps = nn.Variable.from_numpy_array(heatmaps)
    heatmaps = F.reshape(heatmaps, (1,) + heatmaps.shape)
    # Predict per-landmark depth from image + heatmaps, then append the
    # rescaled z-coordinate to the 2-D image points.
    with nn.parameter_scope("ResNetDepth"):
        depth_pred = F.reshape(resnet_depth(
            F.concatenate(inp, heatmaps, axis=1)), (68, 1))
    pts_img = F.concatenate(
        pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale))), axis=1)
# -
# We now prepare for the visualization with mplot3d. The code below is from [the original author's repository](https://github.com/1adrianb/face-alignment/blob/master/examples/detect_landmarks_in_image.py).
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import collections
# Each prediction type pairs a slice of the 68 landmark indices with the RGBA
# colour used to draw that facial region.
pred_type = collections.namedtuple('prediction_type', ['slice', 'color'])
pred_types = {'face': pred_type(slice(0, 17), (0.682, 0.780, 0.909, 0.5)),
              'eyebrow1': pred_type(slice(17, 22), (1.0, 0.498, 0.055, 0.4)),
              'eyebrow2': pred_type(slice(22, 27), (1.0, 0.498, 0.055, 0.4)),
              'nose': pred_type(slice(27, 31), (0.345, 0.239, 0.443, 0.4)),
              'nostril': pred_type(slice(31, 36), (0.345, 0.239, 0.443, 0.4)),
              'eye1': pred_type(slice(36, 42), (0.596, 0.875, 0.541, 0.3)),
              'eye2': pred_type(slice(42, 48), (0.596, 0.875, 0.541, 0.3)),
              'lips': pred_type(slice(48, 60), (0.596, 0.875, 0.541, 0.3)),
              'teeth': pred_type(slice(60, 68), (0.596, 0.875, 0.541, 0.4))
              }
# + cellView="form"
#@title Visualize the result. (double-click to see the codes)
# uncomment the line below if you run this in jupyter notebook.
#matplotlib notebook
# Scatter the 68 predicted 3-D landmarks and connect each facial region.
fig = plt.figure(figsize=plt.figaspect(.5))
ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.scatter(pts_img.d[:, 0] * 1.2,
                  pts_img.d[:, 1],
                  pts_img.d[:, 2],
                  c='cyan',
                  alpha=1.0,
                  edgecolor='b')
# Loop variable renamed: the original reused ``pred_type`` and thereby
# rebound (shadowed) the namedtuple factory defined above.
for region in pred_types.values():
    ax.plot3D(pts_img.d[region.slice, 0] * 1.2,
              pts_img.d[region.slice, 1],
              pts_img.d[region.slice, 2], color='blue')
# Orient the view to face the camera and mirror the x-axis so the plot
# matches the photo orientation.
ax.view_init(elev=100., azim=90.)
ax.set_xlim(ax.get_xlim()[::-1])
plt.show()
# -
# # For Smartphone Users
# If you're using a smartphone with a camera, you can take a photo and use that image for this demo.
# Just execute the following cell and tap `Capture` button.
#
# If your device has multiple cameras (such as front and back) you need to select which one to use by tapping the corresponding button (which should appear near the 'Capture' button).
#
# **Note this is an experimental support and may not work in some devices.**
#
# Capture a photo from the device camera via the colab helper; this is a
# best-effort feature, so all failures are reported rather than raised.
try:
    filename = take_photo(cam_width=256, cam_height=256)
    print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))
# If the photo is OK, let's run (2D) FAN. If you want to use another photo, just re-run the previous cell again.
#
# The following cell will execute the same as done above on your photo.
# Re-run 2D FAN on the captured photo and display the annotated result.
# !python model_inference.py --model 2DFAN4_NNabla_model.h5 --test-image $input_img --output output_image.png
from IPython.display import Image,display
display(Image('output_image.png'))
| interactive-demos/fan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: yggJLNE
# language: python
# name: yggjlne
# ---
import jupyterlab_nodeeditor as jlne
from yggdrasil import yamlfile
# +
filename = "model_example.yml"
# filename = "X:\College\Grad School\Research\jupyterlab_nodeeditor\examples\model_example.yml"
# Parse the yggdrasil model YAML and build a node editor whose socket types
# come from yggdrasil's own JSON schema ("simpleTypes" enum).
model_set = yamlfile.parse_yaml(filename, model_only=True)
schema = yamlfile.get_schema()
socket_types = tuple(schema.form_schema['definitions']['schema']['definitions']['simpleTypes']['enum'])
ne = jlne.NodeEditor(socket_types = socket_types)
# -
# Display the editor widget, then read the label of its first node.
ne
ne.node_editor.nodes[0].display_element.children[0].value
from yggdrasil.runner import YggRunner
runner = YggRunner("gs_lesson4/gs_lesson4_python.yml")
# +
# runner.drivers
# +
# Updating a label in realtime
# This will be used to update data on the front-end and have the backend automatically adjust it
class Something:
    # Counter shown in the node label; starts as a class attribute and becomes
    # an instance attribute on the first ``self.val += 1``.
    val = 0
    def update_label(self, *args, **kwargs):
        # Accepts arbitrary args so it can serve as a generic yggdrasil
        # connection callback; updates the first node's label each call.
        ne.node_editor.nodes[0].display_element.children[0].value = f"Hello {self.val}"
        self.val += 1
s = Something()
# +
# Add in a callbacks key that just updates the label of the runner.
# NOTE(review): original comment was truncated ("This basically allows us to
# modify the") — presumably "...the node label while the run progresses".
runner.connectiondrivers['python_modelA:outputA_to_python_modelB:inputB']['callbacks'] = [s.update_label]
# -
runner.run()
# # TO-DO
# 1. Figure out all the methods of a YggRunner()
# 2. Understand what part of the model yamls get configured (to help with connection to JLNE)
# 3. Create a visual in JLNE where values can be modifed
# 4. Connect the visual to the backend YggRunner to update models/connections
# 5. Optional(?) - Auto re-run JLNE to show the updated values/connections or just be able to output them right away
| examples/autogenerate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import sympy as sy
from sympy.utilities.codegen import codegen
import control.matlab as cm
import re
import matplotlib.pyplot as plt
from scipy import signal
# +
# RST pole-placement design symbols: plant B/A, desired closed-loop Ac,
# observer polynomial Ao = (z-a)^2 and controller polynomials R, S with
# unknown coefficients r1, s0, s1, s2.  NOTE: this notebook is Python 2
# (print statements; Python 2 kernelspec).
z = sy.symbols('z', real=False)
r1,s0,s1,s2 = sy.symbols('r1,s0,s1,s2', real=True)
hh,a = sy.symbols('h,a', real=True, positive=True)
Bp = sy.poly(z+0.7, z)
Ap = sy.poly( z**2 - 1.8*z + 0.81, z)
Ac = sy.poly( z**2 - 1.5*z + 0.7, z)
Ao = sy.poly((z-a)**2, z)
Acl = Ac*Ao
Rp = sy.poly(z+r1, z)
Sp = sy.poly(s0*z**2 + s1*z + s2, z)
# Diophantine equation A*R*(z-1) + B*S = Acl, taken coefficient-wise
# (the (z-1) factor adds integral action to the controller).
dioph=(Ap*Rp*(z-1)+Bp*Sp-Acl).all_coeffs()
print dioph
print Acl
print Ap*Rp
print Ac
print Ap*Rp  # NOTE(review): duplicate of the print two lines above
print Ap*Rp + Bp*Sp
# +
sol = sy.solve(dioph, (r1,s0,s1,s2))
print 'r_1 = %f' % sol[r1]
print 's_0 = %f' % sol[s0]
print 's_1 = %f' % sol[s1]
print 's_1 = %f' % sol[s1]
t0 = Ac.evalf(subs={z:1})/Bp.evalf(subs={z:1,})
print 't_0 = %f' % t0
R = Rp.subs(sol)
S = Sp.subs(sol)
T = t0*Ao
Hc = T*Bp/(Ac*Ao)
Hcc = t0*0.8/Ac
sy.pretty_print(sy.expand(Hc))
sy.pretty_print(sy.expand(Hcc))
sy.pretty_print(Hc.evalf(subs={z:1}))
sy.pretty_print(sy.simplify(Ap*R + Bp*S))
# -
1.0/0.3125
#
# Convert symbolic polynomial coefficients to float arrays for python-control.
num = sy.list2numpy((Ap*R).all_coeffs(), dtype=np.float64)
# NOTE(review): Ac*Ao still contains the symbol ``a``; this float conversion
# only works if ``a`` was substituted earlier in the session — confirm.
den = sy.list2numpy((Ac*Ao).all_coeffs(), dtype=np.float64)
print num
print den
print type(num[0])
#Hd = cm.tf(num[:-1], den[:-1], -1)
# Placeholder transfer function used instead of the computed num/den above.
Hd = cm.tf([1], [1, 0.5])
print Hd
ystep, t = cm.step(Hd, np.arange(30))
plt.figure()
plt.plot(t, ystep)
plt.show()
# Reorganize solution expression for matlab code generation
# NOTE(review): ``A2p``, ``h`` and ``po1`` are not defined anywhere in this
# notebook as shown (only ``hh`` and ``a`` symbols exist) — this cell raises
# NameError unless they were created in an earlier session. Confirm intent.
sol_expr = ('RST_DC_lab', [Bp.all_coeffs()[0], Bp.all_coeffs()[1],
                    Ap.all_coeffs()[1], Ap.all_coeffs()[2],
                    sol[r1], sol[s0], sol[s1], A2p.subs(z, 1)/Bp.subs(z,1), h,np.exp(h*po1) ])
# Export to matlab code
[(m_name, m_code)] = codegen(sol_expr, 'octave')
# Rename the generated outputs to meaningful controller-coefficient names and
# comment out the function header so the file can run as an Octave script.
m_code = m_code.replace("out1", "b0").replace("out2", "b1").replace("out3", "a1").replace("out4", "a2")
m_code = m_code.replace("out5", "r1").replace("out6", "s0").replace("out7", "s1").replace("out8", "t0")
m_code = m_code.replace("out9", "h").replace("out10", "obsPole")
m_code = m_code.replace("function ", "% function ")
m_code = m_code.replace("end", "")
print m_code
with open("/home/kjartan/Dropbox/undervisning/tec/MR2007/labs/dc_rst_design.m", "w") as text_file:
    text_file.write(m_code)
# +
# cm.step?
# +
# Continuous-time motor model and its ZOH discretisations, computed both by a
# hand-derived formula (Gd) and by c2d (Gd2) for comparison.
# NOTE(review): ``Km``, ``tau``, ``hpt`` and ``h`` are not defined in this
# notebook as shown — confirm they come from an earlier session cell.
G = Km * cm.tf([1], [tau, 1, 0])
Gd = Km * cm.tf([tau*(hpt-1+np.exp(-hpt)), tau*(1-(1+hpt)*np.exp(-hpt))], [1, -(1+np.exp(-hpt)), np.exp(-hpt)], h)
Gd2 = cm.c2d(G, h)
print Gd
print Gd2
# -
# Scratch checks — bare expressions simply display their value in a notebook.
# NOTE(review): ``A2p`` is undefined in this notebook as shown; see earlier note.
print A2p
print A2p.evalf(subs={z:1})
print Bp
print Bp.evalf(subs={z:1})
0.3/(5*np.sqrt(2))
np.exp(-0.21)*np.sin(0.21)
np.exp(0.03*(-14))
0.746*41.8
| polynomial-design/notebooks/RST-integrator-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 6. NumPy
# NumPy 是一个强大的 Python 的数学计算库,可以处理例如矩阵乘法、统计函数等数学操作。
#
# (若在自己的电脑上运行 NumPy,请先安装 NumPy:)
# %pip install numpy
# 要在 Python 里调用一个库,使用 `import <package> as <name>` 的语法就可以了。
import numpy as np
# ## 数组 `np.array`
# 为什么我们要用 NumPy 呢?这是因为它可以以数组的结构,存储并操作大量数据。一个基本的数组可以是一个 1 维的向量。下面就是几种创建向量数组的方式:
# +
a = np.array([1, 2, 3, 4])
b = np.array([4, 3, 2, 1])
c = np.arange(4) # equivalent to Python's range(4)
d = np.zeros(4) # length-4 vector filled with 0s
e = np.ones(4) # length-4 vector filled with 1s
print(a)
print(b)
print(c)
print(d)
print(e)
# -
# 用 NumPy 的好处就是可以对大批数据进行各种操作,比如加减乘除:
# +
# operate on every element (scalar broadcast)
print(a + 1)
print(a - 1)
print(a * 1)
print(a / 1)
# operate element-wise between corresponding entries of a and b
print(a + b)
print(a - b)
print(a * b)
print(a / b)
# -
# 是不是很方便呢?注意,我们现在不需要 `for` 语法,就可以来对一组数据操作了。
#
# 我们在线性代数里学习了点乘,那如果我们要点乘 $a \cdot b = [1, 2, 3, 4] \cdot [4, 3, 2, 1]$,该怎么做呢?
np.dot(a, b)
# ## 2 维矩阵和数组形状
# 我们看到了怎么在数组里创建 1 维的向量,那怎么创建 2 维的矩阵呢?使用多层的列表 `list` 就可以了:
f = np.array([[1, 2, 3], [4, 5, 6]])
print(f)
# 上面的 `f` 就是一个 2 × 3 的矩阵。要查看它的形状等信息,可以用 `f.ndim`、`f.shape` 和 `f.size`:
print(f.ndim) # a 2-dimensional matrix
print(f.shape) # shape is (2 × 3)
print(f.size) # 6 elements in total
# 那我们要改变 `f` 的形状,怎么办呢?
print(f)
# 要做矩阵转置可以用 `.transpose()`:
print(f.transpose())
# 矩阵展开成一个 1 维向量 `.flatten()`:
print(f.flatten())
# 转换成给定的形状(3 × 2)`.reshape(shape)`:
print(f.reshape((3, 2)))
# 同样地,我们可以对矩阵进行各种计算。比如说,如果我们要算 `f` 和它的转置的乘积 $\mathbf{F}\mathbf{F}^\top$ 的话,可以用 `np.matmul`:
ft = f.transpose()
np.matmul(f, ft)
# 或者用 `@` 来更简便地写矩阵乘积:
f @ ft
# ## 数组索引
# 如果我们想要访问 1 维向量里面的个别元素,可以用和 `list` 一样的方法 `a[index]`(注意索引 `index` 从 0 开始算):
# +
a = np.array([1, 2, 3, 4, 5, 6, 7, 8]) # array of length 8
print(a)
print(a[1]) # the 2nd element
print(a[1:]) # every element from the 2nd (inclusive) onward
print(a[:1]) # every element before the 2nd (exclusive)
print(a[1:3]) # elements from the 2nd (inclusive) to the 4th (exclusive)
# -
# 但是如果是多维数组,比如 2 维的矩阵或者 3 维的张量,怎么访问它的元素呢?我们可以在 `[]` 里间加逗号 `,` 分隔每个轴的索引:
# +
a = np.arange(9).reshape((3, 3)) # 3 × 3 matrix
print(a)
print(a[1, 2]) # row 2, column 3
print(a[1, 1:]) # row 2, columns 2 and above (2nd column inclusive onward)
print(a[1, :1]) # row 2, columns below the 2nd (exclusive)
print(a[1, :]) # row 2, all columns
print(a[1:, 0]) # rows 2 and onward, column 1
# -
# 同样地,3 维张量也可以做类似的操作:
# +
a = np.arange(27).reshape((3, 3, 3)) # 3 × 3 × 3 tensor
print(a)
print(a[0, 1, 2])
print(a[0, :, 2])
# and so on
# -
# ## 其它函数
# NumPy 提供了很多方便的数学函数,请见下面代码例子:
# +
a = np.array([1, 2, 3])
print(np.log(a)) # natural logarithm
print(np.exp(a)) # e raised to the power a
print(np.sin(a)) # sine
print(np.cos(a)) # cosine
print(np.sum(a)) # sum of the vector a (can also be written a.sum(); same for the rest)
print(np.mean(a)) # mean of a
print(np.std(a)) # standard deviation of a
print(np.max(a)) # maximum of a
print(np.min(a)) # minimum of a
print(np.argmax(a)) # index of the maximum of a
print(np.argmin(a)) # index of the minimum of a
# -
# 这些函数很好用,但是不需要一次全部记住,只要需要用的时候来看一眼它们的名字,或者网上查一下就可以了!
# ## 任务
# 你可以求出下面这个矩阵第 1 行、第 2 列的值的自然对数吗?
a = np.random.randn(3, 3) # np.random.randn 是创建随机化的数组的意思
a
# ## 小结
# 这一章我们学习了 NumPy 库里面的数组以及它们的各种运算方法。
| python-numpy/6. NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Satellite Image Analysis GGS416
#
# In this tutorial we are going to plot a histogram for one of our images.
#
# Let's begin by loading in many of the packages we have already worked with in this class.
#
# Specifically, `rasterio`, `numpy`, `math` and `matplotlib`.
#
# We can also load in one of our previously downloaded 4-band PlanetScope images.
# +
# %matplotlib inline
import rasterio
import numpy
import math
from matplotlib import pyplot as plt
# NOTE(review): `math` is imported but never used in this notebook.
# Our single 4 band (blue, green, red, NIR) PlanetScope image.
# The CRS is a UTM projection using meters
image_file = "../week3/20190321_174348_0f1a_3B_AnalyticMS.tif"
# Let's get our rasterio object:
my_image = rasterio.open(image_file)
# Bare expression: displays the DatasetReader repr in the notebook output.
my_image
# Now we have our image, we can parse out our four bands into individual variable names for blue, green, red and near-infrared.
# The PlanetScope band order is BGRN.
blue, green, red, nir = my_image.read()
blue
# Plotting in `matplotlib` takes a bit of effort, but is no different from any script-based plotting package.
#
# The benefit is that once you've written the code, it's very easy to rerun with new data (as opposed to needing to manually reproduce the graphic).
# Let's define a new figure
# The (1, 1) means we want one column and one row, for a single plot (as opposed to a panel plot).
fig, ax1 = plt.subplots(1,1)
# ## Quick Task
#
# Play around with changing the number of subplots and see what you are able to produce!
#
# Can you see how the structure of the plot changes?
# ### A Single Histogram Plot
#
# Now we can begin plotting.
#
# Below we are going to obtain those values in the blue band layer which are not null.
#
# Null data values are those with no data.
#
# You need to be aware of these null values, and ideally control for them.
# let's plot non-null blue band values as an example
# For the x-axis, we want to count every pixel that has a value
# Boolean mask keeps only pixels whose value differs from the dataset's nodata sentinel.
x = blue[numpy.not_equal(blue, my_image.nodata)]
x
# Now we can plot this data.
#
# See the way we create a single plot below for the frequency of blue values.
#
# First the `plt.subplots()` function is specified to set up figure and axis objects.
#
# Then we add our data to the axis using `ax.hist()`.
#
# We can also add a title to the plot using `ax.set_title()`.
# +
# Create just a figure and only one subplot
fig, ax = plt.subplots(1,1)
ax.hist(x)
# We can state the title for this plot too
ax.set_title('Histogram of Blue Values')
# -
# ## A Two-Plot Panel Plot
#
# Single plots are useful, but often we will want to be able to compared histograms against one another.
#
# Thus, panel plots are a very nice tool for this purpose.
#
# Let's examine creating a panel plot below for both blue and red bands:
# +
# Create two subplots and unpack the output array immediately
fig, (ax1, ax2) = plt.subplots(1, 2)
# Each histogram uses only non-nodata pixels of its band.
ax1.hist(blue[numpy.not_equal(blue, my_image.nodata)])
ax2.hist(red[numpy.not_equal(red, my_image.nodata)])
# We can state the title for these plots too
ax1.set_title('Blue Histogram')
ax2.set_title('Red Histogram')
# -
# While this looks nice, it would be more helpful to also have this suitably colored.
#
# Let's just use colors which match the bands for now.
#
# But be aware of more examples of `matplotlib` colors here:
#
# https://matplotlib.org/2.0.0/examples/color/named_colors.html
#
#
# +
# Create two subplots and unpack the output array immediately
fig, (ax1, ax2) = plt.subplots(1, 2)
# Let's get our data (non-nodata pixels only)
blue_data = blue[numpy.not_equal(blue, my_image.nodata)]
red_data = red[numpy.not_equal(red, my_image.nodata)]
# Specify the .hist() function to create the hist, colored to match each band
ax1.hist(blue_data, color='blue')
ax2.hist(red_data, color='red')
# Now add plot titles
ax1.set_title('Blue Histogram')
ax2.set_title('Red Histogram')
# -
# You just need to be aware when selecting colors for your histogram plots, that some people are colorblind.
#
# There are even packages which have pre-existing color sequences which are colorblind-friendly:
#
# https://github.com/sjmgarnier/viridis
#
# Also, be aware many academic journals may still print in black and white for the physical copies sent out to subscribers!
#
# This means, you may need to select color combinations which still differentiate when converted to black and white.
# ### A Four-Plot Panel Plot
#
# Next, we can play with creating a panel plot for the four colors.
#
# The trick here is understanding that we specify two different y axes (so here, the left and right):
# - ax1 (the left hand axis)
# - ax2 (the right hand axis)
#
# And then within these axes, we can specify the rows by using the row index (for the top and bottom plots). For example:
# - ax1[0] (the first plot in the left hand axis)
# - ax1[1] (the second plot in the left hand axis)
#
# Which means in total we have four plots, as follows:
# - ax1[0] (the first plot in the left hand axis)
# - ax1[1] (the second plot in the left hand axis)
# - ax2[0] (the first plot in the right hand axis)
# - ax2[1] (the second plot in the right hand axis)
#
# Let's have a look at how we implement this:
# +
# Create two subplots and unpack the output array immediately
# plt.subplots(2, 2) returns a 2x2 array of axes; unpacking row-wise gives
# ax1 = top row and ax2 = bottom row, each indexed by column.
fig, (ax1, ax2) = plt.subplots(2, 2)
# Let's get our data (non-nodata pixels only, one array per band)
blue_data = blue[numpy.not_equal(blue, my_image.nodata)]
red_data = red[numpy.not_equal(red, my_image.nodata)]
green_data = green[numpy.not_equal(green, my_image.nodata)]
nir_data = nir[numpy.not_equal(nir, my_image.nodata)]
# Specify the .hist() function to create the hist
ax1[0].hist(blue_data, color='blue')
ax1[1].hist(red_data, color='red')
ax2[0].hist(green_data, color='green')
ax2[1].hist(nir_data, color='grey')
# Now add plot titles
ax1[0].set_title('Blue Histogram')
ax1[1].set_title('Red Histogram')
ax2[0].set_title('Green Histogram')
ax2[1].set_title('NIR Histogram')
# This makes sure we have sufficient space between our plots
fig.tight_layout()
# -
# The thing to note here, is that we actually have the y-axis adjusting to maximize the available space.
#
# This means we have different y-axis ranges per histogram.
#
# Let's try implement a shared y axis, so they are all on the same scale and it's easier to make comparative differences.
#
# We just need to add the `sharey=True` argument into the `plt.subplots()` function:
#
# +
# Create two subplots and unpack the output array immediately
# sharey=True forces a common y-axis range so frequencies are directly comparable.
fig, (ax1, ax2) = plt.subplots(2, 2, sharey=True) #<- here is the difference as we state we want a shared axis
# Let's get our data (non-nodata pixels only)
blue_data = blue[numpy.not_equal(blue, my_image.nodata)]
red_data = red[numpy.not_equal(red, my_image.nodata)]
green_data = green[numpy.not_equal(green, my_image.nodata)]
nir_data = nir[numpy.not_equal(nir, my_image.nodata)]
# Specify the .hist() function to create the hist
ax1[0].hist(blue_data, color='blue')
ax1[1].hist(red_data, color='red')
ax2[0].hist(green_data, color='green')
ax2[1].hist(nir_data, color='grey')
# Now add plot titles
ax1[0].set_title('Blue Histogram')
ax1[1].set_title('Red Histogram')
ax2[0].set_title('Green Histogram')
ax2[1].set_title('NIR Histogram')
# This makes sure we have sufficient space between our plots
fig.tight_layout()
# -
# We have lots of functionality control over the `.hist()` function.
#
# Obviously, a histogram splits the data up into frequency bins.
#
# Thankfully, we have control over how many bins we desire. Let's try 15:
# +
# Create two subplots and unpack the output array immediately
fig, (ax1, ax2) = plt.subplots(2, 2, sharey=True) #<- here is the difference as we state we want a shared axis
# Let's get our data (non-nodata pixels only)
blue_data = blue[numpy.not_equal(blue, my_image.nodata)]
red_data = red[numpy.not_equal(red, my_image.nodata)]
green_data = green[numpy.not_equal(green, my_image.nodata)]
nir_data = nir[numpy.not_equal(nir, my_image.nodata)]
# Single knob controlling how many frequency bins every histogram uses.
bin_number = 15
# Specify the .hist() function to create the hist
ax1[0].hist(blue_data, color='blue', bins=bin_number)
ax1[1].hist(red_data, color='red', bins=bin_number)
ax2[0].hist(green_data, color='green', bins=bin_number)
ax2[1].hist(nir_data, color='grey', bins=bin_number)
# Now add plot titles
ax1[0].set_title('Blue Histogram')
ax1[1].set_title('Red Histogram')
ax2[0].set_title('Green Histogram')
ax2[1].set_title('NIR Histogram')
# This makes sure we have sufficient space between our plots
fig.tight_layout()
# -
# ## Quick Task:
#
# Explore the implications of changing the bin number and to see how it changes the shape of the histogram plots.
#
# ### Exporting our Histogram Plots
#
# To export the panelplot graphs produced we can, use the `fig.savefig()` function.
#
# We can apply this to our existing code, saving the image as a .png named 'histogram.png'.
#
# You also have lots of functionality, including:
# - dpi: The dots per square inch, which affects the quality.
# - pad_inches: the amount of whitespace padding desired around the plot image.
#
# Let's see how we execute this function:
# Save the generated figure to an external image file
# NOTE(review): this saves whichever figure `fig` last pointed at (the binned
# panel above); the final cell below overwrites histogram.png with its own figure.
fig.savefig("histogram.png", dpi=200, bbox_inches='tight', pad_inches=0.7)
# +
# Create two subplots and unpack the output array immediately
fig, (ax1, ax2) = plt.subplots(2, 2, sharey=True) #<- here is the difference as we state we want a shared axis
# Let's get our data (non-nodata pixels only)
blue_data = blue[numpy.not_equal(blue, my_image.nodata)]
red_data = red[numpy.not_equal(red, my_image.nodata)]
green_data = green[numpy.not_equal(green, my_image.nodata)]
nir_data = nir[numpy.not_equal(nir, my_image.nodata)]
bin_number = 15
# Specify the .hist() function to create the hist
ax1[0].hist(blue_data, color='blue', bins=bin_number)
ax1[1].hist(red_data, color='red', bins=bin_number)
ax2[0].hist(green_data, color='green', bins=bin_number)
ax2[1].hist(nir_data, color='grey', bins=bin_number)
# Now add plot titles
ax1[0].set_title('Blue Histogram')
ax1[1].set_title('Red Histogram')
ax2[0].set_title('Green Histogram')
ax2[1].set_title('NIR Histogram')
# Let's add an overall title to a plot
# The y argument moves the title higher, so we don't have overlapping text
plt.suptitle('Histogram Panel-Plot by Band', y=1.05)
# This makes sure we have sufficient space between our plots
fig.tight_layout()
# Now export the final plot!
fig.savefig("histogram.png", dpi=200, bbox_inches='tight', pad_inches=0.7)
| notebooks/week7/7.01_histograms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
# Monthly observations: mean temperature (Celsius) paired with ice-cream sales (dollars).
temp = [14.2, 16.4, 11.9, 15.2, 18.5, 22.1, 19.4, 25.1, 23.4, 18.1, 22.6, 17.2]
sales = [215, 325, 185, 332, 406, 522, 412, 614, 544, 421, 445, 408]
# +
# Tell matplotlib to create a scatter plot based upon the above data
# Without scoop_price
plt.scatter(temp, sales, marker="o", facecolors="red", edgecolors="black")
# BONUS: With scoop_price set to the scalar value
# scoop_price = [89, 18, 10, 28, 79, 46, 29, 38, 89, 26, 45, 62]
# plt.scatter(temp, sales, marker="o", facecolors="red", edgecolors="black", s=scoop_price)
# -
# Set the upper and lower limits of our y axis
# (these pyplot calls act on the current figure, even from separate notebook cells)
plt.ylim(180,620)
# Set the upper and lower limits of our x axis
plt.xlim(11,26)
# +
# Create a title, x label, and y label for our chart
plt.title("Ice Cream Sales v Temperature")
plt.xlabel("Temperature (Celsius)")
plt.ylabel("Sales (Dollars)")
# -
# Save an image of the chart and print to screen
# NOTE: If your plot shrinks after saving an image,
# update matplotlib to 2.2 or higher,
# or simply run the above cells again.
plt.savefig("../Images/IceCreamSales.png")
plt.show()
| 05-Matplotlib/1/Activities/12-Stu_ScatterPy/Solved/ice_cream_sales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# # Dimensionality Reduction
#
# In this notebook we use gensim and sklearn to reduce the dimensionality
# of our (extremely sparse and wide) input.
# + deletable=true editable=true
import pandas as pd
import numpy as np
import sys
sys.path.append("../..") # Append source directory to our Python path
from toxicity.predictor import Predictor
from toxicity.linear_predictor import LogisticPredictor, SVMPredictor
from common.nlp.preprocessing import *
from toxicity.utils import *
import nltk
DATA_ROOT = "../data/"
train = pd.read_csv("../data/train.csv")
test = pd.read_csv("../data/test.csv")
# + deletable=true editable=true
# Extract the true labels needed for training
# (presumably one binary label column per toxicity tag; TAGS comes from toxicity.utils -- confirm)
train_ys = {tag: train[tag].values for tag in TAGS}
# Extract the test set ids needed for submitting
ids = test['id']
# + [markdown] deletable=true editable=true
# ## Run the dimensionality reduction algorithms
#
# This will take a lot of time to run (around 20 minutes total on my machine). This is because the algorithm comprises
# several computationally expensive steps:
#
# 1. Tokenize text using NLTK's tokenizer.
# 2. Create the train and test corpora.
# 3. Get the TFIDF sparse representations.
# 4. Apply dimensionality reduction using Latent Semantic Analysis (LSA).
# + deletable=true editable=true
# LSA pipeline: tokenize (NLTK) -> build corpora -> TF-IDF -> TruncatedSVD down to 500 topics.
# Slow (~20 minutes); save=True presumably persists the reduced matrices to data_dir -- confirm.
train_x, test_x = truncatedsvd_preprocess(train, test, num_topics=500, use_own_tfidf=True, report_progress=True, data_dir=DATA_ROOT, save=True)
# + [markdown] deletable=true editable=true
# ## Feeding the reduced input to sklearn
#
# Let's how our reduced input does using an (untuned) classifier from `sklearn`.
# + deletable=true editable=true
# Create a linear SVM classifier (the original comment said "logistic regression",
# but SVMPredictor is what is actually instantiated here).
svm_params = {"C": 1, "dual": True}
predictor = SVMPredictor(**svm_params)
# Evaluate with a single train/validation split and report the log loss.
split_loss = predictor.evaluate(train_x, train_ys, method='split')
print("Split CV log loss: {}".format(split_loss))
# + [markdown] deletable=true editable=true
# ## Create a submission
#
# Let's use our classifier to create a sample submission and submit to [kaggle](https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/submit)
# + deletable=true editable=true
create_submission(predictor, train_x, train_ys, test_x, ids, '../submissions/using_lsi.csv')
# + [markdown] deletable=true editable=true
# ## Next steps
#
# We could improve this pipeline by carefully tuning the dimensionality reduction steps (trying another `gensim.model`) and a stronger classifier (perhaps `XGBoost`?)
#
#
| toxicity/notebooks/dimensionality_reduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sensitivity Analysis for Evapotranspiration and Runoff according to the input paramters in the Reynolds Mountain East catchment using pySUMMA
# ## 1. Introduction
# This study analyzes the sensitivity of evapotranspiration and runoff to spatially distributed input parameters, using a SUMMA model of the Reynolds Mountain East catchment. Twelve parameters were examined to assess their impact on evapotranspiration and runoff.
#
# In this Jupyter Notebook, the pySUMMA library is used to produce this analysis. First, the input parameters are described. Next, the Methods section describes how the pySUMMA can be used to
#
# 1) To explain the study area : the Reynolds Mountain East catchment
#
# 2) Download TestCases from HS and Installation to prepare SUMMA simulation
#
# 3) Create pySUMMA Simulation Object and set SUMMA executable
#
# 4) Create Param Trial obejct to read "Param Trial input netcdf file"
#
# 5) Running SUMMA with input parameters
#
# 6) Create Text from output netcdf of SUMMA
#
# Collectively, this Jupyter Notebook serves as an example of how hydrologic modeling can be conducted directly within a Jupyter Notebook by leveraging the pySUMMA library.
# ## 2. Background
# ### The Sensitivity Analysis with the input parameters in SUMMA
#import libraries to display equations within the notebook
from IPython.display import display, Math, Latex
# ### 1.) Input parameter in para_trial netcdf for SUMMA
# | Parameters | Min | Max | Step (Max-Min)/10 | Description |
# |:------------------:|------------|-------------|---------------------|:-------------|
# |critSoilTranspire | 0.0 | 1.0 | 0.1 | critical vol. liq. water content when transpiration is limited (-) |
# |critSoilWilting | 0.0 | 1.0 | 0.1 | critical vol. liq. water content when plants are wilting (-) |
# |k_soil | 0.0000001 | 0.00001 | 0.00000099 | hydraulic conductivity of soil (m s-1) |
# |qSurfScale | 1.0 | 100.0 | 9.9 | scaling factor in the surface runoff parameterization (-) |
# |rootDistExp | 0.01 | 1.0 | 0.099 | exponent controlling the vertical distribution of root density (-) |
# |summerLAI | 0.01 | 10.0 | 0.999 | maximum leaf area index at the peak of the growing season (m2 m-2) |
# |theta_mp | 0.3 | 0.6 | 0.03 | volumetric liquid water content when macropore flow begins (-) |
# |theta_res | 0.001 | 0.1 | 0.0099 | volumetric residual water content (-) |
# |theta_sat | 0.3 | 0.6 | 0.03 | porosity (-) |
# |vGn_alpha | -1.0 | -0.01 | 0.099 | van Genuchten "alpha" parameter (m-1) |
# |vGn_n | 1.0 | 3.0 | 0.2 | van Genuchten "n" parameter (-) |
# |winterSAI | 0.01 | 3.0 | 0.299 | stem area index prior to the start of the growing season (m2 m-2) |
# ## 3. Methods
# ### 1) Study Area
# #### The Reynolds Mountain East catchment is located in southwestern Idaho as shown in the figure below.
from ipyleaflet import Map, GeoJSON
import json
# Interactive map centered on the catchment, with the watershed outline
# loaded from a local GeoJSON file and added as an overlay.
m = Map(center=[43.06745, -116.75489], zoom=15)
with open('reynolds_geojson_latlon.geojson') as f:
    data = json.load(f)
g = GeoJSON(data=data)
m.add_layer(g)
m
# ### 2) Download TestCases from HS and Installation to prepare SUMMA simulation
# To authenticate using HTTP Basic authentication.
from hs_restclient import HydroShare, HydroShareAuthBasic
hs = HydroShare()
# import utils.py to download TestCases from HS, unzip and installation
from pysumma.utils import utils
# define directory where you save SUMMA TestCases
save_filepath = '/media/sf_pysumma'
# assign resource id of SUMMA TestCases on HS (Now I uploaded SUMMA TestCases in My Resources(status=Private) on HS )
resource_id = 'a0105d479c334764ba84633c5b9c1c01'
# call install_test_cases_hs method to download TestCase from HS, unzip and install the TestCase.
hs_path = utils.install_test_cases_hs(hs, resource_id, save_filepath)
# NOTE(review): this hard-coded path immediately overwrites the value returned
# by install_test_cases_hs above -- confirm which of the two is intended.
hs_path = '/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents'
# ### 3) Create pySUMMA Simulation Object and set SUMMA executable
from pysumma.Simulation import Simulation
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S = Simulation(hs_path + '/summaTestCases_2.x/settings/wrrPaperTestCases/figure07/summa_fileManager_riparianAspenSimpleResistance.txt')
# path to the locally compiled SUMMA executable
S.executable = "/media/sf_pysumma/compile/summa/bin/summa.exe"
# set the simulation start and finish times
S.decision_obj.simulStart.value = "1998-10-01 01:00"
S.decision_obj.simulFinsh.value = "2008-10-01 00:00"
# ### 4) Create Param Trial obejct to read "Param Trial input netcdf file"
import netCDF4 as nc
import numpy as np
# Full path to the parameter-trial netCDF file named in the SUMMA file manager.
Param_Trial_file = S.setting_path.value + S.para_trial.value
# NOTE(review): doubling the separators ("/" -> "//") is harmless for POSIX
# path resolution, but its purpose here is unclear -- confirm it is needed.
Param_Trial_file1 = Param_Trial_file.replace("/", "//")
# Open read/write so parameter values can be edited in place by the sweep cells below.
Param_Trial = nc.Dataset(Param_Trial_file1,'r+')
Param_Trial
# ### 6) Running SUMMA while sweeping each input parameter over its range
#
# The twelve near-identical sweep cells below were consolidated into a single
# helper.  Each sweep steps the parameter from min to max in (max - min) / 10
# increments, rewrites the parameter-trial netCDF, and runs SUMMA once per value.
def _sweep_parameter(name, min_para, max_para, n_steps=10):
    """Run SUMMA once per value of `name`, sweeping [min_para, max_para].

    The parameter-trial file is reopened fresh for every read/write and closed
    before SUMMA runs, so each run sees the flushed value on disk.  (The
    original cells read from a Dataset handle that the previous sweep had
    already closed, which fails when the notebook is run top to bottom.)
    Returns the (results, out_file) pair of the final run.
    """
    ds = nc.Dataset(Param_Trial_file1, 'r+')
    name_value = ds.variables[name][:]
    ds.close()
    delta = (max_para - min_para) / n_steps
    results = out_file = None
    for value in np.arange(min_para, max_para + delta, delta):
        name_value[0] = value
        ds = nc.Dataset(Param_Trial_file1, 'r+')
        ds.variables[name][:] = name_value
        ds.close()  # flush the new value before SUMMA reads the file
        results, out_file = S.execute(run_suffix=name + str(value), run_option='local')
    print(name_value[0])
    return results, out_file

# One sweep per spatial parameter (ranges taken from the table in section 2).
results_simpleResistance, out_file = _sweep_parameter('rootDistExp', 0.01, 1)
# NOTE(review): 'critSoilTranspire', 'critSoilWilting' and 'vGn_n' were marked
# "(Not Working)" in the original notebook -- confirm SUMMA accepts these
# parameters before relying on their output.
results_simpleResistance, out_file = _sweep_parameter('critSoilTranspire', 0, 1)
results_simpleResistance, out_file = _sweep_parameter('critSoilWilting', 0, 1)
results_simpleResistance, out_file = _sweep_parameter('k_soil', 0.0000001, 0.00001)
results_simpleResistance, out_file = _sweep_parameter('qSurfScale', 1, 100)
results_simpleResistance, out_file = _sweep_parameter('summerLAI', 0.01, 10)
results_simpleResistance, out_file = _sweep_parameter('theta_mp', 0.3, 0.6)
results_simpleResistance, out_file = _sweep_parameter('theta_res', 0.001, 0.1)
results_simpleResistance, out_file = _sweep_parameter('theta_sat', 0.3, 0.6)
results_simpleResistance, out_file = _sweep_parameter('vGn_alpha', -1, -0.01)
results_simpleResistance, out_file = _sweep_parameter('vGn_n', 1, 3)
results_simpleResistance, out_file = _sweep_parameter('winterSAI', 0.01, 3)
# ### 7) Create Text from output netcdf of SUMMA
from pysumma.Plotting import Plotting
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
import pandas as pd
import os
jtplot.figsize(x=10, y=10)
import glob
# Collect every SUMMA output netCDF produced by the parameter sweeps above.
aa = glob.glob(hs_path + "/summaTestCases_2.x/output/wrrPaperTestCases/figure07/*.nc")
# ### 7-1) Create Evapotranspiration Text file from SUMMA output
# Export the three hourly ET components (mm/hr) of every SUMMA output file
# to a tab-separated text file named after the netCDF file.
for path in aa:
    # iterate over what glob actually found instead of assuming exactly 120 files
    file = os.path.basename(path)
    output_evap = Plotting(hs_path + '/summaTestCases_2.x/output/wrrPaperTestCases/figure07/'+file)
    # kg m-2 s-1 -> mm/hr (multiply by 3600)
    sim_scalarCanopyTranspiration = output_evap.ds['scalarCanopyTranspiration']*3600
    sim_scalarCanopyEvaporation = output_evap.ds['scalarCanopyEvaporation']*3600
    # BUG FIX: this previously re-read 'scalarCanopyTranspiration', so the
    # "ground evaporation" column silently duplicated canopy transpiration.
    sim_scalarGroundEvaporation = output_evap.ds['scalarGroundEvaporation']*3600
    dates = sim_scalarCanopyTranspiration.coords['time'].data
    scalarCanopyTranspiration = pd.DataFrame(sim_scalarCanopyTranspiration.data, index=dates)
    scalarCanopyEvaporation = pd.DataFrame(sim_scalarCanopyEvaporation.data, index=dates)
    scalarGroundEvaporation = pd.DataFrame(sim_scalarGroundEvaporation.data, index=dates)
    ET_Combine = pd.concat([scalarCanopyTranspiration, scalarCanopyEvaporation, scalarGroundEvaporation], axis=1)
    ET_Combine.columns = ['scalarCanopyTranspiration', 'scalarCanopyEvaporation', 'scalarGroundEvaporation']
    ET_Combine.to_csv('{}.txt'.format(file), sep='\t', index=True)
# ### 7-2) Create Runoff Text file from SUMMA output
# Export hourly runoff (converted to mm/day) of every SUMMA output file.
for path in aa:
    # iterate over what glob actually found instead of assuming exactly 120 files
    file = os.path.basename(path)
    output_runoff = Plotting(hs_path + '/summaTestCases_2.x/output/wrrPaperTestCases/figure07/'+file)
    # m s-1 -> mm/day (multiply by 86,400,000)
    sim_runoff = output_runoff.ds['averageInstantRunoff']*86400000
    dates = sim_runoff.coords['time'].data
    sim_runoff_df = pd.DataFrame(sim_runoff.data, index=dates)
    # BUG FIX: this previously wrote '{}.txt' -- the exact same filenames as the
    # ET export in 7-1 -- so the runoff files clobbered the ET files.
    sim_runoff_df.to_csv('{}_runoff.txt'.format(file), sep='\t', index=True)
# ## 4. Results
# ### Recreate the Figure 7 plot from Clark et al., 2015: The total ET for the three different stomatal resistance methods
from pysumma.Plotting import Plotting
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
import pandas as pd
jtplot.figsize(x=10, y=10)
# #### 4.1) Create function to calculate Total ET from SUMMA output
def calc_total_et(et_output_df):
    """Mean diurnal cycle of total evapotranspiration, in mm/hr.

    Total ET = canopy transpiration + canopy evaporation + ground evaporation.
    The raw SUMMA rates (kg m-2 s-1) are converted to mm/hr, restricted to
    1 June - 20 Aug 2007, and averaged by hour of day (index 0-23).
    """
    # sum the three ET components, then convert kg m-2 s-1 -> mm/hr (x3600)
    components = (et_output_df['scalarCanopyTranspiration']
                  + et_output_df['scalarCanopyEvaporation']
                  + et_output_df['scalarGroundEvaporation'])
    total_et = components * 3600
    frame = pd.DataFrame(total_et.data, index=total_et.coords['time'].data)
    # snap timestamps such as 00:59:59.99 onto the whole hour
    frame.index = frame.index.round("H")
    # keep only the summer window, averaged hourly
    summer_hourly = frame.loc["2007-06-01":"2007-08-20"].resample("H").mean()
    # collapse to the mean value for each hour of the day
    return summer_hourly.groupby(summer_hourly.index.hour).mean()
# #### 4.2) Get hour of day output of the three stomatal resistance methods for the period 1 June to 20 August 2007
# NOTE(review): only the simple-resistance configuration was run in this
# notebook, so `results_BallBerry` and `results_Jarvis` are never defined here
# (this cell appears copied from the figure07 stomatal-resistance example).
# It will raise NameError unless those simulations are executed first -- confirm.
simResis_hour = calc_total_et(results_simpleResistance)
BallBerry_hour = calc_total_et(results_BallBerry)
Jarvis_hour = calc_total_et(results_Jarvis)
# #### 4.3) Combine the stomatal resistance parameterizations into a single Pandas Dataframe
# Combine each stomatal resistance parameterization into one table, column per method
ET_Combine = pd.concat([simResis_hour, BallBerry_hour, Jarvis_hour], axis=1)
# add label
ET_Combine.columns = ['Simple resistance', 'Ball-Berry', 'Jarvis']
ET_Combine
# #### 4.4) Add observation data in Aspen station in Reynolds Mountain East to the plot
# Create a pySUMMA Plotting object for the eddy-flux validation data.
Val_eddyFlux = Plotting(hs_path + '/summaTestCases_2.x/testCases_data/validationData/ReynoldsCreek_eddyFlux.nc')
#Val_eddyFlux = Plotting(hs_path + '/summaTestCases_2.x/testCases_data/validationData/ReynoldsCreek_eddyFlux.nc')
# Latent-heat flux observations (W m-2).
Obs_Evapotranspitaton = Val_eddyFlux.ds['LE-wpl']
dates = Obs_Evapotranspitaton.coords['time'].data
# Convert W m-2 -> mm/hr (1 W m-2 = 0.0864 MJ m-2 day-1; 1 MJ m-2 day-1 = 0.408 mm/day; /24 h).
data_values = Obs_Evapotranspitaton.data*0.0864*0.408/24
df = pd.DataFrame(data_values, index=dates)
# Export column 2 (the Aspen station) as a tab-separated file.
df[2].to_csv('obs_evapotranspiration.txt', sep='\t', index=True)
# Create a pySUMMA Plotting object for the streamflow validation data.
Val_Streamflow = Plotting(hs_path + '/summaTestCases_2.x/testCases_data/validationData/ReynoldsCreek_valData.nc')
# read Runoff data(Q) from validation netcdf file
obs_streamflow = Val_Streamflow.ds['Q']
# create dates(X-axis) attribute from validation netcdf file
dates = obs_streamflow.coords['time'].data
# Change unit from cm/hr to mm/day
data_values = obs_streamflow.data*24*10
df = pd.DataFrame(data_values, index=dates)
df.to_csv('obs_runoff.txt', sep='\t', index=True)
# read Total Evapotranspiration(LE-wpl) from validation netcdf file
Obs_Evapotranspitaton = Val_eddyFlux.ds['LE-wpl']
# create dates(X-axis) attribute from validation netcdf file
dates = Obs_Evapotranspitaton.coords['time'].data
# Change unit from Wm-2 to mm/hr (1 Wm-2 = 0.0864 MJm-2day-1, 1 MJm-2day-1 = 0.408 mmday-1, 1day = 24h)
data_values = Obs_Evapotranspitaton.data*0.0864*0.408/24
# create two dimensional tabular data structure
df = pd.DataFrame(data_values, index=dates)
# set the time period to display plot
df_filt = df.loc["2007-06-01":"2007-08-20"]
# Label the middle column as the Aspen observation station (of the three stations).
df_filt.columns = ['-','Observation (aspen)','-']
# Average for each (hour, minute) of the day across the whole period.
df_gp_hr = df_filt.groupby([df_filt.index.hour, df_filt.index.minute]).mean()
# Reset index so each row has an hour and a minute column.
df_gp_hr.reset_index(inplace=True)
# Fractional hour of day (hour + minute/60) for the scatter-plot x axis.
xvals = df_gp_hr.reset_index()['level_0'] + df_gp_hr.reset_index()['level_1']/60.
# #### 4.5) Plotting output of three different stomatal resistance parameterizations and observation data
# Plot the three stomatal resistance parameterizations as lines.
ET_Combine_Graph = ET_Combine.plot()
# Invert the y axis (ET plotted downward, matching the reference figure).
ET_Combine_Graph.invert_yaxis()
# Overlay the Aspen-station observations as a scatter.
ET_Combine_Graph.scatter(xvals, df_gp_hr['Observation (aspen)'])
# add x, y label
ET_Combine_Graph.set(xlabel='Time of day (hr)', ylabel='Total evapotranspiration (mm h-1) ')
# show up the legend
ET_Combine_Graph.legend()
jtplot.figsize(x=10, y=10)
# ## 5. Discussion
# As stated in Clark et al., 2015, the following insights can be gained from this analysis:
# * The simulations in Figure 7 illustrate substantial differences in the estimates of the diurnal cycle of transpiration depending on the choice of stomatal resistance parameterization.
# * The simple soil resistance parameterization [Liang et al., 1994], when combined with the two-source model of within-canopy and below-canopy turbulence [Choudhury and Monteith, 1988; Clark et al., 2015a], results in a substantial amount of transpiration at night when there is no light available for photosynthesis.
# * The physiological representations of transpiration – using the Jarvis and Ball-Berry parameterizations, as defined in Clark et al. [2015a] – have an explicit dependence on photosynthetically active radiation and show the expected result of zero transpiration during nighttime hours, resulting in a poor match with observations (Figure 7).
# * A striking result from Figure 7 is that the Ball-Berry parameterization underestimates evapotranspiration when applied using the default model parameters for stomatal resistance in combination with the particular choice of process parameterizations and parameters for soil hydrology.
# ## 6. Post notebook and simulation results back to Hydroshare
# HydroShare resource metadata: abstract, title, keywords and resource type.
abstract = 'output of SUMMA'
title = 'output of SUMMA'
keywords = ('SUMMA', 'Hydrologic Model')
rtype = 'GenericResource'
# `out_file` points at the SUMMA output file produced earlier in the notebook.
fpath = out_file
metadata = '[{"creator":{"name":"<NAME>"}}, {"creator":{"name":"<NAME>"}}]'
extra_metadata = '{"key-1": "value-1", "key-2": "value-2"}'
# Post the simulation results back to HydroShare as a new resource.
resource_id = hs.createResource(rtype, title, resource_file=fpath, keywords=keywords, abstract=abstract, metadata=metadata, extra_metadata=extra_metadata)
# #### Save this notebook file to add the notebook to the resource of summa output
# Add this notebook file to the resource holding the SUMMA output.
npath = '/media/sf_pysumma/pysumma/notebooks/pySUMMA_Demo_Example_Fig7_Docker_develop_ver.ipynb'
# NOTE(review): the id below is hard-coded; consider using the `resource_id`
# returned by createResource above instead of a fixed string.
resource_id = hs.addResourceFile('98e7b881e13540779bc3e8071c0a3afb', npath)
# ## 7. make this resource public
# check the resource id
C_resource_id = '98e7b881e13540779bc3e8071c0a3afb'
# make a resource public
hs.setAccessRules(C_resource_id, public=True)
| notebooks/pySUMMA_Example_TestCase7_Sensitivity_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost import plot_tree
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
# Z-scaled candidate variables plus fraud label and record number.
data = pd.read_csv("vars_final_zscale.csv")
data.head()
data.shape
# Split data into out-of-time (records after 16/11/1) and train/test pools.
oot_df=data[data['record']>833508]
trte_df=data[data['record']<=833508]
trte_df.shape
oot_df.shape
# The record number is only a split key; drop it before modeling.
oot_df=oot_df.drop(columns=['record'])
trte_df=trte_df.drop(columns=['record'])
def multipltrun(a=5, v=6, md=5, mf=5, ne=25):
    '''
    Run a RandomForest fraud model on several random train/test splits.

    Args:
        a (int): number of random train/test splits to evaluate (default 5).
        v (int): number of feature columns to use, taken in order after the
            label column (default 6, the top backward-selection variables).
        md (int): max_depth for the random forest.
        mf (int): min_samples_leaf for the random forest.
        ne (int): n_estimators for the random forest.

    FDR at 3% is computed per split by sorting records by fraud score
    (descending), keeping the top 3%, and dividing the frauds caught there
    by the total frauds in that set.

    Returns:
        pd.DataFrame with one row per split plus a 'mean' row, and columns
        'train', 'test', 'oot' holding the FDR@3% for each set.
    '''
    fdr_results = {"train": [], "test": [], "oot": []}
    for i in range(a):
        # Fresh train/test split for each trial (seeded for reproducibility).
        train, test = train_test_split(trte_df, test_size=0.2, random_state=i)
        # Column 0 is the label; columns 1..v are the candidate features.
        # An explicit dict replaces the fragile `vars()[name]` locals lookup
        # the original used to fetch per-set features/labels.
        datasets = {
            "train": (train.iloc[:, 1:v + 1], train["fraud_label"]),
            "test": (test.iloc[:, 1:v + 1], test["fraud_label"]),
            "oot": (oot_df.iloc[:, 1:v + 1], oot_df["fraud_label"]),
        }
        model = RandomForestClassifier(n_estimators=ne, max_depth=md,
                                       min_samples_leaf=mf, random_state=42)
        train_fea, train_lab = datasets["train"]
        model.fit(train_fea, train_lab)
        for name, (fea, lab) in datasets.items():
            # Score every record; predict_proba column 1 is P(fraud).
            prob = pd.DataFrame(model.predict_proba(fea))
            result = pd.concat([pd.DataFrame(lab).reset_index(), prob], axis=1)
            # Keep the top 3% of records by fraud score.
            top_rows = int(round(len(result) * 0.03))
            top3per = result.sort_values(by=1, ascending=False).head(top_rows)
            fdr = sum(top3per.loc[:, 'fraud_label']) / sum(result.loc[:, 'fraud_label'])
            fdr_results[name].append(fdr)
    fdr_df = pd.DataFrame(fdr_results)
    # Append the mean FDR across all splits as a final row.
    fdr_df.loc['mean'] = fdr_df.mean()
    return fdr_df
# Baseline run: 5 splits, 6 variables, default forest settings.
multipltrun()
# +
########## try different numbers of variables and tree settings
# -
multipltrun(a=5,v=17,md=5,mf=5,ne=25)
multipltrun(a=5,v=15,md=5,mf=5,ne=25)
multipltrun(a=5,v=17,md=5,mf=5,ne=100)
# %%time
multipltrun(a=5,v=17,md=7,mf=5,ne=100)
# %%time
multipltrun(a=5,v=17,md=10,mf=5,ne=100)
# %%time
multipltrun(a=5,v=17,md=10,mf=7,ne=100)
# %%time
multipltrun(a=5,v=17,md=15,mf=7,ne=100)  ############ take this as the best model set
# %%time
multipltrun(a=10,v=17,md=15,mf=8,ne=100)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import random
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from xgboost import plot_tree
# import neccessary packages
from sklearn.preprocessing import scale
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import mutual_info_classif
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.base import clone
from sklearn.datasets import make_classification
# Best configuration from the grid search above.
model = RandomForestClassifier(n_estimators = 100, max_depth = 15, min_samples_leaf = 8)
# +
# Fix: `metrics` was never imported (only individual names from
# sklearn.metrics were), so `metrics.roc_curve` raised NameError.
from sklearn.metrics import roc_curve
# NOTE(review): `model` above is never fitted, and `test_fea`/`test_lab`
# were locals of multipltrun, not module-level names — build a split, fit
# the model, and define those variables before running this cell.
y_pred_proba = model.predict_proba(test_fea)[::,1]
fpr, tpr, _ = roc_curve(test_lab, y_pred_proba)
auc = round(roc_auc_score(test_lab, y_pred_proba),3)
plt.plot(fpr,tpr,label="ROC curve, AUC="+str(auc))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate (1-Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.title('Receiver operating characteristic (ROC) for Random Forest (V=12)')
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=.8)
plt.show()
| Fraud/Random Forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="cYLZzIbjk62B"
# # Waymo Open Dataset Motion Tutorial
#
# - Website: https://waymo.com/open
# - GitHub: https://github.com/waymo-research/waymo-open-dataset
#
# This tutorial demonstrates:
# - How to decode and interpret the data.
# - How to train a simple model with Tensorflow.
#
# Visit the [Waymo Open Dataset Website](https://waymo.com/open) to download the full dataset.
#
# To use, open this notebook in [Colab](https://colab.research.google.com).
#
# Uncheck the box "Reset all runtimes before running" if you run this colab directly from the remote kernel. Alternatively, you can make a copy before trying to run it by following "File > Save copy in Drive ...".
# + [markdown] id="5ez4Nsk06Sqd"
# # Package installation
#
# Please follow the instructions in [tutorial.ipynb](https://github.com/waymo-research/waymo-open-dataset/blob/master/tutorial/tutorial.ipynb).
# + [markdown] id="wjT3Rdd4lSqC"
# # Imports and global definitions
# + id="xdEcN6WilcBn"
# Data location. Please edit.
# A tfrecord containing tf.Example protos as downloaded from the Waymo dataset
# webpage.
# Replace this path with your own tfrecords.
# FILENAME is a single training shard; DATASET_FOLDER is the dataset root.
FILENAME = '/media/robot/hdd/waymo_dataset/tf_example/training/training_tfexample.tfrecord-00000-of-01000'
DATASET_FOLDER = '/path/to/waymo_open_dataset_motion_v_1_1_0/uncompressed'
# + id="M5gzSlBTlTiS"
import math
import os
import uuid
import time
from matplotlib import cm
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import HTML
import itertools
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.metrics.python import config_util_py as config_util
from waymo_open_dataset.protos import motion_metrics_pb2
# Example field definition.
#
# The tf.Example feature spec for the Waymo motion dataset. The agent-state
# and traffic-light specs are generated programmatically because every time
# span ('current'/'past'/'future') shares the same per-field dtypes; the
# resulting dicts are identical to the fully spelled-out originals.

# Roadgraph: 20000 sampled map points with direction, id, type, validity, xyz.
roadgraph_features = {
    'roadgraph_samples/dir':
        tf.io.FixedLenFeature([20000, 3], tf.float32, default_value=None),
    'roadgraph_samples/id':
        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),
    'roadgraph_samples/type':
        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),
    'roadgraph_samples/valid':
        tf.io.FixedLenFeature([20000, 1], tf.int64, default_value=None),
    'roadgraph_samples/xyz':
        tf.io.FixedLenFeature([20000, 3], tf.float32, default_value=None),
}

# Features of other agents: up to 128 agents, each with per-step state for
# the current (1), past (10) and future (80) steps.
_NUM_AGENTS = 128
_AGENT_STEP_SPANS = {'current': 1, 'past': 10, 'future': 80}
_AGENT_FIELD_DTYPES = {
    'bbox_yaw': tf.float32,
    'height': tf.float32,
    'length': tf.float32,
    'timestamp_micros': tf.int64,
    'valid': tf.int64,
    'vel_yaw': tf.float32,
    'velocity_x': tf.float32,
    'velocity_y': tf.float32,
    'width': tf.float32,
    'x': tf.float32,
    'y': tf.float32,
    'z': tf.float32,
}
state_features = {
    'state/id':
        tf.io.FixedLenFeature([_NUM_AGENTS], tf.float32, default_value=None),
    'state/type':
        tf.io.FixedLenFeature([_NUM_AGENTS], tf.float32, default_value=None),
    'state/is_sdc':
        tf.io.FixedLenFeature([_NUM_AGENTS], tf.int64, default_value=None),
    'state/tracks_to_predict':
        tf.io.FixedLenFeature([_NUM_AGENTS], tf.int64, default_value=None),
}
for _span, _num_steps in _AGENT_STEP_SPANS.items():
    for _field, _dtype in _AGENT_FIELD_DTYPES.items():
        state_features[f'state/{_span}/{_field}'] = tf.io.FixedLenFeature(
            [_NUM_AGENTS, _num_steps], _dtype, default_value=None)

# Traffic lights: 16 lights with current (1) and past (10) step state; note
# the [num_steps, 16] shape — steps first, unlike the agent state above.
_TL_FIELD_DTYPES = {
    'state': tf.int64,
    'valid': tf.int64,
    'x': tf.float32,
    'y': tf.float32,
    'z': tf.float32,
}
traffic_light_features = {}
for _span, _num_steps in {'current': 1, 'past': 10}.items():
    for _field, _dtype in _TL_FIELD_DTYPES.items():
        traffic_light_features[f'traffic_light_state/{_span}/{_field}'] = (
            tf.io.FixedLenFeature([_num_steps, 16], _dtype, default_value=None))

features_description = {}
features_description.update(roadgraph_features)
features_description.update(state_features)
features_description.update(traffic_light_features)
# + [markdown] id="trAv9YGrvYnc"
# # Visualize TF Example sample
# + [markdown] id="iWnysu4X7Wkt"
# ## Create Dataset.
# -
# + id="TpEZq1EMtXV9"
dataset = tf.data.TFRecordDataset(FILENAME, compression_type='')
# + id="TpEZq1EMtXV9"
data = next(dataset.as_numpy_iterator())
parsed = tf.io.parse_single_example(data, features_description)
# -
# +
img = parsed["roadgraph_samples/xyz"].numpy() #.reshape(100,200,3)
# plt.imshow(img/img.max())
rg_pts = img[:, :2].T
plt.plot(rg_pts[0, 300:], rg_pts[1, 300:], 'k.', alpha=1, ms=0.01)
# -
parsed["state/future/valid"].numpy()[:20,::10]
# +
# print(np.unique(parsed["roadgraph_samples/id"].numpy().reshape(-1)))
# print()
# print(parsed['roadgraph_samples/type'])
# (parsed['roadgraph_samples/xyz'].numpy()[:,0] > -1).sum()
# -
print(parsed["state/current/x"].numpy()[parsed["state/current/valid"].numpy()>0].shape)
print(parsed["state/current/x"].numpy()[parsed["state/tracks_to_predict"].numpy()>0].shape)
print(parsed["state/future/x"].numpy()[parsed["state/future/valid"].numpy()>0].shape)
# print(parsed["state/current/valid"].numpy())
(parsed["state/past/x"].numpy()).shape
# # Torch dataset
import torch
from tfrecord.torch.dataset import TFRecordDataset
from tqdm import tqdm
# Minimal subset of the tf.Example feature spec needed for training, in the
# tfrecord library's notation ("float"/"int" per key).
context_description = {
    'roadgraph_samples/xyz': "float",
    "state/current/x": 'float',
    "state/current/y": 'float',
    "state/past/x": 'float',
    "state/past/y": 'float',
    "state/future/x": 'float',
    "state/future/y": 'float',
    "state/future/valid": 'int',
    "state/current/valid": "int",
    "state/past/valid": "int",
    "state/tracks_to_predict": "int",
}
# +
tbs = {}
PAD_WIDTH = 20000
# Single-shard dataset to smoke-test the torch pipeline.
dataset = TFRecordDataset(FILENAME,
                          index_path=None,
                          description=context_description,
                          # infinite=False
                          )
loader = torch.utils.data.DataLoader(dataset, batch_size=64)
data = next(iter(loader))
# -
iterator = iter(dataset)
# +
# Throughput check: drain the loader until the shard is exhausted
# (the trailing `break` stops after the first batch).
iterator = iter(loader)
for i in tqdm(range(100000)):
    try:
        data = next(iterator)
        list(data.keys())
    except StopIteration:
        break
    break
# +
# FILENAME
# -
import glob
# All 1000 training shards.
files = glob.glob("/media/robot/hdd/waymo_dataset/tf_example/training/training_tfexample.*-of-01000")
# +
from itertools import chain
class CustomImageDataset(torch.utils.data.IterableDataset):
    """Iterable dataset that streams tf.Example records from a directory of
    Waymo motion TFRecord shards, one shard after another.

    Args:
        tf_dir: directory containing `training_tfexample.*-of-01000` shards.
        context_desription: tfrecord feature description passed through to
            TFRecordDataset (parameter name kept for caller compatibility).
        transform / target_transform: accepted for API compatibility; records
            are yielded exactly as decoded by TFRecordDataset.
    """

    def __init__(self, tf_dir, context_desription, transform=None, target_transform=None):
        self.tf_dir = tf_dir
        self.context_desription = context_desription
        # Fix: honour `tf_dir` instead of the hard-coded absolute path the
        # original globbed; sorted() makes the shard order deterministic.
        self.tf_files = sorted(
            glob.glob(os.path.join(tf_dir, "training_tfexample.*-of-01000")))
        self.transform = transform
        self.target_transform = target_transform

    def __iter__(self):
        # Fix: build a fresh lazy chain on every call so the dataset can be
        # iterated more than once (the original chained onto one shared,
        # already-consumed iterator). Shards are opened on demand, in order.
        return chain.from_iterable(
            iter(TFRecordDataset(path,
                                 index_path=None,
                                 description=self.context_desription))
            for path in self.tf_files)
# -
tfrecord_patth = "/media/robot/hdd/waymo_dataset/tf_example/training/"
dataset = CustomImageDataset(tfrecord_patth, context_description)
loader = torch.utils.data.DataLoader(dataset, batch_size=4)
# Smoke-test: pull one batch through the multi-shard dataset.
iterator = iter(loader)
for i in tqdm(range(1000)):
    data = next(iterator)
    data["roadgraph_samples/xyz"]
    break
# Inspect the shape of every tensor in the batch.
for key in data.keys():
    print(key, data[key].shape)
# +
# Current (1 step) and past (10 steps) x/y stacked into (bs, 128, 11, 2).
cur = torch.cat([data["state/current/x"].reshape(-1,1,128,1), data["state/current/y"].reshape(-1,1,128,1)], -1)
past = torch.cat([data["state/past/x"].reshape(-1,10,128,1), data["state/past/y"].reshape(-1,10,128,1)], -1)
state = torch.cat([cur, past], 1).permute(0,2,1,3) #.reshape(-1,11,128*2)
state.shape
# -
import torch.nn as nn
class Model(nn.Module):
    """Three-stage transformer baseline for Waymo motion prediction.

    forward() encodes the roadgraph into 128 agent slots, encodes each
    agent's 11-step history (current + 10 past), fuses both with a third
    transformer, and decodes 80 future (x, y) deltas per agent.
    All heavy tensors are moved to CUDA inside forward().
    """

    def __init__(self):
        super().__init__()
        # Stage 1: roadgraph tokens -> per-agent map embedding (500 -> 64).
        self.tr = nn.Transformer(d_model=500, nhead=4, num_encoder_layers=4) #.cuda()
        self.lin_xyz = nn.Linear(3,2) #.cuda()
        self.lin_xyz_post = nn.Linear(500,64)
        # Stage 2: 11 steps of (x, y) history -> 24-d motion embedding.
        self.hist_tr = nn.Transformer(d_model=24, nhead=4, num_encoder_layers=4) #.cuda()
        self.lin_hist = nn.Linear(22,24) #.cuda()
        # Stage 3: fused 88-d features -> 160-d (= 80 steps x 2) trajectory.
        self.future_tr = nn.Transformer(d_model=160, nhead=4, num_encoder_layers=4) #.cuda()
        self.lin_fut = nn.Linear(88,160)
        # self.dec = nn.Sequential(nn.Linear(20+160, ))
        self.dec = nn.Sequential(nn.Linear((20+160), 160),
                                 nn.ReLU(),
                                 nn.Linear(160,160))

    def forward(self, data):
        """Map a decoded batch dict to (bs, 128, 80, 2) predicted deltas."""
        bs = data["roadgraph_samples/xyz"].shape[0]
        # Roadgraph embedding: (bs, 20000, 3) -> tokens of width 500.
        src = self.lin_xyz(data["roadgraph_samples/xyz"].reshape(bs,-1,3).cuda()).reshape(bs,-1,500)
        # NOTE(review): random decoder queries, regenerated every call.
        tgt = torch.rand(bs, 128, 500).cuda()
        out_0 = self.tr(src.permute(1,0,2), tgt.permute(1,0,2)).permute(1,0,2)
        out_0 = self.lin_xyz_post(out_0)
        # Agent history: current + past (x, y) -> (bs, 128, 22) -> (bs, 128, 24).
        cur = torch.cat([data["state/current/x"].reshape(-1,1,128,1), data["state/current/y"].reshape(-1,1,128,1)], -1)
        past = torch.cat([data["state/past/x"].reshape(-1,128,10,1), data["state/past/y"].reshape(-1,128,10,1)], -1).permute(0,2,1,3)
        state = torch.cat([cur, past], 1).permute(0,2,1,3).reshape(-1,128,11*2).cuda()
        state = self.lin_hist(state)
        tgt = torch.rand(bs, 128, 24).cuda()
        out_1 = self.hist_tr(state.permute(1,0,2), tgt.permute(1,0,2)).permute(1,0,2)
        # Fuse map (64) + history (24) features into 160-d future tokens.
        out_2 = self.lin_fut(torch.cat([out_0, out_1], -1))
        # Decode against the current position repeated over all 80 steps.
        future_tgt = cur.reshape(-1,1,128,2).repeat(1,80,1,1).permute(0,2,1,3).reshape(-1,128,160).cuda()
        out_3 = self.future_tr(out_2.permute(1, 0, 2), future_tgt.permute(1, 0, 2))
        out_3 = out_3.permute(1,0,2).reshape(-1, 128, 80*2) # bs, 128, 80 ,2
        # Final MLP head conditioned on the raw past track (20 = 10 steps x 2).
        fin_input = torch.cat([past.permute(0,2,1,3).reshape(-1, 128, 20).cuda(), out_3],-1)
        out = self.dec(fin_input).reshape(-1,128,80,2)
        return out

    def get_gt(self, data):
        """Ground-truth future positions, shape (bs, 128, 80, 2)."""
        # Fix: `bs` was previously an undefined name here (it was a local of
        # forward); derive the batch size from the data itself.
        bs = data["state/future/x"].shape[0]
        gt_fut = torch.cat([data["state/future/x"].reshape(bs,128,80,1),data["state/future/y"].reshape(bs,128,80,1)], -1)
        return gt_fut

    def get_mask(self, data):
        """Binary mask of the tracks scored by the benchmark."""
        mask = data["state/tracks_to_predict"]
        return mask

    def get_n_params(self):
        """Total number of scalar parameters in the model."""
        # Simplified from a hand-rolled size product whose accumulator
        # shadowed the `nn` module name.
        return sum(p.numel() for p in self.parameters())
# print(data["state/future/valid"].reshape(4, 128, -1)[0, 10:40, ::10])
# print(data["state/tracks_to_predict"].shape)
torch.cumsum(torch.cat([torch.ones(5).reshape(1,5), 2*torch.ones(5).reshape(1,5)]),1)
# +
def get_future(data):
    """Stack future x/y coordinates into a (bs, 80, 128, 2) tensor."""
    xs = data["state/future/x"]
    batch = xs.shape[0]
    coords = torch.stack(
        [xs.reshape(batch, 128, 80),
         data["state/future/y"].reshape(batch, 128, 80)],
        dim=-1,
    )
    # (bs, 128, 80, 2) -> (bs, 80, 128, 2): time-major for downstream diffs.
    return coords.permute(0, 2, 1, 3)
def get_current(data):
    """Stack current x/y coordinates into a (bs, 1, 128, 2) tensor."""
    x = data["state/current/x"].reshape(-1, 1, 128)
    y = data["state/current/y"].reshape(-1, 1, 128)
    return torch.stack((x, y), dim=-1)
def get_future_speed(data, num_ped=128, future_steps=80):
    """Convert future positions to per-step displacements ("speeds").

    Element t is position[t] - position[t-1]; the current position serves
    as the predecessor of the first future step.
    Returns a (bs, future_steps, num_ped, 2) tensor.
    """
    batch = data["state/future/x"].shape[0]
    future = get_future(data)
    assert future.shape == torch.Size([batch, future_steps, num_ped, 2])
    current = get_current(data)
    assert current.shape == torch.Size([batch, 1, num_ped, 2])
    # Prepend the current position, then take first differences along time.
    return torch.diff(torch.cat([current, future], dim=1), dim=1)
def get_valid_data_mask(data):
    """Per-(agent, future-step) validity mask of shape (bs, 128, 80).

    A future step counts as valid only when the step itself is valid, the
    current step is valid, and all 10 past steps are valid.
    """
    batch = data["state/future/x"].shape[0]
    future_ok = data["state/future/valid"].reshape(batch, 128, -1)
    current_ok = data["state/current/valid"].reshape(batch, 128, -1) > 0
    past_ok = (data["state/past/valid"].reshape(batch, 128, 10).sum(2) == 10).reshape(batch, 128, 1)
    return future_ok * current_ok * past_ok
def pred_to_future(data, pred, num_ped=128, future_steps=80):
    """Integrate predicted per-step displacements into absolute positions.

    Note: mutates `pred` in place by adding the current position to the
    first predicted step before the cumulative sum (matching training usage).
    """
    batch = data["state/future/x"].shape[0]
    anchor = get_current(data).reshape(-1, 128, 2)
    assert pred.shape == torch.Size([batch, num_ped, future_steps, 2])
    pred[:, :, 0] += anchor.to(pred.device)
    return torch.cumsum(pred, 2)
def get_ade_fde_with_mask(data, pred, num_ped=128, future_steps=80):
    """Masked per-step L2 errors between predicted and ground-truth *speeds*
    (per-step displacements), flattened to a 1-D tensor.

    Only (agent, step) pairs that are flagged in `state/tracks_to_predict`
    AND valid per get_valid_data_mask contribute. The ground truth is moved
    to CUDA, so `pred` is expected to live on the CUDA device.
    """
    bs = data["state/future/x"].shape[0]
    assert pred.shape == torch.Size([bs, num_ped, future_steps, 2])
    # Reorder prediction to time-major to align with get_future_speed output.
    pred = pred.permute(0,2,1,3) # pred bs ,80, 128, 2
    gt_fut = get_future(data)
    assert gt_fut.shape == torch.Size([bs, future_steps, num_ped , 2])
    cur = get_current(data)
    assert cur.shape == torch.Size([bs, 1, num_ped, 2])
    gt_fut_speed = get_future_speed(data)
    # Per-step Euclidean error over the (x, y) pair.
    dist = torch.norm(pred - gt_fut_speed.cuda(), dim=3)
    valid = get_valid_data_mask(data)
    # Restrict to tracks flagged for prediction AND to valid steps.
    mask = data["state/tracks_to_predict"].reshape(-1, 128, 1).repeat(1,1,80) * valid
    mask = mask.permute(0,2,1)
    dist = dist[mask>0]
    # dist = dist[dist<500]
    return dist
def ade_loss(data, pred):
    """Mean masked displacement error between predicted and true speeds."""
    return get_ade_fde_with_mask(data, pred).mean()
# +
# data["state/future/valid"].reshape(8, 128, -1).shape
# data["state/current/valid"].reshape(8, 128, -1).shape
# data["state/future/valid"].reshape(8, 128, -1) * data["state/current/valid"].reshape(8, 128, -1)
# -
# Sanity check: float * bool tensor arithmetic.
torch.tensor([1.]) * (torch.tensor([0])>0)
# out.shape == torch.Size([4, 128, 80 ,2])
# Forward one batch through a fresh model and inspect the masked error.
m = Model().cuda()
out = m(data)
ade = get_ade_fde_with_mask(data, out)
ade.mean()
import matplotlib.pyplot as plt
# Error distribution and its largest outliers.
plt.hist(ade.detach().cpu())
ade.detach().cpu().sort().values[-40:]
# # TORCH TRAINING
batch_size = 8
# torch.cuda.empty_cache()
# Fresh multi-shard dataset/loader for training.
tfrecord_path = "/media/robot/hdd/waymo_dataset/tf_example/training/"
dataset = CustomImageDataset(tfrecord_path, context_description)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size)
import torch.optim as optim
device = "cuda"
net = Model()
criterion = ade_loss
optimizer = optim.Adam(net.parameters(), lr=4e-4)
net = net.to(device)
# Overfit test: repeatedly fit a single batch (disabled with `if 0`).
# with torch.autograd.detect_anomaly():
if 0:
    pbar = tqdm(range(1000))
    for chank in pbar:
        optimizer.zero_grad()
        outputs = net(data)
        loss = ade_loss(data, outputs)
        loss.backward()
        optimizer.step()
        pbar.set_postfix({"loss": loss.detach().item()})
# Empty rolling loss history for the live training plot.
losses = torch.rand(0)
import matplotlib.pyplot as plt
# +
from tqdm.auto import tqdm
from IPython.display import clear_output
from IPython import display
from ipywidgets import Output
# -
# +
out = Output()
display.display(out)
for epoch in range(2):  # loop over the dataset multiple times
    pbar = tqdm(loader)
    for chank, data in enumerate(pbar):
        optimizer.zero_grad()
        outputs = net(data)
        loss = ade_loss(data, outputs)
        loss.backward()
        optimizer.step()
        # Metrics only; no gradient needed.
        with torch.no_grad():
            speed_ade = get_ade_fde_with_mask(data, outputs)
        # Append to the rolling loss history used for the live plot.
        losses = torch.cat([losses, torch.tensor([loss.detach().item()])], 0)
        pbar.set_postfix({"loss": losses.mean().item(),
                          "median": speed_ade.median().item(),
                          "max": speed_ade.max().item()})
        # Keep only the most recent window of losses.
        if len(losses)>500:
            losses = losses[100:]
        # Refresh the inline loss curve roughly every 30 steps.
        if (len(losses)+1) % 30 == 0:
            with out:
                display.clear_output(wait=True)
                plt.plot(losses)
                plt.show()
# -
# Evaluate the trained net on the last batch.
outputs = net(data)
loss = ade_loss(data, outputs )
# +
# print(outputs.shape)
# Integrate predicted deltas back into absolute positions.
pred_poses = pred_to_future(data, outputs).detach().cpu()
future = get_future_speed(data).permute(0,2,1,3)
print(future.shape)
print(pred_poses.shape)
mask = get_valid_data_mask(data)
# print(mask.shape)
# Agents valid for all 80 future steps.
mask = (mask.sum(2)==80)
# mask[0]
# (outputs.detach().cpu() - future)[0,0,:10]
print()
outputs[0,:3,:10]
# -
# Histogram of x-axis speed errors over valid future steps.
gt = data["state/future/x"].reshape(8, 128, 80)[:,:,1:] - data["state/future/x"].reshape(8, 128, 80)[:,:, :-1]
fut_val = (data["state/future/valid"]>0).reshape(-1,128,80)[:,:,1:]
arr = (outputs[:,:,1:, 0] - gt.cuda())[fut_val>0].detach().cpu().pow(2).sqrt().numpy()
plt.hist(arr, bins=100)
# + [markdown] id="Zdc8CBg27dtn"
# ## Generate visualization images.
# + id="utTE9Mtgx3Fq"
def create_figure_and_axes(size_pixels):
    """Initializes a unique figure and axes for plotting."""
    # A uuid as the figure id guarantees a brand-new figure on every call.
    fig, ax = plt.subplots(1, 1, num=uuid.uuid4())
    # Match the requested pixel resolution at a fixed dpi.
    dpi = 100
    side_inches = size_pixels / dpi
    fig.set_size_inches([side_inches, side_inches])
    fig.set_dpi(dpi)
    # White canvas with black axis labels and tick marks.
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    for axis_obj, axis_name in ((ax.xaxis, 'x'), (ax.yaxis, 'y')):
        axis_obj.label.set_color('black')
        ax.tick_params(axis=axis_name, colors='black')
    fig.set_tight_layout(True)
    ax.grid(False)
    return fig, ax
def fig_canvas_image(fig):
    """Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb()."""
    # Leave just enough margin to display the tick labels before rasterizing.
    fig.subplots_adjust(left=0.08, bottom=0.08, right=0.98, top=0.98,
                        wspace=0.0, hspace=0.0)
    fig.canvas.draw()
    buf = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    height_width = fig.canvas.get_width_height()[::-1]
    return buf.reshape(height_width + (3,))
def get_colormap(num_agents):
    """Compute a color map array of shape [num_agents, 4]."""
    palette = cm.get_cmap('jet', num_agents)(range(num_agents))
    # Shuffle so adjacent agent ids don't get near-identical colors.
    np.random.shuffle(palette)
    return palette
def get_viewport(all_states, all_states_mask):
    """Gets the region containing the data.

    Args:
      all_states: states of agents as an array of shape [num_agents,
        num_steps, 2].
      all_states_mask: binary mask of shape [num_agents, num_steps] for
        `all_states`.

    Returns:
      center_y: float. y coordinate for center of data.
      center_x: float. x coordinate for center of data.
      width: float. Width of data.
    """
    valid = all_states[all_states_mask]
    ys = valid[..., 1]
    xs = valid[..., 0]
    # Midpoint of each axis extent; the viewport is square, so its width is
    # the larger of the two spans.
    center_y = (np.max(ys) + np.min(ys)) / 2
    center_x = (np.max(xs) + np.min(xs)) / 2
    width = max(np.ptp(ys), np.ptp(xs))
    return center_y, center_x, width
def visualize_one_step(states, mask, roadgraph, title, center_y, center_x,
                       width, color_map, size_pixels=1000):
    """Generate visualization for a single step."""
    fig, ax = create_figure_and_axes(size_pixels=size_pixels)

    # Roadgraph as small black dots.
    rg_xy = roadgraph[:, :2].T
    ax.plot(rg_xy[0, :], rg_xy[1, :], 'k.', alpha=1, ms=2)

    # Agent positions, colored per agent; masked-out agents are hidden.
    ax.scatter(states[:, 0][mask], states[:, 1][mask], marker='o',
               linewidths=3, color=color_map[mask])

    ax.set_title(title)

    # Square viewport centered on the data, at least 10 m on a side.
    size = max(10, width * 1.0)
    ax.axis([center_x - size / 2, center_x + size / 2,
             center_y - size / 2, center_y + size / 2])
    ax.set_aspect('equal')

    image = fig_canvas_image(fig)
    plt.close(fig)
    return image
def visualize_all_agents_smooth(
    decoded_example,
    size_pixels=1000,
):
  """Visualizes all agent predicted trajectories in a series of images.

  Args:
    decoded_example: Dictionary containing agent info about all modeled agents.
    size_pixels: The size in pixels of the output image.

  Returns:
    List of [H, W, 3] uint8 np.arrays of the drawn matplotlib figure canvas,
    one per time step (all past steps, then current, then all future steps).
  """
  # [num_agents, num_past_steps, 2] float32.
  past_states = tf.stack(
      [decoded_example['state/past/x'], decoded_example['state/past/y']],
      -1).numpy()
  past_states_mask = decoded_example['state/past/valid'].numpy() > 0.0

  # [num_agents, 1, 2] float32.
  current_states = tf.stack(
      [decoded_example['state/current/x'], decoded_example['state/current/y']],
      -1).numpy()
  current_states_mask = decoded_example['state/current/valid'].numpy() > 0.0

  # [num_agents, num_future_steps, 2] float32.
  future_states = tf.stack(
      [decoded_example['state/future/x'], decoded_example['state/future/y']],
      -1).numpy()
  future_states_mask = decoded_example['state/future/valid'].numpy() > 0.0

  # [num_points, 3] float32.
  roadgraph_xyz = decoded_example['roadgraph_samples/xyz'].numpy()

  num_agents, num_past_steps, _ = past_states.shape
  num_future_steps = future_states.shape[1]

  color_map = get_colormap(num_agents)

  # [num_agents, num_past_steps + 1 + num_future_steps, depth] float32.
  all_states = np.concatenate([past_states, current_states, future_states], 1)

  # [num_agents, num_past_steps + 1 + num_future_steps] bool.
  all_states_mask = np.concatenate(
      [past_states_mask, current_states_mask, future_states_mask], 1)

  # Shared viewport so every frame is rendered with the same extent.
  center_y, center_x, width = get_viewport(all_states, all_states_mask)

  images = []

  # Generate images from past time steps.
  for i, (s, m) in enumerate(
      zip(
          np.split(past_states, num_past_steps, 1),
          np.split(past_states_mask, num_past_steps, 1))):
    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,
                            'past: %d' % (num_past_steps - i), center_y,
                            center_x, width, color_map, size_pixels)
    images.append(im)

  # Generate one image for the current time step.
  s = current_states
  m = current_states_mask

  im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz, 'current', center_y,
                          center_x, width, color_map, size_pixels)
  images.append(im)

  # Generate images from future time steps.
  for i, (s, m) in enumerate(
      zip(
          np.split(future_states, num_future_steps, 1),
          np.split(future_states_mask, num_future_steps, 1))):
    im = visualize_one_step(s[:, 0], m[:, 0], roadgraph_xyz,
                            'future: %d' % (i + 1), center_y, center_x, width,
                            color_map, size_pixels)
    images.append(im)

  return images
# Render one frame per past/current/future step of the parsed scenario.
images = visualize_all_agents_smooth(parsed)
# + [markdown] id="OrIZjUHG7hM3"
# ## Display animation.
# + id="tt2IeGiG0eny"
def create_animation(images):
  """Creates a Matplotlib animation of the given images.

  Args:
    images: A list of numpy arrays representing the images.

  Returns:
    A matplotlib.animation.Animation.

  Usage:
    anim = create_animation(images)
    anim.save('/tmp/animation.avi')
    HTML(anim.to_html5_video())
  """
  # Temporarily disable interactive mode so the figure is not shown while
  # it is being set up.
  plt.ioff()
  fig, ax = plt.subplots()
  dpi = 100
  size_inches = 1000 / dpi
  fig.set_size_inches([size_inches, size_inches])
  plt.ion()

  def animate_func(i):
    ax.imshow(images[i])
    ax.set_xticks([])
    ax.set_yticks([])
    # Bug fix: grid('off') passed a truthy string, which *enabled* the grid;
    # grid(False) actually hides it.
    ax.grid(False)

  # NOTE(review): only half of the frames are rendered; confirm whether
  # `frames=len(images)` was intended.
  anim = animation.FuncAnimation(
      fig, animate_func, frames=len(images) // 2, interval=100)
  plt.close(fig)
  return anim
# anim = create_animation(images[::5])
# HTML(anim.to_html5_video())
# -
# +
import os
if os.path.exists('tutorial_local.ipynb'):
# in case it is executed as a Jupyter notebook from the tutorial folder.
os.chdir('../')
fake_predictions_path = '{pyglib_resource}waymo_open_dataset/metrics/tools/fake_predictions.bin'.format(pyglib_resource='')
fake_ground_truths_path = '{pyglib_resource}waymo_open_dataset/metrics/tools/fake_ground_truths.bin'.format(pyglib_resource='')
bin_path = 'bazel-bin/{pyglib_resource}waymo_open_dataset/metrics/tools/compute_detection_metrics_main'.format(pyglib_resource='')
frames_path = '{pyglib_resource}tutorial/frames'.format(pyglib_resource='')
point_cloud_path = '{pyglib_resource}tutorial/3d_point_cloud.png'.format(pyglib_resource='')
# -
# !{bin_path} {fake_predictions_path} {fake_ground_truths_path}
# + [markdown] id="wdOQTZAiuKdQ"
# # Simple MLP model with TF
#
# Note that this is a very simple example model to demonstrate inputs parsing and metrics computation. Not at all competitive.
# + id="b_5G9lx9uK9B"
def _parse(value):
  """Parses one serialized tf.Example into model inputs.

  Args:
    value: Scalar string tensor holding a serialized tf.Example; the feature
      spec comes from the module-level `features_description`.

  Returns:
    Dict with input states, ground-truth states, validity masks, object
    types, tracks-to-predict flags and a per-sample validity flag.
  """
  decoded_example = tf.io.parse_single_example(value, features_description)

  # [num_agents, num_past_steps, 7]: x, y, length, width, yaw, vx, vy.
  past_states = tf.stack([
      decoded_example['state/past/x'], decoded_example['state/past/y'],
      decoded_example['state/past/length'], decoded_example['state/past/width'],
      decoded_example['state/past/bbox_yaw'],
      decoded_example['state/past/velocity_x'],
      decoded_example['state/past/velocity_y']
  ], -1)

  # [num_agents, 1, 7], same feature order as above.
  cur_states = tf.stack([
      decoded_example['state/current/x'], decoded_example['state/current/y'],
      decoded_example['state/current/length'],
      decoded_example['state/current/width'],
      decoded_example['state/current/bbox_yaw'],
      decoded_example['state/current/velocity_x'],
      decoded_example['state/current/velocity_y']
  ], -1)

  # The model consumes only (x, y) over past + current steps.
  input_states = tf.concat([past_states, cur_states], 1)[..., :2]

  future_states = tf.stack([
      decoded_example['state/future/x'], decoded_example['state/future/y'],
      decoded_example['state/future/length'],
      decoded_example['state/future/width'],
      decoded_example['state/future/bbox_yaw'],
      decoded_example['state/future/velocity_x'],
      decoded_example['state/future/velocity_y']
  ], -1)

  # Full 7-feature trajectory over all steps; the metrics code needs this.
  gt_future_states = tf.concat([past_states, cur_states, future_states], 1)

  past_is_valid = decoded_example['state/past/valid'] > 0
  current_is_valid = decoded_example['state/current/valid'] > 0
  future_is_valid = decoded_example['state/future/valid'] > 0
  gt_future_is_valid = tf.concat(
      [past_is_valid, current_is_valid, future_is_valid], 1)

  # If a sample was not seen at all in the past or current steps, we declare
  # the sample as invalid.
  sample_is_valid = tf.reduce_any(
      tf.concat([past_is_valid, current_is_valid], 1), 1)

  inputs = {
      'input_states': input_states,
      'gt_future_states': gt_future_states,
      'gt_future_is_valid': gt_future_is_valid,
      'object_type': decoded_example['state/type'],
      'tracks_to_predict': decoded_example['state/tracks_to_predict'] > 0,
      'sample_is_valid': sample_is_valid,
  }
  return inputs
def _default_metrics_config():
  """Builds the MotionMetricsConfig used for this tutorial's evaluation."""
  config_text = """
  track_steps_per_second: 10
  prediction_steps_per_second: 2
  track_history_samples: 10
  track_future_samples: 80
  speed_lower_bound: 1.4
  speed_upper_bound: 11.0
  speed_scale_lower: 0.5
  speed_scale_upper: 1.0
  step_configurations {
    measurement_step: 5
    lateral_miss_threshold: 1.0
    longitudinal_miss_threshold: 2.0
  }
  step_configurations {
    measurement_step: 9
    lateral_miss_threshold: 1.8
    longitudinal_miss_threshold: 3.6
  }
  step_configurations {
    measurement_step: 15
    lateral_miss_threshold: 3.0
    longitudinal_miss_threshold: 6.0
  }
  max_predictions: 6
  """
  # text_format.Parse populates and returns the message it is given.
  return text_format.Parse(config_text,
                           motion_metrics_pb2.MotionMetricsConfig())
class SimpleModel(tf.keras.Model):
  """A simple one-layer regressor.

  Flattens each agent's (x, y) input steps and maps them with a single
  dense layer to `num_future_steps` predicted (x, y) positions.
  """

  def __init__(self, num_agents_per_scenario, num_states_steps,
               num_future_steps):
    super().__init__()
    self._num_agents_per_scenario = num_agents_per_scenario
    self._num_states_steps = num_states_steps
    self._num_future_steps = num_future_steps
    self.regressor = tf.keras.layers.Dense(num_future_steps * 2)

  def call(self, states):
    # Flatten the per-agent step dimension, regress, then restore the
    # [batch, agents, steps, 2] layout.
    flat_states = tf.reshape(states, (-1, self._num_states_steps * 2))
    flat_pred = self.regressor(flat_states)
    return tf.reshape(
        flat_pred,
        [-1, self._num_agents_per_scenario, self._num_future_steps, 2])
class MotionMetrics(tf.keras.metrics.Metric):
  """Wrapper for motion metrics computation.

  Buffers per-batch predictions and ground truth in Python lists; the
  actual metric computation is deferred to `py_metrics_ops.motion_metrics`
  inside `result()`.
  """

  def __init__(self, config):
    super().__init__()
    self._prediction_trajectory = []
    self._prediction_score = []
    self._ground_truth_trajectory = []
    self._ground_truth_is_valid = []
    self._prediction_ground_truth_indices = []
    self._prediction_ground_truth_indices_mask = []
    self._object_type = []
    self._metrics_config = config

  def reset_state(self):
    # Bug fix: `self` was missing from this signature, so any call to
    # `metric.reset_state()` raised a TypeError (and the assignments below
    # referenced an undefined `self`).
    self._prediction_trajectory = []
    self._prediction_score = []
    self._ground_truth_trajectory = []
    self._ground_truth_is_valid = []
    self._prediction_ground_truth_indices = []
    self._prediction_ground_truth_indices_mask = []
    self._object_type = []

  def update_state(self, prediction_trajectory, prediction_score,
                   ground_truth_trajectory, ground_truth_is_valid,
                   prediction_ground_truth_indices,
                   prediction_ground_truth_indices_mask, object_type):
    """Buffers one batch of predictions and ground truth."""
    self._prediction_trajectory.append(prediction_trajectory)
    self._prediction_score.append(prediction_score)
    self._ground_truth_trajectory.append(ground_truth_trajectory)
    self._ground_truth_is_valid.append(ground_truth_is_valid)
    self._prediction_ground_truth_indices.append(
        prediction_ground_truth_indices)
    self._prediction_ground_truth_indices_mask.append(
        prediction_ground_truth_indices_mask)
    self._object_type.append(object_type)

  def result(self):
    """Concatenates all buffered batches and computes the motion metrics."""
    # [batch_size, num_preds, 1, 1, steps, 2].
    # The ones indicate top_k = 1, num_agents_per_joint_prediction = 1.
    prediction_trajectory = tf.concat(self._prediction_trajectory, 0)
    # [batch_size, num_preds, 1].
    prediction_score = tf.concat(self._prediction_score, 0)
    # [batch_size, num_agents, gt_steps, 7].
    ground_truth_trajectory = tf.concat(self._ground_truth_trajectory, 0)
    # [batch_size, num_agents, gt_steps].
    ground_truth_is_valid = tf.concat(self._ground_truth_is_valid, 0)
    # [batch_size, num_preds, 1].
    prediction_ground_truth_indices = tf.concat(
        self._prediction_ground_truth_indices, 0)
    # [batch_size, num_preds, 1].
    prediction_ground_truth_indices_mask = tf.concat(
        self._prediction_ground_truth_indices_mask, 0)
    # [batch_size, num_agents].
    object_type = tf.cast(tf.concat(self._object_type, 0), tf.int64)

    # We are predicting more steps than needed by the eval code. Subsample.
    interval = (
        self._metrics_config.track_steps_per_second //
        self._metrics_config.prediction_steps_per_second)
    prediction_trajectory = prediction_trajectory[...,
                                                  (interval - 1)::interval, :]

    return py_metrics_ops.motion_metrics(
        config=self._metrics_config.SerializeToString(),
        prediction_trajectory=prediction_trajectory,
        prediction_score=prediction_score,
        ground_truth_trajectory=ground_truth_trajectory,
        ground_truth_is_valid=ground_truth_is_valid,
        prediction_ground_truth_indices=prediction_ground_truth_indices,
        prediction_ground_truth_indices_mask=prediction_ground_truth_indices_mask,
        object_type=object_type)
# Model over 128 agents per scenario with 11 input steps (10 past + 1
# current), predicting 80 future steps.
model = SimpleModel(128, 11, 80)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = tf.keras.losses.MeanSquaredError()

metrics_config = _default_metrics_config()
motion_metrics = MotionMetrics(metrics_config)
metric_names = config_util.get_breakdown_names_from_motion_config(
    metrics_config)
def train_step(inputs):
  """Runs one optimization step and accumulates motion metrics.

  Args:
    inputs: Dict produced by `_parse` (input states, ground truth states,
      validity masks, object types, tracks-to-predict flags).

  Returns:
    Scalar training loss for this batch.
  """
  with tf.GradientTape() as tape:
    # [batch_size, num_agents, D]
    states = inputs['input_states']
    # Predict. [batch_size, num_agents, steps, 2].
    pred_trajectory = model(states, training=True)

    # Set training target.
    prediction_start = metrics_config.track_history_samples + 1
    # [batch_size, num_agents, steps, 7]
    gt_trajectory = inputs['gt_future_states']
    # Supervise only the future (x, y) positions.
    gt_targets = gt_trajectory[..., prediction_start:, :2]

    # [batch_size, num_agents, steps]
    gt_is_valid = inputs['gt_future_is_valid']

    # Zero-weight invalid future steps and agents we are not asked to
    # predict. [batch_size, num_agents, steps]
    weights = (
        tf.cast(inputs['gt_future_is_valid'][..., prediction_start:],
                tf.float32) *
        tf.cast(inputs['tracks_to_predict'][..., tf.newaxis], tf.float32))
    loss_value = loss_fn(gt_targets, pred_trajectory, sample_weight=weights)

  grads = tape.gradient(loss_value, model.trainable_weights)
  optimizer.apply_gradients(zip(grads, model.trainable_weights))

  # [batch_size, num_agents, steps, 2] ->
  # [batch_size, num_agents, 1, 1, steps, 2].
  # The added dimensions are top_k = 1, num_agents_per_joint_prediction = 1.
  pred_trajectory = pred_trajectory[:, :, tf.newaxis, tf.newaxis]

  # Fake the score since this model does not generate any score per predicted
  # trajectory.
  pred_score = tf.ones(shape=tf.shape(pred_trajectory)[:3])

  # [batch_size, num_agents].
  object_type = inputs['object_type']

  # [batch_size, num_agents].
  batch_size = tf.shape(inputs['tracks_to_predict'])[0]
  num_samples = tf.shape(inputs['tracks_to_predict'])[1]

  # Prediction i is matched to ground-truth agent i.
  pred_gt_indices = tf.range(num_samples, dtype=tf.int64)
  # [batch_size, num_agents, 1].
  pred_gt_indices = tf.tile(pred_gt_indices[tf.newaxis, :, tf.newaxis],
                            (batch_size, 1, 1))
  # [batch_size, num_agents, 1].
  pred_gt_indices_mask = inputs['tracks_to_predict'][..., tf.newaxis]

  motion_metrics.update_state(pred_trajectory, pred_score, gt_trajectory,
                              gt_is_valid, pred_gt_indices,
                              pred_gt_indices_mask, object_type)

  return loss_value
dataset = tf.data.TFRecordDataset(FILENAME)
dataset = dataset.map(_parse)
# Name the batch size so the progress log below stays consistent with it.
batch_size = 32
dataset = dataset.batch(batch_size)

epochs = 2
num_batches_per_epoch = 10

for epoch in range(epochs):
  print('\nStart of epoch %d' % (epoch,))
  start_time = time.time()

  # Iterate over the batches of the dataset.
  for step, batch in enumerate(dataset):
    loss_value = train_step(batch)

    # Log every 10 batches.
    if step % 10 == 0:
      print('Training loss (for one batch) at step %d: %.4f' %
            (step, float(loss_value)))
      # Bug fix: this print hard-coded 64 samples per batch, but the dataset
      # is batched with 32; use the actual batch size.
      print('Seen so far: %d samples' % ((step + 1) * batch_size))
    if step >= num_batches_per_epoch:
      break

  # Display metrics at the end of each epoch.
  train_metric_values = motion_metrics.result()
  for i, m in enumerate(
      ['min_ade', 'min_fde', 'miss_rate', 'overlap_rate', 'map']):
    for j, n in enumerate(metric_names):
      print('{}/{}: {}'.format(m, n, train_metric_values[i, j]))
| tutorial/.ipynb_checkpoints/tutorial_motion-checkpoint.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .groovy
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Groovy
// language: groovy
// name: groovy
// ---
// # General Autotranslation
//
// The `beakerx` object synchronizes values between languages. BeakerX's current implementation works with metaprogramming and serializing the objects to JSON, so it's not recommended for more than a few MB of data. Using [Arrow](https://arrow.apache.org/) and shared memory to remove that limitation is on the [agenda](https://github.com/twosigma/beakerx/issues/7577).
//
// Start in Groovy:
// The shared `beakerx` namespace synchronizes this value across all kernels
// in the notebook; each cell below reads or overwrites the same `bar` entry.
beakerx.bar = "a groovy value"
// Translate to JavaScript:
// + language="javascript"
// console.log(beakerx.bar);
// + language="javascript"
// beakerx.bar = [23, 48, 7, "from JS"];
// -
// Groovy:
beakerx.bar
// Translate to Python:
// + language="python"
// from beakerx.object import beakerx
// beakerx.bar
// + language="python"
// from beakerx.object import beakerx
// beakerx.bar = [2, 'python', 'value']
// beakerx.bar
// -
// Translate to Scala:
// + language="scala"
// beakerx.bar
// + language="scala"
// beakerx.bar = Array(3.14,"scala", "value")
// -
// Groovy:
beakerx.bar
// Translate to Clojure:
// + language="clojure"
// (beakerx :get "bar")
// + language="clojure"
// (beakerx :set "bar" [345, "clojure", "value"])
// (beakerx :get "bar")
// -
// Groovy:
beakerx.bar
// Translate to java:
// + language="java"
// return NamespaceClient.getBeakerX().get("bar");
// + language="java"
// Object[] arr = {new Integer(10), "java", "value"};
// NamespaceClient.getBeakerX().set("bar", arr);
// return NamespaceClient.getBeakerX().get("bar");
// -
// Groovy:
beakerx.bar
// Translate to kotlin:
// %%kotlin
beakerx["bar"]
// %%kotlin
beakerx["bar"] = arrayOf(33, "kotlin", "value")
beakerx["bar"]
// Groovy:
beakerx.bar
| test/ipynb/groovy/GeneralAutotranslationTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 京东JData算法大赛(2)-数据分析
# +
# 导入相关包
# %matplotlib inline
# 绘图包
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# -
# 定义文件名
ACTION_201602_FILE = "data/JData_Action_201602.csv"
ACTION_201603_FILE = "data/JData_Action_201603.csv"
ACTION_201603_EXTRA_FILE = "data/JData_Action_201603_extra.csv"
ACTION_201604_FILE = "data/JData_Action_201604.csv"
COMMENT_FILE = "data/JData_Comment.csv"
PRODUCT_FILE = "data/JData_Product.csv"
USER_FILE = "data/JData_User.csv"
NEW_USER_FILE = "data/JData_User_New.csv"
USER_TABLE_FILE = "data/user_table.csv"
ITEM_TABLE_FILE = "data/item_table.csv"
# ### 周一到周日各天购买情况
# 提取购买(type=4)的行为数据
def get_from_action_data(fname, chunk_size=100000):
    """Read an action CSV in chunks and keep only purchase (type=4) rows."""
    reader = pd.read_csv(fname, header=0, iterator=True)
    chunks = []
    while True:
        try:
            chunks.append(
                reader.get_chunk(chunk_size)[
                    ["user_id", "sku_id", "type", "time"]])
        except StopIteration:
            print("Iteration is stopped")
            break
    df_ac = pd.concat(chunks, ignore_index=True)
    # type=4 marks a purchase event.
    purchases = df_ac[df_ac['type'] == 4]
    return purchases[["user_id", "sku_id", "time"]]
df_ac = []
df_ac.append(get_from_action_data(fname=ACTION_201602_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201603_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201603_EXTRA_FILE))
df_ac.append(get_from_action_data(fname=ACTION_201604_FILE))
df_ac = pd.concat(df_ac, ignore_index=True)
print(df_ac.dtypes)
# +
# 将time字段转换为datetime类型
df_ac['time'] = pd.to_datetime(df_ac['time'])
# 使用lambda匿名函数将时间time转换为星期(周一为1, 周日为7)
df_ac['time'] = df_ac['time'].apply(lambda x: x.weekday() + 1)
# -
# Distinct purchasing users per weekday (Mon=1 .. Sun=7).
df_user = df_ac.groupby('time')['user_id'].nunique()
df_user = df_user.to_frame().reset_index()
df_user.columns = ['weekday', 'user_num']

# Distinct purchased items per weekday.
df_item = df_ac.groupby('time')['sku_id'].nunique()
df_item = df_item.to_frame().reset_index()
df_item.columns = ['weekday', 'item_num']

# Purchase records per weekday.
# Bug fix: with as_index=False, modern pandas `.size()` already returns a
# DataFrame, so the subsequent `.to_frame()` raised AttributeError. Grouping
# with the default as_index=True yields a Series in every pandas version
# and produces the same result here.
df_ui = df_ac.groupby('time').size()
df_ui = df_ui.to_frame().reset_index()
df_ui.columns = ['weekday', 'user_item_num']
# +
# 条形宽度
bar_width = 0.2
# 透明度
opacity = 0.4
plt.bar(df_user['weekday'], df_user['user_num'], bar_width,
alpha=opacity, color='c', label='user')
plt.bar(df_item['weekday']+bar_width, df_item['item_num'],
bar_width, alpha=opacity, color='g', label='item')
plt.bar(df_ui['weekday']+bar_width*2, df_ui['user_item_num'],
bar_width, alpha=opacity, color='m', label='user_item')
plt.xlabel('weekday')
plt.ylabel('number')
plt.title('A Week Purchase Table')
plt.xticks(df_user['weekday'] + bar_width * 3 / 2., (1,2,3,4,5,6,7))
plt.tight_layout()
plt.legend(prop={'size':9})
# -
# **分析**: 一周用户购买数量分布相对比较均衡,周六周日购买数相对较少,可能是此时大家都去过周末玩了,而平时可以逛京东作为消遣.
# ### 一个月各天购买情况
# #### 2016年2月
# +
df_ac = get_from_action_data(fname=ACTION_201602_FILE)
# 将time字段转换为datetime类型并使用lambda匿名函数将时间time转换为天
df_ac['time'] = pd.to_datetime(df_ac['time']).apply(lambda x: x.day)
# +
df_user = df_ac.groupby('time')['user_id'].nunique()
df_user = df_user.to_frame().reset_index()
df_user.columns = ['day', 'user_num']
df_item = df_ac.groupby('time')['sku_id'].nunique()
df_item = df_item.to_frame().reset_index()
df_item.columns = ['day', 'item_num']
df_ui = df_ac.groupby('time', as_index=False).size()
df_ui = df_ui.to_frame().reset_index()
df_ui.columns = ['day', 'user_item_num']
# +
# 条形宽度
bar_width = 0.2
# 透明度
opacity = 0.4
# 天数
day_range = range(1,len(df_user['day']) + 1, 1)
# 设置图片大小
plt.figure(figsize=(14,10))
plt.bar(df_user['day'], df_user['user_num'], bar_width,
alpha=opacity, color='c', label='user')
plt.bar(df_item['day']+bar_width, df_item['item_num'],
bar_width, alpha=opacity, color='g', label='item')
plt.bar(df_ui['day']+bar_width*2, df_ui['user_item_num'],
bar_width, alpha=opacity, color='m', label='user_item')
plt.xlabel('day')
plt.ylabel('number')
plt.title('February Purchase Table')
plt.xticks(df_user['day'] + bar_width * 3 / 2., day_range)
# plt.ylim(0, 80)
plt.tight_layout()
plt.legend(prop={'size':9})
# -
# **分析**: 从上面可以发现,在2月6号到2月10号之间是我们的农历新年,快递在这几天不上班,因而购物数量相对较少,在我们实际分析时, 可以暂时将这部分数据作为异常数据不去考虑,不加入我们的训练样本中.
# #### 查看特定用户对特定商品的活动轨迹
def spec_ui_action_data(fname, user_id, item_id, chunk_size=100000):
    """Load all action rows for one specific (user_id, sku_id) pair."""
    reader = pd.read_csv(fname, header=0, iterator=True)
    chunks = []
    while True:
        try:
            chunks.append(
                reader.get_chunk(chunk_size)[
                    ["user_id", "sku_id", "type", "time"]])
        except StopIteration:
            print("Iteration is stopped")
            break
    df_ac = pd.concat(chunks, ignore_index=True)
    selected = df_ac[(df_ac['user_id'] == user_id)
                     & (df_ac['sku_id'] == item_id)]
    return selected
def explore_user_item_via_time():
    """Print the full, time-ordered action trail of one (user, item) pair."""
    user_id = 62969
    item_id = 62655
    # Gather the pair's rows from every monthly action file.
    frames = [
        spec_ui_action_data(fname, user_id, item_id)
        for fname in (ACTION_201602_FILE, ACTION_201603_FILE,
                      ACTION_201603_EXTRA_FILE, ACTION_201604_FILE)
    ]
    df_ac = pd.concat(frames, ignore_index=False)
    print(df_ac.sort_values(by='time'))
explore_user_item_via_time()
# > 预测数据部分: 2016-04-16到2016-04-20用户是否下单P中的商品,每个用户只会下单一个商品;
#
# 由于我们需要预测16-20号五天用户的购买情况,那我们不妨分析下用户以5天为单位(周期为5)购买情况.
| data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Uncomment and run to destroy the current routing context, which will be recreated by
# running the 0** notebooks in order
# # !rm -r ../artifacts/unweaver
# -
# !mkdir -p ../artifacts/unweaver/layers
# !cp -r ../data/unweaver/* ../artifacts/unweaver/*
# !cp ../data/seattle.geojson ../artifacts/unweaver/layers/seattle.geojson
# !unweaver build ../artifacts/unweaver
# +
# Uncomment to recreate the spatial index. This should not be necessary.
# import entwiner
# G = entwiner.DiGraphDBView(path="../artifacts/unweaver/graph.gpkg")
# G.network.edges.drop_rtree()
# G.network.edges.add_rtree()
# -
| data_notebooks/010 - Building the graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style='font-size: 200%; line-height: 2;'>
# To participate, you'll need to git clone (or download the .zip from GitHub):
#
# <p>https://github.com/scikit-image/skimage-tutorials</p>
# </div>
#
# <p>You can do that in git using:</p>
#
# <pre>git clone --depth=1 https://github.com/scikit-image/skimage-tutorials</pre>
#
# <div style='font-size: 200%; line-height: 1.5;'>
# If you have already cloned the material, please issue `git pull` now and reload the notebook to ensure that you have the latest updates.
# </div>
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# # Images are numpy arrays
# Images are represented in ``scikit-image`` using standard ``numpy`` arrays. This allows maximum inter-operability with other libraries in the scientific Python ecosystem, such as ``matplotlib`` and ``scipy``.
#
# Let's see how to build a grayscale image as a 2D array:
# +
import numpy as np
from matplotlib import pyplot as plt
random_image = np.random.random([500, 500])
plt.imshow(random_image, cmap='gray')
plt.colorbar();
# -
# The same holds for "real-world" images:
# +
from skimage import data
coins = data.coins()
print('Type:', type(coins))
print('dtype:', coins.dtype)
print('shape:', coins.shape)
plt.imshow(coins, cmap='gray');
# -
# A color image is a 3D array, where the last dimension has size 3 and represents the red, green, and blue channels:
# +
cat = data.chelsea()
print("Shape:", cat.shape)
print("Values min/max:", cat.min(), cat.max())
plt.imshow(cat);
# -
# These are *just NumPy arrays*. E.g., we can make a red square by using standard array slicing and manipulation:
cat[10:110, 10:110, :] = [255, 0, 0] # [red, green, blue]
plt.imshow(cat);
# Images can also include transparent regions by adding a 4th dimension, called an *alpha layer*.
# ### Other shapes, and their meanings
#
# |Image type|Coordinates|
# |:---|:---|
# |2D grayscale|(row, column)|
# |2D multichannel|(row, column, channel)|
# |3D grayscale (or volumetric) |(plane, row, column)|
# |3D multichannel|(plane, row, column, channel)|
# ## Displaying images using matplotlib
# +
from skimage import data
img0 = data.chelsea()
img1 = data.rocket()
# +
import matplotlib.pyplot as plt
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(20, 10))
ax0.imshow(img0)
ax0.set_title('Cat', fontsize=18)
ax0.axis('off')
ax1.imshow(img1)
ax1.set_title('Rocket', fontsize=18)
ax1.set_xlabel(r'Launching position $\alpha=320$')
ax1.vlines([202, 300], 0, img1.shape[0], colors='magenta', linewidth=3, label='Side tower position')
ax1.plot([168, 190, 200], [400, 200, 300], color='white', linestyle='--', label='Side angle')
ax1.legend();
# -
# For more on plotting, see the [Matplotlib documentation](https://matplotlib.org/gallery/index.html#images-contours-and-fields) and [pyplot API](https://matplotlib.org/api/pyplot_summary.html).
# ## Data types and image values
#
# In literature, one finds different conventions for representing image values:
#
# ```
# 0 - 255 where 0 is black, 255 is white
# 0 - 1 where 0 is black, 1 is white
# ```
#
# ``scikit-image`` supports both conventions--the choice is determined by the
# data-type of the array.
#
# E.g., here, I generate two valid images:
# +
linear0 = np.linspace(0, 1, 2500).reshape((50, 50))
linear1 = np.linspace(0, 255, 2500).reshape((50, 50)).astype(np.uint8)
print("Linear0:", linear0.dtype, linear0.min(), linear0.max())
print("Linear1:", linear1.dtype, linear1.min(), linear1.max())
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 15))
ax0.imshow(linear0, cmap='gray')
ax1.imshow(linear1, cmap='gray');
# -
# The library is designed in such a way that any data-type is allowed as input,
# as long as the range is correct (0-1 for floating point images, 0-255 for unsigned bytes,
# 0-65535 for unsigned 16-bit integers).
# You can convert images between different representations by using ``img_as_float``, ``img_as_ubyte``, etc.:
# +
from skimage import img_as_float, img_as_ubyte
image = data.chelsea()
image_ubyte = img_as_ubyte(image)
image_float = img_as_float(image)
print("type, min, max:", image_ubyte.dtype, image_ubyte.min(), image_ubyte.max())
print("type, min, max:", image_float.dtype, image_float.min(), image_float.max())
print()
print("231/255 =", 231/255.)
# -
# Your code would then typically look like this:
#
# ```python
# def my_function(any_image):
# float_image = img_as_float(any_image)
# # Proceed, knowing image is in [0, 1]
# ```
#
# We recommend using the floating point representation, given that
# ``scikit-image`` mostly uses that format internally.
# ## Image I/O
#
# Mostly, we won't be using input images from the scikit-image example data sets. Those images are typically stored in JPEG or PNG format. Since scikit-image operates on NumPy arrays, *any* image reader library that provides arrays will do. Options include imageio, matplotlib, pillow, etc.
#
# scikit-image conveniently wraps many of these in the `io` submodule, and will use whichever of the libraries mentioned above are installed:
# +
from skimage import io
image = io.imread('../images/balloon.jpg')
print(type(image))
print(image.dtype)
print(image.shape)
print(image.min(), image.max())
plt.imshow(image);
# -
# We also have the ability to load multiple images, or multi-layer TIFF images:
# +
ic = io.ImageCollection('../images/*.png:../images/*.jpg')
print('Type:', type(ic))
ic.files
# +
import os
f, axes = plt.subplots(nrows=3, ncols=len(ic) // 3 + 1, figsize=(20, 5))
# subplots returns the figure and an array of axes
# we use `axes.ravel()` to turn these into a list
axes = axes.ravel()
for ax in axes:
ax.axis('off')
for i, image in enumerate(ic):
axes[i].imshow(image, cmap='gray')
axes[i].set_title(os.path.basename(ic.files[i]))
plt.tight_layout()
# -
# ### Aside: `enumerate`
#
# `enumerate` gives us each element in a container, along with its position.
animals = ['cat', 'dog', 'leopard']
for i, animal in enumerate(animals):
print('The animal in position {} is {}'.format(i, animal))
# ## <span class="exercize">Exercise: draw the letter H</span>
#
# Define a function that takes as input an RGB image and a pair of coordinates (row, column), and returns a copy with a green letter H overlaid at those coordinates. The coordinates point to the top-left corner of the H.
#
# The arms and strut of the H should have a width of 3 pixels, and the H itself should have a height of 24 pixels and width of 20 pixels.
#
# Start with the following template:
def draw_H(image, coords, color=(0, 255, 0)):
    """Return a copy of `image` with a letter H drawn at `coords`.

    The H is 24 pixels tall and 20 pixels wide; its two vertical arms and
    the horizontal strut are each 3 pixels thick. `coords` is the
    (row, column) of the H's top-left corner; negative values index from
    the far edge, following regular NumPy slicing semantics.
    """
    out = image.copy()
    r, c = coords
    height, width, thickness = 24, 20, 3

    # Left and right vertical arms.
    out[r:r + height, c:c + thickness] = color
    out[r:r + height, c + width - thickness:c + width] = color

    # Horizontal strut, centered vertically.
    mid = r + height // 2 - 1
    out[mid:mid + thickness, c:c + width] = color

    return out
# Test your function like so:
cat = data.chelsea()
cat_H = draw_H(cat, (50, -50))
plt.imshow(cat_H);
# ## <span class="exercize">Exercise: visualizing RGB channels</span>
#
# Display the different color channels of the image along (each as a gray-scale image). Start with the following template:
# +
# --- read in the image ---
image = plt.imread('../images/Bells-Beach.jpg')
# --- assign each color channel to a different variable ---
r = ...
g = ...
b = ...
# --- display the image and r, g, b channels ---
f, axes = plt.subplots(1, 4, figsize=(16, 5))
for ax in axes:
ax.axis('off')
(ax_r, ax_g, ax_b, ax_color) = axes
ax_r.imshow(r, cmap='gray')
ax_r.set_title('red channel')
ax_g.imshow(g, cmap='gray')
ax_g.set_title('green channel')
ax_b.imshow(b, cmap='gray')
ax_b.set_title('blue channel')
# --- Here, we stack the R, G, and B layers again
# to form a color image ---
ax_color.imshow(np.stack([r, g, b], axis=2))
ax_color.set_title('all channels');
# -
# Now, take a look at the following R, G, and B channels. How would their combination look? (Write some code to confirm your intuition.)
# +
from skimage import draw
red = np.zeros((300, 300))
green = np.zeros((300, 300))
blue = np.zeros((300, 300))
r, c = draw.circle(100, 100, 100)
red[r, c] = 1
r, c = draw.circle(100, 200, 100)
green[r, c] = 1
r, c = draw.circle(200, 150, 100)
blue[r, c] = 1
f, axes = plt.subplots(1, 3)
for (ax, channel) in zip(axes, [red, green, blue]):
ax.imshow(channel, cmap='gray')
ax.axis('off')
# -
# ## Exercise: Convert to grayscale ("black and white")
#
# The *relative luminance* of an image is the intensity of light coming from each point. Different colors contribute differently to the luminance: it's very hard to have a bright, pure blue, for example. So, starting from an RGB image, the luminance is given by:
#
# $$
# Y = 0.2126R + 0.7152G + 0.0722B
# $$
#
# Use Python 3.5's matrix multiplication, `@`, to convert an RGB image to a grayscale luminance image according to the formula above.
#
# Compare your results to that obtained with `skimage.color.rgb2gray`.
#
# Change the coefficients to 1/3 (i.e., take the mean of the red, green, and blue channels, to see how that approach compares with `rgb2gray`).
# +
from skimage import color, img_as_float
image = img_as_float(io.imread('../images/balloon.jpg'))
gray = color.rgb2gray(image)
my_gray = ...
# --- display the results ---
f, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 6))
ax0.imshow(gray, cmap='gray')
ax0.set_title('skimage.color.rgb2gray')
ax1.imshow(my_gray, cmap='gray')
ax1.set_title('my rgb2gray')
| lectures/00_images_are_arrays.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
X = 2*np.random.rand(100,1)
y = 4 +3 *X +np.random.randn(100,1)
X_b = np.c_[np.ones((100,1)),X]
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best
X_new = np.array([[0],[2]])
X_new_b = np.c_[np.ones((2,1)),X_new]
y_predict = X_new_b.dot(theta_best)
y_predict
plt.plot(X_new,y_predict,"r-")
plt.plot(X,y,"b.")
plt.axis([0,2,0,15])
plt.show()
# -
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,y)
lin_reg.intercept_,lin_reg.coef_
lin_reg.predict(X_new)
# +
eta =0.1
n_iterations = 1000
m = 100
theta = np.random.randn(2,1)
for iterations in range(n_iterations):
gradients = 2/m*X_b.T.dot(X_b.dot(theta)-y)
theta= theta -eta *gradients
theta
# -
from sklearn import datasets
# Load the iris data set and build a binary target: 1 = Iris-Virginica, else 0.
iris = datasets.load_iris()
list(iris.keys())
X=iris["data"][:,3:]  # petal width only
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `int` is the supported spelling with identical behavior.
y=(iris["target"]==2).astype(int)
# +
from sklearn.linear_model import LogisticRegression
# Fit a logistic-regression classifier on petal width.
log_reg = LogisticRegression()
log_reg.fit(X,y)
# Evaluate the model over a fine grid of petal widths from 0 to 3 cm.
x_new = np.linspace(0,3,1000).reshape(-1,1)
# Bug fix: the probabilities must be computed on the grid `x_new` defined
# above (previously unused), not on the stale 2-point `X_new` array left
# over from the linear-regression cell.
y_proba = log_reg.predict_proba(x_new)
plt.plot(x_new,y_proba[:,1],"g-",label="Iris-Virginica")
plt.plot(x_new,y_proba[:,0], "b--", label ="Not Iris-Virginica")
# +
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

# Binary classification: Iris-Virginica vs the rest, on petal length/width.
iris = datasets.load_iris()
X = iris["data"][:,(2,3)]
y = (iris["target"]==2).astype(np.float64)
# Fix: Pipeline steps must be a *list* of (name, estimator) tuples;
# passing a tuple of tuples raises a TypeError in modern scikit-learn.
svm_clf = Pipeline([
    ("scaler",StandardScaler()),
    ("linear_svc", LinearSVC(C=1,loss= "hinge")),
])
svm_clf.fit(X,y)
svm_clf.predict([[5.5,1.7]])
plt.plot(X,y,"g-",label="Iris-Virginica")
plt.plot(X,y, "b--", label ="Not Iris-Virginica")
# -
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Fit a depth-2 decision tree on petal length and width.
iris = load_iris()
X = iris.data[:, 2:] # petal length and width
y = iris.target
tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X, y)
from sklearn.tree import export_graphviz
# Export the fitted tree in Graphviz .dot format for visualization.
export_graphviz(
        tree_clf,
        #out_file=image_path("iris_tree.dot"),
        feature_names=iris.feature_names[2:],
        class_names=iris.target_names,
        rounded=True,
        filled=True
        )
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Stacking automatically generated cross-profiles
#
# This example is based on the Generic Mapping Tool's example 33 (https://docs.generic-mapping-tools.org/latest/gallery/ex33.html).
#
# Data from Tozer et al., 2019 (http://dx.doi.org/10.1029/2019EA000658) provided via `earth_relief_01m`.
# %%
# Bug fix: numpy and pandas were accidentally left commented out inside the
# markdown cell above, so the later `np.array(...)` and `pd.read_csv(...)`
# calls raised NameError. They belong in a code cell.
import numpy as np
import pandas as pd

import pygmt
# %%
# Extract a subset of earth_relief_01m for the East Pacific Rise
grid = pygmt.grdcut("@earth_relief_01m", region=[-118, -107, -49, -42])
# %%
# Plot the grid subset
fig = pygmt.Figure()
pygmt.makecpt(cmap="bukavu", series=[-5000, -2000])
fig.grdimage(grid=grid, projection="M15c", shading="a15+ne0.75", frame=True)
fig.text(
    text=r"Data from Tozer et al., 2019",
    position="cBR",
    offset="0/-1.2c",
    font="12p,Helvetica",
    no_clip=True,
)
fig.show()
# %%
# Select two points along the ridge
points = np.array([[-111.6, -43.0], [-113.3, -47.5]])
# %%
# Plot ridge segment and end points
fig.plot(data=points, pen="2p,darkorange")
fig.plot(data=points, style="c0.25c", color="darkorange")
fig.show()
# %%
# Generate cross-profiles 400 km long, spaced 10 km, sampled every 2 km and stack these using the median
pygmt.grdtrack(
    points=points,
    grid=grid,
    crossprofile="400k/2k/10k+v",
    stack='m+s"stack.txt"',
    outfile="profiles.txt",
)
# %%
# Plot the cross-profiles
fig.plot(data="profiles.txt", pen="0.75p")
fig.show()
# %%
# Create an envelope
# Columns 5 and 6 of stack.txt hold the stack's envelope bounds; the reversed
# second frame closes the polygon so it can be drawn as a filled area below.
upper = pd.read_csv(
    "stack.txt", sep="\t", header=None, usecols=[0, 5], names=["Distance", "Value"]
)
lower = pd.read_csv(
    "stack.txt", sep="\t", header=None, usecols=[0, 6], names=["Distance", "Value"]
)
envelope = pd.concat([upper, lower[::-1]], ignore_index=True)
# %%
# Shift above the map, then draw the grey envelope and the stacked profile.
fig.shift_origin(yshift="h+2c")
fig.plot(
    data=envelope,
    region=[-200, 200, -3500, -2000],
    projection="X15c/7.5c",
    color="lightgrey",
    frame=['xafg1000+l"Distance from ridge (km)"', 'yaf+l"Depth (m)"', "WSNE"],
)
fig.plot(
    data="stack.txt",
    region=[-200, 200, -3500, -2000],
    projection="X15c/7.5c",
    pen="3p",
    frame=['xafg1000+l"Distance from ridge (km)"', 'yaf+l"Depth (m)"', "WSNE"],
)
fig.show()
# %%
# Save the example figure
fig.savefig("figures/profiles_ex33.png")
| examples/cross_profiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sklearn
import re
import nltk
import csv
import random
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.probability import FreqDist
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer, CountVectorizer
import pandas as pd
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

# `global` at module level is a no-op; kept to document intent.
global Y, vector, df

# Load the drug-review training data (CSV has no header row).
df = pd.read_csv(r'C:\Users\melan\OneDrive\Documents\drugsTrain2.csv',
                 names = ['Name', 'Condition', 'Review', 'Rating'])
# Fix: positional `axis` was removed from DataFrame.drop in pandas 2.0;
# `columns=` is the supported, equivalent spelling.
df = df.drop(columns=['Name', 'Condition'])
#select the ratings with 1 and 10
high = df.loc[df['Rating'] == 10]
low = df.loc[df['Rating'] == 1]
#combine the dataframes
frames = [low, high]
drugs_df = pd.concat(frames)
#select global Y variable
Y = drugs_df['Rating']
# +
#cleaning the data
def cleaned(text, remove_stopwords = True):
    """Lowercase the review, strip URLs/markup/punctuation, optionally drop
    English stopwords, and return the text as a list of WordPunct tokens."""
    #remove punctuation. make lowercase
    text = text.lower()
    text = re.sub(r'https?:\/\/.*[\r\n]*', '', text, flags=re.MULTILINE)
    text = re.sub(r'\<a href', ' ', text)
    text = re.sub(r'&', '', text)
    text = re.sub(r'[_"\-;%()|+&=*%.,!?:#$@\[\]/]', ' ', text)
    text = re.sub(r'<br />', ' ', text)
    text = re.sub(r'\'', ' ', text)
    #remove stopwords
    if remove_stopwords:
        text = text.split()
        stops = set(stopwords.words("english"))
        text = [w for w in text if not w in stops]
        text = " ".join(text)
    # Tokenize each word
    text = nltk.WordPunctTokenizer().tokenize(text)
    return text
#preprocessed reviews
df['Review_Clean'] = list(map(cleaned, df.Review))
#get lemmatization per reviews
lem = WordNetLemmatizer()
df['Review_Lem'] = df['Review_Clean'].apply(
    lambda lst:[lem.lemmatize(word) for word in lst])
#get word count per review
df['Review_Count'] = df['Review_Lem'].apply(lambda txt: len(str(txt).split()))
#get number of letters per review
df['Review_Lem_string'] = [' '.join(map(str, h)) for h in df['Review_Lem']]
df['Review_Length'] = df['Review_Lem_string'].astype(str).apply(len)
#visualizing letter per review
letter_avg = df.groupby('Rating')['Review_Length'].mean().plot(kind='bar', figsize=(20,10))
plt.xlabel('Rating', fontsize=15)
plt.ylabel('Count of Letters in Rating', fontsize=15)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Average Number of Letters per Rating', fontsize=20)
plt.show()
#visualizing word count in each rating category per review
word_avg = df.groupby('Rating')['Review_Count'].mean().plot(kind='bar', figsize=(20,10))
plt.xlabel('Rating', fontsize=15)
plt.ylabel('Count of Words in Rating', fontsize=15)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Average Number of Words per Rating', fontsize=20)
plt.show()
#get all the words in the dataset
commonwords = df['Review_Lem']
words = []
for wordlist in commonwords:
    words = words + wordlist
#get top 25 words in the dataset
top25 = FreqDist(words).most_common(25)
x, y = zip(*top25)
plt.figure(figsize=(20,10))
plt.margins(0.02)
plt.bar(x, y)
plt.xlabel('Words', fontsize=15)
plt.ylabel('Frequency of Words', fontsize=15)
plt.yticks(fontsize=10)
plt.xticks(rotation=10, fontsize=10)
plt.title('25 Most Common Words', fontsize=20)
plt.show()
# +
#tf-idf calculations
reviews = drugs_df['Review']
temp = reviews
stop_words = set(stopwords.words('english'))
tfidf_vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)
# Fit + transform the reviews into a sparse tf-idf matrix.
vector = tfidf_vectorizer.fit_transform(temp)
l = vector.toarray()
vectorizer = tfidf_vectorizer.transform(temp)
#save tf-idf matrix to csv
file = open('tf_idf.csv', 'w+', newline ='')
with file:
    write = csv.writer(file)
    write.writerows(l)
#Naive bayes classifier
classifier = MultinomialNB()
x_train, x_test, y_train, y_test = train_test_split(vectorizer, Y, test_size = 0.4)
classify = MultinomialNB().fit(x_train, y_train)
y_pred = classify.predict(x_test)
score = accuracy_score(y_test, y_pred)
#top 10 words in both the two ratings
# feature_log_prob_[0] is the class with the smaller label (Rating == 1),
# feature_log_prob_[1] the larger (Rating == 10); argsort descending.
toplow_sorted = classify.feature_log_prob_[0, :].argsort()[::-1]
tophigh_sorted = classify.feature_log_prob_[1, :].argsort()[::-1]
print(np.take(tfidf_vectorizer.get_feature_names_out(), toplow_sorted[:10]))
print(np.take(tfidf_vectorizer.get_feature_names_out(), tophigh_sorted[:10]))
print('Raw accuracy score:', score)
print("************Classification Report Naiive Bayes Model************\n", classification_report(y_test, y_pred))
#visualizing the confusion matrix
cmat = confusion_matrix(y_test, y_pred)
fig, x = plt.subplots(figsize=(12, 12))
x.imshow(cmat)
x.grid(False)
x.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 10s', 'Predicted 1s'))
x.yaxis.set(ticks=(0, 1), ticklabels=('Actual 10s', 'Actual 1s'))
x.set_ylim(1.5, -0.5)
sns.heatmap(pd.DataFrame(cmat), annot=True, cmap="YlGnBu" ,fmt='g')
# Overlay the raw counts on each cell of the heatmap.
for i in range(2):
    for k in range(2):
        x.text(k, i, cmat[i, k], ha='center', va='center', color='white')
plt.show()
# +
#Logistic Regression
model = LogisticRegression(C=1, class_weight=None, max_iter=1000)
x_tr, x_t, y_tr, y_t = train_test_split(vector, Y, test_size = 0.4)
fitting = model.fit(x_tr, y_tr)
testing = fitting.score(x_t, y_t)
pred = model.predict(x_t)
#cross validation 10 folds
kfold = KFold(n_splits=10, random_state=0, shuffle=True)
scores = cross_val_score(fitting, vector, Y, scoring='accuracy', cv=kfold)
#confusion matrix
cmat_lr = confusion_matrix(y_t, pred)
figure, m = plt.subplots(figsize=(12, 12))
m.imshow(cmat_lr)
m.grid(False)
m.xaxis.set(ticks=(0, 1), ticklabels=('Predicted 10s', 'Predicted 1s'))
m.yaxis.set(ticks=(0, 1), ticklabels=('Actual 10s', 'Actual 1s'))
m.set_ylim(1.5, -0.5)
sns.heatmap(pd.DataFrame(cmat_lr), annot=True, cmap="YlGnBu" ,fmt='g')
# Bug fix: annotate with the logistic-regression matrix `cmat_lr`, not the
# Naive Bayes `cmat`, and place cmat_lr[o, p] at x=p, y=o so each value
# matches its cell (same orientation as the Naive Bayes plot above).
for o in range(2):
    for p in range(2):
        m.text(p, o, cmat_lr[o, p], ha='center', va='center', color='white')
plt.show()
print("Accuracy: %.3f%%, standard dev: %.3f%%" % (scores.mean()*100.0, scores.std()*100.0))
print("Raw accuracy score:", testing)
print("************Classification Report Regression Model************\n", classification_report(y_t, pred))
#ROC curve
# Bug fix: an ROC curve needs a continuous score, not hard class labels;
# use the predicted probability of the positive class (Rating == 10, which
# is the second column because classes_ is sorted as [1, 10]).
y_pred_proba = model.predict_proba(x_t)[:, 1]
roc, curve, l = metrics.roc_curve(y_t, y_pred_proba, pos_label = 10)
rocc = metrics.roc_auc_score(y_t, y_pred_proba)
plt.plot(roc, curve, label="drugs data, auc=" + str(rocc))
plt.legend(loc=4)
plt.show()
| FP - DrugReview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chicago Crime Prediction Pipeline
#
# An example notebook that demonstrates how to:
# * Download data from BigQuery
# * Create a Kubeflow pipeline
# * Include Google Cloud AI Platform components to train and deploy the model in the pipeline
# * Submit a job for execution
#
# The model forecasts how many crimes are expected to be reported the next day, based on how many were reported over the previous `n` days.
# ## Imports
# +
# %%capture
# Install the SDK (Uncomment the code if the SDK is not installed before)
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.22/kfp.tar.gz'
# !pip3 install --upgrade pip -q
# !pip3 install $KFP_PACKAGE --upgrade -q
# !pip3 install pandas --upgrade -q
# +
import json
import kfp
import kfp.compiler as compiler
import kfp.components as comp
import kfp.dsl as dsl
import kfp.gcp as gcp
import pandas as pd
import time
# -
# ## Pipeline
# ### Constants
# +
# Required Parameters
PROJECT_ID = '<ADD GCP PROJECT HERE>'
GCS_WORKING_DIR = 'gs://<ADD STORAGE LOCATION HERE>' # No ending slash
# Optional Parameters
REGION = 'us-central1'
RUNTIME_VERSION = '1.13'
# GCS URI(s) of the packaged trainer code, JSON-encoded for the component.
PACKAGE_URIS=json.dumps(['gs://chicago-crime/chicago_crime_trainer-0.0.tar.gz'])
# Timestamped output path so repeated runs do not clobber each other.
TRAINER_OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/train/output/' + str(int(time.time())) + '/'
DATA_GCS_PATH = GCS_WORKING_DIR + '/reports.csv'
PYTHON_MODULE = 'trainer.task'
# CLI arguments forwarded to trainer.task, JSON-encoded for the component.
TRAINER_ARGS = json.dumps([
    '--data-file-url', DATA_GCS_PATH,
    '--job-dir', GCS_WORKING_DIR
])
EXPERIMENT_NAME = 'Chicago Crime Prediction'
PIPELINE_NAME = 'Chicago Crime Prediction'
PIPELINE_FILENAME_PREFIX = 'chicago'
PIPELINE_DESCRIPTION = ''
# -
# ### Download data
#
# Define a download function that uses the BigQuery component
# +
# Load the reusable BigQuery query component (pinned to a specific commit).
bigquery_query_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/d2f5cc92a46012b9927209e2aaccab70961582dc/components/gcp/bigquery/query/component.yaml')
QUERY = """
SELECT count(*) as count, TIMESTAMP_TRUNC(date, DAY) as day
FROM `bigquery-public-data.chicago_crime.crime`
GROUP BY day
ORDER BY day
"""
def download(project_id, data_gcs_path):
    """Query daily crime counts from BigQuery and write the result to
    data_gcs_path, authenticating with the cluster's user GCP secret."""
    return bigquery_query_op(
        query=QUERY,
        project_id=project_id,
        output_gcs_path=data_gcs_path
    ).apply(
        gcp.use_gcp_secret('user-gcp-sa')
    )
# -
# ### Train the model
#
# Run training code that will pre-process the data and then submit a training job to the AI Platform.
# +
# Load the reusable AI Platform (ML Engine) training component.
mlengine_train_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/d2f5cc92a46012b9927209e2aaccab70961582dc/components/gcp/ml_engine/train/component.yaml')
def train(project_id,
          trainer_args,
          package_uris,
          trainer_output_gcs_path,
          gcs_working_dir,
          region,
          python_module,
          runtime_version):
    """Submit an AI Platform training job running `python_module` from
    `package_uris`, with outputs under trainer_output_gcs_path.

    Note: gcs_working_dir is accepted but not forwarded to the component.
    """
    return mlengine_train_op(
        project_id=project_id,
        python_module=python_module,
        package_uris=package_uris,
        region=region,
        args=trainer_args,
        job_dir=trainer_output_gcs_path,
        runtime_version=runtime_version
    ).apply(gcp.use_gcp_secret('user-gcp-sa'))
# -
# ### Deploy model
#
# Deploy the model with the ID given from the training step
# +
# Load the reusable AI Platform (ML Engine) deploy component.
mlengine_deploy_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/d2f5cc92a46012b9927209e2aaccab70961582dc/components/gcp/ml_engine/deploy/component.yaml')
def deploy(
        project_id,
        model_uri,
        model_id,
        runtime_version):
    """Deploy the trained model at model_uri to AI Platform, replacing any
    existing version and marking the new version as the default."""
    return mlengine_deploy_op(
        model_uri=model_uri,
        project_id=project_id,
        model_id=model_id,
        runtime_version=runtime_version,
        replace_existing_version=True,
        set_default=True).apply(gcp.use_gcp_secret('user-gcp-sa'))
# -
# ### Define pipeline
# +
@dsl.pipeline(
    name=PIPELINE_NAME,
    description=PIPELINE_DESCRIPTION
)
def pipeline(
    data_gcs_path=dsl.PipelineParam(name='data_gcs_path', value=DATA_GCS_PATH),
    gcs_working_dir=dsl.PipelineParam(name='gcs_working_dir', value=GCS_WORKING_DIR),
    project_id=dsl.PipelineParam(name='project_id', value=PROJECT_ID),
    python_module=dsl.PipelineParam(name='python_module', value=PYTHON_MODULE),
    region=dsl.PipelineParam(name='region', value=REGION),
    runtime_version=dsl.PipelineParam(name='runtime_version', value=RUNTIME_VERSION),
    package_uris=dsl.PipelineParam(name='package_uris', value=PACKAGE_URIS),
    trainer_output_gcs_path=dsl.PipelineParam(name='trainer_output_gcs_path', value=TRAINER_OUTPUT_GCS_PATH),
    trainer_args=dsl.PipelineParam(name='trainer_args', value=TRAINER_ARGS),
):
    """Download -> train -> deploy; every setting can be overridden at run
    time through the PipelineParam defaults above."""
    download_task = download(project_id,
                             data_gcs_path)
    # Training reads the data from GCS rather than from the download op's
    # outputs, so the ordering dependency is made explicit with .after().
    train_task = train(project_id,
                       trainer_args,
                       package_uris,
                       trainer_output_gcs_path,
                       gcs_working_dir,
                       region,
                       python_module,
                       runtime_version).after(download_task)
    deploy_task = deploy(project_id,
                         train_task.outputs['job_dir'],
                         train_task.outputs['job_id'],
                         runtime_version)
    return True
# Reference for invocation later
pipeline_func = pipeline
# -
# ### Compile pipeline
# +
# Compile the pipeline function into an Argo workflow archive.
pipeline_filename = PIPELINE_FILENAME_PREFIX + '.pipeline.tar.gz'
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# -
# ### Submit the pipeline for execution
# +
# Specify pipeline argument values
arguments = {}
# Get or create an experiment and submit a pipeline run
client = kfp.Client()
try:
    experiment = client.get_experiment(experiment_name=EXPERIMENT_NAME)
except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit. Any API error here is treated as
    # "experiment does not exist yet", so create it.
    experiment = client.create_experiment(EXPERIMENT_NAME)
# Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
| samples/ai-platform/Chicago Crime Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gndede/python/blob/main/Strings_galore.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="cOfrx1QOpe58"
#name = <NAME>
#a, c
# Extended unpacking: first token, remaining tokens collected into `last`.
name = "<NAME>"
first, *last = name.split()
print("First = {first}".format(first=first))
#First = John
print("Last = {last}".format(last=" ".join(last)))
#Last = <NAME>
# + id="zO9JyXbgrcx5"
name = "<NAME>"
first, *last = name.split()
print("First = {first}".format(first=first))
#First = John
print("Last = {last}".format(last=" ".join(last)))
#Last = Schmidt
# + id="nwQCvWyirnYI"
# Three-way split: first, middle, and the rest collected into `last`.
name = "<NAME>"
first, middle, *last = name.split()
print("First = {first}".format(first=first))
#First = John
print("Middle = {middle}".format(middle=middle))
#Middle = Jacob
print("Last = {last}".format(last=" ".join(last)))
#Last = <NAME>
# + id="ZdTwQjeyrwKy"
address = "<NAME>"
first, middle, *last = address.split()
print("First = {first}".format(first=first))
#First = John
print("Middle = {middle}".format(middle=middle))
#Middle = Jacob
print("Last = {last}".format(last=" ".join(last)))
#Last = <NAME>
# + id="hf9SGdGbsr8v"
# NOTE(review): split(" ", 3) allows up to 4 pieces; unpacking into exactly
# three variables works only when full_name has exactly three tokens.
full_name = "<NAME>"
address = "405 Rock Plant Lane, Fort Worth, Texas, 76144, <EMAIL>, (817)-323-8783"
first, MiddleInitial, last = full_name.split(" ", 3)
# maxsplit=4 keeps everything after the fourth space together in `street4`.
street, street1, street2, street3, street4 = address.split(" ", 4)
#street5 = address.split(" ", 4)
#street, city, state, zipcode = address.split(" ", 4)
print(first+" "+MiddleInitial+" "+last)
print(street+ " "+street1+" "+street2+" "+street3)
print(street4)
# " "+street5 )
# + id="KPAMDfE92JM0"
# Relies on the IPython/Colab `pip` line magic; in plain Python use `!pip`.
pip install nameparser
# + id="19DtLuro2GGO"
from nameparser import HumanName
# Here's a full name, with a nickname
full_name = 'Mr. Mathew "The Boss" Oloo'
# Extract values
parsed_name = HumanName(full_name)
# Get just the first and last name
f_name = parsed_name.first
l_name = parsed_name.last
print(f_name, l_name)
# <NAME>
# ------------------------------
# If you want to see everything:
parsed_name.as_dict()
# The dict literal below is the expected output, shown as a bare expression.
{'title': 'Mr.',
 'first': 'Mathew',
 'middle': '',
 'last': 'Oloo',
 'suffix': '',
 'nickname': 'The Boss'}
# + id="z5Eob32H2-Oy"
txt = "hello, my name is Peter, I am 26 years old"
x = txt.split(", ")
print(x)
# + id="gNbUf0Q33bgk"
txt = "413 Rockanne Blvd, <NAME>, Texas, 76151, <EMAIL>, 814-325-2254"
x = txt.split(", ")#,"/n")
print(x)
# + id="Cu3zXu5Z0FyP"
# Unpack the six comma-separated address fields directly.
txt = "413 Rockanne Blvd, <NAME>, Texas, 76151, <EMAIL>, 814-325-2254"
a, b, c, d, e, f= txt.split(", ")#,"/n")
print(a)
print(b+"," " "+c+"," " "+d+".")
print(e)
print(f)
# + id="BUzFWrT5LGc6"
x = 'Exodus, Corinthians, Thesalonians'
a, b, c = x.split(",")
print(a)
print(b)
print(c)
| Strings_galore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementar o batch gradient descent - PyTorch
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.datasets import load_iris
# ### Carregando dados e normalizando
# +
# Keep only the Iris-versicolor samples (target == 1).
iris = load_iris()
ndata = iris.data[iris.target==1,::2] # sepal and petal length, feature indices 0 and 2
tdata = torch.from_numpy(ndata)
x = tdata[:,0:1]
y = tdata[:,1:2]
# Joint min/max over both columns so x and y share one common scale.
a = [x.min(), y.min()]
a = min(a)
b = [x.max(), y.max()]
b = max(b)
# NOTE: these in-place ops also mutate `tdata` (views share storage), and
# the division uses the raw max `b`, not (b - a) — kept as in the exercise.
x -= a
x /= b # normalization
y -= a
y /= b
# -
# ### Gradiente Descendente por Mini Lote (mini-batch)
def mini_batch_gradient_descent(X, y, w_init, b, lr, n_epochs):
    """Mini-batch gradient descent for linear least squares.

    X        : (n, 2) design matrix (bias column included).
    y        : (n, 1) targets.
    w_init   : (1, 2) initial weight row-vector.
    b        : mini-batch size.
    lr       : learning rate.
    n_epochs : number of passes over the data.

    Returns the transposed weight history (initial weights in row 0, then
    one row per full-batch update). Shuffling is deliberately disabled, so
    the procedure is fully deterministic; when n is not a multiple of b,
    the leftover samples form one extra update per epoch whose weights
    overwrite the last recorded slot of that epoch.
    """
    n_samples = y.size()[0]
    # number of complete mini-batches per epoch
    full_batches = int(n_samples / b)
    # samples left over after the complete batches
    leftover = n_samples % b
    # one history column per full-batch update, plus the initial weights
    history = torch.zeros((2, (full_batches * n_epochs) + 1))
    weights = w_init.t()
    history[:, 0] = weights[:, 0]
    for epoch in range(n_epochs):
        for batch in range(full_batches):
            lo = batch * b
            hi = lo + b
            X_batch = X[lo:hi]
            y_batch = y[lo:hi]
            # MSE gradient step: w <- w - lr * (2/b) * X'(Xw - y)
            weights = weights - lr * (2.0 / b) * (X_batch.t()).mm(X_batch.mm(weights) - y_batch)
            history[:, batch + (epoch * full_batches) + 1] = weights[:, 0]
        if leftover > 0:
            X_batch = X[-leftover:]
            y_batch = y[-leftover:]
            weights = weights - lr * (2.0 / leftover) * (X_batch.t()).mm(X_batch.mm(weights) - y_batch)
            history[:, batch + (epoch * full_batches) + 1] = weights[:, 0]
    return history.t()
# ### Minimização via gradiente descendente por mini lote
# +
# Gradient-descent parameters: number of epochs and learning rate.
iterations = 100
lr = 0.1
n_samples = y.shape[0]
# Build the design matrix with a leading column of ones (bias term).
ones = torch.ones(n_samples,1)
ones = ones.type(torch.DoubleTensor)
X_bias = torch.cat((ones, x), 1)
w_init = torch.zeros(1,2).type(torch.DoubleTensor)
# Batch size 1: stochastic gradient descent.
tam_lote = 1
w_history_1 = mini_batch_gradient_descent(X_bias, y, w_init, tam_lote, lr, iterations)
# Intermediate batch size.
tam_lote = 9
w_history_T = mini_batch_gradient_descent(X_bias, y, w_init, tam_lote, lr, iterations)
# Batch size equal to the sample count: full-batch gradient descent.
tam_lote = 50
w_history_M = mini_batch_gradient_descent(X_bias, y, w_init, tam_lote, lr, iterations)
# -
def xy_grid(h, w, s):
    """Build every (x, y) pair of an s-by-s grid over [h, w] on each axis.

    Returns a 2 x s*s DoubleTensor: row 0 holds x coordinates (varying
    fastest), row 1 holds y coordinates (constant within each block of s).
    """
    axis = torch.linspace(h, w, s)
    xs = axis.repeat(s)
    ys = axis.repeat(s, 1).t().contiguous().view(-1)
    pairs = torch.stack([xs, ys], 1)
    return pairs.t().type(torch.DoubleTensor)
def Jview(X_bias, y, w_history, w_opt, plt, ax):
    """Contour-plot the cost surface J(w0, w1) and overlay the weight
    trajectory recorded during training plus the analytic solution."""
    # function parameters
    # how many weight values to generate per axis
    # NOTE: `all`, `min` and `max` below shadow Python builtins in this scope.
    all = 1000
    # grid minimum
    min = -0.4
    # grid maximum
    max = 1.0
    # 2-D grid of parameter values for the contour plot
    X, Y = np.meshgrid(np.linspace(min, max, all),
                       np.linspace(min, max, all))
    # same grid flattened into candidate weight vectors (2 x all**2)
    wT = xy_grid(min, max, all)
    # cost at every candidate weight, reshaped back to the grid
    J = compute_cost(X_bias, y, wT).view(all,all)
    J = J.numpy()
    # filled contours of the parameter space
    CS = plt.contourf(X, Y, J, 30, cmap=plt.cm.Blues_r)
    # weight history recorded during training, drawn as red dots
    w0 = w_history[0:,0].numpy()
    w1 = w_history[0:,1].numpy()
    plt.scatter(w0, w1, marker='o', c='r')
    w_opt = w_opt.numpy()
    # analytic (closed-form) solution marked with a white 'x'
    plt.plot(w_opt[0], w_opt[1], 'wx')
    #plt.title('Visualização do treinamento de w na função de Custo J')
    # save the image (disabled)
    #plt.savefig('fig_custo.png')
    #plt.show()
# +
def compute_cost(X_b, y, wT):
    """Mean squared error of the predictions X_b @ wT against targets y,
    averaged over samples (dim 0); one cost value per weight column."""
    residuals = X_b.mm(wT) - y
    return torch.mean(residuals * residuals, 0)
# Analytic least-squares solution via the normal equations: w* = (X'X)^-1 X'y.
w_opt = (torch.inverse((X_bias.t()).mm(X_bias)).mm(X_bias.t())).mm(y)
# -
# ### Gerando a figura para correção automática
#
# <a id='figura'></a>
# +
# Three side-by-side cost-surface plots, one per batch-size experiment.
fig = plt.figure(figsize=(20,5))
plt.suptitle('Visualizacão do erro')
ax = fig.add_subplot(131)
plt.title("Batch size 1")
Jview(X_bias, y, w_history_1, w_opt, plt, ax)
#ax.plot(J_history_stochastic)
ay = fig.add_subplot(132)
plt.title("Batch size T")
Jview(X_bias, y, w_history_T, w_opt, plt, ay)
#ay.plot(J_history)
az = fig.add_subplot(133)
plt.title("Batch size num_amostras")
Jview(X_bias, y, w_history_M, w_opt, plt, az)
#az.plot(J_history)
# Figure used for automatic grading.
plt.savefig('my_sgd_minibatch.png')
# -
| week07/my_sgd_minibatch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JillanCalvelo/LinearAlgebPublic/blob/main/Assignment_10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="vxbn2-hwTEU5"
#
# + [markdown] id="0H4Uo3foTHhH"
# #Objectives
# At the end of this activity you will be able to:
# 1. Be familiar with representing linear combinations in the 2-dimensional plane.
# 2. Visualize spans using vector fields in Python.
# 3. Perform vector fields operations using scientific programming.
# + [markdown] id="1GXIGWqxH_RH"
# ###**Discussion**
# + id="plJ0FNH6RSED"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="qlTiZK9mTW0i"
# ##Linear Combination
# A linear combination is an expression formed by multiplying each vector in a set by a scalar and adding the results.
# + [markdown] id="DKkAkqdECK9f"
# We will try to visualize the vectors and their linear combinations by plotting a sample of real number values for the scalars for the vectors. Let's first try the vectors below:
# + [markdown] id="YF8UyE7XTlZ_"
# $$X = \begin{bmatrix} 2\\5 \\\end{bmatrix} , Y = \begin{bmatrix} 7\\9 \\\end{bmatrix} $$
#
# + id="1oeaP5QnTYxV"
# Base vectors for the linear-combination examples.
vectX = np.array ([2,5])
vectY = np.array ([7,9])
# + [markdown] id="WQfPFtFgUaeH"
# $$X = c\cdot \begin{bmatrix} 2\\5 \\\end{bmatrix} $$
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="Vt5KA79QT5jw" outputId="e2f94e06-fc95-4398-a4dc-2cf80c905e49"
# Scale vectX by a range of scalars c: the points trace the span of a
# single vector, i.e. the line through the origin in direction vectX.
c = np.arange(-15,15,0.5)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="chpjeYNdVlfW" outputId="4b995154-62ad-46f2-8d92-d637b314fb79"
vectA = np.array([1,0])
vectB = np.array([1,-1])
R = np.arange(-10,10,1)
# All (c1, c2) scalar pairs on a grid: c1*A + c2*B sweeps the span of {A, B}.
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
##plt.scatter(R*vectA[0],R*vectA[1])
##plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
# + [markdown] id="iTRJLx0pQx2E"
# # Activity
# + [markdown] id="018TmNygQyrr"
# ### Task 1
# + [markdown] id="I-vzAN2xQ0uy"
# Try different linear combinations using different scalar values. In your methodology discuss the different functions that you have used, the linear equation and vector form of the linear combination, and the flowchart for declaring and displaying linear combinations. Please make sure that your flowchart has only few words and not putting the entire code as it is bad practice. In your results, display and discuss the linear combination visualization you made. You should use the cells below for displaying the equation markdows using LaTeX and your code.
# + [markdown] id="XAJAOdr5EkI9"
# $$ 5x+2y-13z= 0$$
# + [markdown] id="lgo8XYPgCZWY"
# $$A = \begin{bmatrix} 5\\8 \\\end{bmatrix} , B = \begin{bmatrix} 7\\5 \\\end{bmatrix} $$
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="rb4ysje5Q2sS" outputId="ed373fcb-06ba-4b3f-ff4e-e1ef3ddb6cf0"
# Span of vectA and vectB over all integer scalar pairs in [-10, 10).
vectA = np.array ([5,8])
vectB = np.array([7,5])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
# + [markdown] id="Djheiq6AQ2-t"
# ## Conclusion
# + [markdown] id="pgnaGmmcQ6X0"
# From this laboratory exercise I conclude that working with linear combinations and vector spaces through NumPy and Matplotlib functions helps us map from model space to data space, and that the span of a set of vectors can be illustrated with Python functions that plot vector fields. Performing these vector-field operations in Python allowed us to visualize linear combinations in the 2-D plane. Plotting — displaying points or variables on an x-y axis to show the relationship between data points — made it easier to understand the behavior of the vectors involved. Such operations are useful for visualizing vertices to understand their structure, and they serve many scientific fields as a way of making data comprehensible.
| Assignment_10.ipynb |
# +
# Podium installation
# ! pip install podium-nlp
# To install from source instead of the last release, comment the command above and uncomment the following one.
# # ! pip install git+https://github.com/takelab/podium
# Additional dependencies required to run this notebook:
# ! pip install sacremoses clean-text spacy truecase https://github.com/LIAAD/yake/archive/v0.4.2.tar.gz
# ! python -m spacy download en_core_web_sm
# -
# # Hooks
# Podium contains a number of predefined hook classes which you can instantiate and use in your Fields. Most of these hooks are customizable and can work both as pretokenization hooks as well as post-tokenization hooks.
# > **NOTE:** If you apply a hook as post-tokenization, it will be called for each element in the tokenized sequence!
# >
# > Hooks should be cast to post-tokenization **only** if their application would otherwise influence the tokenization process. Setting a hook to post-tokenization is expected to take longer than the same hook being used during pretokenization.
# ## Moses Normalizer
# `MosesNormalizer` is a hook that wraps `MosesPunctNormalizer` from [sacremoses](https://github.com/alvations/sacremoses). Accepts the language for the punctuation normalizer to be applied on. Normalizes whitespace, unicode punctuations, numbers and special control characters.
from podium.preproc import MosesNormalizer
moses = MosesNormalizer(language="en")
text = "A _very_ spaced sentence"
print(moses(text))
# By default, MosesNormalizer is a pretokenization hook, which means it expects a single string as an argument. We can cast it to a post-tokenization hook with the `as_posttokenize_hook` helper function that transforms the built-in pretokenization hooks to post-tokenization hooks. As a result, the hook now expectes two arguments.
from podium.preproc import as_posttokenize_hook
moses = as_posttokenize_hook(moses)
raw_text = None
tokenized_text = ["A "," _very_"," spaced "," sentence"]
print(moses(raw_text, tokenized_text))
# ## Regex Replace
# `RegexReplace` is a hook that applies regex replacements. As an example, we can replace all non-alphanumeric characters from SST instances. First, we will setup loading of the SST dataset, which we will use throughout the following examples. For reference, we will now print out the instance we will apply the transformation on:
# +
from podium import Field, LabelField, Vocab
from podium.datasets import SST
text = Field('text', numericalizer=Vocab())
label = LabelField('label')
fields={'text':text, 'label':label}
sst_train, sst_test, sst_dev = SST.get_dataset_splits(fields=fields)
print(sst_train[222])
# -
# Now, we need to define our replacements, each a `(Pattern, str)` tuple where the pattern matched is replaced with the string.
from podium.preproc import RegexReplace
non_alpha = r"[^a-zA-Z\d\s]"
replacements = RegexReplace([
(non_alpha, '')
])
text = Field('text', numericalizer=Vocab(),
pretokenize_hooks=[replacements],
keep_raw=True)
fields={'text':text, 'label':label}
sst_train, sst_test, sst_dev = SST.get_dataset_splits(fields=fields)
print(sst_train[222])
# As we can see, the non-alphanumeric characters have been removed from the sequence. Similarly, you can pass a list of regex replacements which will then be executed in the order given. Please do take note that regular expressions are not known for their speed and if you can perform a replacement without using one, it might be beneficial.
# ## Text Cleanup
# `TextCleanUp` is a **pretokenization** hook, a wrapper of a versatile library that can perform a number of text cleaning operations. For full options, we refer the reader to the
# [cleantext](https://github.com/jfilter/clean-text) repository . In Podium, `TextCleanUp` can be used as follows:
from podium.preproc import TextCleanUp
# cleantext wrapper; here only punctuation removal is enabled.
cleanup = TextCleanUp(remove_punct=True)
text = Field('text', numericalizer=Vocab(), pretokenize_hooks=[cleanup], keep_raw=True)
sst_train, sst_test, sst_dev = SST.get_dataset_splits(fields={'text':text, 'label':label})
print(sst_train[222])
# ## NLTK Stemmer
# `NLTKStemmer` is a **post-tokenization** hook that applies the NLTK stemmer to the tokenized sequence. This hook, for obvious reasons, cannot be used as a pretokenization hook.
from podium.preproc import NLTKStemmer
stemmer = NLTKStemmer(language="en", ignore_stopwords=True)
# Stemming operates on tokens, so it must be registered post-tokenization.
text = Field('text', numericalizer=Vocab(), posttokenize_hooks=[stemmer])
sst_train, sst_test, sst_dev = SST.get_dataset_splits(fields={'text':text, 'label':label})
print(sst_train[222])
# ## Spacy Lemmatizer
# `SpacyLemmatizer` is a **post-tokenization** hook that applies the Spacy lemmatizer to the tokenized sequence. This hook, for obvious reasons, cannot be used as a pretokenization hook.
from podium.preproc import SpacyLemmatizer
lemmatizer = SpacyLemmatizer(language="en")
# BUG FIX: the original passed `posttokenize_hooks=[stemmer]` (left over from the
# NLTK stemmer cell), so the lemmatizer defined above was never applied.
text = Field('text', numericalizer=Vocab(), posttokenize_hooks=[lemmatizer])
sst_train, sst_test, sst_dev = SST.get_dataset_splits(fields={'text':text, 'label':label})
print(sst_train[222])
# ## Truecase
# `truecase` is a **pre-tokenization** hook that applies [truecasing](https://github.com/daltonfury42/truecase) to the input strings. The `oov` argument controls how the library handles out-of-vocabulary tokens, the options being `{"title", "lower", "as-is"}`.
from podium.preproc import truecase
# oov='as-is' leaves out-of-vocabulary tokens unchanged (alternatives: 'title', 'lower').
apply_truecase = truecase(oov='as-is')
print(apply_truecase('hey, what is the weather in new york?'))
# ## Stopword removal
# `remove_stopwords` is a **post-tokenization** hook that removes stop words from the tokenized sequence. The list of stop words is provided by [SpaCy](https://spacy.io/) and the language is controlled by the `language` parameter.
# > **WARNING:** The spacy stopword list is in lowercase, so it is recommended to lowercase your tokens prior to stopword removal to avoid unexpected behavior.
from podium.preproc import remove_stopwords
remove_stopwords_hook = remove_stopwords('en')
# Post-tokenization hook: the raw text is not needed for this demo, so pass None.
raw_text = None
tokenized_text = ['in', 'my', 'opinion', 'an', 'exciting', 'and', 'funny', 'movie']
print(remove_stopwords_hook(raw_text, tokenized_text))
# ## Keyword extraction
# `KeywordExtractor` is a **special post-tokenization** hook that extracts keywords from the **raw** sequence. Currently, two keyword extraction algorithms are supported: `yake` and `rake`.
# > **WARNING:** The results in the following example are not representative due to the short input text.
from podium.preproc import KeywordExtractor
# Extract the top-3 keywords using the YAKE algorithm.
keyword_extraction_hook = KeywordExtractor('yake', top=3)
raw_text = 'Next conference in San Francisco this week, the official announcement could come as early as tomorrow.'
# Keywords are extracted from the raw text, so the token list can stay empty.
tokenized_text = []
_, keywords = keyword_extraction_hook(raw_text, tokenized_text)
print(keywords)
# # Utilities
# Various tools that can be used for preprocessing textual datasets, not necessarily intended to be used as hooks.
# ## SpaCy sentencizer
# `SpacySentencizer` can be used to split input strings into sentences prior to tokenization.
# ## Hook conversion
# `as_posttokenize_hook` can be used to convert a built-in pretokenization hook to a post-tokenization hook.
| docs/source/notebooks/preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''tensorflow'': conda)'
# metadata:
# interpreter:
# hash: 1d86a45ab2e7836dcacf03a0333e295b9df75ceb9ff683e8d1dc3d219abfe918
# name: python3
# ---
# For the last two questions regarding what are related to relationships of variables with salary and job satisfaction - Each of these questions will involve not only building some sort of predictive model, but also finding and interpretting the influential components of whatever model we build.
#
# To get started let's read in the necessary libraries and take a look at some of our columns of interest.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# %matplotlib inline
# Load the survey results; assumes the CSV sits next to the notebook.
df = pd.read_csv('survey-results-public.csv')
df.head()
# +
# Working with just quantitative variables is actually pretty straightforward
# for most supervised learning techniques (except - we note that any row with
# a missing value in any of the columns we use will be dropped -
# which might leave us with fewer predictions than we were hoping for).
# Let's just start and iterate on our findings
# Let's just fit something and go from there
df.describe()
# +
# The above are variables that python is treating as numeric variables, and therefore, we
# could send them into our linear model blindly to predict the response
# Let's take a quick look at our data first
df.hist();
# -
# Pairwise correlations of the numeric columns, annotated to 2 decimals.
sns.heatmap(df.corr(), annot=True, fmt=".2f");
# +
# Here we can see that none of our variables appear to be greatly correlated with salary
# and we can see that if someone was given an expected salary question, they either
# never answered the salary question or they were not given the salary question
# We can still go ahead and make predictions using these variables as a reminder of the
# scikit learn way of fitting models. The process is similar to quickly fit models of
# all types - usually a four step process of - instantiate, fit, predict, score
# In most cases, we also will want to split data into training and test data to assure
# we are not building models that overfit the data and do not extend well to new situations.
# NOTE(review): this fit raises on the NaNs in X/y — kept deliberately to
# illustrate the failure discussed in the next cell.
X = df[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df['Salary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
# +
### Notice the above breaks because of the NaN values, so we either need to fill or remove them
# Or we could write a conditional model that fits differently
# depending on the values that are missing - we can see the nans based on the describe above
df.shape
#________ Video 1 through here on introduction to the data - could do a bit more EDA ________#
# +
### The easiest way to move onto a conclusion in a first pass is probably just with dropping
# Drop every row with a missing value in any of the modeling columns,
# then fit a baseline linear model on the survivors.
num_vars = df[['Salary', 'CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
df_dropna = num_vars.dropna(axis=0)
X = df_dropna[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df_dropna['Salary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
y_test_preds = lm_model.predict(X_test) #We can then use our fitted model to predict the salary for each
#individual in our test set, and see how well these predictions
#match the truth.
print(r2_score(y_test, y_test_preds)) #In this case we are predicting a continuous, numeric response. Therefore, common
print(mean_squared_error(y_test, y_test_preds)) #metrics to assess fit include Rsquared and MSE.
# -
# Whoop - we built a model that predicts... but we are missing by a lot!
# We can get a quick glimpse of how bad our predictions are...
# This suggests that 3% of the variability in salaries can be explained by these variables...
df_dropna.shape # But it also reduced our dataset to only 5338 rows
# ~20% of the original dataset size
# # Recorded from here up
#
#
# # Screencasts Remaining:
# 1. Imputation - first results
# 2. Categorical Variables - improved results, but what is happening?
# 3. Combat Overfitting - one method
# Stack actual and predicted test salaries side by side for residual analysis.
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
preds_vs_act.head()
# +
### We can plot how far our predictions are from the actual values compared to the
### predicted values - you can see that it isn't uncommon for us to miss salaries by
### 150000 and the overpredictions tend to be much worse than the underpredictions
### There also appears to be a trend where our differences decrease as the predicted
### values increase on the test data.
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
# +
#______Video 2 ____Our First Modeling Attempt (Mark all the bad things)________#
### There are tons of downfalls already - our predictions are pretty poor, we have predictions
### for only 20% of the total values that actually hold salaries, and we are only using
### quantitative variables to predict.
### Given how bad the predictions are, we might not hurt anything by just filling the missing
### values to make more predictions.
#Here we fill on the column means
# Impute each column with its own mean (note: computed over the full data,
# which is the leakage discussed a few cells below).
df_fillna = num_vars.apply(lambda x: x.fillna(x.mean()),axis=0)
X = df_fillna[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df_fillna['Salary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
y_test_preds = lm_model.predict(X_test) #We can then use our fitted model to predict the salary for each
#individual in our test set, and see how well these predictions
#match the truth.
print(r2_score(y_test, y_test_preds)) #In this case we are predicting a continuous, numeric response. Therefore, common
# -
X.shape
# +
### Now we can predict on everything, but our predictions are even worse!
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
preds_vs_act.head()
# -
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
plt.plot(preds_vs_act['preds'], preds_vs_act['actual'], 'bo');
plt.xlabel('predicted');
plt.ylabel('actual'); #This looks less compelling that we are predicting well...
# I also think I found the mean amount...which aren't real 'actual' salaries
# +
### Some strange line here - probably because we filled in our average for everything
### Which was actually data leakage. We shouldn't have done this at all. We would likely
### Have to use the mean of the old data to fill in the missing of the future data...
### But this does depend a bit - if on future homes, you will have the x-variables before
### having to predict, this really isn't data leakage, as you would have the abiltiy to update
### the inputed means with each new individual in your dataset.
### Really the values that have the mean value for the salary should be dropped - because
### those are not true salaries.
# Remove rows whose Salary equals the column mean — those are imputed values,
# not real responses, so they must not be used as targets.
df_fillna = df_fillna.drop(df_fillna[df_fillna['Salary'] == np.mean(df['Salary'])].index)
df_fillna.shape # that's better. we only have this many non-null salaries in our original dataset
# +
#Below you can fit a new model with the missing salaries removed
# +
X = df_fillna[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df_fillna['Salary']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
y_test_preds = lm_model.predict(X_test) #We can then use our fitted model to predict the salary for each
#individual in our test set, and see how well these predictions
#match the truth.
print(r2_score(y_test, y_test_preds)) #In this case we are predicting a continuous, numeric response. Therefore, common
print(mean_squared_error(y_test, y_test_preds)) #metrics to assess fit include Rsquared and MSE.
# +
##### Stop Video 2
### Now we can predict on everything, but our predictions are even worse!
# Rebuild the actual-vs-predicted frame for the refit model.
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
preds_vs_act.shape
# -
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
# +
### When we see fan like shapes in the residual plots like this - it often suggests
### we might make better predictions on the log of the response
plt.plot(preds_vs_act['preds'], preds_vs_act['actual'], 'bo');
plt.xlabel('predicted');
plt.ylabel('actual'); #there appears to be a slight positive trend like we would want to see
# +
#______Video 3 Fill in Missing values with the mean - why this is bad_______#
### Let's see how we might be able to use categorical variables in our models.
### Though you might try to do something smart to reduce the feature space of your
### x-matrix (like find curved relationships that exist in salary comparing across categories).
### It is probably easier to just blindly encode all of the categorical variables as dummy
### variables in our models.
# All object-dtype (string/categorical) columns are candidates for dummy coding.
cat_vars_int = df.select_dtypes(include=['object']).copy().columns
# http://pbpython.com/categorical-encoding.html
len(cat_vars_int)
# +
### Now that we have a list of all the dummy variables we might be interested in...
### Let's dummy code them, so that we can use them in our machine learning models
### you can do this with pandas (get dummies) or with sklearn (one hot encoding)
### Feel free to use whatever you are comfortable with
# +
# drop_first=True avoids the redundant reference-level column for each variable.
for var in cat_vars_int:
    # for each cat add dummy var, drop original column
    df = pd.concat([df.drop(var, axis=1), pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
df.describe()
# +
### Because we have more rows than number of variables, it is actually possible
### for us to build a model that uses all of the columns to predict the response...
### Whether this is actually a good idea or not is up for debate - let's maybe
### choose some variables that seem like they might be related to salary and go from there.
### You can also see that the nulls are still dropped after dummy encoding, which means
### we will again need to figure out what to do with rows where those values are null.
### It might be okay to just use the mode of the dataset to fill in those values - though
### in reality, a lack of answer is maybe an indication that your answer is different
### from the group and therefore, you didn't want to answer the question.
### We know there are 12891 non-NaN salaries to predict based on the previous model - so we
### want to make sure we can predict all of these salaries with our new model as well, but now
### unlike the 5 columns we had to choose from before we have more than 40,000 to choose from.
### This could be a great place for some PCA or PLS, but I would like to try and keep
### the interpretability of the features as much as possible... so I am just going to
### use the original features.
### We could try even adding interactions or other combinations of these features, but again
### this would make our features less interpretable. So you have to weigh the pros and cons
### of adding these features.
# -
# Inner join keeps only respondents present in both frames (i.e. those with
# retained, non-imputed salaries).
df_result = pd.concat([df, df_fillna], axis=1, join='inner')
df_result.shape
# Both frames carry a 'Salary' column at this point, hence the duplicate selection.
df_result['Salary'].head()['Salary']
# Keep only the first occurrence of each duplicated column name.
df_result = df_result.iloc[:,~df_result.columns.duplicated()]
df_result.shape
# +
### Now we have no duplicated columns, we can focus on which of our new columns (and the
### previously used columns) we would like to use to try and predict the response. We might
### just go based on intuition, or we could try to find the variables that are most correlated
### Don't get too high of hopes - having a quant variable correlated with a 1-0 variable
### is not really what correlation coefficients are designed to detect. They are meant
### to find linear relationships between quant variables. Though correlations are not built for
### finding these relations - they can still give a sense of which variables are best related
### Actually if you try to build the correlation matrix... it might run for a long time, and
### not be very legible anyway... Let's just fit some stuff that seems interesting
### and intuitive.
# +
### Given how many columns we have to use - let's just drop all of the columns that have any
### missing values
# Drop any column that still contains a missing value.
df_result = df_result.dropna(axis=1, how='any')
# -
df_result.shape # which is only 6, sooo that kind of sucks at narrowing down this mess...
# Respondent is an identifier, Salary is the target — neither belongs in X.
y = df_result['Salary']
X = df_result.drop(['Respondent', 'Salary'], axis=1)
# +
#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)
#lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
#lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
#y_test_preds = lm_model.predict(X_test) #We can then use our fitted model to predict the salary for each
#indvidual in our test set, and see how well these predictions
#match the truth.
#print(r2_score(y_test, y_test_preds)) #In this case we are predicting a continuous, numeric response. Therefore, common
#print(mean_squared_error(y_test, y_test_preds)) #metrics to assess fit include Rsquared and MSE.
## Filling in the missing values does appear to have helped based on a preliminary check
# +
#print(r2_score(y_train, lm_model.predict(X_train)))
#print(mean_squared_error(y_train, lm_model.predict(X_train))) # What does this mean?
# -
# To combat the overfitting we have a number of options, but one way that would also reduce our run time would be to remove columns from our dataframe. You will notice that sklearn does not provide pvals back for our coefficients, but it performs ridge regression by default. So, therefore, we can consider that columns that have larger coefficients are also more useful for predicting our response variable. How large is large enough to consider keeping? Well, that is a great question, and I also don't have a great answer... We can try some stuff and see what works.
# Then we can also run cross-validation and aggregate our results to combat the overfitting we saw earlier using this reduced X matrix.
# +
# You could deal with these rare events in different ways - you could consider them as great predictors
# I am going to remove them - as I feel like they are likely not that indicative of other individuals
# I want to find overriding truths about the individuals who receive particular salaries.
# So, let's only consider columns where there are more than 1000 of the level of interest in the column.
# Keep only columns whose sum exceeds 10 (for 0/1 dummies: at least 11 ones).
reduce_X = X.iloc[:, np.where((X.sum() > 10) == True)[0]]
reduce_X.shape
# +
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = .30, random_state=42)
lm_model = LinearRegression(normalize=True) # Here you could set any hyperparameters of your model
lm_model.fit(X_train, y_train) # If this model was to predict for new individuals, we probably would want
# worry about train/test splits and cross-validation, but for now I am most
# interested in finding a model that just fits all of the data well
y_test_preds = lm_model.predict(X_test) #We can then use our fitted model to predict the salary for each
#individual in our test set, and see how well these predictions
#match the truth.
print(r2_score(y_test, y_test_preds)) #In this case we are predicting a continuous, numeric response. Therefore, common
print(mean_squared_error(y_test, y_test_preds)) #metrics to assess fit include Rsquared and MSE.
## Filling in the missing values does appear to have helped based on a preliminary check
# -
# Train-set scores for comparison against the test-set scores above (overfit check).
print(r2_score(y_train, lm_model.predict(X_train))) #In this case we are predicting a continuous, numeric response. Therefore, common
print(mean_squared_error(y_train, lm_model.predict(X_train))) #metrics to assess fit include Rsquared and MSE.
## Filling in the missing values does appear to have helped based on a preliminary check
### Let's see what be the best number of features to use based on the test set performance
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
    '''
    Sweep `cutoffs`, fitting a linear model on the subset of columns of X whose
    column sums exceed each cutoff, then refit on the cutoff with the best
    test-set r2 and return that model together with its train/test split.

    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    lm_model - model object from sklearn, fit on the train split for the best cutoff
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()

    def _split_for(cutoff):
        # Keep columns with more than `cutoff` non-zero entries; for 0/1 dummy
        # columns the column sum is exactly the count of ones.
        reduce_X = X.loc[:, X.sum() > cutoff]
        num_feats.append(reduce_X.shape[1])
        return train_test_split(reduce_X, y, test_size=test_size, random_state=random_state)

    for cutoff in cutoffs:
        X_train, X_test, y_train, y_test = _split_for(cutoff)
        lm_model = LinearRegression(normalize=True)  # could set other hyperparameters here
        lm_model.fit(X_train, y_train)
        y_test_preds = lm_model.predict(X_test)
        y_train_preds = lm_model.predict(X_train)
        # Track r2 on both splits so overfitting is visible in the plot.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_scores_test[-1]

    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()

    # Refit on the best cutoff (keys were stringified above, hence int()).
    best_cutoff = max(results, key=results.get)
    X_train, X_test, y_train, y_test = _split_for(int(best_cutoff))
    lm_model = LinearRegression(normalize=True)
    lm_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
# Cutoffs to sweep, from most to least restrictive.
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 20, 10, 5]
r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test = find_optimal_lm_mod(X, y, cutoffs)
# +
#______Video 4 Creating Dummy Variables & Other Alternatives for Categorical Variables____#
### Now that we have the best model in terms of the r2 on the test data, we can use this model to see which features
### appear to be most important, and what impact they have on salary.
X_train.shape # we have 1081 features in the optimal model - let's look at some of them
# +
y_test_preds = lm_model.predict(X_test)
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
# -
plt.plot(preds_vs_act['preds'], preds_vs_act['actual'], 'bo');
plt.xlabel('predicted');
plt.ylabel('actual'); #there appears to be a slight positive trend like we would want to see
# +
# Rank features by absolute coefficient size as a rough importance proxy.
coefs_df = pd.DataFrame()
coefs_df['est_int'] = X_train.columns
coefs_df['coefs'] = lm_model.coef_
coefs_df['abs_coefs'] = np.abs(lm_model.coef_)
coefs_df.sort_values('abs_coefs', ascending=False).head(20)
# -
lm_model.intercept_
# +
X_train.shape, sum(X_train['Professional_Professional developer'])
#_____Video 7 Interpretting the results_____#
# +
#____Video 8 - Ensemble Models______#
### One of the best out of the box methods for supervised machine learning
### is known as the RandomForest - let's see if we can use this model to outperform
### The linear model from earlier.
from sklearn.ensemble import RandomForestRegressor
### Let's see what be the best number of features to use based on the test set performance
def find_optimal_rf_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
    '''
    Sweep `cutoffs`, fitting a random forest on the subset of columns of X whose
    column sums exceed each cutoff, then refit on the cutoff with the best
    test-set r2 and return that model together with its train/test split.

    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    rf_model - model object from sklearn, fit on the train split for the best cutoff
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()

    def _split_for(cutoff):
        # Keep columns with more than `cutoff` non-zero entries; for 0/1 dummy
        # columns the column sum is exactly the count of ones.
        reduce_X = X.loc[:, X.sum() > cutoff]
        num_feats.append(reduce_X.shape[1])
        return train_test_split(reduce_X, y, test_size=test_size, random_state=random_state)

    for cutoff in cutoffs:
        X_train, X_test, y_train, y_test = _split_for(cutoff)
        rf_model = RandomForestRegressor()  # no normalizing here, but could tune other hyperparameters
        rf_model.fit(X_train, y_train)
        y_test_preds = rf_model.predict(X_test)
        y_train_preds = rf_model.predict(X_train)
        # Track r2 on both splits so overfitting is visible in the plot.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_scores_test[-1]

    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()

    # Refit on the best cutoff (keys were stringified above, hence int()).
    best_cutoff = max(results, key=results.get)
    X_train, X_test, y_train, y_test = _split_for(int(best_cutoff))
    rf_model = RandomForestRegressor()
    rf_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, rf_model, X_train, X_test, y_train, y_test
# -
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 20, 10, 5]
r2_test, r2_train, rf_model, X_train, X_test, y_train, y_test = find_optimal_rf_mod(X, y, cutoffs)
# +
# Residuals for the best random forest, same diagnostic as for the linear model.
y_test_preds = rf_model.predict(X_test)
preds_vs_act = pd.DataFrame(np.hstack([y_test.values.reshape(y_test.size,1), y_test_preds.reshape(y_test.size,1)]))
preds_vs_act.columns = ['actual', 'preds']
preds_vs_act['diff'] = preds_vs_act['actual'] - preds_vs_act['preds']
plt.plot(preds_vs_act['preds'], preds_vs_act['diff'], 'bo');
plt.xlabel('predicted');
plt.ylabel('difference');
# +
#Looks like this overfits quite a bit...
# -
# +
from sklearn.model_selection import GridSearchCV
### Let's see what be the best number of features to use based on the test set performance
def find_optimal_rf_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True, param_grid=None):
    '''
    Sweep `cutoffs`, fitting a random forest (optionally grid-searched) on the
    subset of columns of X whose column sums exceed each cutoff, then refit on
    the cutoff with the best test-set r2 and return that model with its split.

    INPUT
    X - pandas dataframe, X matrix
    y - pandas dataframe, response variable
    cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
    test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
    random_state - int, default 42, controls random state for train_test_split
    plot - boolean, default True, True to plot result
    param_grid - dict or None, default None; when given, hyperparameters are tuned
    with GridSearchCV instead of fitting a single RandomForestRegressor

    OUTPUT
    r2_scores_test - list of floats of r2 scores on the test data
    r2_scores_train - list of floats of r2 scores on the train data
    rf_model - model object from sklearn (RandomForestRegressor or GridSearchCV)
    X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
    '''
    r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()

    def _split_for(cutoff):
        # Keep columns with more than `cutoff` non-zero entries; for 0/1 dummy
        # columns the column sum is exactly the count of ones.
        reduce_X = X.loc[:, X.sum() > cutoff]
        num_feats.append(reduce_X.shape[1])
        return train_test_split(reduce_X, y, test_size=test_size, random_state=random_state)

    def _make_model():
        # `is None` (identity) rather than the original's `== None` comparison.
        if param_grid is None:
            return RandomForestRegressor()  # could tune other hyperparameters here
        rf_inst = RandomForestRegressor(n_jobs=-1, verbose=1)
        return GridSearchCV(rf_inst, param_grid, n_jobs=-1)

    for cutoff in cutoffs:
        X_train, X_test, y_train, y_test = _split_for(cutoff)
        rf_model = _make_model()
        rf_model.fit(X_train, y_train)
        y_test_preds = rf_model.predict(X_test)
        y_train_preds = rf_model.predict(X_train)
        # Track r2 on both splits so overfitting is visible in the plot.
        r2_scores_test.append(r2_score(y_test, y_test_preds))
        r2_scores_train.append(r2_score(y_train, y_train_preds))
        results[str(cutoff)] = r2_scores_test[-1]

    if plot:
        plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
        plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
        plt.xlabel('Number of Features')
        plt.ylabel('Rsquared')
        plt.title('Rsquared by Number of Features')
        plt.legend(loc=1)
        plt.show()

    # Refit on the best cutoff (keys were stringified above, hence int()).
    best_cutoff = max(results, key=results.get)
    X_train, X_test, y_train, y_test = _split_for(int(best_cutoff))
    rf_model = _make_model()
    rf_model.fit(X_train, y_train)
    return r2_scores_test, r2_scores_train, rf_model, X_train, X_test, y_train, y_test
# -
cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 20, 10, 5]
# Grid over forest size and depth; each combination is cross-validated by GridSearchCV.
params = {'n_estimators': [10, 100, 1000], 'max_depth': [1, 5, 10, 100]}
r2_test, r2_train, rf_model, X_train, X_test, y_train, y_test = find_optimal_rf_mod(X, y, cutoffs, param_grid=params)
| Salary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cleanMsc
# language: python
# name: cleanmsc
# ---
plate = 13
directory='//sun.amolf.nl/shimizu-data/home-folder/oyartegalvez/Drive_AMFtopology/PRINCE'
listdir=os.listdir(directory)
# Keep folders whose name ends in 'PlateNN' (zero-padded when plate < 10).
list_dir_interest=[name for name in listdir if name.split('_')[-1]==f'Plate{0 if plate<10 else ""}{plate}']
# Folder names follow 'YYYYMMDD_HHMM_PlateNN' (per the parsing below): ss holds
# the date part, ff the time part.
ss=[name.split('_')[0] for name in list_dir_interest]
ff=[name.split('_')[1] for name in list_dir_interest]
dates_datetime=[datetime(year=int(ss[i][:4]),month=int(ss[i][4:6]),day=int(ss[i][6:8]),hour=int(ff[i][0:2]),minute=int(ff[i][2:4])) for i in range(len(list_dir_interest))]
dates_datetime.sort()
# Keep timestamps 2 through 25 after sorting (index 1..24).
dates_datetime_chosen=dates_datetime[1:25]
# Re-serialize the chosen timestamps back to zero-padded 'MMDD_HHMM' strings.
dates = [f'{0 if date.month<10 else ""}{date.month}{0 if date.day<10 else ""}{date.day}_{0 if date.hour<10 else ""}{date.hour}{0 if date.minute<10 else ""}{date.minute}' for date in dates_datetime_chosen]
def pinpoint_anastomosis(expe,t):
    """Detect anastomosis (hyphal fusion) events between timesteps t and t+1.

    Parameters: expe — experiment object exposing per-timestep nx_graph,
    connections and positions; t — index of the earlier timestep.

    Returns (anastomosis, origins, number_anastomosis): nodes of graph t+1
    where a fusion was detected, the degree-1 tips of graph t responsible,
    and the event count (a tip that vanishes next to a thick edge counts as
    half an event).
    """
    nx_graph_tm1 = expe.nx_graph[t]
    nx_grapht = expe.nx_graph[t+1]
    from_tip = expe.connections[t]
    pos_tm1 = expe.positions[t]
    anastomosis=[]
    origins=[]
    # Tips are the degree-1 nodes of the earlier graph.
    tips = [node for node in nx_graph_tm1.nodes if nx_graph_tm1.degree(node)==1]
    number_anastomosis = 0
    def dist_branch(node,nx_graph,pos):
        # Minimum pixel distance from `node` to any edge of nx_graph. Edges
        # whose first pixel is further than 5000 px are skipped as a cheap
        # pre-filter before the exact per-pixel distance computation.
        mini=np.inf
        for edge in nx_graph.edges:
            pixel_list=nx_graph.get_edge_data(*edge)['pixel_list']
            if np.linalg.norm(np.array(pixel_list[0])-np.array(pos[node]))<=5000:
                distance=np.min(np.linalg.norm(np.array(pixel_list)-np.array(pos[node]),axis=1))
                if distance<mini:
                    mini=distance
        return(mini)
    def count_neighbors_is_from_root(equ_list,nx_graph,root):
        # Number of root's neighbours that belong to equ_list.
        count=0
        for neighbor in nx_graph.neighbors(root):
            if neighbor in equ_list:
                count+=1
        return(count)
    for tip in tips:
        # print(tip)
        consequence = from_tip[tip]
        for node in consequence:
            # A node descended from this tip that now has degree >= 3 with at
            # most one neighbour from the same connection set marks a fusion.
            if node in nx_grapht.nodes and nx_grapht.degree(node)>=3 and count_neighbors_is_from_root(consequence,nx_grapht,node)<2:
                # if node==2753:
                #     print(count_neighbors_is_from_root(consequence,nx_grapht,node))
                #     print(list(nx_grapht.neighbors(node)))
                anastomosis.append(node)
                origins.append(tip)
                number_anastomosis+=1
        # Tip vanished close (<=30 px) to a branch while sitting on a thick
        # (weight >= 20) edge: counted as a probable fusion with weight 1/2.
        if tip not in nx_grapht.nodes and dist_branch(tip,nx_grapht,pos_tm1)<=30 and nx_graph_tm1.get_edge_data(*list(nx_graph_tm1.edges(tip))[0])['weight']>=20:
            origins.append(tip)
            number_anastomosis+=1/2
    return(anastomosis,origins,number_anastomosis)
# Round-trip the experiment: load the legacy pickle, copy it into a fresh
# Experiment, save in the new format, then reload from disk.
# NOTE(review): the file handle passed to pickle.load is never closed —
# consider a with-block.
exp2 = pickle.load( open( f'Data/'+f"experiment_{13}.pick", "rb" ) )
exp = Experiment(13)
exp.copy(exp2)
exp.pickle_save()
exp = Experiment(13)
exp.pickle_load()
# For every node, record its trajectory starting from the first timestep at
# which it appears as a tip (degree 1).
tip_track={}  # node label -> positions at every later timestep where the node exists
tip_ts={}     # node label -> first timestep at which the node was a tip
for node in exp.nodes:
    for t in range(len(exp.dates)):
        if node.is_in(t) and node.degree(t)==1:
            track = []
            for tsup in range(t,len(exp.dates)):
                if node.is_in(tsup):
                    track.append(node.pos(tsup))
            tip_track[node.label] = track
            tip_ts[node.label] = t
            break
# +
def get_angle(pos1,pos2):
    """Return the signed angle, in degrees within (-180, 180], between the
    "vertical" reference direction (-1, 0) and the vector pos1 -> pos2.

    Positive angles point towards positive y (second coordinate), negative
    towards negative y, matching the original determinant-based sign test.

    Parameters: pos1, pos2 — array-likes of two coordinates.
    """
    vector = np.asarray(pos2) - np.asarray(pos1)
    # atan2(det, dot) yields the signed angle directly.  Unlike the previous
    # arccos-of-dot-product formulation it cannot produce NaN when floating
    # point rounding pushes the cosine fractionally outside [-1, 1], and it
    # needs no explicit normalisation (atan2 is scale invariant).
    # dot((-1, 0), v) = -v[0]; the original sign determinant equals v[1].
    return np.degrees(np.arctan2(vector[1], -vector[0]))
def becomes_degree3(node):
    """Report whether *node* ever reaches degree 3 in the experiment.

    Returns (True, t) for the first timestep t at which the node is present
    with degree 3, otherwise (False, None).
    """
    target = exp.get_node(node)
    for step in range(len(exp.nx_graph)):
        present = target.is_in(step)
        if present and target.degree(step) == 3:
            return True, step
    return False, None
# -
# Per-tip displacement (pixels) between consecutive tracked positions.
tip_growth={tip : [np.linalg.norm(tip_track[tip][i+1]-tip_track[tip][i]) for i in range(len(tip_track[tip])-1)] for tip in tip_track.keys()}
# Tips that moved >= 100 px in at least one step, i.e. actually growing.
tip_of_interest=[tip for tip in tip_growth.keys() if np.any(np.array(tip_growth[tip])>=100)]
min_growth=30  # minimum step length (px) for an angle sample to be retained
# Split growing tips by fate: fused into the network (reaches degree 3) or stopped.
anastomose = [tip for tip in tip_of_interest if becomes_degree3(tip)[0]]
stop = [tip for tip in tip_of_interest if not becomes_degree3(tip)[0]]
stop_with_long_growth=[tip for tip in stop if np.sum(tip_growth[tip])>500]
anastomose_with_long_growth=[tip for tip in anastomose if np.sum(tip_growth[tip])>500]
# Growth direction per retained step, for stopped tips only.
angles = {tip : [get_angle(tip_track[tip][i+1],tip_track[tip][i]) for i in range(len(tip_track[tip])-1) if tip_growth[tip][i]>=min_growth] for tip in stop}
# Turning between successive retained steps.
angles_last = {tipi : [angles[tipi][i+1]-angles[tipi][i] for i in range(len(angles[tipi])-1)] for tipi in angles.keys()}
# Timestep indices of the retained steps.
tis = {tip : [i for i in range(len(tip_track[tip])-1) if tip_growth[tip][i]>=min_growth] for tip in stop}
# Keep tips with >= 4 angle samples and no turn of 90° or more (likely tracking errors).
tips_for_angle=[tip for tip in stop if len(angles[tip])>=4 and not np.any(np.abs(np.array(angles_last[tip]))>=90)]
len(tips_for_angle)
# Plot, for the first ten retained tips, the growth angle relative to the
# tip's initial direction as a function of time (4 h per timestep).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('time (h)')
ax.set_ylabel('angle_from_begining(°)')
for tip in tips_for_angle[0:10]:
    if len(angles[tip])>=1 and not np.any(np.abs(np.array(angles_last[tip]))>=90):
        # Differences wrapped into (-180, 180].
        spotting,=ax.plot(np.array(tis[tip])*4,(np.array(angles[tip])-angles[tip][0]+180)%360-180)
        spotting.set_label(str(tip))
ax.legend()
plt.show()
# Distance travelled since the first retained step, per tip (pixels).
distances = {tip : [np.linalg.norm(tip_track[tip][t]-tip_track[tip][tis[tip][0]]) for t in tis[tip]] for tip in tips_for_angle}
# Same angle curves, now against log-distance (1.725 converts px to µm per
# the mm/day labels used below — TODO confirm scale).
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('distance (log(mm))')
ax.set_ylabel('angle_from_begining(°)')
for tip in tips_for_angle:
    if len(angles[tip])>=1 and not np.any(np.abs(np.array(angles_last[tip]))>=90):
        spotting,=ax.plot(np.log(np.array(distances[tip])*1.725/1000+1),(np.array(angles[tip])-angles[tip][0]+180)%360-180)
        spotting.set_label(str(tip))
ax.legend()
plt.show()
plt.close('all')
# Distribution of the first angle change relative to the initial direction.
angle_distrib=[(np.array(angles[tip])[1]-angles[tip][0]+180)%360-180 for tip in tips_for_angle]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(angle_distrib,8)
# Build an animated GIF following one tip (node 3481): crop a 3000x3000 px
# window around the tip at every timestep where it exists, dilate the
# skeleton for visibility, render each frame, then assemble the GIF.
tip=3481
node_interest=tip
poss = exp.positions
ts = [t for t in range(len(poss)) if exp.get_node(tip).is_in(t)]
pos_problem=[poss[i][node_interest] for i in ts]
xbegin=[pos_problem[i][0]-1500 for i in range(len(ts))]
ybegin=[pos_problem[i][1]-1500 for i in range(len(ts))]
xend=[pos_problem[i][0]+1500 for i in range(len(ts))]
yend=[pos_problem[i][1]+1500 for i in range(len(ts))]
skeletons_small=[]
for i in range(len(ts)):
    skeletons_small.append(exp.skeletons[ts[i]][xbegin[i]:xend[i],ybegin[i]:yend[i]])
# One extra frame after the tip disappears, when available.
if ts[-1]<len(exp.skeletons)-1:
    skeletons_small.append(exp.skeletons[ts[-1]+1][xbegin[-1]:xend[-1],ybegin[-1]:yend[-1]])
node_smalls=[]
for i in range(len(ts)):
    node_smalls.append([node for node in exp.nx_graph[ts[i]].nodes if (xbegin[i]<poss[ts[i]][node][0]<xend[i] and ybegin[i]<poss[ts[i]][node][1]<yend[i] and exp.nx_graph[ts[i]].degree(node)>=1)])
kernel = np.ones((5,5),np.uint8)
# Dilate the sparse skeleton so hyphae are visible in the rendering.
skeletons_small_dilated=[cv2.dilate(skeleton.todense().astype(np.uint8),kernel,iterations = 1) for skeleton in skeletons_small]
for tp1 in range(len(node_smalls)):
    plot_t_tp1([node_interest],[node_interest],poss[ts[tp1]],poss[ts[tp1]],skeletons_small_dilated[tp1],skeletons_small_dilated[tp1],shift=(xbegin[tp1],ybegin[tp1]),save=f'Data/video_test/network_timestep_{tp1}',time=f't={4*tp1}h')
    clear_output(wait=True)
if ts[-1]<len(exp.skeletons)-1:
    plot_t_tp1([],[],poss[ts[-1]+1],poss[ts[-1]+1],skeletons_small_dilated[-1],skeletons_small_dilated[-1],shift=(xbegin[-1],ybegin[-1]),save=f'Data/video_test/network_timestep_{len(node_smalls)}',time=f't={len(node_smalls)}h')
    clear_output(wait=True)
images = []
for t in range(len(node_smalls)):
    images.append(imageio.imread(f'Data/video_test/network_timestep_{t}.png'))
if ts[-1]<len(exp.skeletons)-1:
    images.append(imageio.imread(f'Data/video_test/network_timestep_{len(node_smalls)}.png'))
imageio.mimsave(f'Data/video_test/tips_for_angle/{node_interest}movie_track.gif', images,duration=1)
tip=3481
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('distance (mm)')
ax.set_ylabel('angle_from_begining(°)')
# Wrapped angle relative to the first retained direction vs distance (mm, +1 offset).
for tip in [tip]:
    if len(angles[tip])>=1 and not np.any(np.abs(np.array(angles_last[tip]))>=90):
        spotting,=ax.plot(np.array(distances[tip])*1.725/1000+1,(np.array(angles[tip])-angles[tip][0]+180)%360-180)
# spotting.set_label(str(tip))
# ax.legend()
plt.show()
# Angles in radians, distances in mm (same +1 offset as the plot above).
angles_interest = ((np.array(angles[tip])-angles[tip][0]+180)%360-180)/360*2*np.pi
distances_interest = np.array(distances[tip])*1.725/1000+1
# All pairwise separations and direction correlations cos(theta_j - theta_i);
# the decay of <cos> with separation presumably estimates a persistence
# length — confirm the intended model.
dist=[]
cosins = []
for i,angle in enumerate(angles_interest):
    for j in range(i+1,len(angles_interest)):
        dist.append(distances_interest[j]-distances_interest[i])
        cosins.append(np.cos(angles_interest[j]-angles_interest[i]))
# Average the correlations in equal-width separation bins.
num_slices=len(distances[tip])//2
slices = [(i*max(dist)/num_slices,(i+1)*max(dist)/num_slices) for i in range(num_slices)]
end_slice = [slico[1] for slico in slices]
slice_cosins = [[cos for k,cos in enumerate(cosins) if slico[0]<=dist[k]<slico[1]] for slico in slices]
slice_cosins_av=[np.mean(slice_cos) for slice_cos in slice_cosins]
slice_size = [len(slice_cos) for slice_cos in slice_cosins]
plt.close('all')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('distance (mm)')
ax.set_ylabel('angle_from_begining(°)')
ax.scatter(end_slice,np.array(slice_cosins_av))
# spotting.set_label(str(tip))
# ax.legend()
plt.show()
# Weighted linear fit of log<cos> against separation; -1/slope is the
# resulting decay-length estimate (mm).
regr = linear_model.LinearRegression()
regr.fit(np.array(end_slice).reshape(-1,1),np.log(np.array(slice_cosins_av)).reshape(-1,1),sample_weight=slice_size)
fit = regr.predict(np.array(end_slice).reshape(-1,1))
-1/regr.coef_[0][0]
# Binned correlations on a log scale, with the fitted line overlaid.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('distance (mm)')
ax.set_ylabel('angle_from_begining(°)')
ax.scatter(end_slice,np.log(np.array(slice_cosins_av)))
plt.plot(end_slice,fit)
# spotting.set_label(str(tip))
# ax.legend()
plt.show()
plt.close('all')
# Growth-speed curves (mm/day; 1.725 presumably µm/px — confirm) for 20
# randomly chosen growing tips, one trace per tip.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('time (h)')
ax.set_ylabel('growth (mm/day)')
for i in range(20):
    tip=choice(tip_of_interest)
    spotting,=ax.plot(np.array(range(len(tip_growth[tip])))*4,np.array(tip_growth[tip])*1.725/4*24/1000)
    spotting.set_label(str(tip))
ax.legend()
plt.show()
def becomes_degree3(node):
    """Return (True, t) for the earliest timestep t at which *node* exists
    with degree 3 in the experiment's graph sequence, else (False, None)."""
    candidate = exp.get_node(node)
    for step, _ in enumerate(exp.nx_graph):
        if candidate.is_in(step) and candidate.degree(step) == 3:
            return (True, step)
    return (False, None)
# First timestep at which each anastomosing tip reaches degree 3.
time_anas= {tip : becomes_degree3(tip)[1] for tip in anastomose}
def is_crossing(tip_an):
    """Decide which branch continues an anastomosing tip's growth direction.

    Takes the orientation of the tip's single edge just before fusion; after
    fusion, picks either the edge whose orientation best matches the reversed
    incoming direction (degree >= 4, i.e. a crossing) or the nearest
    neighbouring node (degree 3).

    Returns (mini, continuity): the best match score and the node that
    continues the hypha.  Fixes vs. the original cell:
      * `(... + 180) % 360` — the original `+180%360` added a plain 180
        without wrapping because % binds tighter than +;
      * the degree-3 branch now records the matching `neighbour` (it read a
        stale/undefined `edge` variable) and actually returns its result.
    """
    time = time_anas[tip_an]
    tip_obj = exp.get_node(tip_an)
    edges = tip_obj.edges(time-1)
    print(edges)
    assert len(edges)==1
    # Reversed orientation of the incoming edge, wrapped to [0, 360).
    orient = (edges[0].orientation_whole(time-1)+180)%360
    print('orien',orient)
    neighbours = tip_obj.neighbours(time+1)
    if tip_obj.degree(time+1)>=4:
        print('degree4')
        mini=np.inf
        continuity=None
        for edge in tip_obj.edges(time+1):
            candidate = abs(edge.orientation_whole(time+1)%360-orient)
            if candidate <mini:
                mini=candidate
                continuity = edge.end
        return(mini,continuity)
    else:
        mini=np.inf
        continuity=None
        for neighbour in neighbours:
            candidate = np.linalg.norm(neighbour.pos(time+1)-tip_obj.pos(time+1))
            if candidate <mini:
                mini=candidate
                continuity = neighbour
        return(mini,continuity)
# Ad-hoc inspection cells: exercise is_crossing on one tip and eyeball a
# single growth curve.
tip_int = 2652
is_crossing(tip_int)
plt.close('all')
tip
ts
plt.close('all')
exp.get_node(tip).is_in(5)
exp.positions
choice(anastomose)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('time (h)')
ax.set_ylabel('growth (mm/day)')
spotting,=ax.plot(np.array(range(len(tip_growth[tip])))*4,np.array(tip_growth[tip])*1.725/4*24/1000)
spotting.set_label(str(tip))
ax.legend()
plt.show()
# For ten randomly chosen anastomosing tips, render a cropped GIF of the
# network around the tip over time (same recipe as the single-tip cell
# above), saved under Data/video_test/anast/.
# NOTE(review): the inner loops reuse `i`, shadowing the outer counter;
# harmless since the counter is never used in the body.
for i in range(10):
    tip=choice(anastomose)
    node_interest=tip
    poss = exp.positions
    ts = [t for t in range(len(poss)) if exp.get_node(tip).is_in(t)]
    pos_problem=[poss[i][node_interest] for i in ts]
    # 3000x3000 px crop window centred on the tip at each timestep.
    xbegin=[pos_problem[i][0]-1500 for i in range(len(ts))]
    ybegin=[pos_problem[i][1]-1500 for i in range(len(ts))]
    xend=[pos_problem[i][0]+1500 for i in range(len(ts))]
    yend=[pos_problem[i][1]+1500 for i in range(len(ts))]
    skeletons_small=[]
    for i in range(len(ts)):
        skeletons_small.append(exp.skeletons[ts[i]][xbegin[i]:xend[i],ybegin[i]:yend[i]])
    # One extra frame after the tip disappears, when available.
    if ts[-1]<len(exp.skeletons)-1:
        skeletons_small.append(exp.skeletons[ts[-1]+1][xbegin[-1]:xend[-1],ybegin[-1]:yend[-1]])
    node_smalls=[]
    for i in range(len(ts)):
        node_smalls.append([node for node in exp.nx_graph[ts[i]].nodes if (xbegin[i]<poss[ts[i]][node][0]<xend[i] and ybegin[i]<poss[ts[i]][node][1]<yend[i] and exp.nx_graph[ts[i]].degree(node)>=1)])
    kernel = np.ones((5,5),np.uint8)
    # Dilate the sparse skeleton so hyphae are visible in the rendering.
    skeletons_small_dilated=[cv2.dilate(skeleton.todense().astype(np.uint8),kernel,iterations = 1) for skeleton in skeletons_small]
    for tp1 in range(len(node_smalls)):
        plot_t_tp1([node_interest],[node_interest],poss[ts[tp1]],poss[ts[tp1]],skeletons_small_dilated[tp1],skeletons_small_dilated[tp1],shift=(xbegin[tp1],ybegin[tp1]),save=f'Data/video_test/network_timestep_{tp1}',time=f't={4*tp1}h')
        clear_output(wait=True)
    if ts[-1]<len(exp.skeletons)-1:
        plot_t_tp1([],[],poss[ts[-1]+1],poss[ts[-1]+1],skeletons_small_dilated[-1],skeletons_small_dilated[-1],shift=(xbegin[-1],ybegin[-1]),save=f'Data/video_test/network_timestep_{len(node_smalls)}',time=f't={len(node_smalls)}h')
        clear_output(wait=True)
    images = []
    for t in range(len(node_smalls)):
        images.append(imageio.imread(f'Data/video_test/network_timestep_{t}.png'))
    if ts[-1]<len(exp.skeletons)-1:
        images.append(imageio.imread(f'Data/video_test/network_timestep_{len(node_smalls)}.png'))
    imageio.mimsave(f'Data/video_test/anast/{node_interest}movie_track.gif', images,duration=1)
# Close figures accumulated by the GIF loop; the single-tip growth plot is
# kept commented out for occasional manual use.
plt.close('all')
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.set_xlabel('time (h)')
# ax.set_ylabel('growth (mm/day)')
# spotting,=ax.plot(np.array(range(len(tip_growth[tip])))*4,np.array(tip_growth[tip])*1.725/4*24/1000)
# spotting.set_label(str(tip))
# ax.legend()
# plt.show()
# For ten randomly chosen stopped-but-long-growing tips, render a cropped GIF
# of the network around the tip over time, saved under Data/video_test/stop_long/.
# Fix vs. the original cell: the post-disappearance frame was saved as
# network_timestep_{tp1} (overwriting the last in-track frame), while the
# assembly step below reads network_timestep_{len(node_smalls)}.png — the
# save path now matches, as in the two sibling GIF cells above.
for i in range(10):
    tip=choice(stop_with_long_growth)
    node_interest=tip
    poss = exp.positions
    ts = [t for t in range(len(poss)) if exp.get_node(tip).is_in(t)]
    pos_problem=[poss[i][node_interest] for i in ts]
    # 3000x3000 px crop window centred on the tip at each timestep.
    xbegin=[pos_problem[i][0]-1500 for i in range(len(ts))]
    ybegin=[pos_problem[i][1]-1500 for i in range(len(ts))]
    xend=[pos_problem[i][0]+1500 for i in range(len(ts))]
    yend=[pos_problem[i][1]+1500 for i in range(len(ts))]
    skeletons_small=[]
    for i in range(len(ts)):
        skeletons_small.append(exp.skeletons[ts[i]][xbegin[i]:xend[i],ybegin[i]:yend[i]])
    # One extra frame after the tip disappears, when available.
    if ts[-1]<len(exp.skeletons)-1:
        skeletons_small.append(exp.skeletons[ts[-1]+1][xbegin[-1]:xend[-1],ybegin[-1]:yend[-1]])
    node_smalls=[]
    for i in range(len(ts)):
        node_smalls.append([node for node in exp.nx_graph[ts[i]].nodes if (xbegin[i]<poss[ts[i]][node][0]<xend[i] and ybegin[i]<poss[ts[i]][node][1]<yend[i] and exp.nx_graph[ts[i]].degree(node)>=1)])
    kernel = np.ones((5,5),np.uint8)
    # Dilate the sparse skeleton so hyphae are visible in the rendering.
    skeletons_small_dilated=[cv2.dilate(skeleton.todense().astype(np.uint8),kernel,iterations = 1) for skeleton in skeletons_small]
    for tp1 in range(len(node_smalls)):
        plot_t_tp1([node_interest],[node_interest],poss[ts[tp1]],poss[ts[tp1]],skeletons_small_dilated[tp1],skeletons_small_dilated[tp1],shift=(xbegin[tp1],ybegin[tp1]),save=f'Data/video_test/network_timestep_{tp1}',time=f't={4*tp1}h')
        clear_output(wait=True)
    if ts[-1]<len(exp.skeletons)-1:
        plot_t_tp1([],[],poss[ts[-1]+1],poss[ts[-1]+1],skeletons_small_dilated[-1],skeletons_small_dilated[-1],shift=(xbegin[-1],ybegin[-1]),save=f'Data/video_test/network_timestep_{len(node_smalls)}',time=f't={len(node_smalls)}h')
        clear_output(wait=True)
    images = []
    for t in range(len(node_smalls)):
        images.append(imageio.imread(f'Data/video_test/network_timestep_{t}.png'))
    if ts[-1]<len(exp.skeletons)-1:
        images.append(imageio.imread(f'Data/video_test/network_timestep_{len(node_smalls)}.png'))
    imageio.mimsave(f'Data/video_test/stop_long/{node_interest}movie_track.gif', images,duration=1)
# Close figures accumulated by the GIF loop; growth plot kept commented out.
plt.close('all')
# fig = plt.figure()
# ax = fig.add_subplot(111)
# ax.set_xlabel('time (h)')
# ax.set_ylabel('growth (mm/day)')
# spotting,=ax.plot(np.array(range(len(tip_growth[tip])))*4,np.array(tip_growth[tip])*1.725/4*24/1000)
# spotting.set_label(str(tip))
# ax.legend()
# plt.show()
# The 30 px-per-4h retention threshold expressed in mm/day (1.725 scale as above).
30*1.725/4*24/1000
choice(stop)
exp.save()
# Inspect one node's position at time t among the detected fusion origins.
L=pinpoint_anastomosis(exp,t)[1]
node=64
# Fix: the original assignment was split mid-identifier across two lines
# ("node_obj=ex" / "p2.get_node(node)"), a syntax error; rejoined to match
# the identical lookup used in the loop below.
node_obj=exp2.get_node(node)
node_obj.pos(t)
# NOTE(review): debugging scaffold — `node` is immediately overwritten with
# 830 and the loop breaks after the first iteration, so exactly one candidate
# site is displayed regardless of L's contents.
for node in L:
    node=830
    node_obj=exp2.get_node(node)
    imtm1,posstm1=exp2.find_image_pos(node_obj.pos(t)[0],node_obj.pos(t)[1],t,local=False)
    imt,posst=exp2.find_image_pos(node_obj.pos(t)[0],node_obj.pos(t)[1],t+1,local=False)
    i=0
    plot_t_tp1([node],[],{node:(posstm1[1][i],posstm1[0][i])},None,imtm1[i],imt[i])
    break
t=0
exp2.plot([t,t+1],[pinpoint_anastomosis(exp2,t)[1]]*2)
4725*5,2908*5
# Run the detector over every consecutive pair of timesteps.
anas_result=[pinpoint_anastomosis(exp2,i) for i in range (len(dates)-1)]
anastomosiss=[ana_result[0] for ana_result in anas_result]
origins=[ana_result[1] for ana_result in anas_result]
number_an = [ana_result[2] for ana_result in anas_result]
# Degree-4 nodes are crossings, not fusions, so they are subtracted from the
# graph-theoretic estimate below.
degree4_nodes = [[node for node in nx_graph.nodes if nx_graph.degree(node)==4] for nx_graph in exp2.nx_graph]
numberdegree4=[len(liste) for liste in degree4_nodes]
# Estimate from cycle counting: 2 + E - V - (#degree-4 nodes).
# NOTE(review): the constant 2 presumably accounts for connected components — confirm.
number_anastomosis_theory=[2+len(nx_graph.edges)-len(nx_graph.nodes)-numberdegree4[i] for i,nx_graph in enumerate(exp2.nx_graph)]
# Cumulative spotted events, anchored at the first theoretical value.
number_anastomosis_spot = [number_anastomosis_theory[0]]+[number_anastomosis_theory[0]+np.cumsum(number_an)[i] for i in range(len(anastomosiss))]
per_frame_anastomosis_theory=[number_anastomosis_theory[i+1]-number_anastomosis_theory[i] for i in range(len(number_anastomosis_theory)-1)]
per_frame_anastomosis_spot=[number_anastomosis_spot[i+1]-number_anastomosis_spot[i] for i in range(len(number_anastomosis_theory)-1)]
# Compare the two anastomosis counts over time (last six frames excluded):
# per-frame events and cumulative totals, theory vs frame-by-frame spotting.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('time (h)')
ax.set_ylabel('number anastomosis per frame')
theory,=ax.plot(per_frame_anastomosis_theory[:-6])
theory.set_label('theory')
spotting,=ax.plot(per_frame_anastomosis_spot[:-6])
spotting.set_label('frame spotting')
ax.legend()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('time (h)')
ax.set_ylabel('total number of anastomosis')
theory,=ax.plot(number_anastomosis_theory[:-6])
theory.set_label('theory')
spotting,=ax.plot(number_anastomosis_spot[:-6])
spotting.set_label('frame spotting')
ax.legend()
plt.show()
plt.close('all')
| old/anastomosis_stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Gather
import numpy as np
import pandas as pd
# Load the three raw tables of the clinical-trial dataset.
patients = pd.read_csv('patients.csv')
treatments = pd.read_csv('treatments.csv')
adverse_reactions = pd.read_csv('adverse_reactions.csv')
# ## Assess
# Visual assessment of the raw tables.
patients
treatments
adverse_reactions
# Programmatic assessment: dtypes, missing values, duplicated column names.
patients.info()
treatments.info()
adverse_reactions.info()
all_columns = pd.Series(list(patients) + list(treatments) + list(adverse_reactions))
all_columns[all_columns.duplicated()]
list(patients)
patients[patients['address'].isnull()]
patients.describe()
treatments.describe()
patients.sample(5)
patients.surname.value_counts()
patients.address.value_counts()
patients[patients.address.duplicated()]
patients.weight.sort_values()
# Cross-check the suspicious minimum weight: if Zaitseva's entry is in kg,
# converting to lbs should reproduce the stored BMI.
weight_lbs = patients[patients.surname == 'Zaitseva'].weight * 2.20462
height_in = patients[patients.surname == 'Zaitseva'].height
bmi_check = 703 * weight_lbs / (height_in * height_in)
bmi_check
patients[patients.surname == 'Zaitseva'].bmi
# Missing dose entries per treatment column.
sum(treatments.auralin.isnull())
sum(treatments.novodra.isnull())
# #### Quality
# ##### `patients` table
# - Zip code is a float not a string
# - Zip code has four digits sometimes
# - <NAME> height is 27 in instead of 72 in
# - Full state names sometimes, abbreviations other times
# - <NAME>
# - Missing demographic information (address - contact columns) ***(can't clean)***
# - Erroneous datatypes (assigned sex, state, zip_code, and birthdate columns)
# - Multiple phone number formats
# - Default John Doe data
# - Multiple records for Jakobsen, Gersten, Taylor
# - kgs instead of lbs for Zaitseva weight
#
# ##### `treatments` table
# - Missing HbA1c changes
# - The letter 'u' in starting and ending doses for Auralin and Novodra
# - Lowercase given names and surnames
# - Missing records (280 instead of 350)
# - Erroneous datatypes (auralin and novodra columns)
# - Inaccurate HbA1c changes (leading 4s mistaken as 9s)
# - Nulls represented as dashes (-) in auralin and novodra columns
#
# ##### `adverse_reactions` table
# - Lowercase given names and surnames
# #### Tidiness
# - Contact column in `patients` table should be split into phone number and email
# - Three variables in two columns in `treatments` table (treatment, start dose and end dose)
# - Adverse reaction should be part of the `treatments` table
# - Given name and surname columns in `patients` table duplicated in `treatments` and `adverse_reactions` tables
# ## Clean
# Clean on copies so the raw dataframes stay available for comparison.
patients_clean = patients.copy()
treatments_clean = treatments.copy()
adverse_reactions_clean = adverse_reactions.copy()
# ### Missing Data
# <font color='red'>Complete the following two "Missing Data" **Define, Code, and Test** sequences after watching the *"Address Missing Data First"* video.</font>
# #### `treatments`: Missing records (280 instead of 350)
# ##### Define
# *Your definition here. Note: the missing `treatments` records are stored in a file named `treatments_cut.csv`, which you can see in this Jupyter Notebook's dashboard (click the **jupyter** logo in the top lefthand corner of this Notebook). Hint: [documentation page](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html) for the function used in the solution.*
#
#
# - Import the cut_treatments.csv into a dataframe and add it with the original treatments dataframe
# ##### Code
# Your cleaning code here
# Append the withheld rows to complete the treatments table (280 -> 350
# records); reset the index so it remains unique.
treatments_cut = pd.read_csv('treatments_cut.csv')
treatments_clean = pd.concat([treatments_clean, treatments_cut], ignore_index = True)
# ##### Test
treatments_clean.info()
treatments.info()
# #### `treatments`: Missing HbA1c changes and Inaccurate HbA1c changes (leading 4s mistaken as 9s)
# *Note: the "Inaccurate HbA1c changes (leading 4s mistaken as 9s)" observation, which is an accuracy issue and not a completeness issue, is included in this header because it is also fixed by the cleaning operation that fixes the missing "Missing HbA1c changes" observation. Multiple observations in one **Define, Code, and Test** header occurs multiple times in this notebook.*
# ##### Define
# - recalculate the `hba1c_change` column --> `hba1c_start` minus `hba1c_end`
# ##### Code
# Your cleaning code here
# Recompute hba1c_change from its components; this fills the missing values
# and corrects the 9-for-4 transcription errors noted in the assessment.
treatments_clean.hba1c_change = (treatments_clean.hba1c_start - treatments_clean.hba1c_end)
# ##### Test
# Your testing code here
treatments_clean.head()
treatments_clean.hba1c_change.head()
# ### Tidiness
# <font color='red'>Complete the following four "Tidiness" **Define, Code, and Test** sequences after watching the *"Cleaning for Tidiness"* video.</font>
# #### Contact column in `patients` table contains two variables: phone number and email
# ##### Define
# *Your definition here. Hint 1: use regular expressions with pandas' [`str.extract` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.extract.html). Here is an amazing [regex tutorial](https://regexone.com/). Hint 2: [various phone number regex patterns](https://stackoverflow.com/questions/16699007/regular-expression-to-match-standard-10-digit-phone-number). Hint 3: [email address regex pattern](http://emailregex.com/), which you might need to modify to distinguish the email from the phone number.*
# ##### Code
# +
# Your cleaning code here
# Split the combined contact field into phone_number and email using regex
# capture groups, then drop the original column.
patients_clean['phone_number'] = patients_clean.contact.str.extract(
    '((?:\+\d{1,2}\s)?\(?\d{3}\)?[\s.-]?\d{3}[\s.-]?\d{4})', expand = True)
patients_clean['email'] = patients_clean.contact.str.extract(
    '([a-zA-Z][a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+[a-zA-Z])', expand = True)
# NOTE(review): consider raw strings (r'...') for these patterns to avoid
# invalid-escape warnings on newer Python versions.
patients_clean = patients_clean.drop('contact', axis = 1)
# -
patients_clean.info()
# ##### Test
# Your testing code here
list(patients_clean)
patients_clean.phone_number.sample(10)
patients_clean.email.sample(10)
# #### Three variables in two columns in `treatments` table (treatment, start dose and end dose)
# ##### Define
# *Your definition here. Hint: use pandas' [melt function](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.melt.html) and [`str.split()` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.split.html). Here is an excellent [`melt` tutorial](https://deparkes.co.uk/2016/10/28/reshape-pandas-data-with-melt/).*
#
#
#
# - melt the both *auralin* and *novodra* columns to a treatment and a dose column (dose will still contain the both start and end point of doses)then split the dose column on '-' to obtain `start_dose` and `end_dose` columns. Drop the intermediate dose column
# ##### Code
# +
# Your cleaning code here
# Melt auralin/novodra into one `treatment` column (doses still hold the
# "start - end" form), drop the '-' placeholders marking the other drug,
# then split the dose into start/end columns and drop the intermediate.
treatments_clean = pd.melt(treatments_clean, id_vars=['given_name', 'surname', 'hba1c_start', 'hba1c_end','hba1c_change'],
                           value_vars = None, var_name = 'treatment', value_name = 'dose', col_level=None)
treatments_clean = treatments_clean[treatments_clean.dose != '-']
# NOTE(review): `.str` tuple-unpacking after split is deprecated in newer
# pandas; split(..., expand=True) is the modern equivalent.
treatments_clean['dose_start'], treatments_clean['dose_end'] = treatments_clean['dose'].str.split('-',1).str
treatments_clean = treatments_clean.drop('dose', axis = 1)
# -
treatments_clean.head()
# ##### Test
# Your testing code here
treatments_clean.info()
# #### Adverse reaction should be part of the `treatments` table
# ##### Define
# *Your definition here. Hint: [tutorial](https://chrisalbon.com/python/pandas_join_merge_dataframe.html) for the function used in the solution.*
# - merge the *`adverse_reaction`* column to the `treatments` table, joining on `given_name` and `surname`
# ##### Code
# Your cleaning code here
# Left-join adverse reactions onto treatments by patient name; patients with
# no recorded reaction get NaN.
treatments_clean = pd.merge(treatments_clean, adverse_reactions_clean,
                            on = ['given_name', 'surname'], how = 'left')
adverse_reactions_clean.head()
treatments_clean.head()
# ##### Test
# Your testing code here
treatments_clean.sample(5)
# #### Given name and surname columns in `patients` table duplicated in `treatments` and `adverse_reactions` tables and Lowercase given names and surnames
# ##### Define
# *Your definition here. Hint: [tutorial](https://chrisalbon.com/python/pandas_join_merge_dataframe.html) for one function used in the solution and [tutorial](http://erikrood.com/Python_References/dropping_rows_cols_pandas.html) for another function used in the solution.*
# - we dont need the adverse_reations table anymore.
# - Isolate the patient id and name in the patients table, then convert these names to the lowercase to join with *treatments*, and then drop the given_name and the surname columns in the treatments table (so these being lowercase is not an issue anymore).
# ##### Code
# Your cleaning code here
# Replace names in `treatments` with patient_id: build a lowercase
# name -> id lookup from `patients` (.copy() so the lowercasing does not
# trigger SettingWithCopy / mutate a view of patients_clean), merge it in,
# then drop the now-redundant name columns.
id_names = patients_clean[['patient_id', 'given_name', 'surname']].copy()
id_names.given_name = id_names.given_name.str.lower()
id_names.surname = id_names.surname.str.lower()
treatments_clean = pd.merge(treatments_clean, id_names, on=['given_name', 'surname'])
treatments_clean = treatments_clean.drop(['given_name', 'surname'], axis = 1)
patients_clean.head()
treatments_clean.head()
# ##### Test
# +
# Your testing code here
# patient id should be the only duplicate column
# Fixed typo: the series was assigned to `all_cloumns`, so the check below
# silently inspected the stale `all_columns` built in the assessment section.
all_columns = pd.Series(list(patients_clean) + list(treatments_clean))
all_columns[all_columns.duplicated()]
# -
# -
# ### Quality
# <font color='red'>Complete the remaining "Quality" **Define, Code, and Test** sequences after watching the *"Cleaning for Quality"* video.</font>
# #### Zip code is a float not a string and Zip code has four digits sometimes
#
# ##### Define
# *Your definition here. Hint: see the "Data Cleaning Process" page.*
# - convert the zip-code columns data type from a float to string using `astype`, remove float points from the zip-code using sttring slicing, and pad four digit zip codes with leading 0.
# ##### Code
# +
# Fix zip codes: cast to str, drop the float's trailing '.0', left-pad
# four-digit codes with '0'.
patients_clean.zip_code = patients_clean.zip_code.astype(str).str[:-2].str.pad(5, fillchar='0')
# Restore genuine missing values: NaN -> 'nan' -> 'n' -> '0000n' under the
# pipeline above.  Fixed: the original replaced with the literal *string*
# 'np.nan' rather than an actual missing value.
patients_clean.zip_code = patients_clean.zip_code.replace('0000n', np.nan)
# -
# ##### Test
# Your testing code here
patients_clean.zip_code.head()
# #### <NAME> height is 27 in instead of 72 in
# ##### Define
# *Your definition here.*
# - replace height for rows in the patients table that have a height of 27 in 72.
# ##### Code
# Your cleaning code here
# Repair the transposed height entry: 27 in -> 72 in.
patients_clean.height = patients_clean.height.replace(27, 72)
# ##### Test
# Your testing code here
patients_clean[patients_clean.height == 27]
# #### Full state names sometimes, abbreviations other times
# ##### Define
# *Your definition here. Hint: [tutorial](https://chrisalbon.com/python/pandas_apply_operations_to_dataframes.html) for method used in solution.*
# - apply a function that convert full state name to state abbreviations for california,New York, Illinois, Florida, Nebraska
# ##### Code
# +
# Your cleaning code here
# Full-name -> USPS abbreviation lookup for the states present in the data.
state_abbrev = {
    'California': 'CA',
    'New York': 'NY',
    'Illinois': 'IL',
    'Florida': 'FL',
    'Nebraska': 'NE'
}

def abbreviate_state(patient):
    """Return the two-letter abbreviation for a row's 'state' field; values
    that are not full state names (already abbreviated) pass through
    unchanged."""
    full_name = patient['state']
    return state_abbrev.get(full_name, full_name)
# Apply row-wise; rows already holding two-letter abbreviations fall through
# unchanged.
patients_clean['state'] = patients_clean.apply(abbreviate_state, axis = 1)
# -
patients_clean.state.value_counts()
# ##### Test
# Your testing code here
patients_clean.head()
# #### <NAME>
# ##### Define
# *Your definition here.*
# - Replace given name for rows in the patients table that have a given name of *Dsvid* with *David*
# ##### Code
# Your cleaning code here
# Correct the 'Dsvid' -> 'David' typo in given_name.
patients_clean.given_name = patients_clean.given_name.replace('Dsvid', 'David')
# ##### Test
# Your testing code here
patients_clean[patients_clean.surname == 'Gustafsson']
# #### Erroneous datatypes (assigned sex, state, zip_code, and birthdate columns) and Erroneous datatypes (auralin and novodra columns) and The letter 'u' in starting and ending doses for Auralin and Novodra
# ##### Define
# *Your definition here. Hint: [documentation page](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.astype.html) for one method used in solution, [documentation page](http://pandas.pydata.org/pandas-docs/version/0.20/generated/pandas.to_datetime.html) for one function used in the solution, and [documentation page](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.str.strip.html) for another method used in the solution.*
# - Convert assigned sex and state to categorical data types. Zip code data types was already addressed, convert birthdate to datetime data types. strip the letter 'u' in start dose and end dose and convert those columns to data type int.
#
# ##### Code
# +
# To category
patients_clean.assigned_sex = patients_clean.assigned_sex.astype('category')
patients_clean.state = patients_clean.state.astype('category')
# To datetime
patients_clean.birthdate = pd.to_datetime(patients_clean.birthdate)
# Strip the trailing 'u' (units) plus any whitespace left by the earlier
# split('-'), then convert both dose columns to integers.  The dose_start
# conversion was previously commented out, leaving it as a string, contrary
# to the Define cell above; stripping ' u' (instead of only 'u') also makes
# the existing dose_end conversion robust to surrounding spaces.
treatments_clean.dose_end = treatments_clean.dose_end.str.strip(' u').astype(int)
treatments_clean.dose_start = treatments_clean.dose_start.str.strip(' u').astype(int)
# -
patients_clean.info()
treatments_clean.info()
# ##### Test
# Your testing code here
treatments_clean.head()
# #### Multiple phone number formats
# ##### Define
# *Your definition here. Hint: helpful [Stack Overflow answer](https://stackoverflow.com/a/123681).*
# - strip all "", "-","(",")", "+" and store each number without any formatting. pad the phone number with a 1 if the length of the number is 10 digits(want country code)
# ##### Code
# Your cleaning code here
# Normalise phone numbers: strip every non-digit, then left-pad to 11
# characters with the US country code '1'.
# NOTE(review): newer pandas treats str.replace patterns literally unless
# regex=True is passed — confirm the installed version applies the regex.
patients_clean.phone_number = patients_clean.phone_number.str.replace(r'\D+', '').str.pad(11, fillchar='1')
# ##### Test
# Your testing code here
patients_clean.phone_number.head()
# #### Default <NAME> data
# ##### Define
# *Your definition here. Recall that it is assumed that the data that this John Doe data displaced is not recoverable.*
# - remove the non-recoverable records
# ##### Code
# Your cleaning code here
# Drop the placeholder 'John Doe' records (displaced data is unrecoverable).
patients_clean = patients_clean[patients_clean.surname != 'Doe']
# ##### Test
# Your testing code here
patients_clean[patients_clean.surname == 'Doe']
patients_clean.surname.value_counts()
# #### Multiple records for Jakobsen, Gersten, Taylor
# ##### Define
# *Your definition here.*
# - Remove the <NAME>, <NAME> and <NAME> rows from the *patients* table.
# These are the nick names which happens to also not be in the `treatments` table (removing the wrong name would create a consistency issue between the `patients` and `treatments` table.
# These are all the second occurance of the duplicate. these are also the only occurences of non-null duplicate address
# ##### Code
# +
# Your cleaning code here
# tilde means not: http://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing
# Keep only the first record per non-null address: the later duplicates are
# the nickname rows for Jakobsen, Gersten and Taylor.
patients_clean = patients_clean[~((patients_clean.address.duplicated()) & patients_clean.address.notnull())]
# -
# ##### Test
# Your testing code here
patients_clean[patients_clean.surname == 'Jakobsen']
patients_clean[patients_clean.surname == 'Gersten']
patients_clean[patients_clean.surname == 'Taylor']
# #### kgs instead of lbs for Zaitseva weight
# ##### Define
#
# Advance indexing to isolate the row where the surname is Zaitseva and convert the entry in its weight field from kg to lbs
# [advanced indexing](https://stackoverflow.com/a/44913631)
# ##### Code
# Your cleaning code here
# Convert Zaitseva's kilogram entry to pounds.
# NOTE(review): this assumes the dataset minimum weight is that kg entry —
# TODO confirm; reading the value via the surname mask (as done for the
# write below) would be safer.
weight_kg = patients_clean.weight.min()
mask = patients_clean.surname == 'Zaitseva'
column_name = 'weight'
patients_clean.loc[mask, column_name] = weight_kg *2.20462
patients_clean.weight.min()
patients_clean[patients_clean.surname=='Zaitseva']
# ##### Test
# +
# Your testing code here
# -
patients_clean.weight.sort_values()
| Data_Wrangling/Cleaning Data/cleaning-student (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pastoril10/Trabalhando_com_plotly/blob/main/Trabalhando_com_plotly.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jc4Ml1f1nAjB"
# Importando as bibliotecas
# + [markdown] id="-l7Jcs93nMwi"
# * Utilizaremos Pandas para ler nosso DataFrame e manipular os dados.
#
# * A mágica do Plotly acontece nessas duas outras funções que importamos: offline, que será responsável por mostrar o gráfico no notebook e, caso a gente queira, salvar o resultado do gráfico como PNG, por exemplo. Imagine ele como se fosse o plt.show() do Matplotlib.
#
# * Graph_objs é responsável por criar os gráficos de fato. Se você quiser criar um gráfico de line, barra, pizza, etc., é com esse cara que você irá falar.
# + id="19gTyadXl0gJ"
import pandas as pd
import plotly.offline as py
import plotly.graph_objs as go
import plotly.express as px
# + [markdown] id="mleu6CtcJuDN"
# # Importando dados
# + id="6TY6JvHvoT3n"
# Load the heart-disease dataset (expects heart-disease.csv in the working dir).
df = pd.read_csv("heart-disease.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="NoLcqXPpp1FO" outputId="8cf45eb8-80f1-4d5c-a66c-8984dfbdcab0"
# Preview the first rows to inspect the columns used below (age, trestbps, ...).
df.head()
# + [markdown] id="rwdzIGrdqKha"
# # Gráfico de dispersão (Scatter plot)
# + [markdown] id="j0gs2FKdwDor"
# Criando um gráfico simples
#
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="cRwn-dopp2lR" outputId="c50577eb-3b18-467f-f163-f7d1304b030c"
# Minimal scatter plot: resting blood pressure (trestbps) against age.
scatter_trace = go.Scatter(x=df.age, y=df.trestbps, mode="markers")
py.iplot([scatter_trace])
# + [markdown] id="4LX3uN_7tF-f"
# ### Aprimorando um pouco mais o gráfico de dispersão
#
#
# + [markdown] id="p_RVELZZ4hCj"
# Podemos customizá-lo alterando suas cores e até espessura das linhas de contorno, utilizando o argumento marker. Também utilizaremos o argumento opacity, para deixar o gráfico um pouco menos opaco.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="YNPYctWirnVW" outputId="1f8f0944-378e-41d1-b36e-62769b298ab1"
# Build the traces: two marker series sharing the age axis.
figura1 = go.Scatter(x = df.age,
                     y = df.thalach,
                     name = "thalach",
                     mode = "markers",
                     marker = {"color":"red",
                               "line": {'width': 1,
                                        'color': '#c0392b'}},
                     opacity=.8)
figura2 = go.Scatter(x = df.age,
                     y = df.trestbps,
                     name = "trestbps",
                     mode = "markers",
                     marker = {"color":"blue",
                               "line": {'width': 1,
                                        'color': '#c0392b'}},
                     opacity=.8)
data = [figura1, figura2]
# Build the layout (title plus axis labels).
layout = go.Layout(title= "thalach + trestbps em função da idade",
                   yaxis = {"title": "Thalach + Trestbps "},
                   xaxis = {"title":"Idade"})
# Assemble the figure that will be displayed.
fig = go.Figure(data=data, layout=layout)
# Render the figure inline.
py.iplot(fig)
# + [markdown] id="ANSBzb_Ew1H9"
# Podemos customizá-lo alterando suas cores e até espessura das linhas de contorno, utilizando o argumento marker. Também utilizaremos o argumento opacity, para deixar o gráfico um pouco menos opaco.
# + [markdown] id="pJMOkXeKSU_-"
# # Gráfico de linha
# + [markdown] id="Vxi00dOvwj0T"
# ## A partir de dados do yahoo finance
# + colab={"base_uri": "https://localhost:8080/"} id="TylLVQBBso2k" outputId="429325a7-c99f-4db1-b87d-b9af69bbf087"
# !pip install yfinance
# + id="CTjV0r28s3oH"
import pandas_datareader.data as wb
import yfinance as yf
from datetime import datetime
# Route pandas_datareader's Yahoo Finance calls through yfinance.
yf.pdr_override()
# + colab={"base_uri": "https://localhost:8080/"} id="Atm1ezjIsS1w" outputId="c1943b8f-6254-429d-cd5a-5e60675db190"
bb = wb.get_data_yahoo("BBAS3.SA", start = "2015-1-1") # Banco do Brasil
itau = wb.get_data_yahoo("ITUB4.SA", start = "2015-1-1") # Itau (original comment wrongly said BB)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="K-Dl3FkzvIse" outputId="e7d85999-4233-4eef-f8bc-bd9246516fa5"
bb.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="ouiv2nc6udrD" outputId="f86b6a31-f320-42bb-d84a-9dbe1834d535"
# Adjusted-close time series: BB as a plain line, Itau with line+markers.
trace1 = go.Scatter(
    x =bb.index,
    y =bb["Adj Close"],
    mode = "lines",
    name = "BB",
    marker = dict(color = "LightSkyBlue"))
# Second trace: Itau.
trace2 = go.Scatter(
    x = itau.index,
    y = itau["Adj Close"],
    mode = "lines+markers",
    name = "ITAU",
    marker = dict(color = 'MediumPurple'),
)
data = [trace1, trace2]
# Plain-dict layout (equivalent to go.Layout).
layout = dict(title = 'Retorno dos bancos Itau e BB desde 2015',
              xaxis= dict(title= 'Ano',ticklen= 5,zeroline= False),
              yaxis = dict(title = "Valor da ação", ticklen= 5)
              )
fig = dict(data = data, layout = layout)
py.iplot(fig)
# + [markdown] id="wnY9VuAcxSje"
# ## Criando um gráfico de linha
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="STMpL8LhxVI0" outputId="06249982-8ec4-4dbe-8380-19f81123e616"
# Marker+line chart of titles per competition, one trace per club.
# Build the traces from (club, colour, counts) tuples instead of three
# near-identical literal blocks.
competitions = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"]
clubs = [
    ("Santos", '#d35400', [1, 3, 8, 2]),
    ("Corinthians", '#e67e22', [3, 1, 7, 2]),
    ("Palmeiras", '#f39c12', [4, 2, 10, 0]),
]
data = [
    go.Scatter(x=competitions,
               y=titles,
               mode='markers+lines',
               marker={"color": colour},
               name=club)
    for club, colour, titles in clubs
]
# mode options: 'markers' (points only), 'markers+lines' (both), 'lines' (lines only)
layout = go.Layout(title= "Titulos",
                   yaxis = {"title": "Titulos"},
                   xaxis = {"title":"Quantidade de titulos ganhos"})
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="POqXStc2xuwx"
# Mudando até o estilo das linhas (tracejado e pontilhado)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="j3TO6xZDxjST" outputId="d1989b47-7c61-4377-df9a-79bc3e56e115"
# Line-only traces with custom dash patterns ('dash' and 'dot').
Santos = go.Scatter(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                    y = [1,3,8,2],
                    mode = 'lines',
                    name = "Santos",
                    line = {'color': '#ee5253',
                            'dash': 'dash'})
Corinthians = go.Scatter(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                         y = [3,1,7,2],
                         mode = 'lines',
                         line = {'color': '#341f97',
                                 'dash': 'dot'},
                         name = 'Corinthians'
                         )
# Build the layout.
layout = go.Layout(title= "Titulos",
                   yaxis = {"title": "Titulos"},
                   xaxis = {"title":"Quantidade de titulos ganhos"})
data = [Santos, Corinthians]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="tFxbsO49CB3U"
# # Gráfico de caixa
# + [markdown] id="4dIY15BXCPgw"
# Dentre seus diversos tipos de gráfico, Plotly permite que você crie boxplots, ou gráfico de caixas. Para construí-los é muito similar ao gráfico de dispersão, mas agora você utilizará a função go.Box()
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="4UWKodTCw3uy" outputId="ad153e38-c4c3-4d49-c302-53812621b332"
# Box trace for chest-pain type cp == 0 (labelled "typical angina").
figura0 = go.Box(y=df.loc[df["cp"] == 0, "trestbps"],
                 name = "Angina Típica",
                 marker = {"color":'#f39c12'})
# Box trace for cp == 1.
figura1 = go.Box(y=df.loc[df["cp"] == 1,"trestbps"],
                 name = "Angína atípica",
                 marker = {"color":'#e67e22'})
# Box trace for cp == 2.
figura2 = go.Box(y=df.loc[df["cp"] == 2,"trestbps"],
                 name = "Dor não Anginosa",
                 marker = {"color":'#d35400'})
# Box trace for cp == 3.
figura3 = go.Box(y=df.loc[df["cp"] == 3, "trestbps"],
                 name = "Assintomático",
                 marker = {"color":'#e74c3c'})
data = [figura0, figura1, figura2, figura3]
layout = go.Layout(title="Dispersão dos batimentos cardiacos para cada tipo de dor no peito",
                   titlefont = {'family': 'Arial',
                                'size': 22,
                                'color': '#7f7f7f'},
                   xaxis = {"title": "Tipo de dor no peito"},
                   yaxis = {"title":"Batimento cardiaco"},
                   paper_bgcolor = 'rgb(243, 243, 243)',
                   plot_bgcolor = 'rgb(243, 243, 243)')
# paper_bgcolor / plot_bgcolor change the figure's background colours.
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="1M_dAWj5iscO"
# # Gráfico de barras
# + [markdown] id="Aa13436A6GEc"
# ## A partir da importação de um conjunto de dados
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="ZA93VQP_r_Iu" outputId="1248e686-db02-4d35-f277-9aae8e7ce82a"
df.head()
# + [markdown] id="8Bo46x9e410q"
# Separando o conjunto de dados em masculino e feminino
# + id="OtjSGfztz-XB"
# Split the dataset by the binary `sex` column (1 and 0).
df_h = df[df.sex ==1]
df_m = df[df.sex == 0]
# + id="7VNSLlVfi0iI" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="70cafe0c-7aa3-47dc-cc30-c3e21d38d5d2"
# Frequency of each chest-pain type (cp) per sex, ordered by cp code.
# The original piped through sort_values(ascending=False) before
# value_counts(), which is a no-op (counting ignores order) -- removed.
values_h = df_h["cp"].value_counts().sort_index()
labels_h = values_h.index
values_m = df_m["cp"].value_counts().sort_index()
labels_m = values_m.index
trace1 = go.Bar(x = labels_h,
                y = values_h,
                name = "Homens",
                marker = dict(color = 'rgba(255, 174, 255, 0.5)',
                              line=dict(color='rgb(0,0,0)',width=1.5)))
trace2 = go.Bar(x = labels_m,
                y = values_m,
                name = "Mulher",
                marker = dict(color = 'rgba(255, 255, 128, 0.5)',
                              line=dict(color='rgb(0,0,0)',width=1.5)))
data = [trace1, trace2]
# Side-by-side (grouped) bars.
layout = go.Layout(barmode = "group")
fig = go.Figure(data = data, layout = layout)
py.iplot(fig)
# + [markdown] id="hkZLS25U50Nt"
# Veja que os dados são os mesmos para homens e para mulheres. Isso indica que os dados provavelmente foram criados artificialmente.
# + [markdown] id="m8suTOMCQfqp"
# ## Criando gráficos de barra
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="PBH-dr3rUUaq" outputId="c72bd740-a94a-4ac8-827b-fc48c3092143"
# Basic grouped bar chart: titles per competition for two clubs.
campeonatos = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"]
py.iplot([
    go.Bar(x=campeonatos, y=[1, 3, 8, 2], name="Santos"),
    go.Bar(x=campeonatos, y=[3, 1, 7, 2], name='Corinthians'),
])
# + [markdown] id="ZBXvmjUSlrwA"
# ### Mudar cores e nomes dos gráficos de barra
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="O1_l3BioQiVw" outputId="ad223c4c-6348-40a2-82bd-d2b3991870f6"
# Same bar chart, with a custom colour per trace via `marker`.
Santos = go.Bar(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                y = [1,3,8,2],
                name = "Santos",
                marker = {'color': '#feca57'})
Corinthians = go.Bar(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                     y = [3,1,7,2],
                     name = 'Corinthians',
                     marker = {'color': '#ff9f43'}
                     )
data = [Santos, Corinthians]
py.iplot(data)
# + [markdown] id="WC2v9AfOl5hu"
# ### Empilhando barras
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="CAobeWXHlp_y" outputId="f17af253-b893-467b-ad18-f0933e67cf3c"
# Stacked bar chart: barmode="stack" piles the two traces on top of each other.
Santos = go.Bar(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                y = [1,3,8,2],
                name = "Santos",
                marker = {'color': '#feca57'})
Corinthians = go.Bar(x = ["Copa do Brasil", "Libertadores", "Brasileiro", "Mundial"],
                     y = [3,1,7,2],
                     name = 'Corinthians',
                     marker = {'color': '#ff9f43'}
                     )
data = [Santos, Corinthians]
layout = go.Layout(title = "Titulos Santos e Corinthians",
                   xaxis = {"title":"Campeonato"},
                   yaxis = {"title": "numero de titulos"},
                   barmode = "stack"
                   )
fig = go.Figure(data=data, layout = layout)
py.iplot(fig)
# + [markdown] id="Ob0ayj7Y8viM"
# # Gráfico de Pizza
# + [markdown] id="aVrC4-05EbE9"
# Um gráfico de pizza é um gráfico circular dividido em setores, cada um representando uma porcentagem do total. Em px.pie, o argumento values define o tamanho de cada setor e names rotula cada um. Ele é geralmente usado para mostrar a proporção de cada categoria, e as cores distintas de cada fatia facilitam a compreensão.
# + id="YHxlmPJ4HBM0"
# Count of rows per chest-pain type; index = cp code, values = frequency.
quantidade_cp = df.cp.value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="0hUrQ2cCM8Df" outputId="2154533e-11c1-44d6-e941-5d73615b58b0"
quantidade_cp.index
# + colab={"base_uri": "https://localhost:8080/"} id="6Y_eb0HuMBvf" outputId="9d58e16d-e61a-45b3-aac2-2cb08d1c74a8"
quantidade_cp.values
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="Qm34JTpANBtB" outputId="36096605-12a2-4e1e-9289-9861e2ed3957"
# Pie chart via plotly express: `names` labels the slices, `values` sizes them.
px.pie(quantidade_cp,
       names=quantidade_cp.index,
       values=quantidade_cp.values,
       width = 500,
       height = 500)
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="8oIh8sInAUdR" outputId="2e68d66a-5dba-4806-cb85-7940e83e6825"
# Donut chart of chest-pain types built from a raw figure dict
# (the low-level equivalent of go.Figure + go.Pie).
fig = {
    "data": [
        {
            "values": quantidade_cp.values,
            "labels": quantidade_cp.index,
            "domain": {"x": [0, .5]},
            "name": "Tipo de doença no peito",
            "hoverinfo":"label+percent+name",
            "hole": .3,  # non-zero hole turns the pie into a donut
            "type": "pie",
        },],
    "layout": {
        "title":"Tipo de doença no peito",
        "annotations": [
            { "font": { "size": 20},
              "showarrow": False,
              # Fixed annotation text: the original said "Number of Students",
              # a label left over from a different tutorial.
              "text": "Tipos de dor no peito",
              "x": 0.20,
              "y": 1.1
            },
        ]
    }
}
py.iplot(fig)
# + [markdown] id="2O9MTprJl4Pu"
# # Gráfico de bolhas (simples)
# + [markdown] id="QkcO-jh2rk_h"
# Outra forma de construir um gráfico de bolhas
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="dRBV42M3mT1k" outputId="d9cd88ec-4c9d-4f52-ece6-b5a853abd540"
# Bubble chart built from a raw trace dict: oldpeak vs age.
# NOTE(review): marker size/colour use df.age.value_counts(), which is indexed
# by age *value*, not row-aligned with the x/y points -- confirm this is the
# intended visual mapping.
figura = [
    {
        'y': df.oldpeak,
        'x': df.age,
        'mode': "markers",
        "marker": {
            "size": df.age.value_counts(),
            "color": df.age.value_counts(),
            'showscale': True
        }}]
py.iplot(figura)
# + [markdown] id="UGCL9Uv9z7j9"
# Outra forma
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="h30RwKbHroEK" outputId="7954682d-d204-493b-ed0e-514eb6e78329"
# Bubble chart via plotly express: size encodes oldpeak, colour encodes thalach.
fig = px.scatter(df, x="age", y="chol",
                 size="oldpeak",
                 color='thalach'
                 )
fig.show()
# + [markdown] id="Oster4vf0As4"
# # Histograma
# + id="Vvs3Yf5I0HNZ"
# Re-split by sex (same as earlier; kept so this cell runs standalone).
df_h = df[df.sex == 1]
df_m = df[df.sex == 0]
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="FG0W2z4Y0YkR" outputId="fb5a1f94-0e79-449b-ba96-4795bd903637"
# Overlaid age histograms, one per sex; opacity lets both remain visible.
trace1 = go.Histogram(
    x=df_h.age,
    opacity=0.75,
    name = "Homem",
    nbinsx=100,
    marker=dict(color='rgba(171, 50, 96, 0.6)'))
trace2 = go.Histogram(
    x=df_m.age,
    opacity=0.75,
    name = "mulher",
    nbinsx=100,
    marker=dict(color='rgba(12, 50, 196, 0.6)'))
data = [trace1, trace2]
# barmode='overlay' draws the histograms on top of each other.
layout = go.Layout(barmode='overlay',
                   title= "Distribuição de idade para cada sexo",
                   xaxis=dict(title='Idade'),
                   yaxis=dict( title='Quantidade'))
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="92Xa6XqVUxSP"
# # Gráfico inserido - Inset
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="US5OMkyLU9Ft" outputId="0ff6b380-06e2-48d0-856c-0ce075a9b79f"
# Main trace: scatter of resting blood pressure vs age on the default axes.
trace1 = go.Scatter(
    x=df.age,
    y=df.trestbps,
    mode = "markers",
    name = "Distribuição dos batimentos por idade",
    marker = dict(color = 'rgba(16, 112, 2, 0.8)'))
# Inset trace: age histogram drawn on the secondary axes x2/y2.
trace2 = go.Histogram(
    x=df.age,
    opacity=0.75,
    name = "Distribuição de idade",
    nbinsx=100,
    xaxis='x2',
    yaxis='y2',
    marker=dict(color="red"))
data = [trace1, trace2]
# domain places the inset axes inside the main plot area (fractions of the figure).
layout = go.Layout(
    xaxis2=dict(
        domain=[0.05, 0.4],
        anchor='y2',
    ),
    yaxis2=dict(
        domain=[0.65, 0.98],
        anchor='x2',
    ),
    title = 'Distribuição de idade e Batimentos'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="JIWNViC-D2ci"
# # Gráfico 3D
# + [markdown] id="8h2LG2Q6Fr_3"
# Dispersão 3D: Às vezes, 2D não é suficiente para entender os dados. Portanto, adicionar mais uma dimensão aumenta a inteligibilidade dos dados. Mesmo não adicionando cores, que seria na verdade a 4ª dimensão.
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="msLhy9mYD6LL" outputId="f2d3057e-5faf-4459-dccc-b1a46833c5c6"
# 3D scatter: age x resting blood pressure x cholesterol, all points red.
trace1 = go.Scatter3d(
    x=df.age,
    y=df.trestbps ,
    z=df.chol,
    mode='markers',
    marker=dict(
        size=10,
        color='rgb(255,0,0)'))
data = [trace1]
# Zero margins so the 3D scene fills the figure.
layout = go.Layout(
    margin=dict(
        l=0,
        r=0,
        b=0,
        t=0
    ) )
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# + [markdown] id="qybBvdcVmsOF"
# # Criando Mapa
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="XF97BSRLmjiU" outputId="10cddeb2-f6c4-47df-d269-b9f22c413d20"
# Load the 2014 US city population dataset.
# Renamed the DataFrame from `map` to `cities`: `map` shadows the Python builtin.
cities = pd.read_csv("https://raw.githubusercontent.com/plotly/datasets/master/2014_us_cities.csv")
cities.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="roFaEQxLmt2j" outputId="08c4aa4c-3bdd-4e51-ba1f-82e465d1725c"
# Geographic scatter: one bubble per city, area proportional to population.
figura = go.Scattergeo(locationmode = 'USA-states',
                       lon = cities['lon'],
                       lat = cities['lat'],
                       text = cities['name'] + '- População: ' + cities['pop'].astype(str),
                       marker = dict(
                           size = cities['pop']/5000,
                           color = '#e74c3c',
                           line = {'width': 0.5,
                                   'color': '#2c3e50'},
                           sizemode = 'area')
                       )
data = [figura]
layout = go.Layout(title = "População americana em 2014",
                   titlefont = {'family': 'Arial',
                                'size': 24},
                   geo = {'scope': 'usa',
                          'projection': {'type': 'albers usa'},
                          'showland': True,
                          'landcolor': '#2ecc71',
                          'showlakes': True,
                          'lakecolor': '#3498db',
                          'subunitwidth': 1,
                          'subunitcolor': "rgb(255, 255, 255)"
                          }
                   )
fig = go.Figure(data=data, layout = layout)
py.iplot(fig)
# + id="yJCPMJ6cJNnz"
| Trabalhando_com_plotly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import random
from scipy import stats
from scipy.optimize import fmin
# ### Gradient Descent
# <b>Gradient descent</b>, also known as <b>steepest descent</b>, is an optimization algorithm for finding the local minimum of a function. To find a local minimum, the function "steps" in the direction of the negative of the gradient. <b>Gradient ascent</b> is the same as gradient descent, except that it steps in the direction of the positive of the gradient and therefore finds local maximums instead of minimums. The algorithm of gradient descent can be outlined as follows:
#
# 1: Choose initial guess $x_0$ <br>
# 2: <b>for</b> k = 0, 1, 2, ... <b>do</b> <br>
# 3: $s_k$ = -$\nabla f(x_k)$ <br>
# 4: choose $\alpha_k$ to minimize $f(x_k+\alpha_k s_k)$ <br>
# 5: $x_{k+1} = x_k + \alpha_k s_k$ <br>
# 6: <b>end for</b>
# As a simple example, let's find a local minimum for the function $f(x) = x^3-2x^2+2$
# Target function for the gradient-descent demo: f(x) = x^3 - 2x^2 + 2.
f = lambda x: x**3-2*x**2+2
# Plot f over [-1, 2.5] to show the local minimum near x ~ 1.3-1.5.
x = np.linspace(-1,2.5,1000)
plt.plot(x,f(x))
plt.xlim([-1,2.5])
plt.ylim([0,3])
plt.show()
# We can see from plot above that our local minimum is gonna be near around 1.4 or 1.5 (on the x-axis), but let's pretend that we don't know that, so we set our starting point (arbitrarily, in this case) at $x_0 = 2$
# +
# Gradient descent with a fixed step size; stop when successive iterates
# differ by less than `precision`.
x_old = 0
x_new = 2 # the algorithm starts at x=2
n_k = 0.1 # step size (learning rate)
precision = 0.0001
x_list, y_list = [x_new], [f(x_new)]
# Derivative of f(x) = x^3 - 2x^2 + 2.
def f_prime(x):
    return 3*x**2-4*x
while abs(x_new - x_old) > precision:
    x_old = x_new
    s_k = -f_prime(x_old)          # steepest-descent direction
    x_new = x_old + n_k * s_k
    x_list.append(x_new)           # record the path for plotting
    y_list.append(f(x_new))
print("Local minimum occurs at:", x_new)
print("Number of steps:", len(x_list))
# -
# The figures below show the route that was taken to find the local minimum.
# Plot the descent path (red) over the function (blue), full view and zoomed.
plt.figure(figsize=[10,3])
plt.subplot(1,2,1)
plt.scatter(x_list,y_list,c="r")
plt.plot(x_list,y_list,c="r")
plt.plot(x,f(x), c="b")
plt.xlim([-1,2.5])
plt.ylim([0,3])
plt.title("Gradient descent")
# Zoomed-in view of the same path near the minimum.
plt.subplot(1,2,2)
plt.scatter(x_list,y_list,c="r")
plt.plot(x_list,y_list,c="r")
plt.plot(x,f(x), c="b")
plt.xlim([1.2,2.1])
plt.ylim([0,3])
plt.title("Gradient descent (zoomed in)")
plt.show()
# You'll notice that the step size (also called learning rate) in the implementation above is constant, unlike the algorithm in the pseudocode. Doing this makes it easier to implement the algorithm. However, it also presents some issues: If the step size is too small, then convergence will be very slow, but if we make it too large, then the method may fail to converge at all.
#
# A solution to this is to use adaptive step sizes as the algorithm below does (using scipy's fmin function to find optimal step sizes):
# +
# Objective along the search direction, f(x + n*s); minimized over n by fmin
# to pick the step size at each iteration (an exact line search).
def f2(n,x,s):
    x = x + n*s
    return f(x)
x_old = 0
x_new = 2 # the algorithm starts at x=2
precision = 0.0001
x_list, y_list = [x_new], [f(x_new)]
# Derivative of f(x) = x^3 - 2x^2 + 2.
def f_prime(x):
    return 3*x**2-4*x
while abs(x_new - x_old) > precision:
    x_old = x_new
    s_k = -f_prime(x_old)
    # scipy's fmin finds the step size minimizing f along the descent direction
    n_k = fmin(f2,0.1,(x_old,s_k), full_output = False, disp = False)
    x_new = x_old + n_k * s_k
    x_list.append(x_new)
    y_list.append(f(x_new))
print("Local minimum occurs at ", float(x_new))
print("Number of steps:", len(x_list))
# -
# With adaptive step sizes, the algorithm converges in just 4 iterations rather than 17. Of course, it takes time to compute the appropriate step size at each iteration. Here are some plots of the path taken below. You can see that it converges very quickly to a point near the local minimum, so it's hard to even discern the dots after the first two steps until we zoom in very close in the third frame below:
# Plot the adaptive-step path at three zoom levels; convergence is so fast
# that the points after the first two steps only separate at extreme zoom.
plt.figure(figsize=[15,3])
plt.subplot(1,3,1)
plt.scatter(x_list,y_list,c="r")
plt.plot(x_list,y_list,c="r")
plt.plot(x,f(x), c="b")
plt.xlim([-1,2.5])
plt.title("Gradient descent")
plt.subplot(1,3,2)
plt.scatter(x_list,y_list,c="r")
plt.plot(x_list,y_list,c="r")
plt.plot(x,f(x), c="b")
plt.xlim([1.2,2.1])
plt.ylim([0,3])
plt.title("zoomed in")
plt.subplot(1,3,3)
plt.scatter(x_list,y_list,c="r")
plt.plot(x_list,y_list,c="r")
plt.plot(x,f(x), c="b")
plt.xlim([1.3333,1.3335])
plt.ylim([0,3])
plt.title("zoomed in more")
plt.show()
# Another approach to update the step size is choosing a decrease constant $d$ that shrinks the step size over time:
# $\eta(t+1) = \eta(t) / (1+t \times d)$.
# +
# Gradient descent with a decaying step size:
# eta(t+1) = eta(t) / (1 + t*d), where d is the decrease constant.
x_old = 0
x_new = 2 # the algorithm starts at x=2
n_k = 0.17 # initial step size
precision = 0.0001
t, d = 0, 1  # iteration counter and decrease constant
x_list, y_list = [x_new], [f(x_new)]
# Derivative of f(x) = x^3 - 2x^2 + 2.
def f_prime(x):
    return 3*x**2-4*x
while abs(x_new - x_old) > precision:
    x_old = x_new
    s_k = -f_prime(x_old)
    x_new = x_old + n_k * s_k
    x_list.append(x_new)
    y_list.append(f(x_new))
    n_k = n_k / (1 + t * d)  # shrink the step size over time
    t += 1
print("Local minimum occurs at:", x_new)
print("Number of steps:", len(x_list))
# -
# Let's now consider an example which is a little bit more complicated. Consider a simple linear regression where we want to see how the temperature affects the noises made by crickets. We have a data set of cricket chirp rates at various temperatures. First we'll load that data set in and plot it:
# +
# Load the cricket-chirp dataset: column 0 = chirps/sec, column 1 = temperature (F).
data = np.loadtxt('SGD_data.txt', delimiter=',')
# Plot the raw data points.
plt.scatter(data[:, 0], data[:, 1], marker='o', c='b')
plt.title('cricket chirps vs temperature')
plt.xlabel('chirps/sec for striped ground crickets')
plt.ylabel('temperature in degrees Fahrenheit')
plt.xlim([13,21])
plt.ylim([65,95])
plt.show()
# -
# Our goal is to find the equation of the straight line $h_\theta(x) = \theta_0 + \theta_1 x$ that best fits our data points. The function that we are trying to minimize in this case is:
#
# $J(\theta_0,\theta_1) = {1 \over 2m} \sum\limits_{i=1}^m (h_\theta(x_i)-y_i)^2$
#
# In this case, our gradient will be defined in two dimensions:
#
# $\frac{\partial}{\partial \theta_0} J(\theta_0,\theta_1) = \frac{1}{m} \sum\limits_{i=1}^m (h_\theta(x_i)-y_i)$
#
# $\frac{\partial}{\partial \theta_1} J(\theta_0,\theta_1) = \frac{1}{m} \sum\limits_{i=1}^m ((h_\theta(x_i)-y_i) \cdot x_i)$
#
# Below, we set up our function for h, J and the gradient:
# +
# Hypothesis: straight line h(x) = theta_0 + theta_1 * x.
h = lambda theta_0,theta_1,x: theta_0 + theta_1*x

def J(x,y,m,theta_0,theta_1):
    """Least-squares cost over the first m points: (1/2m) * sum (h(x_i)-y_i)^2."""
    return sum((h(theta_0, theta_1, x[i]) - y[i]) ** 2 for i in range(m)) / (2 * m)

def grad_J(x,y,m,theta_0,theta_1):
    """Gradient of J w.r.t. (theta_0, theta_1), averaged over m points.

    The prediction error is computed once per point (the original evaluated
    h twice per iteration).
    """
    returnValue = np.array([0.,0.])
    for i in range(m):
        residual = h(theta_0, theta_1, x[i]) - y[i]  # error on point i
        returnValue[0] += residual
        returnValue[1] += residual * x[i]
    return returnValue / m
# -
# Now, we'll load our data into the x and y variables;
# Split the loaded array into features (chirp rate) and targets (temperature).
x = data[:, 0]
y = data[:, 1]
m = len(x)  # number of data points
# And we run our gradient descent algorithm (without adaptive step sizes in this example):
# +
# Batch gradient descent on (theta_0, theta_1) with a fixed step size;
# stop when the gradient norm falls below `precision`.
theta_old = np.array([0.,0.])
theta_new = np.array([1.,1.]) # the algorithm starts at [1,1]
n_k = 0.001 # step size
precision = 0.001
num_steps = 0
s_k = float("inf")
while np.linalg.norm(s_k) > precision:
    num_steps += 1
    theta_old = theta_new
    s_k = -grad_J(x,y,m,theta_old[0],theta_old[1])  # full-batch gradient
    theta_new = theta_old + n_k * s_k
print("Local minimum occurs where:")
print("theta_0 =", theta_new[0])
print("theta_1 =", theta_new[1])
print("This took",num_steps,"steps to converge")
# -
# For comparison, let's get the actual values for $\theta_0$ and $\theta_1$:
# Reference fit from scipy for comparison with the gradient-descent result.
actualvalues = sp.stats.linregress(x,y)
print("Actual values for theta are:")
print("theta_0 =", actualvalues.intercept)
print("theta_1 =", actualvalues.slope)
# So we see that our values are relatively close to the actual values (even though our method was pretty slow). If you look at the source code of [linregress](https://github.com/scipy/scipy/blob/master/scipy/stats/_stats_mstats_common.py), it uses the covariance matrix of x and y to compute quickly. Below, you can see a plot of the line drawn with our theta values against the data:
# Overlay the fitted line h(theta_0, theta_1, x) on the raw data points.
xx = np.linspace(0,21,1000)
plt.scatter(data[:, 0], data[:, 1], marker='o', c='b')
plt.plot(xx,h(theta_new[0],theta_new[1],xx))
plt.xlim([13,21])
plt.ylim([65,95])
plt.title('cricket chirps vs temperature')
plt.xlabel('chirps/sec for striped ground crickets')
plt.ylabel('temperature in degrees Fahrenheit')
plt.show()
# Notice that in the method above we need to calculate the gradient in every step of our algorithm. In the example with the crickets, this is not a big deal since there are only 15 data points. But imagine that we had 10 million data points. If this were the case, it would certainly make the method above far less efficient.
#
# In machine learning, the algorithm above is often called <b>batch gradient descent</b> to contrast it with <b>mini-batch gradient descent</b> (which we will not go into here) and <b>stochastic gradient descent</b>.
# ### Stochastic gradient descent
# As we said above, in batch gradient descent, we must look at every example in the entire training set on every step (in cases where a training set is used for gradient descent). This can be quite slow if the training set is sufficiently large. In <b>stochastic gradient descent</b>, we update our values after looking at <i>each</i> item in the training set, so that we can start making progress right away. Recall the linear regression example above. In that example, we calculated the gradient for each of the two theta values as follows:
#
# $\frac{\partial}{\partial \theta_0} J(\theta_0,\theta_1) = \frac{1}{m} \sum\limits_{i=1}^m (h_\theta(x_i)-y_i)$
#
# $\frac{\partial}{\partial \theta_1} J(\theta_0,\theta_1) = \frac{1}{m} \sum\limits_{i=1}^m ((h_\theta(x_i)-y_i) \cdot x_i)$
#
# Where $h_\theta(x) = \theta_0 + \theta_1 x$
#
# Then we followed this algorithm (where $\alpha$ was a non-adapting stepsize):
#
# 1: Choose initial guess $x_0$ <br>
# 2: <b>for</b> k = 0, 1, 2, ... <b>do</b> <br>
# 3: $s_k$ = -$\nabla f(x_k)$ <br>
# 4: $x_{k+1} = x_k + \alpha s_k$ <br>
# 5: <b>end for</b>
#
# When the sample data had 15 data points as in the example above, calculating the gradient was not very costly. But for very large data sets, this would not be the case. So instead, we consider a stochastic gradient descent algorithm for simple linear regression such as the following, where m is the size of the data set:
#
# 1: Randomly shuffle the data set <br>
# 2: <b>for</b> k = 0, 1, 2, ... <b>do</b> <br>
# 3: <b>for</b> i = 1 to m <b>do</b> <br>
# 4: $\begin{bmatrix}
# \theta_{1} \\
# \theta_2 \\
# \end{bmatrix}=\begin{bmatrix}
# \theta_1 \\
# \theta_2 \\
# \end{bmatrix}-\alpha\begin{bmatrix}
# 2(h_\theta(x_i)-y_i) \\
# 2x_i(h_\theta(x_i)-y_i) \\
# \end{bmatrix}$ <br>
# 5: <b>end for</b> <br>
# 6: <b>end for</b>
#
# Typically, with stochastic gradient descent, you will run through the entire data set 1 to 10 times (see value for k in line 2 of the pseudocode above), depending on how fast the data is converging and how large the data set is.
#
# With batch gradient descent, we must go through the entire data set before we make any progress. With this algorithm though, we can make progress right away and continue to make progress as we go through the data set. Therefore, stochastic gradient descent is often preferred when dealing with large data sets.
#
# Unlike gradient descent, stochastic gradient descent will tend to oscillate <i>near</i> a minimum value rather than continuously getting closer. It may never actually converge to the minimum though. One way around this is to slowly decrease the step size $\alpha$ as the algorithm runs. However, this is less common than using a fixed $\alpha$.
#
# Let's look at another example where we illustrate the use of stochastic gradient descent for linear regression. In the example below, we'll create a set of 500,000 points around the line $y = 2x+17+\epsilon$, for values of x between 0 and 100:
# +
# Synthetic data: y = 2x + 17 plus Gaussian noise (sigma = 10).
f = lambda x: x*2+17+np.random.randn(len(x))*10
# 500,000 x values uniformly drawn from [0, 100).
x = np.random.random(500000)*100
y = f(x)
m = len(y)
# -
# First, let's randomly shuffle around our dataset. Note that in this example, this step isn't strictly necessary since the data is already in a random order. However, that obviously may not always be the case:
# +
from random import shuffle

# Shuffle x and y in lockstep by permuting a shared index list.
index_shuf = list(range(len(x)))
shuffle(index_shuf)
x_shuf = [x[i] for i in index_shuf]
y_shuf = [y[i] for i in index_shuf]
# Now we'll setup our h function and our cost function, which we will use to check how the value is improving.
def h(theta_0, theta_1, x):
    """Linear hypothesis: predicted y for input x."""
    return theta_0 + theta_1 * x

def cost(theta_0, theta_1, x_i, y_i):
    """Half squared error of the prediction on a single sample (x_i, y_i)."""
    return 0.5 * (h(theta_0, theta_1, x_i) - y_i) ** 2
# Now we'll run our stochastic gradient descent algorithm. To see it's progress, we'll take a cost measurement at every step. Every 10,000 steps, we'll get an average cost from the last 10,000 steps and then append that to our cost_list variable. We will run through the entire list 10 times here:
# +
# Stochastic gradient descent: update theta after every single sample.
theta_old = np.array([0.,0.])
theta_new = np.array([1.,1.]) # the algorithm starts at [1,1]
n_k = 0.000005 # fixed step size (learning rate)
iter_num = 0
s_k = np.array([float("inf"),float("inf")])
sum_cost = 0
cost_list = []
# 10 passes (epochs) over the data.
# BUG FIX: iterate the shuffled copies (x_shuf / y_shuf) built in the previous
# cell -- the original looped over the unshuffled x / y, so the shuffle step it
# describes was never actually used.
for j in range(10):
    for i in range(m):
        iter_num += 1
        theta_old = theta_new
        # Prediction error on this sample, computed once (was computed twice).
        residual = h(theta_old[0], theta_old[1], x_shuf[i]) - y_shuf[i]
        s_k[0] = residual
        s_k[1] = residual * x_shuf[i]
        s_k = (-1)*s_k
        theta_new = theta_old + n_k * s_k
        sum_cost += cost(theta_old[0], theta_old[1], x_shuf[i], y_shuf[i])
        if (i+1) % 10000 == 0:
            # Record the average cost over the last 10,000 updates.
            cost_list.append(sum_cost/10000.0)
            sum_cost = 0
print("Local minimum occurs where:")
print("theta_0 =", theta_new[0])
print("theta_1 =", theta_new[1])
# -
# As you can see, our values for $\theta_0$ and $\theta_1$ are close to their true values of 17 and 2.
#
# Now, we plot our cost versus the number of iterations. As you can see, the cost goes down quickly at first, but starts to level off as we go through more iterations:
# Average cost per 10,000-update window vs iteration count.
iterations = np.arange(len(cost_list))*10000
plt.plot(iterations,cost_list)
plt.xlabel("iterations")
plt.ylabel("avg cost")
plt.show()
#
| Kata Fundamentos/CalculoML/stochastic_gradient_descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="HvhYZrIZCEyo"
# <img src="https://storage.googleapis.com/ultralytics/logo/logoname1000.png" width="150">
#
# <table align="center"><td>
# <a target="_blank" href="https://github.com/ultralytics/yolov3/blob/master/tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on github
# </a>
# </td><td>
# <a target="_blank" href="https://colab.sandbox.google.com/github/ultralytics/yolov3/blob/master/tutorial.ipynb">
# <img width=32px src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td></table>
#
# This notebook contains software developed by Ultralytics LLC, and **is freely available for redistribution under the GPL-3.0 license**. For more information please visit https://github.com/ultralytics/yolov3 and https://www.ultralytics.com.
#
#
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="e5ylFIvlCEym" outputId="fbc88edd-7b26-4735-83bf-b404b76f9c90"
import time
import glob
import torch
import os
from IPython.display import Image, clear_output
# Report the PyTorch version and whether a CUDA GPU is available.
print('PyTorch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))
# + [markdown] colab_type="text" id="7mGmQbAO5pQb"
# Clone repository and download COCO 2014 dataset (20GB):
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="tIFv0p1TCEyj" outputId="e9230cff-ede4-491a-a74d-063ce77f21cd"
# !git clone https://github.com/ultralytics/yolov3 # clone
# !bash yolov3/data/get_coco2014.sh # copy COCO2014 dataset (19GB)
# %cd yolov3
# + [markdown] colab_type="text" id="N3qM6T0W53gh"
# Run `detect.py` to perform inference on images in `data/samples` folder:
# + colab={"base_uri": "https://localhost:8080/", "height": 477} colab_type="code" id="zR9ZbuQCH7FX" outputId="49268b66-125d-425e-dbd0-17b108914c51"
# !python3 detect.py
# Show the annotated detection result written by detect.py to the output/ folder.
Image(filename='output/zidane.jpg', width=600)
# + [markdown] colab_type="text" id="ijTFlKcp6JVy"
# Run `train.py` to train YOLOv3-SPP starting from a darknet53 backbone:
# + colab={} colab_type="code" id="Mupsoa0lzSPo"
# !python3 train.py --data data/coco_64img.data --img-size 320 --epochs 3 --nosave
# + [markdown] colab_type="text" id="0eq1SMWl6Sfn"
# Run `test.py` to evaluate the performance of a trained darknet or PyTorch model:
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="0v0RFtO-WG9o" outputId="6791f795-cb10-4da3-932f-c4ac47574601"
# !python3 test.py --data data/coco.data --save-json --img-size 416 # 0.565 mAP
# + [markdown] colab_type="text" id="VUOiNLtMP5aG"
# Reproduce tutorial training runs and plot training results:
# + colab={"base_uri": "https://localhost:8080/", "height": 417} colab_type="code" id="LA9qqd_NCEyB" outputId="1521c334-92ef-4f9f-bb8a-916ad5e2d9c2"
# !python3 train.py --data data/coco_16img.data --batch-size 16 --accumulate 1 --nosave && mv results.txt results_coco_16img.txt # CUSTOM TRAINING EXAMPLE
# !python3 train.py --data data/coco_64img.data --batch-size 16 --accumulate 1 --nosave && mv results.txt results_coco_64img.txt
# !python3 -c "from utils import utils; utils.plot_results()" # plot training results
# Show the training-curves figure produced by utils.plot_results().
Image(filename='results.png', width=800)
# + [markdown] colab_type="text" id="14mT7T7Q6erR"
# Extras below
#
# ---
#
#
#
# + colab={} colab_type="code" id="42_zEpW6W_N1"
# !git pull
# + colab={} colab_type="code" id="9bVTcveIOzDd"
# %cd yolov3
# + colab={} colab_type="code" id="odMr0JFnCEyb"
# %ls
# + colab={} colab_type="code" id="uB3v5hj_CEyI"
# Unit Tests
# !python3 detect.py # detect 2 persons, 1 tie
# !python3 test.py --data data/coco_32img.data # test mAP = 0.8
# !python3 train.py --data data/coco_32img.data --epochs 3 --nosave # train 3 epochs
# + colab={} colab_type="code" id="6D0si0TNCEx5"
# Evolve Hyperparameters
# !python3 train.py --data data/coco.data --img-size 320 --epochs 1 --evolve
| modeling/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pomegranate as pg
guest = pg.DiscreteDistribution({'A': 1./3, 'B': 1./3, 'C': 1./3})
prize = pg.DiscreteDistribution({'A': 1./3, 'B': 1./3, 'C': 1./3})
monty = pg.ConditionalProbabilityTable(
[[ 'A', 'A', 'A', 0.0 ],
[ 'A', 'A', 'B', 0.5 ],
[ 'A', 'A', 'C', 0.5 ],
[ 'A', 'B', 'A', 0.0 ],
[ 'A', 'B', 'B', 0.0 ],
[ 'A', 'B', 'C', 1.0 ],
[ 'A', 'C', 'A', 0.0 ],
[ 'A', 'C', 'B', 1.0 ],
[ 'A', 'C', 'C', 0.0 ],
[ 'B', 'A', 'A', 0.0 ],
[ 'B', 'A', 'B', 0.0 ],
[ 'B', 'A', 'C', 1.0 ],
[ 'B', 'B', 'A', 0.5 ],
[ 'B', 'B', 'B', 0.0 ],
[ 'B', 'B', 'C', 0.5 ],
[ 'B', 'C', 'A', 1.0 ],
[ 'B', 'C', 'B', 0.0 ],
[ 'B', 'C', 'C', 0.0 ],
[ 'C', 'A', 'A', 0.0 ],
[ 'C', 'A', 'B', 1.0 ],
[ 'C', 'A', 'C', 0.0 ],
[ 'C', 'B', 'A', 1.0 ],
[ 'C', 'B', 'B', 0.0 ],
[ 'C', 'B', 'C', 0.0 ],
[ 'C', 'C', 'A', 0.5 ],
[ 'C', 'C', 'B', 0.5 ],
[ 'C', 'C', 'C', 0.0 ]], [guest, prize])
s1 = pg.State(guest, name="guest")
s2 = pg.State(prize, name="prize")
s3 = pg.State(monty, name="monty")
# +
# Create the Bayesian network object with a useful name
model = pg.BayesianNetwork("Monty Hall Problem")
# Add the three states to the network
model.add_states(s1, s2, s3)
# -
# For now edges are added from parent -> child by calling `model.add_edge(parent, child)`.
# Monty is dependent on both guest and prize
model.add_edge(s1, s3)
model.add_edge(s2, s3)
model.bake()
model.structure
model.plot()
model
observations = {
'guest': 'A',
}
beliefs = map(str, model.predict_proba(observations))
print("\n".join( "{}\t{}".format(state.name, belief) for state, belief in zip(model.states, beliefs)))
observations = {
'guest': 'A',
'monty': 'C',
}
beliefs = map(str, model.predict_proba(observations))
print("\n".join( "{}\t{}".format(state.name, belief) for state, belief in zip(model.states, beliefs)))
# Train model with more data
# +
data = [[ 'A', 'A', 'A' ],
[ 'A', 'A', 'A' ],
[ 'A', 'A', 'A' ],
[ 'A', 'A', 'A' ],
[ 'A', 'A', 'A' ],
[ 'B', 'B', 'B' ],
[ 'B', 'B', 'C' ],
[ 'C', 'C', 'A' ],
[ 'C', 'C', 'C' ],
[ 'C', 'C', 'C' ],
[ 'C', 'C', 'C' ],
[ 'C', 'B', 'A' ]]
model.fit(data)
# -
model.predict_proba({})
observations = {
'guest': 'A',
'prize': 'A'
}
beliefs = map(str, model.predict_proba(observations))
print("\n".join( "{}\t{}".format(state.name, belief) for state, belief in zip(model.states, beliefs)))
| monty_hall_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # haberman
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')  # silence sklearn deprecation/convergence noise
# # Importing datasets
# Haberman's survival dataset: breast-cancer surgery outcomes.
data = pd.read_csv("haberman.csv")
data.head()
data.columns
# # Splitting Data for training and testing
from sklearn.model_selection import train_test_split
# Features: age at operation, year of operation, positive axillary node count.
# Target: survival status.
X_Data=data[['Age of patient at time of operation', 'Patients year of operation','Number of positive axillary nodes detected']]
y_Data=data[['Survival status']]
# Hold out 20% of the rows as a test set; fixed seed for reproducibility.
X_train,X_test,y_train,y_test = train_test_split(X_Data,y_Data,test_size=0.2,random_state=42)
# # Decision Trees
#CART
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
modelDT = DecisionTreeClassifier()
# np.ravel flattens the single-column target frame to the 1-D array sklearn expects
modelDT.fit(X_train, np.ravel(y_train,order='C'))
predictDT=modelDT.predict(X_test)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictDT)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictDT))
# -
# # Random Forest
from sklearn.ensemble import RandomForestClassifier
modelRFC=RandomForestClassifier()
modelRFC.fit(X_train, np.ravel(y_train,order='C'))
predictRFC=modelRFC.predict(X_test)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictRFC)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictRFC))
# -
# # Naive Bayes
from sklearn.naive_bayes import GaussianNB
modelGNB = GaussianNB()
modelGNB.fit(X_train, np.ravel(y_train,order='C'))
predictGNB=modelGNB.predict(X_test)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictGNB)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictGNB))
# -
# # AdaBoost
from sklearn.ensemble import AdaBoostClassifier
modelAda = AdaBoostClassifier()
modelAda.fit(X_train, np.ravel(y_train,order='C'))
predictAda=modelAda.predict(X_test)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictAda)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictAda))
# -
# -
# # Data normalization
# +
from sklearn.preprocessing import MinMaxScaler

# Scale features into [0, 1]. The scaler is fitted on the training data ONLY:
# the previous version refitted it on X_test, which leaks test-set statistics
# and makes the train and test features inconsistent with each other.
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_normalized = scaler.transform(X_train)
X_test_normalized = scaler.transform(X_test)

# Map the survival-status labels onto [0, 1] for the ROC analysis below.
# A separate scaler is used so the feature scaler above is not refitted on labels.
label_scaler = MinMaxScaler()
label_scaler.fit(y_train)
y_test_normalized = label_scaler.transform(y_test)
# -
# # K-Nearest Neighbours
from sklearn.neighbors import KNeighborsClassifier
modelKNN = KNeighborsClassifier()
# kNN is distance-based, so it is trained on the min-max normalized features.
modelKNN.fit(X_train_normalized, np.ravel(y_train,order='C'))
predictKNN=modelKNN.predict(X_test_normalized)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictKNN)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictKNN))
# -
# # Support Vector Machine
#Support Vector Classifier
from sklearn.svm import SVC
# probability=True enables predict_proba, needed for the ROC analysis below.
modelSVC = SVC(probability= True)
modelSVC.fit(X_train_normalized, np.ravel(y_train,order='C'))
predictSVC=modelSVC.predict(X_test_normalized)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictSVC)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictSVC))
# -
# Linear Support Vector Classifier
from sklearn.svm import LinearSVC
modelLSVC = LinearSVC()
modelLSVC.fit(X_train_normalized, np.ravel(y_train,order='C'))
predictLSVC=modelLSVC.predict(X_test_normalized)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictLSVC)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictLSVC))
# -
# # Multi-Layer Perceptron
from sklearn.neural_network import MLPClassifier
modelMLP = MLPClassifier()
modelMLP.fit(X_train_normalized, np.ravel(y_train,order='C'))
predictMLP=modelMLP.predict(X_test_normalized)
# +
##evaluation of metrics
#Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
cm = confusion_matrix(y_test,predictMLP)
cm_display = ConfusionMatrixDisplay(cm).plot()
#Classification Report
from sklearn.metrics import classification_report
print(classification_report(y_test, predictMLP))
# -
# -
# # ROC and AUC
# +
# Baseline "random" classifier: a constant score of 0 for every test sample.
r_probs =[0 for _ in range(len(y_test_normalized))]
# Predicted class probabilities from each fitted model; models trained on
# normalized features are scored on the normalized test features.
dt_probs=modelDT.predict_proba(X_test)
rf_probs=modelRFC.predict_proba(X_test)
nb_probs=modelGNB.predict_proba(X_test)
ada_probs=modelAda.predict_proba(X_test)
knn_probs=modelKNN.predict_proba(X_test_normalized)
svm_probs=modelSVC.predict_proba(X_test_normalized)
# NOTE(review): _predict_proba_lr is a private sklearn helper (LinearSVC has no
# public predict_proba) — this may break across sklearn versions.
lsvm_probs=modelLSVC._predict_proba_lr(X_test_normalized)
mlp_probs=modelMLP.predict_proba(X_test_normalized)
# Keep only the probability of the positive class (second column).
dt_probs=dt_probs[:,1]
rf_probs=rf_probs[:, 1]
nb_probs=nb_probs[:, 1]
ada_probs=ada_probs[:, 1]
knn_probs=knn_probs[:, 1]
svm_probs=svm_probs[:, 1]
lsvm_probs=lsvm_probs[:, 1]
mlp_probs=mlp_probs[:, 1]
from sklearn.metrics import roc_curve, roc_auc_score
# NOTE(review): the AUC scores below use the raw y_test labels while the ROC
# curves use y_test_normalized — both encode the same two classes, but confirm
# this mix is intentional.
r_auc=roc_auc_score(y_test,r_probs)
dt_auc= roc_auc_score(y_test,dt_probs)
rf_auc= roc_auc_score(y_test,rf_probs)
nb_auc= roc_auc_score(y_test,nb_probs)
ada_auc= roc_auc_score(y_test,ada_probs)
knn_auc= roc_auc_score(y_test,knn_probs)
svm_auc= roc_auc_score(y_test,svm_probs)
lsvm_auc= roc_auc_score(y_test,lsvm_probs)
mlp_auc= roc_auc_score(y_test,mlp_probs)
# False/true positive rates for each model's ROC curve.
r_fpr,r_tpr, _ = roc_curve(y_test_normalized,r_probs)
dt_fpr, dt_tpr, _ = roc_curve(y_test_normalized,dt_probs)
rf_fpr, rf_tpr, _= roc_curve(y_test_normalized,rf_probs)
nb_fpr, nb_tpr, _= roc_curve(y_test_normalized,nb_probs)
ada_fpr, ada_tpr, _= roc_curve(y_test_normalized,ada_probs)
knn_fpr, knn_tpr, _= roc_curve(y_test_normalized,knn_probs)
svm_fpr, svm_tpr, _= roc_curve(y_test_normalized,svm_probs)
lsvm_fpr, lsvm_tpr, _= roc_curve(y_test_normalized,lsvm_probs)
mlp_fpr, mlp_tpr, _= roc_curve(y_test_normalized,mlp_probs)
import matplotlib.pyplot as plt
plt.plot(r_fpr, r_tpr, linestyle='--', label='Random prediction (AUROC = %0.3f)' % r_auc)
plt.plot(dt_fpr, dt_tpr, marker='.', label='Decision Tree (AUROC = %0.3f)' % dt_auc)
plt.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest (AUROC = %0.3f)' % rf_auc)
plt.plot(nb_fpr, nb_tpr, marker='.', label='Naive Bayes (AUROC = %0.3f)' % nb_auc)
plt.plot(ada_fpr, ada_tpr, marker='.', label='AdaBoost (AUROC = %0.3f)' % ada_auc)
plt.plot(knn_fpr, knn_tpr, marker='.', label='kNN (AUROC = %0.3f)' % knn_auc)
plt.plot(svm_fpr, svm_tpr, marker='.', label='SVM (AUROC = %0.3f)' % svm_auc)
plt.plot(lsvm_fpr, lsvm_tpr, marker='.', label='LSVM (AUROC = %0.3f)' % lsvm_auc)
plt.plot(mlp_fpr, mlp_tpr, marker='.', label='MLP (AUROC = %0.3f)' % mlp_auc)
# Title
plt.title('ROC Plot for haberman Data Set')
# Axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# Show legend
plt.legend() #
# Show plot
plt.show()
| Codes/haberman.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np

# Grouped bar chart comparing two Tesla models across five metrics.
labels = ['power', 'speed', 'range', 'price', 'acceleration']
tesla_model3_performance = [450, 155, 348, 98.940, 3.8]  # price is thousands dollars
tesla_modelY_performance = [456, 155, 303, 56.940, 3.5]

positions = np.arange(len(labels))  # one slot per metric
bar_width = 0.4

fig, ax = plt.subplots()
# Offset each model's bars by half a bar width so the pairs sit side by side.
model3_bars = ax.bar(positions - bar_width / 2, tesla_model3_performance, bar_width, label='Tesla 3')
modelY_bars = ax.bar(positions + bar_width / 2, tesla_modelY_performance, bar_width, label='Tesla Y')

ylabel_font = {'family': 'serif', 'color': 'green', 'size': 20}
title_font = {'family': 'serif', 'color': 'blue', 'size': 15}
ax.set_ylabel('number', ylabel_font)
ax.set_title('Comparing Tesla 3 and Tesla Y', title_font)
ax.set_xticks(positions)
ax.set_xticklabels(labels)
ax.legend()

plt.grid(color='blue', linestyle='-', linewidth=2)
fig.tight_layout()
plt.show()
# -
| comparing tesla 3 and tesla y.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://githubtocolab.com/giswqs/gee-tutorials/blob/master/Image/image_styling.ipynb)
# +
# # !pip install geemap
# -
import ee
import geemap
import geemap.colormaps as cm
# +
# geemap.update_package()
# -
# Built-in palettes for common layer types (elevation, vegetation, water indices).
cm.palettes.dem
cm.palettes.ndvi
cm.palettes.ndwi
# Sample n_class evenly spaced colors from a named colormap.
cm.get_palette('terrain', n_class=8)
cm.plot_colormap('terrain', width=8.0, height=0.4, orientation='horizontal')
cm.list_colormaps()
cm.plot_colormaps(width=12, height=0.4)
# +
Map = geemap.Map()
palette = cm.palettes.dem
# palette = cm.palettes.terrain
# SRTM digital elevation model, styled from 0-4000 m with the DEM palette.
dem = ee.Image('USGS/SRTMGL1_003')
vis_params = {
    'min': 0,
    'max': 4000,
    'palette': palette}
Map.addLayer(dem, vis_params, 'SRTM DEM')
Map.add_colorbar(vis_params, label="Elevation (m)", layer_name="SRTM DEM")
Map
# -
# Colorbar variants: vertical orientation, transparent background, discrete classes.
Map.add_colorbar(vis_params, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM")
Map.add_colorbar(vis_params, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM", transparent_bg=True)
Map.add_colorbar(vis_params, discrete=True, label="Elevation (m)", orientation="vertical", layer_name="SRTM DEM")
| Image/image_styling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Nearest Neighbors
# [sklearn.neighbors](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.neighbors) provides functionality for unsupervised and supervised neighbors-based learning methods. Supervised neighbors-based learning comes in two flavors: classification for data with discrete labels, and regression for data with continuous labels.
#
# The principle behind nearest neighbor methods is to find a predefined number of training samples closest in distance to the new point, and predict the label from these. The number of samples can be a user-defined constant (k-nearest neighbor learning), or vary based on the local density of points (radius-based neighbor learning). The distance can, in general, be any metric measure: standard Euclidean distance is the most common choice. Neighbors-based methods are known as *non-generalizing* machine learning methods, since they simply “remember” all of its training data (possibly transformed into a fast indexing structure such as a Ball Tree or KD Tree.).
#
# Despite its simplicity, nearest neighbors has been successful in a large number of classification and regression problems, including handwritten digits or satellite image scenes. Being a non-parametric method, it is often successful in classification situations where the decision boundary is very irregular.
#
# ## Nearest Neighbors Classification
# Neighbors-based classification is a type of *instance-based learning* or *non-generalizing learning*: it does not attempt to construct a general internal model, but simply stores instances of the training data. Classification is computed from a simple majority vote of the nearest neighbors of each point: a query point is assigned the data class which has the most representatives within the nearest neighbors of the point.
#
# scikit-learn implements two different nearest neighbors classifiers: [KNeighborsClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier) implements learning based on the k nearest neighbors of each query point, where k is an integer value specified by the user. [RadiusNeighborsClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier.html#sklearn.neighbors.RadiusNeighborsClassifier) implements learning based on the number of neighbors within a fixed radius r of each training point, where r is a floating-point value specified by the user.
#
# The k-neighbors classification in **KNeighborsClassifier** is the more commonly used of the two techniques. The optimal choice of the value k is highly data-dependent: in general a larger k suppresses the effects of noise, but makes the classification boundaries less distinct.
#
# ## Advantages of k-Nearest Neighbors
# * Easy to understand
# * Fastest learning algorithm
# * Building this model only consists of storing the training set
# * Often gives reasonable performance without a lot of adjustments
#
# ## Disadvantages of k-Nearest Neighbors
# * When your training set is very large (either in number of features or in number of samples) prediction can be slow
# * Important to preprocess your data
# * A funamental assumption of kNN is that all dimensions are "equal", so scales should be similar.
# * Does not perform well on datasets with many features
# * Curse of Dimensionality
# * Does particularly badly with datasets where most features are 0 most of the time (so-called *sparse datasets*)
#
# *Disclaimer*: Some of the code in this notebook was lifted from the excellent book [Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do) by <NAME> and <NAME>.
# + hide_input=false
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ### A First Application: Classifying iris species
# One of the most famous datasets for classification in a supervised learning setting is the [Iris flower data set](https://en.wikipedia.org/wiki/Iris_flower_data_set). It is a multivariate dataset introduced in a 1936 paper which records sepal length, sepal width, petal length, and petal width for three species of iris.
#
# scikit-learn has a number of small toy datasets included with it which makes it quick and easy to experiment with different machine learning algorithms on these datasets.
#
# The [sklearn.datasets.load_iris()](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_iris.html#sklearn.datasets.load_iris) method can be used to load the iris dataset.
#
# #### Meet the data
# The *iris* object that is returned by **load_iris** is a **Bunch** object, which is very similar to a dictionary. It contains keys and values.
# Load the bundled iris dataset; the result is a dict-like Bunch object.
from sklearn.datasets import load_iris
iris_dataset = load_iris()
print("Keys of iris_dataset: {}".format(iris_dataset.keys()))
# The value of the key DESCR is a short description of the dataset. Here we show the beginning of the description.
print(iris_dataset['DESCR'][:193] + "\n...")
# The value of the key target_names is an array of strings, containing the species of flower that we want to predict
print("Target names: {}".format(iris_dataset['target_names']))
# The value of feature_names is a list of strings, giving the description of each feature
print("Feature names: {}".format(iris_dataset['feature_names']))
# The data itself is contained in the target and data fields.
# data contains the numeric measurements of sepal length, sepal width, petal length, and petal width in a NumPy array
print("Type of data: {}".format(type(iris_dataset['data'])))
# The rows in the data array correspond to flowers, while the columns represent the four measurements for each flower.
print("Shape of data: {}".format(iris_dataset['data'].shape))
# We see that the array contains measurements for 150 different flowers (samples). Here are values for the first 5.
print("First five columns of data:\n{}".format(iris_dataset['data'][:5]))
# The target array contains the species of each of the flowers that were measured, also as a NumPy array
print("Type of target: {}".format(type(iris_dataset['target'])))
# target is a one-dimensional array, with one entry per flower
print("Shape of target: {}".format(iris_dataset['target'].shape))
# The species are encoded as integers from 0 to 2. The meanings of the numbers are given by the target_names key.
print("Target:\n{}".format(iris_dataset['target']))
# #### Measuring Success: Training and testing data
# We want to build a machine learning model from this data that can predict the species of iris for a new set of measurements. But before we can apply our model to new measurements, we need to know whether it actually works -- that is, whether we should trust its predictions.
#
# Unfortunately, we cannot use the data we used to build the model to evaluate it. This is because our model can always simply remember the whole training set, and will therefore always predict the correct label for any point in the training set. This "remembering" does not indicate to us whether the model will *generalize* well (in other words, whether it will also perform well on new data).
#
# To assess the model's performance, we show it new data (data that it hasn't seen before) for which we have labels. This is usually done by splitting the labeled data we have collected (here, our 150 flower measurements) into two parts. One part of the data is used to build our machine learning model, and is called the *training data* or *training set*. The rest of the data will be used to assess how well the model works; this is called the *test data*, *test set*, or *hold-out set*.
#
# scikit-learn contains a function that shuffles the dataset and splits it for you: the [train_test_split](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function. This function extracts 75% of the rows in the data as the training set, together with the corresponding labels for this data. The remaining 25% of the data, together with the remaining labels, is declared as the test set. Deciding how much data you want to put into the training and the test set respectively is somewhat arbitrary, but scikit-learn's default 75/25 split is a reasonable starting point.
#
# In scikit-learn, data is usually denoted with a capital X, while labels are denoted by a lowercase y. This is inspired by the standard formulation *f(x)=y* in mathematics, where *x* is the input to a function and *y* is the output. Following more conventions from mathematics, we use a capital *X* because the data is a two-dimensional array (a matrix) and a lowercase *y* because the target is a one-dimensional array (a vector).
#
# Before making the split, the **train_test_split** function shuffles the dataset using a pseudorandom number generator. If we just took the last 25% of the data as a test set, all the data points would have the label 2, as the data points are sorted by the label.
#
# To make sure this example code will always get the same output if run multiple times, we provide the pseudorandom number generator with a fixed seed using the **random_state** parameter.
#
# The output of the **train_test_split** function is **X_train**, **X_test**, **y_train**, and **y_test**, which are all NumPy arrays. **X_train** contains 75% of the rows of the dataset, and **X_test** contains the remaining 25%.
from sklearn.model_selection import train_test_split
# Default split: 75% train / 25% test; random_state=0 makes the shuffle reproducible.
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state=0)
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
print("X_test shape: {}".format(X_test.shape))
print("y_test shape: {}".format(y_test.shape))
# #### First things first: Look at your data
# Before building a machine learning model, it is often a good idea to inspect the data, to see if the task is easily solvable without machine learning, or if the desired information might not be contained in the data.
#
# Additionally, inspecting the data is a good way to find abnormalities and peculiarities. Maybe some of your irises were measured using inches and not centimeters, for example. In the real world, inconsistencies in the data and unexpected measurements are very common, as are missing data and not-a-number (NaN) or infinite values.
#
# One of the best ways to inspect data is to visualize it. One way to do this is by using a *scatter plot*. A scatter plot of the data puts one feature along the x-axis and another along the y-axis, and draws a dot for each data point. Unfortunately, computer screens have only two dimensions, which allows us to plot only two (or maybe three) features at a time. It is difficult to plot datasets with more than three features this way. One way around this problem is to do a *pair plot*, which looks at all possible pairs of features. If you have a small number of features, such as the four we have here, this is quite reasonable. You should keep in mind, however, that a pair plot does not show the interaction of all of the features at once, so some interesting aspects of the data may not be revealed when visualizing it this way.
#
# In Python, the *pandas* library has a convenient function called [scatter_matrix](http://pandas.pydata.org/pandas-docs/version/0.18.1/visualization.html#scatter-matrix-plot) for creating pair plots for a DataFrame.
# create dataframe from data in X_train
# label the columns using the strings in iris_dataset.feature_names
iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset.feature_names)
# create a scatter matrix from the dataframe, color by y_train
# (pd.scatter_matrix was deprecated in pandas 0.20 and removed in 0.25;
# the supported entry point is pandas.plotting.scatter_matrix)
grr = pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15), marker='o',
                                 hist_kwds={'bins': 20}, s=60, alpha=.8)
# From the plots, we can see that the three classes seem to be relatively well separated using the sepal and petal measurements. This means that a machine learning model will likely be able to learn to separate them quite well.
# #### Building your model: k-Nearest Neighbors
# Now we can start building the actual machine learning model. There are many classification algorithms in *scikit-learn* that we could use. Here we will use a k-nearest neighbors classifier, which is easy to understand.
#
# The k in k-nearest neighbors signifies that instead of using only the closest neighbor to the new data point, we can consider any fixed number k of neighbors in the training (for example, the closest three or five neighbors). Then, we can make a prediction using the majority class among these neighbors. For starters, we’ll use only a single neighbor.
#
# All machine learning models in *scikit-learn* are implemented in their own classes, which are called *Estimator* classes. The k-nearest neighbors classification algorithm is implemented in the [KNeighborsClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html#sklearn.neighbors.KNeighborsClassifier) class in the *neighbors* module. Before we can use the model, we need to instantiate the class into an object. This is when we will set any parameters of the model. The most important parameter of KNeighborsClassifier is **n_neighbors **, the number of neighbors, which we will set to 1. The default value for **n_neighbors** is 5.
from sklearn.neighbors import KNeighborsClassifier
# n_neighbors=1: classify by the single closest training sample.
knn = KNeighborsClassifier(n_neighbors=1)
# The *knn* object encapsulates the algorithm that will be used to build the model from the training data, as well the algorithm to make predictions on new data points. It will also hold the information that the algorithm has extracted from the training data. In the case of **KNeighborsClassifier**, it will just store the training set.
#
# To build the model on the training set, we call the **fit** method of the *knn* object, which takes as arguments the NumPy array *X_train* containing the training data and the NumPy array *y_train* of the corresponding training labels.
knn.fit(X_train, y_train)
# #### Making predictions
# We can now make predictions using this model on new data for which we might not know the correct labels. Imagine we found an iris in the wild with a sepal length of 5 cm, a sepal width of 2.9 cm, a petal length of 1 cm, and a petal width of 0.2 cm. What species of iris would this be? We can put this data into a NumPy array, which in this case will be of shape 1 x 4 (1 row/sample x 4 features).
#
# Note: Even though we made the measurements of this single flower, *scikit-learn* always expects two-dimensional arrays for the data.
#
# To make a prediction, we call the **predict** method of the knn object.
X_new = np.array([[5, 2.9, 1, 0.2]])
print("X_new.shape: {}".format(X_new.shape))
prediction = knn.predict(X_new)
print("Prediction: {}".format(prediction))
print("Predicted target name: {}".format(iris_dataset['target_names'][prediction]))
# #### Evaluating the model
# How do we know whether we can trust our model? This is where the test set that we created earlier comes in. This data was not used to build the model, but we do know what the correct species is for each iris in the test set.
#
# Therefore, we can make a prediction for each iris in the test data and compare it against its label (the known species). We can measure how well the model works by computing the *accuracy*, which is the fraction of flowers for which the right species was predicted.
#
# We can also use the **score** method of the knn object, which will compute the test set accuracy for us.
y_pred = knn.predict(X_test)
print("Test set predictions:\n {}".format(y_pred))
# Accuracy computed two equivalent ways: manually, and via the estimator's score().
print("Test set score: {:.2f}".format(np.mean(y_pred == y_test)))
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# For this model, the test set accuracy is about 0.97, which means we made the right prediction for 97% of the irises in the test set. Under some mathematical assumptions, this means that we can expect our model to be correct about 97% of the time for new irises.
#
# A more advanced model may be able to do a better job, but with an overlapping dataset like this, it is unlikely that we would ever be able to achieve 100% accuracy.
# #### Summary
# Here is a summary of the code needed for the whole training and evaluation procedure (just 4 lines!).
#
# This snippet contains the core code for applying any machine learning algorithm using *scikit-learn*. The **fit**, **predict**, and **score** methods are the common interface to supervised models in *scikit-learn*.
# +
X_train, X_test, y_train, y_test = train_test_split(iris_dataset['data'], iris_dataset['target'], random_state=0)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("Test set score: {:.2f}".format(knn.score(X_test, y_test)))
# -
# # Effect of k
# Let's investigate the effect of varying k. The iris dataset isn't ideal for this purpose, so let's create a synthetic dataset that is better suited to it.
# +
import numbers
import numpy as np
from sklearn.utils import check_array, check_random_state
from sklearn.utils import shuffle as shuffle_
def make_blobs(n_samples=100, n_features=2, centers=2, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    n_samples : int, or tuple, optional (default=100)
        The total number of points equally divided among clusters.
    n_features : int, optional (default=2)
        The number of features for each sample.
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        The number of centers to generate, or the fixed center locations.
    cluster_std: float or sequence of floats, optional (default=1.0)
        The standard deviation of the clusters.
    center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
        The bounding box for each cluster center when centers are
        generated at random.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.
    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)
    # An integer `centers` means "draw that many center coordinates uniformly
    # inside center_box"; an array of coordinates is used verbatim.
    if isinstance(centers, numbers.Integral):
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]
    # A scalar std is broadcast so every cluster shares the same spread.
    if isinstance(cluster_std, numbers.Real):
        cluster_std = np.ones(len(centers)) * cluster_std
    X = []
    y = []
    n_centers = centers.shape[0]
    # Divide n_samples as evenly as possible: the first (n_samples % n_centers)
    # clusters each receive one extra point.
    if isinstance(n_samples, numbers.Integral):
        n_samples_per_center = [int(n_samples // n_centers)] * n_centers
        for i in range(n_samples % n_centers):
            n_samples_per_center[i] += 1
    else:
        n_samples_per_center = n_samples
    # Sample each cluster from an isotropic Gaussian around its center;
    # labels are the cluster indices.
    for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
        X.append(centers[i] + generator.normal(scale=std,
                                               size=(n, n_features)))
        y += [i] * n
    X = np.concatenate(X)
    y = np.array(y)
    if shuffle:
        X, y = shuffle_(X, y, random_state=generator)
    return X, y
# -
def make_forge():
    """Return the hand-crafted "forge" toy dataset (26 samples, 2 classes).

    Generates 30 points from two Gaussian blobs, flips the labels of two
    points and removes four others so that the classes overlap slightly,
    which makes the effect of k in k-nearest neighbors easy to visualize.

    Returns
    -------
    X : ndarray of shape (26, 2)
        The 2D feature matrix.
    y : ndarray of shape (26,)
        Integer class labels (0 or 1).
    """
    X, y = make_blobs(centers=2, random_state=4, n_samples=30)
    # Flip two labels to create deliberate class overlap.
    y[np.array([7, 27])] = 0
    # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the correct dtype here.
    mask = np.ones(len(X), dtype=bool)
    mask[np.array([0, 1, 5, 26])] = 0
    X, y = X[mask], y[mask]
    return X, y
from sklearn.model_selection import train_test_split
# Build the forge data and split off a test portion for later evaluation.
X, y = make_forge()
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# +
import matplotlib as mpl
from matplotlib.colors import colorConverter
def discrete_scatter(x1, x2, y=None, markers=None, s=10, ax=None,
                     labels=None, padding=.2, alpha=1, c=None, markeredgewidth=None):
    """Adaption of matplotlib.pyplot.scatter to plot classes or clusters.
    Parameters
    ----------
    x1 : nd-array
        input data, first axis
    x2 : nd-array
        input data, second axis
    y : nd-array
        input data, discrete labels
    cmap : colormap
        Colormap to use.
    markers : list of string
        List of markers to use, or None (which defaults to 'o').
    s : int or float
        Size of the marker
    ax : matplotlib axes, optional
        Axes to draw into; defaults to the current axes.
    labels : list, optional
        Legend labels, one per unique value of y (defaults to the values).
    padding : float
        Fraction of the dataset range to use for padding the axes.
    alpha : float
        Alpha value for all points.
    c : color or sequence of colors, optional
        Explicit color(s); if None the matplotlib color cycle is used.
    markeredgewidth : float, optional
        Width of the marker edge lines.
    Returns
    -------
    lines : list of Line2D
        One artist per unique value of y (useful for building legends).
    """
    if ax is None:
        ax = plt.gca()
    # No labels given: treat everything as one class.
    if y is None:
        y = np.zeros(len(x1))
    unique_y = np.unique(y)
    if markers is None:
        markers = ['o', '^', 'v', 'D', 's', '*', 'p', 'h', 'H', '8', '<', '>'] * 10
    # A single marker is repeated for every class.
    if len(markers) == 1:
        markers = markers * len(unique_y)
    if labels is None:
        labels = unique_y
    # lines in the matplotlib sense, not actual lines
    lines = []
    current_cycler = mpl.rcParams['axes.prop_cycle']
    # One markers-only plot() call per class, walking the color cycle.
    for i, (yy, cycle) in enumerate(zip(unique_y, current_cycler())):
        mask = y == yy
        # if c is none, use color cycle
        if c is None:
            color = cycle['color']
        # NOTE(review): a single color string like "red" has len > 1 and would
        # be indexed per class here — presumably c is expected to be a list of
        # colors or a one-character code; confirm against callers.
        elif len(c) > 1:
            color = c[i]
        else:
            color = c
        # use light edge for dark markers
        if np.mean(colorConverter.to_rgb(color)) < .4:
            markeredgecolor = "grey"
        else:
            markeredgecolor = "black"
        lines.append(ax.plot(x1[mask], x2[mask], markers[i], markersize=s,
                             label=labels[i], alpha=alpha, c=color,
                             markeredgewidth=markeredgewidth,
                             markeredgecolor=markeredgecolor)[0])
    # Grow (never shrink) the axis limits by `padding` times the std of the
    # data along each axis, so points are not flush with the plot edge.
    if padding != 0:
        pad1 = x1.std() * padding
        pad2 = x2.std() * padding
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        ax.set_xlim(min(x1.min() - pad1, xlim[0]), max(x1.max() + pad1, xlim[1]))
        ax.set_ylim(min(x2.min() - pad2, ylim[0]), max(x2.max() + pad2, ylim[1]))
    return lines
# -
# Visualize the forge dataset: feature 0 vs feature 1, colored by class.
plt.figure(figsize=(10,6))
discrete_scatter(X[:, 0], X[:, 1], y)
plt.legend(["training class 0", "training class 1"])
# +
from sklearn.metrics import euclidean_distances
def plot_knn_classification(n_neighbors=1):
    """Plot the forge data with three query points (stars) and arrows from
    each query to its n_neighbors closest training points.

    The stars are colored by the k-NN prediction; training points are
    colored by their true class.
    """
    X_train, y_train = make_forge()
    queries = np.array([[8.2, 3.66214339], [9.9, 3.2], [11.2, .5]])
    # Rank training points by distance to each query (one column per query).
    order = np.argsort(euclidean_distances(X_train, queries), axis=0)
    for point, ranked in zip(queries, order.T):
        for idx in ranked[:n_neighbors]:
            dx = X_train[idx, 0] - point[0]
            dy = X_train[idx, 1] - point[1]
            plt.arrow(point[0], point[1], dx, dy, head_width=0, fc='k', ec='k')
    model = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X_train, y_train)
    # Draw predictions first so the legend lists them after the training classes.
    test_points = discrete_scatter(queries[:, 0], queries[:, 1],
                                   model.predict(queries), markers="*")
    training_points = discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
    plt.legend(training_points + test_points, ["training class 0", "training class 1",
                                               "test pred 0", "test pred 1"])
# -
# k = 1: each star copies the label of its single closest training point
plot_knn_classification(n_neighbors=1)
# Here, we added three new data points, shown as stars. For each of them, we marked the closest point in the training set. The prediction of the one-nearest-neighbor algorithm is the label of that point (shown by the color of the cross).
#
# Instead of considering only the closest neighbor, we can also consider an arbitrary number, k, of neighbors. This is where the name of the k-nearest neighbors algorithm comes from. When considering more than one neighbor, we use voting to assign a label. This means that for each test point, we count how many neighbors belong to class 0 and how many neighbors belong to class 1. We then assign the class that is more frequent: in other words, the majority class among the k-nearest neighbors. The following example uses the three closest neighbors.
# k = 3: each star takes the majority label among its three nearest neighbors
plot_knn_classification(n_neighbors=3)
# Again, the prediction is shown as the color of the cross. You can see that the prediction for the new data point at the top left is not the same as the prediction when we used only one neighbor.
#
# While this illustration is for a binary classification problem, this method can be applied to datasets with any number of classes. For more classes, we count how many neighbors belong to each class and again predict the most common class.
# ### Analyzing Decision Boundary as k varies
# For two-dimensional datasets, we can also illustrate the prediction for all possible test points in the xy-plane. We color the plane according to the class that would be assigned to a point in this region. This lets us view the *decision boundary*, which is the divide between where the algorithm assigns class 0 versus where it assigns class 1. The following code produces the visualizations of the decision boundaries for one, three, and nine neighbors.
# +
from matplotlib.colors import ListedColormap
# Two-class colormap: dark blue for class 0, red for class 1.
cm2 = ListedColormap(['#0000aa', '#ff2020'])
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None, alpha=1,
                      cm=cm2, linewidth=None, threshold=None, linestyle="solid"):
    """Plot the decision boundary of a fitted binary classifier over 2D data.

    Evaluates the classifier on a 100x100 grid spanning the data (plus an
    `eps` margin on each side) and draws either filled decision regions
    (fill=True) or just the boundary contour.

    Parameters
    ----------
    classifier : fitted estimator
        Must provide decision_function or predict_proba.
    X : array of shape (n_samples, 2)
        Used only to determine the plotting extent.
    threshold : float, optional
        Boundary level; defaults to 0 (decision_function) or 0.5 (proba).
    """
    # Assumes a binary problem: a single boundary level is drawn.
    if eps is None:
        eps = X.std() / 2.
    if ax is None:
        ax = plt.gca()
    # Grid covering the data range plus an eps margin on every side.
    x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx = np.linspace(x_min, x_max, 100)
    yy = np.linspace(y_min, y_max, 100)
    X1, X2 = np.meshgrid(xx, yy)
    X_grid = np.c_[X1.ravel(), X2.ravel()]
    try:
        # Prefer the signed decision function; its boundary sits at 0.
        decision_values = classifier.decision_function(X_grid)
        levels = [0] if threshold is None else [threshold]
        fill_levels = [decision_values.min()] + levels + [decision_values.max()]
    except AttributeError:
        # no decision_function: fall back to P(class 1), boundary at 0.5
        decision_values = classifier.predict_proba(X_grid)[:, 1]
        levels = [.5] if threshold is None else [threshold]
        fill_levels = [0] + levels + [1]
    if fill:
        ax.contourf(X1, X2, decision_values.reshape(X1.shape),
                    levels=fill_levels, alpha=alpha, cmap=cm)
    else:
        ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
                   colors="black", alpha=alpha, linewidths=linewidth,
                   linestyles=linestyle, zorder=5)
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    # Tick positions carry no meaning for this synthetic grid plot.
    ax.set_xticks(())
    ax.set_yticks(())
# +
# Decision boundaries for k = 1, 3, 9 side by side: more neighbors produce a
# smoother (simpler) boundary.
fig, axes = plt.subplots(1, 3, figsize=(10, 3))
for n_neighbors, ax in zip([1, 3, 9], axes):
    # the fit method returns the object self, so we can instantiate
    # and fit in one line
    clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
    plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)
    discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
    ax.set_title("{} neighbor(s)".format(n_neighbors))
    ax.set_xlabel("feature 0")
    ax.set_ylabel("feature 1")
axes[0].legend(loc=3)
# -
# As you can see on the left in the figure, using a single neighbor results in a decision boundary that follows the training data closely. Considering more and more neighbors leads to a smoother decision boundary. A smoother boundary corresponds to a simpler model. In other words, using few neighbors corresponds to high model complexity, and using many neighbors corresponds to low model complexity. If you consider the extreme case where the number of neighbors is the number of all data points in the training set, each test point would have exactly the same neighbors (all training points) and all predictions would be the same: the class that is most frequent in the training set.
#
# Let’s investigate whether we can confirm the connection between model complexity and generalization. We will do this on the real-world Breast Cancer dataset. We begin by splitting the dataset into a training and a test set. Then we evaluate training and test set performance with different numbers of neighbors.
# +
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# Stratify so both splits keep the original benign/malignant class ratio.
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=66)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)
for n_neighbors in neighbors_settings:
    # build the model
    clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X_train, y_train)
    # record training set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # record generalization accuracy
    test_accuracy.append(clf.score(X_test, y_test))
# Plot both curves to visualize the complexity/generalization trade-off.
plt.figure(figsize=(10,6))
plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
# -
# The plot shows the training and test set accuracy on the y-axis against the setting of n_neighbors on the x-axis. While real-world plots are rarely very smooth, we can still recognize some of the characteristics of overfitting and underfitting (note that because considering fewer neighbors corresponds to a more complex model, the plot is horizontally flipped relative to what is conventionally seen. Considering a single nearest neighbor, the prediction on the training set is perfect. But when more neighbors are considered, the model becomes simpler and the training accuracy drops. The test set accuracy for using a single neighbor is lower than when using more neighbors, indicating that using the single nearest neighbor leads to a model that is too complex. On the other hand, when considering 10 neighbors, the model is too simple and performance is even worse. The best performance is somewhere in the middle, using around six neighbors. Still, it is good to keep the scale of the plot in mind. The worst performance is around 88% accuracy, which might still be acceptable.
# # Strengths, weaknesses, and parameters of kNN
# In principle, there are two important parameters to the KNeighbors classifier: the number of neighbors and how you measure distance between data points. In practice, using a small number of neighbors like three or five often works well, but you should certainly adjust this parameter. Choosing the right distance measure is complicated and in the real world would likely require domain knowledge. By default, Euclidean distance is used, which works well in many settings.
#
# One of the strengths of k-NN is that the model is very easy to understand, and often gives reasonable performance without a lot of adjustments. Using this algorithm is a good baseline method to try before considering more advanced techniques. Building the nearest neighbors model is usually very fast, but when your training set is very large (either in number of features or in number of samples) prediction can be slow. When using the k-NN algorithm, it’s important to preprocess your data (particularly scaling it using either StandardScaler or MinMaxScaler). This approach often does not perform well on datasets with many features (hundreds or more), and it does particularly badly with datasets where most features are 0 most of the time (so-called sparse datasets).
#
# So, while the nearest k-neighbors algorithm is easy to understand, it is not often used in practice, due to prediction being slow and its inability to handle many features.
| SL4_Instance_Based_Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from keras.callbacks import History, ReduceLROnPlateau,EarlyStopping,ModelCheckpoint
import os
import numpy as np
from data_analysis import calculate_metrics, load_weights_and_evaluate
from model_builders import GCN_pretraining
from hyperparameter_tuning_GCN import objective
from functools import partial
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
import pickle
import dill
from hyper_mining import objective_fn
import keras
import keras.backend as K
from keras.layers import Dense, Dropout, Input, Lambda, concatenate,Flatten
from keras.models import Model, load_model
from hyper_mining import XGB_predictor
from data_analysis import calculate_metrics
from distance_and_mask_fn import pairwise_distance,masked_maximum,masked_minimum
import seaborn as sns
from NGF.preprocessing import tensorise_smiles
from custom_layers.model_creator import encode_smiles, stage_creator
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import dtypes
# +
class GCN_online_mining(object):
    """Graph-convolutional embedding network trained with online triplet mining.

    Builds a three-stage graph-convolution encoder that produces a molecular
    fingerprint, adds a dense head emitting L2-normalized embeddings, and
    wraps everything in a trainable model whose output concatenates the batch
    labels with the embeddings (the format the triplet loss expects).
    """
    def __init__(self, model_params):
        # model_params: dict of architecture/training hyperparameters
        # (see the gcn_params dict defined later in this notebook).
        self.model_params = model_params
    def build_encoder(self):
        """Build the graph-conv encoder: [atoms, bonds, edges] -> fingerprint.

        The final fingerprint is the element-wise sum of the three per-stage
        fingerprints.
        """
        # stage_creator returns a pair of models per stage; index [0] is the
        # convolution stage, [1] the fingerprint head — presumably, see
        # custom_layers.model_creator for the exact contract.
        model_enc_1 = stage_creator(self.model_params, 1, conv=True)[0]
        model_enc_2 = stage_creator(self.model_params, 2, conv=True)[0]
        model_enc_3 = stage_creator(self.model_params, 3, conv=True)[0]
        model_enc_fp_1 = stage_creator(self.model_params, 1, conv=False)[1]
        model_enc_fp_2 = stage_creator(self.model_params, 2, conv=False)[1]
        model_enc_fp_3 = stage_creator(self.model_params, 3, conv=False)[1]
        atoms, bonds, edges = encode_smiles(self.model_params["max_atoms"],
                                            self.model_params["num_atom_features"],
                                            self.model_params["max_degree"],
                                            self.model_params["num_bond_features"])
        # Chain the three convolution stages; each consumes the previous
        # stage's atom representations plus the fixed bond/edge tensors.
        graph_conv_1 = model_enc_1([atoms, bonds, edges])
        graph_conv_2 = model_enc_2([graph_conv_1, bonds, edges])
        graph_conv_3 = model_enc_3([graph_conv_2, bonds, edges])
        # Each fingerprint head's output is summed over axis 1 (assumed to be
        # the atom dimension — TODO confirm) to give one vector per molecule.
        fingerprint_1 = model_enc_fp_1([graph_conv_1, bonds, edges])
        fingerprint_1 = Lambda(lambda x: K.sum(x, axis=1), output_shape=lambda s: (s[0], s[2]))(fingerprint_1)
        fingerprint_2 = model_enc_fp_2([graph_conv_2, bonds, edges])
        fingerprint_2 = Lambda(lambda x: K.sum(x, axis=1), output_shape=lambda s: (s[0], s[2]))(fingerprint_2)
        fingerprint_3 = model_enc_fp_3([graph_conv_3, bonds, edges])
        fingerprint_3 = Lambda(lambda x: K.sum(x, axis=1), output_shape=lambda s: (s[0], s[2]))(fingerprint_3)
        final_fingerprint = keras.layers.add([fingerprint_1, fingerprint_2, fingerprint_3])
        return Model([atoms, bonds, edges], [final_fingerprint])
    def build_model(self, encoder, verbose=False):
        """Attach the dense head: fingerprint -> L2-normalized embedding."""
        atoms = Input(name='atom_inputs',shape=(self.model_params['max_atoms'],
                                                self.model_params['num_atom_features']), dtype='float32')
        bonds = Input(name='bond_inputs', shape=(self.model_params['max_atoms'],
                                                 self.model_params['max_degree'],
                                                 self.model_params['num_bond_features']),dtype='float32')
        edges = Input(name='edge_inputs', shape=(self.model_params['max_atoms'],
                                                 self.model_params['max_degree']),dtype='int32')
        encode_drug = encoder([atoms, bonds, edges])
        # Fully connected
        FC1 = Dense(self.model_params["dense_size"][0],
                    activation='relu',kernel_initializer='random_normal')(encode_drug)
        FC2 = Dropout(self.model_params["dropout_rate"][0])(FC1)
        FC2 = Dense(self.model_params["dense_size"][1],
                    activation='relu',kernel_initializer='random_normal')(FC2)
        FC2 = Dropout(self.model_params["dropout_rate"][1])(FC2)
        # Final layer has no activation; the L2 normalization below puts the
        # embeddings on the unit sphere, as triplet losses expect.
        FC2 = Dense(self.model_params["dense_size"][2],
                    activation = None,kernel_initializer='random_normal')(FC2)
        embeddings = Lambda(lambda x: K.l2_normalize(x,axis=1),name = 'Embeddings')(FC2)
        gcn_model = Model(inputs=[atoms, bonds, edges], outputs = embeddings)
        if verbose:
            print('encoder')
            encoder.summary()
            print('GCN_model')
        return gcn_model
    def build_mining(self,gcn_model):
        """Wrap gcn_model for training: output = [label | embedding] rows.

        The labels are routed through the model output because the triplet
        loss recovers them from y_pred's first column (Keras losses only see
        y_true/y_pred).
        """
        atoms = Input(name='atom_inputs',shape=(self.model_params['max_atoms'],
                                                self.model_params['num_atom_features']), dtype='float32')
        bonds = Input(name='bond_inputs', shape=(self.model_params['max_atoms'],
                                                 self.model_params['max_degree'],
                                                 self.model_params['num_bond_features']),dtype='float32')
        edges = Input(name='edge_inputs', shape=(self.model_params['max_atoms'],
                                                 self.model_params['max_degree']),dtype='int32')
        labels = Input(name = 'labels_inputs',shape = (1,),dtype = 'float32')
        encoded = gcn_model([atoms,bonds,edges])
        labels_plus_embeddings = concatenate([labels, encoded])
        mining_net = Model(inputs = [atoms,bonds,edges,labels],outputs = labels_plus_embeddings)
        adam = keras.optimizers.Adam(lr = self.model_params["lr"],
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     decay=0.0,
                                     amsgrad=False)
        mining_net.compile(optimizer=adam , loss = triplet_loss_adapted_from_tf)
        return mining_net
    def dataframe_to_gcn_input(self,input_data):
        """Tensorise the SMILES strings in input_data['rdkit'] into the
        [atoms, bonds, edges] tensors the encoder consumes."""
        x_atoms_cold, x_bonds_cold, x_edges_cold = tensorise_smiles(input_data['rdkit'],
                                                                    max_degree=self.model_params['max_degree'],max_atoms=self.model_params['max_atoms'])
        return [x_atoms_cold, x_bonds_cold, x_edges_cold]
def triplet_loss_adapted_from_tf(y_true, y_pred):
    """Semi-hard triplet loss, adapted from TensorFlow's
    tf.contrib.losses.metric_learning.triplet_semihard_loss.

    y_pred packs the integer class labels in column 0 and the embeddings in
    the remaining columns (see GCN_online_mining.build_mining); y_true is a
    dummy tensor and is ignored.

    NOTE(review): the margin is read from the module-level gcn_params dict
    rather than being a parameter — confirm this coupling is intentional
    before reusing the function outside this notebook.
    """
    del y_true
    margin = gcn_params['margin']
    # Split the packed output: first column = labels, rest = embeddings.
    labels = y_pred[:, :1]
    labels = tf.cast(labels, dtype='int32')
    embeddings = y_pred[:, 1:]
    ### Code from Tensorflow function [tf.contrib.losses.metric_learning.triplet_semihard_loss] starts here:
    # Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
    # lshape=array_ops.shape(labels)
    # assert lshape.shape == 1
    # labels = array_ops.reshape(labels, [lshape[0], 1])
    # Build pairwise squared distance matrix.
    pdist_matrix = pairwise_distance(embeddings, squared=False)
    # Build pairwise binary adjacency matrix.
    adjacency = math_ops.equal(labels, array_ops.transpose(labels))
    # Invert so we can select negatives only.
    adjacency_not = math_ops.logical_not(adjacency)
    # global batch_size
    batch_size = array_ops.size(labels)  # was 'array_ops.size(labels)'
    # Compute the mask.
    pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
    # mask[a*B + p, n] is True when n is a valid semi-hard negative for the
    # anchor/positive pair (a, p): different class AND farther than D(a, p).
    mask = math_ops.logical_and(
        array_ops.tile(adjacency_not, [batch_size, 1]),
        math_ops.greater(
            pdist_matrix_tile, array_ops.reshape(
                array_ops.transpose(pdist_matrix), [-1, 1])))
    # mask_final marks (a, p) pairs that have at least one semi-hard negative.
    mask_final = array_ops.reshape(
        math_ops.greater(
            math_ops.reduce_sum(
                math_ops.cast(mask, dtype=dtypes.float32), 1, keepdims=True),
            0.0), [batch_size, batch_size])
    mask_final = array_ops.transpose(mask_final)
    adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
    mask = math_ops.cast(mask, dtype=dtypes.float32)
    # negatives_outside: smallest D_an where D_an > D_ap.
    negatives_outside = array_ops.reshape(
        masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
    negatives_outside = array_ops.transpose(negatives_outside)
    # negatives_inside: largest D_an.
    negatives_inside = array_ops.tile(
        masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
    # Prefer the semi-hard negative when one exists; otherwise fall back to
    # the hardest (farthest) negative.
    semi_hard_negatives = array_ops.where(
        mask_final, negatives_outside, negatives_inside)
    loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
    # Positive-pair mask, excluding the diagonal (anchor paired with itself).
    mask_positives = math_ops.cast(
        adjacency, dtype=dtypes.float32) - array_ops.diag(
        array_ops.ones([batch_size]))
    # In lifted-struct, the authors multiply 0.5 for upper triangular
    # in semihard, they take all positive pairs except the diagonal.
    num_positives = math_ops.reduce_sum(mask_positives)
    # Hinge the per-pair losses at zero and average over positive pairs.
    semi_hard_triplet_loss_distance = math_ops.truediv(
        math_ops.reduce_sum(
            math_ops.maximum(
                math_ops.multiply(loss_mat, mask_positives), 0.0)),
        num_positives,
        name='triplet_semihard_loss')
    ### Code from Tensorflow function semi-hard triplet loss ENDS here.
    return semi_hard_triplet_loss_distance
# -
# # Model Parameters
# Training callbacks: stop after 8 epochs without loss improvement, and halve
# the learning rate after 4 stagnant epochs (floor at 1e-7).
es = EarlyStopping(monitor='loss',patience=8, min_delta=0)
rlr = ReduceLROnPlateau(monitor='loss',factor=0.5, patience=4, verbose=1, min_lr=0.0000001)
# GCN architecture and training hyperparameters.
# NOTE(review): the precise float values look like the output of a
# hyperparameter search (see hyperparameter_tuning_GCN import above) —
# confirm before editing them by hand.
gcn_params = {
    "num_layers" : 3,
    "max_atoms" : 70,
    "num_atom_features" : 62,
    "num_atom_features_original" : 62,
    "num_bond_features" : 6,
    "max_degree" : 5,
    "conv_width" : [int(88), int(96), int(160)],
    "fp_length" : [int(160), int(160), int(160)],
    "activ_enc" : "selu",
    "activ_dec" : "selu",
    "learning_rates" : [0.001,0.001,0.001],
    "learning_rates_fp": [0.005,0.005,0.005],
    "losses_conv" : {
        "neighbor_output": "mean_squared_error",
        "self_output": "mean_squared_error",
    },
    "lossWeights" : {"neighbor_output": 1.0, "self_output": 1.0},
    "metrics" : "mse",
    "loss_fp" : "mean_squared_error",
    "enc_layer_names" : ["enc_1", "enc_2", "enc_3"],
    'callbacks' : [es,rlr],
    'adam_decay': 0.0005329142291371636,
    'beta': 5,
    'p': 0.004465204118126482,
    'dense_size' : [int(96), int(416), int(320)],
    'dropout_rate' : [0.45547284530546817, 0.45547284530546817],
    'lr' : 0.0019499018534396453,
    'batch_size' : int(96),
    'n_epochs' : int(60),
    'margin' : 0.6011401246738063
}
# XGBoost booster parameters for the downstream binary classifier trained on
# the learned embeddings.
xgb_params = {
    "colsample_bylevel" : 0.19610957495328543,
    "colsample_bytree" : 0.9504328963958085,
    "gamma" : 0.66484093460,
    "eta" : 0.9073217403165388,
    "max_delta_step" : int(1),
    "max_depth" : int(8),
    "min_child_weight" : int(355),
    "alpha" : 56.190739151936484,
    "lambda" : 52.450354051682496,
    "subsample" : 0.8316785844601918,
    "max_bin" : int(208),
    "eval_metric":'auc',
    "objective":'binary:logistic',
    "booster":'gbtree',
    "single_precision_histogram" : True
}
# Helper objects used by the per-fold training loop below.
class_XGB = XGB_predictor(xgb_params)
class_GCN = GCN_online_mining(gcn_params)
# # Hard Splits
# +
# Load the seven AVE-bias ("hard") train/validation splits for p38.
# Each fold directory holds one train_<i>.csv and one val_<i>.csv whose first
# column is the index. The per-fold loop replaces fourteen copy-pasted
# read_csv lines (and the matching `del` churn) from the original.
train_set = [
    pd.read_csv(f"data/p38/split_aveb/fold_{i}/train_{i}.csv", index_col=0)
    for i in range(7)
]
val_set = [
    pd.read_csv(f"data/p38/split_aveb/fold_{i}/val_{i}.csv", index_col=0)
    for i in range(7)
]
# -
# Free the hard-split folds; the cells below use the semi-hard splits instead.
del val_set , train_set
# # Semi Hard Splits
# +
# Semi-hard splits: load the precomputed train/validation fold indices and
# slice the master dataframe into per-fold training/validation frames.
target = 'p38'
# NOTE(review): machine-specific absolute path — consider making this
# configurable (environment variable or relative path).
base_path = f'C:/Users/tomas/Documents/GitHub/kinase_binding'
data_fpath = base_path+f'/data/{target}/data.csv'
df = pd.read_csv(data_fpath).set_index('biolab_index')
with open(base_path+f'/data/{target}/train_val_folds.pkl', "rb") as in_f:
    train_val_folds = dill.load(in_f)
with open(base_path+f'/data/{target}/train_test_folds.pkl', "rb") as in_f:
    train_test_folds = dill.load(in_f)
# Each fold is an (train_index, val_index) pair; build the six dataframe
# lists with a loop instead of repeating the indexing once per fold.
training_list = [df.loc[train_val_folds[i][0]] for i in range(6)]
validation_list = [df.loc[train_val_folds[i][1]] for i in range(6)]
# -
training_metrics = {}
validation_metrics = {}
# One full cycle per fold: fit the GCN with online triplet mining, embed both
# splits, then train an XGBoost classifier on the embeddings and record
# metrics for each split.
for i in range(len(training_list)):
    # Tensorise the molecules. The Y_dummy_* arrays are placeholder targets:
    # the triplet loss ignores y_true and reads the real labels from the
    # model's `labels` input instead (see build_mining).
    X_atoms_cold,X_bonds_cold,X_edges_cold = class_GCN.dataframe_to_gcn_input(validation_list[i])
    Y_cold = validation_list[i].Binary
    Y_dummy_cold = np.empty((X_atoms_cold.shape[0],gcn_params['dense_size'][2]+1))
    X_atoms_train, X_bonds_train, X_edges_train = class_GCN.dataframe_to_gcn_input(training_list[i])
    Y = training_list[i].Binary
    Y_dummy_train = np.empty((X_atoms_train.shape[0],gcn_params['dense_size'][2]+1))
    # Build a fresh model per fold so no weights leak across folds.
    gcn_encoder = class_GCN.build_encoder()
    gcn_model = class_GCN.build_model(gcn_encoder)
    gcn_mining = class_GCN.build_mining(gcn_model)
    gcn_mining.fit([X_atoms_train,X_bonds_train,X_edges_train,Y],
                   Y_dummy_train,
                   epochs = gcn_params['n_epochs'],
                   batch_size = gcn_params['batch_size'],
                   shuffle = True,
                   validation_data = ([X_atoms_cold,X_bonds_cold,X_edges_cold,Y_cold],Y_dummy_cold)
                   )
    #Predict Embeddings
    embeddings_cold = gcn_model.predict([X_atoms_cold,X_bonds_cold,X_edges_cold])
    embeddings_train = gcn_model.predict([X_atoms_train, X_bonds_train, X_edges_train])
    #Prepare data for XGBoost
    dmatrix_train = class_XGB.to_xgb_input(Y,embeddings_train)
    dmatrix_cold = class_XGB.to_xgb_input(Y_cold,embeddings_cold)
    evalist = [(dmatrix_train,'train'),(dmatrix_cold,'eval')]
    xgb_model = class_XGB.build_model(dmatrix_train,evalist,300)
    xgb_pred_cold = xgb_model.predict(dmatrix_cold)
    validation_metrics['Val_%s'%i] = calculate_metrics(np.array(Y_cold),xgb_pred_cold)
    xgb_pred_train = xgb_model.predict(dmatrix_train)
    training_metrics['Train_%s'%i] = calculate_metrics(np.array(Y),xgb_pred_train)
# Display the per-fold training metrics (notebook cell output).
training_metrics
| learning/.ipynb_checkpoints/test_online_triplet_loss-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (handling_errors)=
# # Handling errors
# When an _error_ occurs in Python, an _exception_ is _raised_.
#
# (error_types)=
# ## Error types
# The full list of built-in exceptions is available in the [documentation](https://docs.python.org/3/library/exceptions.html#concrete-exceptions).
#
# For example, when we create a tuple with only four elements and want to print the 5th element, we get this error message:
#
# ```python
# flower_names = ("iris", "poppy", "dandelion", "rose")
#
# print(flower_names[4])
#
# ---------------------------------------------------------------------------
# IndexError Traceback (most recent call last)
# <ipython-input-2-3497b57dd596> in <module>
# 4 "rose")
# 5
# ----> 6 print(flower_names[4])
#
# IndexError: tuple index out of range
# ```
#
# `IndexError` refers to incorrect index, such as out of bounds.
#
# (raising_errors)=
# ## Raising errors
# In-built functions trigger or `raise` errors in Python. We can do that as well by using _raise_:
#
# ```python
# raise ValueError
#
# ---------------------------------------------------------------------------
# ValueError Traceback (most recent call last)
# <ipython-input-5-e4c8e09828d5> in <module>
# ----> 1 raise ValueError
#
# ValueError:
# ```
#
# ## Catching errors
#
# (try_except)=
# ### Try and except blocks
# To handle errors in the code gracefully, we can use `try` and `except` blocks to avoid unnecessary crashes of the program. The `try` block executes the program and if the program fails, exception is raised and we can recover from the error. This is especially useful if we do not want our program to crash (imagine YouTube crashing!). The syntax is:
#
# ```python
# try:
# # some code
# except ExceptionName1:
# # code to be executed if ExceptionName1 is raised
# except ExceptionName2:
# # code to be executed if ExceptionName2 is raised
# …
# except:
# # lines to execute if there was an exception not caught above
# else:
# # lines to execute if there was no exception at all
# ```
#
# `else` is optional, at least one `except` needs to be raised. An example code below:
# +
a = "ten"
# Try to create a number out of a string
try:
float(a)
# Raise an error if characters are not numbers
except ValueError:
print("ValueError.\nPlease use digits in your string.")
# -
# (handling_errors_exercises)=
# ## Exercises
# ------------
# * **Identify** which errors will be thrown in the following code samples (without running it!):
#
# ```python
# s += 1
# ```
#
# ```python
# s = [1,0,1,3,2,4]
# s[9]
# ```
#
# ```python
# "str" + 7
# ```
#
# ```python
# [0]*(2**5000)
# ```
# ```{admonition} Answer
# :class: dropdown
#
# 1) NameError
#
# 2) IndexError
#
# 3) TypeError
#
# 4) OverflowError
# ```
# _________________
# * **Types are gone!** Say we have a list:
#
# ```python
# l = [1, True, 4, "blob", "boom", 5, 6, print]
# ```
#
# Compute the sum of the numerical elements of the list (do not use the `type()` function!). Is the sum what you expected? Do you notice anything interesting regarding the boolean constants?
#
# ```{margin}
# You can look up more on flow control statements here, such as the {ref}`pass statement <pass>`.
# ```
#
# **HINT**: Consider using a `pass` statement in the `except` block which circumvents (*ignores*) the raised error and continues the execution.
# ````{admonition} Answer
# :class: dropdown
#
# ```python
# l = [1, True, 4, "blob", "boom", 5, 6, print]
#
# res = 0
# for element in l:
# try:
# res += element
# except TypeError:
# pass
#
# print(res)
# ```
#
# ````
| notebooks/b_coding/Intro to Python/9_Handling_errors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
#
# *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
# <!--NAVIGATION-->
# < [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) | [Contents](Index.ipynb) | [Computation on Arrays: Broadcasting](02.05-Computation-on-arrays-broadcasting.ipynb) >
#
# <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.04-Computation-on-arrays-aggregates.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
# # Aggregations: Min, Max, and Everything In Between
# Often when faced with a large amount of data, a first step is to compute summary statistics for the data in question.
# Perhaps the most common summary statistics are the mean and standard deviation, which allow you to summarize the "typical" values in a dataset, but other aggregates are useful as well (the sum, product, median, minimum and maximum, quantiles, etc.).
#
# NumPy has fast built-in aggregation functions for working on arrays; we'll discuss and demonstrate some of them here.
# ## Summing the Values in an Array
#
# As a quick example, consider computing the sum of all values in an array.
# Python itself can do this using the built-in ``sum`` function:
import numpy as np
L = np.random.random(100)
sum(L)
# The syntax is quite similar to that of NumPy's ``sum`` function, and the result is the same in the simplest case:
np.sum(L)
# However, because it executes the operation in compiled code, NumPy's version of the operation is computed much more quickly:
big_array = np.random.rand(1000000)
# %timeit sum(big_array)
# %timeit np.sum(big_array)
# Be careful, though: the ``sum`` function and the ``np.sum`` function are not identical, which can sometimes lead to confusion!
# In particular, their optional arguments have different meanings, and ``np.sum`` is aware of multiple array dimensions, as we will see in the following section.
# ## Minimum and Maximum
#
# Similarly, Python has built-in ``min`` and ``max`` functions, used to find the minimum value and maximum value of any given array:
min(big_array), max(big_array)
# NumPy's corresponding functions have similar syntax, and again operate much more quickly:
np.min(big_array), np.max(big_array)
# %timeit min(big_array)
# %timeit np.min(big_array)
# For ``min``, ``max``, ``sum``, and several other NumPy aggregates, a shorter syntax is to use methods of the array object itself:
print(big_array.min(), big_array.max(), big_array.sum())
# Whenever possible, make sure that you are using the NumPy version of these aggregates when operating on NumPy arrays!
# ### Multi dimensional aggregates
#
# One common type of aggregation operation is an aggregate along a row or column.
# Say you have some data stored in a two-dimensional array:
M = np.random.random((3, 4))
print(M)
# By default, each NumPy aggregation function will return the aggregate over the entire array:
M.sum()
# Aggregation functions take an additional argument specifying the *axis* along which the aggregate is computed. For example, we can find the minimum value within each column by specifying ``axis=0``:
M.min(axis=0)
# The function returns four values, corresponding to the four columns of numbers.
#
# Similarly, we can find the maximum value within each row:
M.max(axis=1)
# The way the axis is specified here can be confusing to users coming from other languages.
# The ``axis`` keyword specifies the *dimension of the array that will be collapsed*, rather than the dimension that will be returned.
# So specifying ``axis=0`` means that the first axis will be collapsed: for two-dimensional arrays, this means that values within each column will be aggregated.
# ### Other aggregation functions
#
# NumPy provides many other aggregation functions, but we won't discuss them in detail here.
# Additionally, most aggregates have a ``NaN``-safe counterpart that computes the result while ignoring missing values, which are marked by the special IEEE floating-point ``NaN`` value (for a fuller discussion of missing data, see [Handling Missing Data](03.04-Missing-Values.ipynb)).
# Some of these ``NaN``-safe functions were not added until NumPy 1.8, so they will not be available in older NumPy versions.
#
# The following table provides a list of useful aggregation functions available in NumPy:
#
# |Function Name | NaN-safe Version | Description |
# |-------------------|---------------------|-----------------------------------------------|
# | ``np.sum`` | ``np.nansum`` | Compute sum of elements |
# | ``np.prod`` | ``np.nanprod`` | Compute product of elements |
# | ``np.mean`` | ``np.nanmean`` | Compute mean of elements |
# | ``np.std`` | ``np.nanstd`` | Compute standard deviation |
# | ``np.var`` | ``np.nanvar`` | Compute variance |
# | ``np.min`` | ``np.nanmin`` | Find minimum value |
# | ``np.max`` | ``np.nanmax`` | Find maximum value |
# | ``np.argmin`` | ``np.nanargmin`` | Find index of minimum value |
# | ``np.argmax`` | ``np.nanargmax`` | Find index of maximum value |
# | ``np.median`` | ``np.nanmedian`` | Compute median of elements |
# | ``np.percentile`` | ``np.nanpercentile``| Compute rank-based statistics of elements |
# | ``np.any`` | N/A | Evaluate whether any elements are true |
# | ``np.all`` | N/A | Evaluate whether all elements are true |
#
# We will see these aggregates often throughout the rest of the book.
# ## Example: What is the Average Height of US Presidents?
# Aggregates available in NumPy can be extremely useful for summarizing a set of values.
# As a simple example, let's consider the heights of all US presidents.
# This data is available in the file *president_heights.csv*, which is a simple comma-separated list of labels and values:
# !head -4 data/president_heights.csv
# We'll use the Pandas package, which we'll explore more fully in [Chapter 3](03.00-Introduction-to-Pandas.ipynb), to read the file and extract this information (note that the heights are measured in centimeters).
import pandas as pd
data = pd.read_csv('data/president_heights.csv')
heights = np.array(data['height(cm)'])
print(heights)
# Now that we have this data array, we can compute a variety of summary statistics:
print("Mean height: ", heights.mean())
print("Standard deviation:", heights.std())
print("Minimum height: ", heights.min())
print("Maximum height: ", heights.max())
# Note that in each case, the aggregation operation reduced the entire array to a single summarizing value, which gives us information about the distribution of values.
# We may also wish to compute quantiles:
print("25th percentile: ", np.percentile(heights, 25))
print("Median: ", np.median(heights))
print("75th percentile: ", np.percentile(heights, 75))
# We see that the median height of US presidents is 182 cm, or just shy of six feet.
#
# Of course, sometimes it's more useful to see a visual representation of this data, which we can accomplish using tools in Matplotlib (we'll discuss Matplotlib more fully in [Chapter 4](04.00-Introduction-To-Matplotlib.ipynb)). For example, this code generates the following chart:
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn; seaborn.set() # set plot style
plt.hist(heights)
plt.title('Height Distribution of US Presidents')
plt.xlabel('height (cm)')
plt.ylabel('number');
# These aggregates are some of the fundamental pieces of exploratory data analysis that we'll explore in more depth in later chapters of the book.
# <!--NAVIGATION-->
# < [Computation on NumPy Arrays: Universal Functions](02.03-Computation-on-arrays-ufuncs.ipynb) | [Contents](Index.ipynb) | [Computation on Arrays: Broadcasting](02.05-Computation-on-arrays-broadcasting.ipynb) >
#
# <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/02.04-Computation-on-arrays-aggregates.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
#
| vanderplas/PythonDataScienceHandbook-master/notebooks/02.04-Computation-on-arrays-aggregates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python for Bioinformatics
# Source: <NAME> [GitHub](https://github.com/tiagoantao), Book: [Bioinformatics with Python Cookbook Second Edition, published by Packt](https://github.com/PacktPublishing/Bioinformatics-with-Python-Cookbook-Second-Edition)
# ## Datasets
# - __NOTE__: You only need this if you do _not_ use the notebooks (as the notebooks will take care of the data
# - [Click here for the datasets used in the book](Datasets.ipynb)
# ## Python and the surrounding software ecology
# 
#
# * [Interfacing with R](00_Intro/Interfacing_R.ipynb)
# * [R Magic](00_Intro/R_magic.ipynb)
# ## Next Generation Sequencing
# 
#
# * [Accessing Databases](01_NGS/Accessing_Databases.ipynb)
# * [Basic Sequence Processing](01_NGS/Basic_Sequence_Processing.ipynb)
# * [Working with FASTQ files](01_NGS/Working_with_FASTQ.ipynb)
# * [Working with BAM files](01_NGS/Working_with_BAM.ipynb)
# * [Working with VCF files](01_NGS/Working_with_VCF.ipynb)
# * [Filtering SNPs](01_NGS/Filtering_SNPs.ipynb)
# ## Genomics
# 
#
# * [Working with high-quality reference genomes](02_Genomes/Reference_Genome.ipynb)
# * [Dealing with low-quality Reference Genomes](02_Genomes/Low_Quality.ipynb)
# * [Traversing Genome Annotations](02_Genomes/Annotations.ipynb)
# * [Extracting Genes from a reference using annotations](02_Genomes/Getting_Gene.ipynb)
# * [Finding orthologues with the Ensembl REST API](02_Genomes/Orthology.ipynb)
# * [Retrieving Gene Ontology information from Ensembl](02_Genomes/Gene_Ontology.ipynb)
# ## Population Genetics
# 
#
# * [Data Formats with PLINK](03_PopGen/Data_Formats.ipynb)
# * [The Genepop Format](03_PopGen/Genepop_Format.ipynb)
# * [Exploratory Analysis](03_PopGen/Exploratory_Analysis.ipynb)
# * [F statistics](03_PopGen/F-stats.ipynb)
# * [Principal Components Analysis (PCA)](03_PopGen/PCA.ipynb)
# * [Admixture/Structure](03_PopGen/Admixture.ipynb)
#
# ## Simulation in Population Genetics
# 
#
# * [Introducing Forward-time simulations](04_PopSim/Basic_SimuPOP.ipynb)
# * [Simulating selection](04_PopSim/Selection.ipynb)
# * [Doing population structure with island and stepping-stone models](04_PopSim/Pop_Structure.ipynb)
# * [Modeling complex demographic scenarios](04_PopSim/Complex.ipynb)
#
# ## Phylogenetics
#
# Note: image to be changed
# 
#
# * [Preparing the Ebola dataset](05_Phylo/Exploration.ipynb)
# * [Aligning genetic and genomic data](05_Phylo/Alignment.ipynb)
# * [Comparing sequences](05_Phylo/Comparison.ipynb)
# * [Reconstructing Phylogenetic trees](05_Phylo/Reconstruction.ipynb)
# * [Playing recursively with trees](05_Phylo/Trees.ipynb)
# * [Visualizing Phylogenetic data](05_Phylo/Visualization.ipynb)
#
# ## Proteomics
# 
#
# * [Finding a protein in multiple databases](06_Prot/Intro.ipynb)
# * [Introducing Bio.PDB](06_Prot/PDB.ipynb)
# * [Extracting more information from a PDB file](06_Prot/Stats.ipynb)
# * [Computing distances on a PDB file](06_Prot/Distance.ipynb)
# * [Doing geometric operations](06_Prot/Mass.ipynb)
# * [Implementing a basic PDB parser](06_Prot/Parser.ipynb)
# * [Parsing mmCIF files with Biopython](06_Prot/mmCIF.ipynb)
#
# The code for the PyMol recipe can be found on the pymol directory of the [github project](https://github.com/PacktPublishing/Bioinformatics-with-Python-Cookbook-Second-Edition)
# ## Advanced Python for Bioinformatics
# * [Setting the stage for high performance computing](08_Advanced/Intro.ipynb)
# * [Designing a poor-human concurrent executor](08_Advanced/Multiprocessing.ipynb)
# * [Doing parallel computing with IPython](08_Advanced/IPythonParallel.ipynb)
# * [Approximating the median in a large dataset](08_Advanced/Median.ipynb)
# * [Optimizing code with Cython and Numba](08_Advanced/Cython_Numba.ipynb)
# * [Programming with laziness](08_Advanced/Lazy.ipynb)
# * [Thinking with generators](08_Advanced/Generators.ipynb)
#
# ## Other topics
# 
#
# * [Inferring shared chromosomal segments with Germline](09_Other/Germline.ipynb)
# * [Accessing the Global Biodiversity Information Facility (GBIF) via REST](09_Other/GBIF.ipynb)
# * [Geo-referencing GBIF datasets](09_Other/GBIF_extra.ipynb)
# * [Plotting protein interactions with Cytoscape the hard way](09_Other/Cytoscape.ipynb)
# ## Advanced NGS Processing
# * [Preparing the dataset for analysis](10_Advanced_NGS/Preparation.ipynb)
# * [Using Mendelian error information for quality control](10_Advanced_NGS/Mendel.ipynb)
# * [Using decision trees to explore the data](10_Advanced_NGS/Decision_Trees.ipynb)
# * [Exploring the data with standard statistics](10_Advanced_NGS/Exploration.ipynb)
# * [Finding genomic features from sequencing annotations](10_Advanced_NGS/2L.ipynb)
#
| Welcome.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: py37
# ---
# + tags=[]
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
print(torch.__version__)
print(torch.cuda.is_available())
# -
# #### Check if is on CPU or GPU
#
# ```python
# # Tensor
# print(X.device)
#
# # Model
# print(next(model.parameters()).is_cuda)
# ```
# # Train data by GPU
# #### Sample Dataset
class RandomDataset(Dataset):
    """Toy dataset yielding rows of a pre-generated Gaussian tensor.

    Holds ``length`` samples, each a 1-D tensor of ``size`` features, all
    drawn once at construction time and served by row index.
    """
    def __init__(self, size, length):
        # Draw the whole (length, size) sample matrix up front.
        self.len = length
        self.data = torch.randn(length, size)
    def __len__(self):
        return self.len
    def __getitem__(self, index):
        # A sample is simply one row of the cached matrix.
        return self.data[index]
# +
# Create dataset: [100, 5] -- 100 samples of 5 features each
N = 100
input_size = 5
random_dataset = RandomDataset(
    size=input_size,
    length=N)
# Dataloader: batch_size=30 -> 4 batches (three of 30, last one of 10)
batch_size = 30
rand_loader = DataLoader(
    dataset=random_dataset,
    batch_size=batch_size, shuffle=True)
# + tags=[]
# Sanity-check the dataset shape and iterate once to show per-batch sizes.
print(f"Dataset shape: {random_dataset.data.size()}\n")
print(f"Dataloader length: {len(rand_loader)}")
for batch in rand_loader:
    print(batch.size())
# #### Sample model
class Model(nn.Module):
    """Single linear layer preceded by additive Gaussian noise, with device logging.

    The forward pass adds a freshly sampled hidden tensor to the input, projects
    it through ``nn.Linear``, and prints which device each tensor lives on --
    useful when experimenting with CPU vs. GPU placement.

    Fixes over the original:
    * ``forward`` referenced the *global* ``input_size`` when building the
      hidden tensor; it now uses the size stored on the instance.
    * ``GPU_ids`` used a shared mutable default (``[0]``); a ``None`` sentinel
      now produces a fresh ``[0]`` per instance.
    * CUDA device info is only queried when ``use_GPU`` is True, so CPU-mode
      forward passes no longer crash on machines without CUDA.
    """
    def __init__(self, input_size, output_size, use_GPU:bool=True, GPU_ids=None):
        super(Model, self).__init__()
        # Config
        self.use_GPU = use_GPU
        self.GPU_ids = [0] if GPU_ids is None else GPU_ids
        self.input_size = input_size  # needed by forward for the hidden tensor
        # Layers
        self.fc = nn.Linear(input_size, output_size)
    def forward(self, input):
        ## Hidden tensor: same shape as the input batch
        batch_size = input.size()[0]
        if self.use_GPU:
            hidden_tensor = torch.randn(batch_size, self.input_size).cuda()
        else:
            hidden_tensor = torch.randn(batch_size, self.input_size)
        ## FW
        input = input + hidden_tensor
        # (batch_size, input_size)
        output = self.fc(input)
        # (batch_size, output_size)
        ## Display Info -- only touch the CUDA API when a GPU is actually used
        if self.use_GPU:
            gpu_id = torch.cuda.current_device()
            gpu_name = torch.cuda.get_device_name()
            print(f"=== Using GPU[{gpu_id}] {gpu_name} ===")
        print(f"input on {input.device}")
        print(f"hidden_tensor on {hidden_tensor.device}")
        print(f"output on {output.device}")
        return output
# # Load to cpu
# + tags=[]
# Explicitly target the CPU for the first experiment.
device = torch.device("cpu")
print(f"Use {device}")
# + tags=[]
# Load model (CPU mode: use_GPU=False keeps all tensors off the GPU)
input_size = 5
output_size = 2
model = Model(input_size, output_size, use_GPU=False)
print(f"Is model using GPU: {next(model.parameters()).is_cuda}")
# + tags=[]
# Load data and run one forward pass per batch, printing tensor placement.
for batch in rand_loader:
    # Load data
    Xb = batch
    # Train
    yb_ = model(Xb)
    print(f"Outside: Xb on {Xb.device}, yb_ on {yb_.device}\n")
# -
# # Load to 1 GPU
# + tags=[]
# Fall back to CPU automatically when no CUDA device is present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Use {device}")
print(f"Number of GPUs: {torch.cuda.device_count()}")
# + tags=[]
# Load model and move its parameters to the selected device.
input_size = 5
output_size = 2
model = Model(input_size, output_size, use_GPU=True)
model.to(device)
print(f"Is model using GPU: {next(model.parameters()).is_cuda}")
# + tags=[]
# Load data: each batch must be moved to the same device as the model.
for batch in rand_loader:
    # Load data
    Xb = batch.to(device)
    # Train
    yb_ = model(Xb)
    print(f"Outside: Xb on {Xb.device}, yb_ on {yb_.device}\n")
# -
# #### Save model on 1 GPU
# Inspect the learned parameters before saving
model.state_dict()
# Save only the parameters (state_dict), the recommended PyTorch practice
torch.save(model.state_dict(), 'mymodel.pt')
# Load model: rebuild the architecture, then restore the saved parameters.
# NOTE(review): Model() defaults to use_GPU=True here; on a CUDA-less machine
# pass use_GPU=False and consider torch.load(..., map_location='cpu') -- verify.
model_load = Model(input_size, output_size)
model_load.load_state_dict(torch.load('mymodel.pt'))
| Pytorch/Notes/Notes_on_GPUs/CPU_vs_1GPU.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# for splitting the test data and train data
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# decision-tree estimator
from sklearn import tree
# for checking % of accuracy
from sklearn.metrics import accuracy_score
# for plotting graph
import matplotlib.pyplot as mat
# Interactively explore how the train/test split ratio affects decision-tree
# accuracy on the iris dataset: the user enters five test sizes, a tree is
# trained for each, and test size vs. accuracy is scatter-plotted at the end.
print ("------------------------------------DECISION TREE---------------------------------------")
iris=load_iris()
l1=[] # list to store test size
l2=[] # list to store accuracy %
for i in range (0,5):
    # test_size is a fraction in (0, 1), e.g. 0.1 keeps 10% of rows for testing
    p=float(input("enter test size: "))
    l1.append(p)
    print('l1 :',l1)
    x,y,z,a=train_test_split(iris.data,iris.target,test_size=p)
    """
    here x=train_iris (contains the training share of the data)
    y=test_iris (contains the remaining test share of the data)
    z=train_target (contains the training share of the targets)
    a=test_target (contains the remaining test share of the targets)"""
    # build a fresh decision tree each iteration
    clf= tree.DecisionTreeClassifier()
    # now training the data and target with the decision tree
    trained=clf.fit(x,z)
    output=trained.predict(y)
    print("trained output : ",output)
    print("actual output : ",a)
    check_pct=accuracy_score(a,output)
    print("accuracy check :",check_pct)
    l2.append(check_pct)
    print('l2 :',l2)
# plot accuracy against the chosen test sizes
mat.xlabel("test size")
mat.ylabel("accuracy score")
mat.grid(color='y')
mat.scatter(l1,l2)
mat.show()
# export the last tree from the loop in Graphviz format for visualisation
tree.export_graphviz(clf, out_file="decision_tree.dot", max_depth=6, feature_names=iris.feature_names, class_names=iris.target_names, label='all', filled=True, node_ids=True, rounded=True)
# -
| decision tree accurcy check .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import KFold
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
# %matplotlib inline
# %config InlineBackend.figure_formats = {'png', 'retina'}
# NOTE(review): pd, np and sm are used below but not imported in this cell;
# presumably imported in an earlier notebook cell -- verify before running standalone.
pd.options.mode.chained_assignment = None # default='warn'?
# data_key DataFrame
data_key = pd.read_csv('key.csv')
# data_train DataFrame
data_train = pd.read_csv('train.csv')
# data_weather DataFrame
data_weather = pd.read_csv('weather.csv')
# Weather "codesum" codes, split into precipitation-related codes vs. other phenomena.
rain_text = ['FC', 'TS', 'GR', 'RA', 'DZ', 'SN', 'SG', 'GS', 'PL', 'IC', 'FG', 'BR', 'UP', 'FG+']
other_text = ['HZ', 'FU', 'VA', 'DU', 'DS', 'PO', 'SA', 'SS', 'PY', 'SQ', 'DR', 'SH', 'FZ', 'MI', 'PR', 'BC', 'BL', 'VC' ]
# NOTE(review): the result of this replace() is discarded, so the '+' characters
# are NOT actually removed from the column -- confirm whether this was intended.
data_weather['codesum'].replace("+", "")
# Tokenise each codesum string; 4-character tokens are treated as two fused
# 2-character codes, and both halves are appended to that row's token list.
a = []
for i in range(len(data_weather['codesum'])):
    a.append(data_weather['codesum'].values[i].split(" "))
    # Appending while iterating is safe here: the appended halves have length 2,
    # so they never re-trigger the len == 4 branch.
    for i_text in a[i]:
        if len(i_text) == 4:
            a[i].append(i_text[:2])
            a[i].append(i_text[2:])
# Per-row indicator flags: 'nothing' (no notable weather), 'rain', 'other'.
data_weather["nothing"] = 1
data_weather["rain"] = 0
data_weather["other"] = 0
b = -1
for ls in a:
    b += 1
    for text in ls:
        if text in rain_text:
            data_weather.loc[b, 'rain'] = 1
            data_weather.loc[b, 'nothing'] = 0
        elif text in other_text:
            data_weather.loc[b,'other'] = 1
            data_weather.loc[b, 'nothing'] = 0
# Merge all data sources (weather + station/store key + training sales data).
df = pd.merge(data_weather, data_key)
# Move station_nbr to the last column position before the second merge.
station_nbr = df['station_nbr']
df.drop('station_nbr', axis=1, inplace=True)
df['station_nbr'] = station_nbr
df = pd.merge(df, data_train)
# Handle trace (' T') readings by substituting small constants.
# Remaining unparsed markers => 'M' and '-'
df['snowfall'][df['snowfall'] == ' T'] = 0.05
df['preciptotal'][df['preciptotal'] == ' T'] = 0.005
# Distinguish weekends (dayofweek 5 = Saturday, 6 = Sunday) from weekdays.
df['date'] = pd.to_datetime(df['date'])
df['week7'] = df['date'].dt.dayofweek
df['weekend'] = 0
df.loc[df['week7'] == 5, 'weekend'] = 1
df.loc[df['week7'] == 6, 'weekend'] = 1
# One sub-frame per weather station (1..20).
df1 = df[df['station_nbr'] == 1]; df11 = df[df['station_nbr'] == 11]
df2 = df[df['station_nbr'] == 2]; df12 = df[df['station_nbr'] == 12]
df3 = df[df['station_nbr'] == 3]; df13 = df[df['station_nbr'] == 13]
df4 = df[df['station_nbr'] == 4]; df14 = df[df['station_nbr'] == 14]
df5 = df[df['station_nbr'] == 5]; df15 = df[df['station_nbr'] == 15]
df6 = df[df['station_nbr'] == 6]; df16 = df[df['station_nbr'] == 16]
df7 = df[df['station_nbr'] == 7]; df17 = df[df['station_nbr'] == 17]
df8 = df[df['station_nbr'] == 8]; df18 = df[df['station_nbr'] == 18]
df9 = df[df['station_nbr'] == 9]; df19 = df[df['station_nbr'] == 19]
df10 = df[df['station_nbr'] == 10]; df20 = df[df['station_nbr'] == 20]
# -
# Coerce everything numeric; unparseable entries (e.g. 'M', '-') become NaN.
df20 = df20.apply(pd.to_numeric, errors = 'coerce')
df20.describe().iloc[:, :15]
# Columns not usable for station 20 = date, codesum, depart, sunrise, sunset, station_nbr
df20['store_nbr'].unique()
df20_drop_columns = ['date', 'codesum', 'depart', 'sunrise', 'sunset', 'station_nbr']
df20 = df20.drop(columns = df20_drop_columns)
# +
# Find the columns containing np.nan and fill them so that every value in the
# frame is populated.
df20_columns = df20.columns
# Categorical-style columns get their NaNs replaced with the mode; the
# remaining real-valued columns get their NaNs replaced with the mean.
for i in df20_columns:
    if (i == 'resultdir'):
        # resultdir is a direction code, hence mode rather than mean.
        df20[i].fillna(df20[i].mode()[0], inplace=True)
        print(df20[i].mode()[0])
    else:
        df20[i].fillna(df20[i].mean(), inplace=True)
# Possible now because every column was coerced to numeric above.
# Add relative humidity, derived from dew point and average temperature
# (temperatures converted from Fahrenheit to Celsius first).
# NOTE(review): the standard Magnus formula uses the coefficient 17.625 with
# 243.04, not 20.625 -- possible typo in the constant; verify the source.
df20['relative_humility'] = 100*(np.exp((20.625*((df20['dewpoint']-32)/1.8))/(243.04+((df20['dewpoint']-32)/1.8)))/np.exp((20.625*((df20['tavg']-32)/1.8))/(243.04+((df20['tavg']-32)/1.8))))
# Wind-chill ("feels like") temperature from tavg and average wind speed.
df20["windchill"] = 35.74 + 0.6220*df20["tavg"] - 35.75*(df20["avgspeed"]**0.20) + 0.4275*df20["tavg"]*(df20["avgspeed"]**0.20)
# Drop rows with zero units sold.
df20 = df20[df20['units'] != 0]
# +
# Full candidate formula kept for reference:
# 'np.log1p(units) ~ tmax + tmin + tavg + depart + dewpoint + wetbulb + heat + cool + sunrise + sunset + codesume + \
# snowfall + preciptotal + stnpressure + sealevel + resultspeed + resultdir + avgspeed + nothing + rain + other + \
# store_nbr + station_nbr + item_nbr'
# OLS on log1p(units) with the usable weather features for station 20.
model_df20 = sm.OLS.from_formula('np.log1p(units) ~ tmax + tmin + tavg + dewpoint + heat + cool + preciptotal + \
resultspeed + sealevel + snowfall + resultdir + avgspeed + C(nothing) + C(rain) + C(other) + C(item_nbr) + C(week7) + \
C(weekend) + relative_humility + windchill + 0', data = df20)
result_df20 = model_df20.fit()
print(result_df20.summary())
# +
# Same model but with standardised (scaled) numeric regressors, so that
# coefficient magnitudes become comparable across features.
model_df20 = sm.OLS.from_formula('np.log1p(units) ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(heat) + scale(cool) + \
scale(preciptotal) + scale(resultspeed) + scale(sealevel) + scale(snowfall)+ scale(resultdir) + scale(avgspeed) + C(nothing) + C(rain) + \
C(other) + C(item_nbr) + C(week7) + C(weekend) + scale(relative_humility) + scale(windchill) + 0', data = df20)
result_df20 = model_df20.fit()
print(result_df20.summary())
# -
# ANOVA: sort by p-value and keep terms significant at the 5% level.
anova_result_df20 = sm.stats.anova_lm(result_df20).sort_values(by=['PR(>F)'], ascending = False)
anova_result_df20[anova_result_df20['PR(>F)'] <= 0.05]
# Variance inflation factors to screen for multicollinearity.
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(df20.values, i) for i in range(df20.shape[1])]
vif["features"] = df20.columns
vif = vif.sort_values("VIF Factor").reset_index(drop=True)
vif
# Use only the features that overlap within the top-10 ranks of both screens:
# scale(relative_humility) + C(week7) + C(item_nbr)
# +
# resultspeed, avgspeed, C(weekend), preciptotal, C(other), C(week7), C(item_nbr),
model_df20 = sm.OLS.from_formula('np.log1p(units) ~ scale(relative_humility) + C(week7) + C(item_nbr) + 0', data = df20)
result_df20 = model_df20.fit()
print(result_df20.summary())
# +
# 20-fold cross-validated R^2 with shuffled folds.
# resultspeed, avgspeed, C(weekend), preciptotal, C(other), C(week7), C(item_nbr),
X20 = df20[['relative_humility', 'week7', 'item_nbr']]
y20 = df20['units']
model20 = LinearRegression()
cv20 = KFold(n_splits=20, shuffle=True, random_state=0)
cross_val_score(model20, X20, y20, scoring="r2", cv=cv20)
# +
# Same, but with ordered (unshuffled) folds.
# NOTE(review): newer scikit-learn raises when random_state is set while
# shuffle=False -- confirm the installed version tolerates this combination.
# resultspeed, avgspeed, C(weekend), preciptotal, C(other), C(week7), C(item_nbr),
X20 = df20[['relative_humility', 'week7', 'item_nbr']]
y20 = df20['units']
model20 = LinearRegression()
cv20 = KFold(n_splits=20, shuffle=False, random_state=0)
cross_val_score(model20, X20, y20, scoring="r2", cv=cv20)
# -
| DataScience_Project1_Predict_products_sales_in_Walmart/station_20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#import SignalDecomposition_Perf as tsperf
# +
def add_some_noise(x , p , min_sig, max_sig, e , f):
    """Return 1 with probability ``p`` when ``x`` lies in a band of its range.

    ``x`` is rescaled to ``delta = (x - min_sig) / (max_sig - min_sig)``; when
    delta falls inside [e, f] the function returns 1 with probability ``p``,
    otherwise it returns 0.  Used to derive binary exogenous columns from a
    signal.  Assumes max_sig > min_sig (division by zero otherwise).
    """
    delta = (x - min_sig) / (max_sig - min_sig)
    # Chained comparison replaces the nested C-style ifs; short-circuiting
    # keeps the RNG draw order identical (random() is only drawn in-band).
    if e <= delta <= f and np.random.random() < p:
        return 1
    return 0
def gen_trend(N , trendtype):
    """Generate a random trend component of length ``N``.

    Draws coefficients a, b, c and returns the scalar ``a`` for "constant",
    ``a*x + b`` for "linear", ``a*x^2 + b*x + c`` for "poly" (x on a [0, 1)
    grid), or an empty Series for any other ``trendtype``.
    """
    # Coefficient draw order matters for reproducibility under a fixed seed.
    coef_a = 100 * (2 * np.random.random() - 1)
    coef_b = 10 * (2 * np.random.random() - 1)
    coef_c = 2 * np.random.random()
    print("TREND", coef_a, coef_b, coef_c)
    result = pd.Series()
    if trendtype == "constant":
        result = coef_a
    elif trendtype == "linear":
        grid = np.arange(0, N) / N
        result = coef_a * grid + coef_b
    elif trendtype == "poly":
        grid = np.arange(0, N) / N
        result = coef_a * grid * grid + coef_b * grid + coef_c
    # result.plot();
    return result
def gen_cycle(N , cycle_length):
    """Generate a random periodic component of length ``N``.

    A pattern of ``cycle_length`` random levels in [0, 1) is drawn once and
    tiled across ``N`` positions.  Returns the scalar 0 when ``cycle_length``
    is 0, and an empty Series for negative lengths.
    """
    result = pd.Series()
    if cycle_length > 0:
        levels = np.random.randint(0, cycle_length, size=(cycle_length, 1)) / cycle_length
        positions = pd.Series(np.arange(0, N) % cycle_length)
        result = positions.apply(lambda pos: levels[int(pos)][0])
    if cycle_length == 0:
        result = 0
    return result
def generate_random_TS_2(N , FREQ, seed, trendtype, cycle_length, transform, sigma = 1.0, exog_count = 20) :
    """Build a synthetic time-series dataset spec (trend + cycle + noise).

    Parameters:
        N            -- number of observations.
        FREQ         -- pandas frequency string for the date index (e.g. 'D').
        seed         -- numpy RNG seed, for reproducibility.
        trendtype    -- "constant", "linear" or "poly" (see gen_trend).
        cycle_length -- period of the cyclic component; 0 disables it.
        transform    -- "exp" applies a negative-exponential transform to the signal.
        sigma        -- standard deviation of the Gaussian noise term.
        exog_count   -- number of binary exogenous columns derived from the signal.

    Returns a tsds.cTimeSeriesDatasetSpec with full/past/future frames populated.
    """
    tsspec = tsds.cTimeSeriesDatasetSpec();
    # Encode every generation parameter into the dataset name.
    tsspec.mName = "Signal_" + str(N) + "_" + str(FREQ) + "_" + str(seed) + "_" + str(trendtype) + "_" + str(cycle_length) + "_" + str(transform) + "_" + str(sigma) + "_" + str(exog_count) ;
    print("GENERATING_RANDOM_DATASET" , tsspec.mName);
    tsspec.mDescription = "Random generated dataset";
    np.random.seed(seed);
    df_train = pd.DataFrame();
    #df_train['Date'] = np.arange(0,N)
    '''
    http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    DateOffset objects
    In the preceding examples, we created DatetimeIndex objects at various frequencies by passing in frequency strings
    like "M", "W", and "BM" to the freq keyword. Under the hood, these frequency strings are being translated into an
    instance of pandas DateOffset, which represents a regular frequency increment.
    Specific offset logic like "month", "business day", or "one hour" is represented in its various subclasses.
    '''
    df_train['Date'] = pd.date_range('2000-1-1', periods=N, freq=FREQ)
    # Signal = trend + cycle + Gaussian noise, each kept as its own column.
    df_train['GeneratedTrend'] = gen_trend(N , trendtype);
    df_train['GeneratedCycle'] = gen_cycle(N , cycle_length);
    df_train['Noise'] = np.random.randn(N) * sigma;
    df_train['Signal'] = df_train['GeneratedTrend'] + df_train['GeneratedCycle'] + df_train['Noise']
    min_sig = df_train['Signal'].min();
    max_sig = df_train['Signal'].max();
    print(df_train.info())
    # Derive exog_count binary exogenous columns: exog_k fires (with prob 0.1)
    # when the signal lies in the band [k/exog_count, (k+3)/exog_count].
    tsspec.mExogenousVariables = [];
    for e in range(exog_count):
        label = "exog_" + str(e+1);
        df_train[label] = df_train['Signal'].apply(lambda x : add_some_noise(x , 0.1 ,
                                                                             min_sig,
                                                                             max_sig,
                                                                             e/exog_count ,
                                                                             (e+3)/exog_count ));
        tsspec.mExogenousVariables = tsspec.mExogenousVariables + [ label ];
    # this is the full dataset; it must contain the future exogenous data too
    tsspec.mExogenousDataFrame = df_train;
    # Shift the signal to be >= 1 before the optional exponential transform.
    pos_signal = df_train['Signal'] - min_sig + 1.0;
    if(transform == "exp"):
        df_train['Signal'] = np.exp(-pos_signal)
    # df_train.to_csv(tsspec.mName + ".csv");
    tsspec.mTimeVar = "Date";
    tsspec.mSignalVar = "Signal";
    # Forecast horizon: 12 steps, capped at half the series length.
    tsspec.mHorizon = 12;
    if(tsspec.mHorizon > (N//2)):
        tsspec.mHorizon = N // 2;
    tsspec.mFullDataset = df_train;
    tsspec.mFullDataset[tsspec.mName] = tsspec.mFullDataset['Signal'];
    # Past = everything except the last mHorizon rows; future = those rows.
    tsspec.mPastData = df_train[:-tsspec.mHorizon];
    tsspec.mFutureData = df_train.tail(tsspec.mHorizon);
    return tsspec
# +
# %matplotlib inline
def plot_dataset(idataset):
    """Plot the past (training) slice of a generated dataset: the signal
    together with its generating trend and cycle components."""
    frame = idataset.mPastData
    series_names = [idataset.mSignalVar, 'GeneratedTrend', 'GeneratedCycle']
    frame.plot(idataset.mTimeVar, series_names, figsize=(32, 16))
def process_dataset(idataset):
    """Train a pyaf forecast engine on the past data of *idataset*.

    Uses the dataset's declared time/signal columns, its horizon, and its
    exogenous data, then prints the fitted model information.
    (Cleaned up: removed unused locals N and the df1 alias, and the
    C-style trailing semicolons.)
    """
    df = idataset.mPastData
    # df.to_csv("outputs/rand_exogenous.csv")
    H = idataset.mHorizon
    lEngine = autof.cForecastEngine()
    lExogenousData = (idataset.mExogenousDataFrame, idataset.mExogenousVariables)
    lEngine.train(df, idataset.mTimeVar, idataset.mSignalVar, H, lExogenousData)
    lEngine.getModelInfo()
    # lEngine.standrdPlots(name = "outputs/my_exog_" + str(nbex) + "_" + str(n))
# -
# Generate a 128-point daily series (polynomial trend, 20-step cycle, sigma=0.9
# noise, 5 exogenous variables) and plot its training portion.
dataset = generate_random_TS_2(N = 128 , FREQ = 'D', seed = 20, trendtype = "poly", cycle_length = 20, transform = "", sigma = 0.9, exog_count = 5);
plot_dataset(dataset)
| notebooks_sandbox/artificial_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
# %matplotlib inline
from sklearn import datasets, tree, metrics
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# -
import sklearn; print(sklearn.__version__)
# # Using the readings, try and create a RandomForestClassifier for the iris dataset
# Load iris and keep only the last two features (columns 2 and 3).
iris = datasets.load_iris()
iris.keys()
X = iris.data[:,2:]
y = iris.target
# random_state fixes the RNG so the split is reproducible across runs;
# stratify=y keeps the class proportions equal in train and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42, test_size=0.25,train_size=0.75)
# +
#What is this doing in the moon example exactly?
#X, y = make_moons(n_samples=100, noise=0.25, random_state=3)
# -
# Random forest of 5 trees; fixed random_state makes the result reproducible.
forest = RandomForestClassifier(n_estimators=5, random_state=100)
forest.fit(X_train, y_train)
print("accuracy on training set: %f" % forest.score(X_train, y_train))
print("accuracy on test set: %f" % forest.score(X_test, y_test))
# # Using a 25/75 training/test split, compare the results with the original decision tree model and describe the result to the best of your ability in your PR
# BUG FIX: the original called train_test_split(x, y, ...) with a lowercase
# `x` that was never defined (NameError); use the feature matrix X instead.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.25,train_size=0.75)
dt = tree.DecisionTreeClassifier()
dt = dt.fit(X_train,y_train)
y_pred=dt.predict(X_test)
Accuracy_score = metrics.accuracy_score(y_test, y_pred)
Accuracy_score
# +
#Comments on RandomForestClassifiers & Original Decision Tree Model
#While the Random Forest result is consistent, varying only slightly depending on how you choose the random_state or the n_estimators,
#the result of the original decision tree model varies a lot.
#The random_state defines how random the versions of the data are that the modelling takes into consideration, and
#the n_estimators regulates how many "random" datasets are used. It's fascinating to see how this makes the
#result so much more consistent than the original decision tree model.
# +
#General commets on the homework
#I really enjoyed this homework and it really helped me understand, what is going on under the hood.
#I found this reading while I was doing the homework. It looks nice to go deeper? Do you know the
#guy? https://github.com/amueller/introduction_to_ml_with_python
#I feel I now need practice on real life dirty data sets, to fully understand how predictions models
#can work. I take my comments back, that I can't see how I can implement this into my reporting. I can. But how
#can I do this technically? i.e. with the data on PERM visas? Say input nationality, wage, lawyer, job title, and get a reply what the chances could be of
#getting a work visa? I also feel a little shaky on how I need to prep my data to feed in it into the predictor
#correctly.
# +
#Comments on classifier
#Questions:
#Not sure why it's 10fold cross validation, cv is set at 5?
#Why are we predicting the
# -
| 09 More Trees/homework/Skinner_Barnaby_9_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### gQuant Tutorial
# First import all the necessary modules.
# +
import sys; sys.path.insert(0, '..')
import os
import warnings
import ipywidgets as widgets
from gquant.dataframe_flow import TaskGraph
warnings.simplefilter("ignore")
# -
# In this tutorial, we are going to use gQuant to do a simple quant job. The task is fully described in a yaml file
# !head -n 31 ../task_example/simple_trade.yaml
# The yaml file describes the computation task as a graph, which we can visualize:
task_graph = TaskGraph.load_taskgraph('../task_example/simple_trade.yaml')
task_graph.draw(show='ipynb')
# We define a method to organize the output images
def plot_figures(o, symbol):
    """Arrange the bar, cumulative-return and signal figures into one VBox.

    o is the task-graph output tuple: (sharpe number, cumulative-return
    figure, bar figure, signals figure).  All figures are resized to a
    common width/height, labelled with *symbol*, and trimmed to a single
    axis each before being stacked vertically.
    """
    sharpe = o[0]
    pnl_fig = o[1]
    bars = o[2]
    signal_fig = o[3]
    width, height = '1200px', '400px'
    # Give every figure the same footprint.
    for fig in (bars, pnl_fig, signal_fig):
        fig.layout.height = height
        fig.layout.width = width
    pnl_fig.title = 'P & L %.3f' % (sharpe)
    # Tag the marks with the stock symbol for the legend.
    bars.marks[0].labels = [symbol]
    pnl_fig.marks[0].labels = [symbol]
    # Keep only one axis per figure to reduce clutter.
    bars.axes = [bars.axes[1]]
    pnl_fig.axes = [pnl_fig.axes[0]]
    return widgets.VBox([bars, pnl_fig, signal_fig])
# We load the symbol name to symbol id mapping file:
# Single task node that reads the security master (asset_name -> asset id).
node_stockSymbol = {"id": "node_stockSymbol",
                    "type": "StockNameLoader",
                    "conf": {"path": "./data/security_master.csv.gz"},
                    "inputs": []}
name_graph = TaskGraph([node_stockSymbol])
# Run the one-node graph and build a {asset_name: asset_id} lookup dict.
list_stocks = name_graph.run(outputs=['node_stockSymbol'])[0].to_pandas().set_index('asset_name').to_dict()['asset']
# Evaluate the output nodes and plot the results:
symbol = 'REXX'
# First run populates the HDF5 cache ("save"); later runs reuse it ("load").
action = "load" if os.path.isfile('./.cache/load_csv_data.hdf5') else "save"
o = task_graph.run(
    outputs=['node_sharpeRatio', 'node_cumlativeReturn',
             'node_barplot', 'node_lineplot', 'load_csv_data'],
    replace={'load_csv_data': {action: True},
             'node_barplot': {'conf': {"points": 300}},
             'node_assetFilter':
                 {'conf': {'asset': list_stocks[symbol]}}})
# Keep the loaded dataframe so subsequent runs can skip the CSV parse.
cached_input = o[4]
plot_figures(o, symbol)
# Change the strategy parameters
# Re-run the same graph with different moving-average windows, reusing the
# cached dataframe instead of re-reading the CSV.
o = task_graph.run(
    outputs=['node_sharpeRatio', 'node_cumlativeReturn',
             'node_barplot', 'node_lineplot'],
    replace={'load_csv_data': {"load": cached_input},
             'node_barplot': {'conf': {"points": 200}},
             'node_ma_strategy': {'conf': {'fast': 1, 'slow': 10}},
             'node_assetFilter': {'conf': {'asset': list_stocks[symbol]}}})
figure_combo = plot_figures(o, symbol)
figure_combo
# +
# Interactive controls: pick a stock and a (fast, slow) moving-average pair.
add_stock_selector = widgets.Dropdown(options=list_stocks.keys(),
                                      value=None, description="Add stock")
para_selector = widgets.IntRangeSlider(value=[10, 30],
                                       min=3,
                                       max=60,
                                       step=1,
                                       description="MA:",
                                       disabled=False,
                                       continuous_update=False,
                                       orientation='horizontal',
                                       readout=True)
def para_selection(*stocks):
    # Widget callback: re-run the task graph with the currently selected
    # stock / MA windows and create or refresh the figure box in `w`.
    with out:
        symbol = add_stock_selector.value
        para1 = para_selector.value[0]  # fast moving-average window
        para2 = para_selector.value[1]  # slow moving-average window
        o = task_graph.run(
            outputs=['node_sharpeRatio', 'node_cumlativeReturn',
                     'node_barplot', 'node_lineplot'],
            replace={'load_csv_data': {"load": cached_input},
                     'node_barplot': {'conf': {"points": 200}},
                     'node_ma_strategy': {'conf': {'fast': para1, 'slow': para2}},
                     'node_assetFilter': {'conf': {'asset': list_stocks[symbol]}}})
        figure_combo = plot_figures(o, symbol)
        if (len(w.children) < 2):
            # First render: attach the figure box under the selector row.
            w.children = (w.children[0], figure_combo,)
        else:
            # Subsequent renders: swap the marks in place to avoid rebuilding
            # the widgets (keeps the UI from flickering).
            w.children[1].children[1].marks = figure_combo.children[1].marks
            w.children[1].children[2].marks = figure_combo.children[2].marks
            w.children[1].children[1].title = 'P & L %.3f' % (o[0])
out = widgets.Output(layout={'border': '1px solid black'})
add_stock_selector.observe(para_selection, 'value')
para_selector.observe(para_selection, 'value')
selectors = widgets.HBox([add_stock_selector, para_selector])
w = widgets.VBox([selectors])
w
# -
| notebooks/02_single_stock_trade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 ('base')
# language: python
# name: python3
# ---
import pandas as pd
import os  # FIX: os was used below (os.path.exists / os.makedirs) but never imported

test_df = pd.read_csv('../ag_news_csv/test.csv')
train_df = pd.read_csv('../ag_news_csv/train.csv')
# NOTE(review): y_train / train_text / y_test / test_text / unsup_text are not
# defined in any committed cell of this notebook -- they are presumably built
# from train_df/test_df in a cell that was not exported.  TODO: confirm.
train_sup_dict = {'label': y_train, 'sentence': train_text}
test_dict = {'label': y_test, 'sentence': test_text}
unsup_dict = {'sentence': unsup_text}
train_sup_df = pd.DataFrame.from_dict(train_sup_dict)
test_df = pd.DataFrame.from_dict(test_dict)
unsup_df = pd.DataFrame.from_dict(unsup_dict)
unsup_df.head
train_sup_df.to_csv('train_sup.csv', index=False)
unsup_df.to_csv('unsup.csv', index=False)
test_df.to_csv('test.csv', index=False)
# +
# The cells above only dumped the raw splits to CSV.  Now reshape them into
# what the model expects: ~70000 unsupervised rows, 20/500/2500 supervised
# training rows, and an unchanged test set.  We sample 2500 training rows
# (balanced across the two labels), down-sample them to 500 and 20, and mix
# the remainder into the unsupervised pool.
from sklearn.utils import shuffle
path = "./model_require/"
folder = os.path.exists(path)
if not folder:
    os.makedirs(path)
train = pd.read_csv('train_sup.csv')
# shuffle (rather than sample) so the leftover rows are easy to split off
# as the unsupervised remainder.
label0, label1 = shuffle(train[train['label'].isin([0])]), shuffle(
    train[train['label'].isin([1])])
train0, unsup0 = label0[:1250], label0[1250:]  # isin takes a list, hence [0]
train1, unsup1 = label1[:1250], label1[1250:]
shuffle(pd.concat([train0, train1], ignore_index=True)
        ).to_csv(path+'train_2500.csv', index=False)
shuffle(pd.concat([train0.sample(250), train1.sample(250)], ignore_index=True)
        ).to_csv(path+'train_500.csv', index=False)
shuffle(pd.concat([train0.sample(10), train1.sample(10)], ignore_index=True)
        ).to_csv(path+'train_20.csv', index=False)
# -
# Top up the unsupervised pool with 10000 leftover rows of each label.
unsup = pd.read_csv('unsup.csv')
unsup = pd.concat([unsup, pd.DataFrame(pd.concat(
    [unsup0[:10000], unsup1[:10000]], ignore_index=True), columns=['sentence'])], ignore_index=True)
shuffle(unsup).to_csv(path+'unsup.csv', index=False)
test = pd.read_csv('test.csv')
shuffle(test).to_csv(path+'test.csv', index=False)  # shuffle the test rows too
| ag_news/process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd02c304d0744c63eb0cf275252f889b831c4efa7f5cd03bb8eb1791a6cbed8475e
# ---
# ## Simple Linear Regression
# +
# Simple Linear Regression

# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Import Dataset
dataset = pd.read_csv('../../data/datasets/Part 2 - Regression/Section 4 - Simple Linear Regression/Salary_Data.csv')
# Feature matrix: every column except the last one.
X = dataset.iloc[:, :-1].values
# Target vector: second column (salary).
y = dataset.iloc[:, 1].values

# Divide the data set into training set and test set (1/3 held out).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)

# Variable scaling -- left disabled in this notebook (the string below is a
# no-op expression, kept as a reminder of how scaling would be applied).
"""
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
"""
# +
# Create Simple Linear Regression model with training set
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(X_train, y_train)
# -
# ## Predict the test set
y_pred = regression.predict(X_test)
print(y_pred)
# ## View training results
# Train: scatter the training points and overlay the fitted line.
plt.scatter(X_train, y_train, color = "red")
plt.plot(X_train, regression.predict(X_train), color = "blue")
plt.title("Sueldo vs Años de Experiencia (Conjunto de Entrenamiento)")
plt.xlabel("Años de Experiencia")
plt.ylabel("Sueldo (en $)")
plt.show()
# Test: same fitted line (trained on X_train) against the held-out points.
plt.scatter(X_test, y_test, color = "red")
plt.plot(X_train, regression.predict(X_train), color = "blue")
plt.title("Sueldo vs Años de Experiencia (Conjunto de Testing)")
plt.xlabel("Años de Experiencia")
plt.ylabel("Sueldo (en $)")
plt.show()
| notebooks/Module2-Regression/Simple-Linear-Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/victoria2012/test_deeplearning/blob/master/NaverSentimentAnalysis_loadmodel_LSTM_t.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="xR_shKZXdJQY"
import tensorflow as tf
# + colab={"base_uri": "https://localhost:8080/"} id="tNu0SJ9jd793" outputId="1d8c7e90-6a73-460b-ff5a-92c000088a3f"
loaded_model = tf.keras.models.load_model('./NaverSentimentAnalysis_LSTM_t.h5')
loaded_model
# + [markdown] id="cZ7-WI9Mfwce"
# ##### 과정 : 분류기 --> 불용어 제거(stopwords) --> 사전을 근거로 숫자화 --> padding --> predict
# + colab={"base_uri": "https://localhost:8080/"} id="SMejGshjgdKD" outputId="150d761d-01c2-4718-c3c8-deafdc0f6c74"
# !python -m pip install konlpy
# + id="fxYhYFCEfKPW"
import konlpy
# + id="deo3aDStgk_F"
okt = konlpy.tag.Okt() # 태그 분류기 초기화
# + id="Sl4JFXQknj_M"
import pickle
# + colab={"base_uri": "https://localhost:8080/"} id="pX1B-h1Jnm-p" outputId="4f60cf8f-3602-4809-fbb6-702a0953aa3a"
stopwords = pickle.load(open('./stopwords.pkl','rb'))
print(stopwords)
# + id="eV-miVDbhYLl"
# 불용어 처리의 다른 방법을 사용해 보자
# words = list()
# for tok in temp_x:
# if tok not in stopwords:
# words.append(tok)
# + colab={"base_uri": "https://localhost:8080/"} id="7ve_-wOYg2pc" outputId="3625a681-cd43-4288-f04a-ffbc0cf9caad"
# 감독이 예술이네. --> 웹 사용자 입력한 문자는 컴퓨터가 바로 해석할 수 없기 때문에 숫자로 바꿔서 넣어줘야 함. 긍정인지 부정인지 확인.
new_sentence = okt.morphs('감독이 예술이네.', stem=True) # 단어를 분류하고 원형을 찾아주는 작업. 딥러닝의 상태와 똑같이 만들어주기 위해 stem=True 넣어줌
new_sentence = [tok for tok in new_sentence if tok not in stopwords] # 리스트를 의미하는 대괄호[] 안에 for문과 if문을 넣어주고 맨 왼쪽에는 저장할 값을 지정
new_sentence
# + colab={"base_uri": "https://localhost:8080/"} id="RXSG3IVcmQ68" outputId="a679b485-1a51-4aed-d8e7-6d755613b3b9"
# pickle을 불러와서 토큰화하는 작업 진행
tokenizer = pickle.load(open('./tokenizer.pkl','rb')) # rb는 read binary의 뜻
tokenizer
# + colab={"base_uri": "https://localhost:8080/"} id="UK3TenXipXt1" outputId="ab407f30-6b8b-4e4b-c21d-2f7be036333a"
vob_size = len(tokenizer.word_index)
vob_size
# + colab={"base_uri": "https://localhost:8080/"} id="sLEhG1hxp-uN" outputId="8acb7b79-df87-4942-b002-8e3e8d354985"
encoded = tokenizer.texts_to_sequences([new_sentence]) # 대괄호 안에 넣어줘야 함.
encoded
# + colab={"base_uri": "https://localhost:8080/"} id="lvg9U-LnqVTT" outputId="0f1cb3db-fce6-43c9-de98-4e401e9f85de"
pad_new = tf.keras.preprocessing.sequence.pad_sequences(encoded, maxlen=50)
pad_new
# + colab={"base_uri": "https://localhost:8080/"} id="NlZmwb75eQhc" outputId="7410df56-77f3-4221-9105-9d7e1788b706"
# ['감독이 예술이네.']
score = loaded_model.predict(pad_new) # 무엇이 무엇을 predict하는가? 영어의 문법과 연관해서 생각해 볼 것
score
# + colab={"base_uri": "https://localhost:8080/"} id="wqR04AS7riqc" outputId="ec82e74f-d2fa-40e6-ba2b-379cfcfce25d"
if(score > 0.5):
print('긍정', score*100)
else:
print('부정', score*100)
# + id="M47po75Zrzar"
okt = konlpy.tag.Okt()
new_sentence = okt.morphs('감독이 예술이네.', stem=True)
new_sentence = [ tok for tok in new_sentence if tok not in stopwords ]
encoded = tokenizer.texts_to_sequences([new_sentence])
pad_new = tf.keras.preprocessing.sequence.pad_sequences(encoded, maxlen=50)
score = loaded_model.predict(pad_new)
# + id="YQfqEWe6tkgc"
def sentiment_predict(sentence):
    """Return the LSTM model's sentiment score for a Korean sentence.

    Mirrors the preprocessing used at training time: Okt stem-tokenize,
    drop stopwords, map tokens to indices with the saved tokenizer, and
    pad to length 50 before calling the loaded model.
    """
    tagger = konlpy.tag.Okt()
    tokens = tagger.morphs(sentence, stem=True)
    filtered = [t for t in tokens if t not in stopwords]  # remove stopwords
    seq = tokenizer.texts_to_sequences([filtered])
    padded = tf.keras.preprocessing.sequence.pad_sequences(seq, maxlen=50)
    return loaded_model.predict(padded)
# + colab={"base_uri": "https://localhost:8080/"} id="_zZZXyUou_Aq" outputId="46a62ab7-1367-4b1a-dc5f-b1a3c3ba0b1c"
words = '이 영화 꽝이네.'
sentiment_predict(words)
# + colab={"base_uri": "https://localhost:8080/"} id="zM3VfCiRvGCi" outputId="e65c94fe-f618-4028-d093-0af9ed50e421"
words = '액션도 좋고 줄거리도 재밌어요.'
sentiment_predict(words)
# + id="0mGptroYvsYz"
| NaverSentimentAnalysis_loadmodel_LSTM_t.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf

mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test,y_test)=mnist.load_data()

# Scale the pixel values so training is better conditioned.
x_train = tf.keras.utils.normalize(x_train,axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(128,activation=tf.nn.relu))
# FIX: output layer must have one unit per digit class (10), not 128.
# The original 128-unit softmax still trained (labels 0-9 index into the
# first 10 logits) but left 118 dead outputs distorting the softmax.
model.add(tf.keras.layers.Dense(10,activation=tf.nn.softmax))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3)
# -
# Evaluate on held-out data, then round-trip the model through disk.
val_loss, val_acc = model.evaluate(x_test, y_test)
print(val_loss, val_acc)
model.save('num_reader.model')
new_model = tf.keras.models.load_model('num_reader.model')
predictions = new_model.predict(x_test)
# +
#print(predictions)
# -
import numpy as np
# argmax over the class probabilities gives the predicted digit.
print(np.argmax(predictions[7]))
# +
import matplotlib.pyplot as plt
plt.imshow(x_test[7], cmap = plt.cm.binary)
plt.show()
#print(x_train[0])
| Sentdex/Mnist/.ipynb_checkpoints/Mnist-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <table>
# <tr><td><img style="height: 150px;" src="images/geo_hydro1.jpg"></td>
# <td bgcolor="#FFFFFF">
# <p style="font-size: xx-large; font-weight: 900; line-height: 100%">AG Dynamics of the Earth</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);">Jupyter notebooks</p>
# <p style="font-size: large; color: rgba(0,0,0,0.5);"><NAME></p>
# </td>
# </tr>
# </table>
# # Dynamic systems in Geosciences
# ----
# *<NAME>,
# Geophysics Section,
# Institute of Geological Sciences,
# Freie Universität Berlin,
# Germany*
#
# **Additional jupyter notebooks for lecture**
#
# Lectures:
#
# - Lecture 1: **Introduction**
# - [Coding](Dynamics_lab01_coding.ipynb)
# - [Basic equations](Dynamics_lab01_BasicEquations.ipynb)
# - [openFOAM](Dynamics_lab01_openFoam.ipynb)
# - [ParaView](Dynamics_lab01_ParaView.ipynb)
#
# - Lecture 2: **Flux**
# - [Flux equations](Dynamics_lab02_flux.ipynb)
# - [Heat: Fourier law]
# - [Water: Darcy law]
# - [Particle: Fick law]
# - [Current: Ohm law]
# - [Sediment: Sediment law]
# - [Ice: Shallow-ice law]
#
# - Lecture 3: **Continuity**
# - [Continuity equation](Dynamics_lab03_continuity.ipynb)
# - [1D advection](Dynamics_lab03_advection1D.ipynb)
# - [1D diffusion](Dynamics_lab03_diffusion1D.ipynb)
# - [1D reaction](Dynamics_lab03_reaction1D.ipynb)
# - [openFOAM: laplacianFoam (heat diffusion)](Dynamics_lab03_HeatDiffusion_laplacianFoam1.ipynb)
# - [openFOAM: scalarTransportFoam (heat transport)](Dynamics_lab03_HeatTransport_scalarTransportFoam.ipynb)
# - [openFOAM: example 2D heat blob]
#
# - Lecture 4: **Heat**
# - [Radial profiles](Dynamics_lab04_RadialProfiles.ipynb)
# - [Seasons](Dynamics_lab04_Seasons.ipynb)
#
# - Lecture 5: **Gravity**
# - [Radial gravity profile](Dynamics_lab05_RadialGravity.ipynb)
# - [Normal gravity](Dynamics_lab05_theoretical_g.ipynb)
# - [Spherical harmonics](Dynamics_lab05_Ynm.ipynb)
# - [Gravity field from Stokes coefficients](Dynamics_lab05_Ynm_examples.ipynb)
#
# - Lecture 6: **Motion**
# - [Equation of motion]
#
# - Lecture 7: **Material**
# - [Elastic]
# - [Viscous]
# - [Viscoelastic]
#
# - Lecture 8: **Elastic material**
# - [Elastic]
# - [openFOAM: solidDisplacementFoam (plate with hole)](Dynamics_lab07_HoleInPlate_solidDisplacementFoam.ipynb)
#
# - Lecture 9: **Viscous material**
# - [Navier-Stokes equation](Dynamics_lab09_NavierStokes.ipynb)
# - [Hagen-Poiseuille](Dynamics_lab09_HagenPoiseuille.ipynb)
# - [openFOAM: icoFoam (Hagen-Poiseuille)](Dynamics_lab09_HagenPoiseuille_icoFoam.ipynb)
# - [openFOAM: simpleFoam (Hagen-Poiseuille)](Dynamics_lab09_HagenPoiseuille_simpleFoam.ipynb)
# - [openFOAM: icoScalarTransportFoam (Hagen-Poiseuille)](Dynamics_lab09_HagenPoiseuille_icoScalarTransportFoam.ipynb)
# - [Mantle convection]
# - [openFOAM: convectiveFoam]
#
# - Lecture 10: **Viscoelastic material**
# - [Heaviside loading]
#
# - Lecture 11: **Reactions**
# - [Diffusion and advection](Dynamics_lab11_Diffusion_Advection.ipynb)
# - [One reactant](Dynamics_lab11_One_Reactant.ipynb)
# - [Two reactants](Dynamics_lab11_Two_Reactants.ipynb)
# - [Three reactants](Dynamics_lab11_Three_Reactants.ipynb)
#
# - Lecture 12: **Shallow water**
# - [Shallow-water equations](Dynamics_lab12_SWE.ipynb)
# - [openFOAM: shallowWaterFoam (Dam break)](Dynamics_lab12_DamBreak_shallowWaterFoam.ipynb)
# - [Analytical examples for dam break](Dynamics_lab12_DamBreak.ipynb)
# - [1D advection revisited](Dynamics_lab12_advection1D_FiniteVolume_LLF.ipynb)
# - [1D Numerical solution SWE](Dynamics_lab12_SWE1DwithLocalLaxFriedrich.ipynb)
# - [2D advection revisited](Dynamics_lab12_advection2D_FiniteVolume_LLF.ipynb)
# - [2D Numerical solution SWE](Dynamics_lab12_SWE2DwithLocalLaxFriedrich.ipynb)
# ----
| .ipynb_checkpoints/index-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
#Store Data Variables
import json
with open('feature_data.json', 'r') as f:
features = json.load(f)
from scipy.io import loadmat
train_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['train_idx'].flatten()
query_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['query_idx'].flatten()
labels = loadmat('cuhk03_new_protocol_config_labeled.mat')['labels'].flatten()
gallery_idxs = loadmat('cuhk03_new_protocol_config_labeled.mat')['gallery_idx'].flatten()
filelist = loadmat('cuhk03_new_protocol_config_labeled.mat')['filelist'].flatten()
camId = loadmat('cuhk03_new_protocol_config_labeled.mat')['camId'].flatten()
# +
X = np.array(features)
y = np.array(labels)
filelist = np.array(filelist)
camId = np.array(camId)
# +
# Indices loaded from the .mat file are 1-based (MATLAB convention), so
# flatten them and shift to 0-based for NumPy indexing.
mask_train = np.array(train_idxs).ravel()
mask_query = np.array(query_idxs).ravel()
mask_gallery = np.array(gallery_idxs).ravel()
mask_train = np.subtract(mask_train, 1)
mask_query = np.subtract(mask_query, 1)
mask_gallery = np.subtract(mask_gallery, 1)
# Slice features, labels, filenames and camera ids into the three protocol splits.
X_train, X_query, X_gallery = X[mask_train, :], X[mask_query, :], X[mask_gallery, :]
y_train, y_query, y_gallery = y[mask_train], y[mask_query], y[mask_gallery]
filelist_train, filelist_query, filelist_gallery = filelist[mask_train], filelist[mask_query], filelist[mask_gallery]
camId_train, camId_query, camId_gallery = camId[mask_train], camId[mask_query], camId[mask_gallery]
# -
def get_acc_score(y_valid, y_q, tot_label_occur):
    """Score one ranked gallery list against the query label.

    y_valid is the gallery label array sorted by ascending distance, y_q the
    query's label, and tot_label_occur how many times y_q occurs in y_valid.
    Returns (AP, rank_A): the 11-point interpolated average precision and a
    length-30 CMC hit vector (rank_A[r] == 1 iff a correct match appears at
    rank <= r+1).  Note: y_valid must contain at least 30 entries.
    """
    max_rank = 30
    rank_A = np.zeros(max_rank)
    AP_arr = np.zeros(11)
    hits = 0
    recall = 0
    k = 0
    # Walk the ranked list until full recall is reached AND the first 30
    # ranks have been inspected.
    while recall < 1 or k < max_rank:
        if y_valid[k] == y_q:
            hits += 1
            recall = hits / tot_label_occur
            precision = hits / (k + 1)
            # Drop the precision into its 11-point recall bucket.
            AP_arr[round((recall - 0.05) * 10)] = precision
            rank_A[k:] = 1  # every rank >= k now counts as a CMC hit
        k += 1
    # Interpolated precision: right-to-left running maximum over the buckets.
    best = 0
    for i in reversed(range(11)):
        best = max(best, AP_arr[i])
        AP_arr[i] = best
    return AP_arr.sum() / 11, rank_A
# +
from scipy.spatial import distance
from sklearn.metrics import pairwise
def evaluate_metric(X_query, camId_query, y_query, X_gallery, camId_gallery, y_gallery, metric = 'euclidian', parameters = None):
    """Rank the gallery for every query under the chosen distance metric.

    For each query, distances to all gallery entries are computed (skipping
    gallery entries that share both the query's camera id and label), sorted
    ascending, and scored via get_acc_score.  Prints per-rank accuracies and
    mAP, then returns (rank_accuracies, mAP).

    parameters supplies the metric's extra argument where one is needed
    (the order p for 'minkowski', the inverse covariance for 'mahalanobis').
    """
    rank_accuracies = []
    AP = []
    # Break condition for testing (uncomment the q lines to run a few queries)
    #q = 0
    for query, camId_q, y_q in zip(X_query, camId_query, y_query):
        q_g_dists = []
        y_valid = []
        for gallery, camId_g, y_g in zip(X_gallery, camId_gallery, y_gallery):
            # Skip gallery samples with the same label taken by the same
            # camera as the query.
            if ((camId_q == camId_g) and (y_q == y_g)):
                continue
            else:
                if metric == 'euclidian':
                    dist = distance.euclidean(query, gallery)
                elif metric == 'sqeuclidean':
                    dist = distance.sqeuclidean(query, gallery)
                elif metric == 'seuclidean':
                    dist = distance.seuclidean(query, gallery)
                elif metric == 'minkowski':
                    dist = distance.minkowski(query, gallery, parameters)
                elif metric == 'chebyshev':
                    dist = distance.chebyshev(query, gallery)
                elif metric == 'chi2':
                    # additive_chi2_kernel is a similarity; negate for a distance.
                    dist = -pairwise.additive_chi2_kernel(query.reshape(1, -1), gallery.reshape(1, -1))[0][0]
                elif metric == 'braycurtis':
                    dist = distance.braycurtis(query, gallery)
                elif metric == 'canberra':
                    dist = distance.canberra(query, gallery)
                elif metric == 'cosine':
                    dist = distance.cosine(query, gallery)
                elif metric == 'correlation':
                    dist = distance.correlation(query, gallery)
                elif metric == 'mahalanobis':
                    dist = distance.mahalanobis(query, gallery, parameters)
                else:
                    raise NameError('Specified metric not supported')
                q_g_dists.append(dist)
                y_valid.append(y_g)
        tot_label_occur = y_valid.count(y_q)
        q_g_dists = np.array(q_g_dists)
        y_valid = np.array(y_valid)
        _indexes = np.argsort(q_g_dists)
        # Sorted distances and labels (ascending distance = best match first)
        q_g_dists, y_valid = q_g_dists[_indexes], y_valid[_indexes]
        AP_, rank_A = get_acc_score(y_valid, y_q, tot_label_occur)
        AP.append(AP_)
        rank_accuracies.append(rank_A)
        #if q > 5:
        #    break
        #q = q+1
    # Average the per-query CMC vectors over all queries.
    rank_accuracies = np.array(rank_accuracies)
    total = rank_accuracies.shape[0]
    rank_accuracies = rank_accuracies.sum(axis = 0)
    rank_accuracies = np.divide(rank_accuracies, total)
    i = 0
    print ('Accuracies by Rank:')
    # Print CMC accuracies five ranks per line (30 ranks total).
    while i < rank_accuracies.shape[0]:
        print('Rank ', i+1, ' = %.2f%%' % (rank_accuracies[i] * 100), '\t',
              'Rank ', i+2, ' = %.2f%%' % (rank_accuracies[i+1] * 100), '\t',
              'Rank ', i+3, ' = %.2f%%' % (rank_accuracies[i+2] * 100), '\t',
              'Rank ', i+4, ' = %.2f%%' % (rank_accuracies[i+3] * 100), '\t',
              'Rank ', i+5, ' = %.2f%%' % (rank_accuracies[i+4] * 100))
        i = i+5
    # mAP = mean of the per-query average precisions.
    AP = np.array(AP)
    mAP = AP.sum()/AP.shape[0]
    print('mAP = %.2f%%' % (mAP * 100))
    return rank_accuracies, mAP
# -
rank_accuracies_l = []
mAP_l = []
metric_l = []
# +
# Baseline Euclidian
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='euclidian',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Euclidian')
# +
# Square Euclidian
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric = 'sqeuclidean',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Square Euclidian')
# +
#Manhattan Distance
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric = 'minkowski',
parameters = 1)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Manhattan')
# +
# Chebyshev - L_infinity
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='chebyshev',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Chebyshev')
# +
# Chi-Square
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='chi2',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Chi Square')
# +
# Braycurtis
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='braycurtis',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Bray Curtis')
# +
# Canberra
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='canberra',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Canberra')
# +
# Cosine
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='cosine',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Cosine')
# +
# Correlation
rank_accuracies, mAP = evaluate_metric(X_query, camId_query, y_query,
X_gallery, camId_gallery, y_gallery,
metric ='correlation',
parameters = None)
rank_accuracies_l.append(rank_accuracies)
mAP_l.append(mAP)
metric_l.append('Correlation')
# +
plt.figure(figsize=(8.0, 6.0))
color_list = ['green', 'blue', 'red', 'purple', 'orange', 'magenta', 'cyan', 'black', 'indianred', 'lightseagreen', 'gold', 'lightgreen']
for i in range(len(metric_l)):
plt.plot(np.arange(1, 31), 100*rank_accuracies_l[i], color=color_list[i], linestyle='dashed', label='Metric: '+ metric_l[i])
plt.title('CMC Curves for a range of standard distance metrics')
plt.xlabel('Rank')
plt.ylabel('Recogniton Accuracy / %')
plt.legend(loc='best')
# -
| Jupyter Notebooks/Baseline KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clustering Consulting Project
#
# A large technology firm needs your help, they've been hacked! Luckily their forensic engineers have grabbed valuable data about the hacks, including information like session time,locations, wpm typing speed, etc. The forensic engineer relates to you what she has been able to figure out so far, she has been able to grab meta data of each session that the hackers used to connect to their servers. These are the features of the data:
#
# * 'Session_Connection_Time': How long the session lasted in minutes
# * 'Bytes Transferred': Number of MB transferred during session
# * 'Kali_Trace_Used': Indicates if the hacker was using Kali Linux
# * 'Servers_Corrupted': Number of server corrupted during the attack
# * 'Pages_Corrupted': Number of pages illegally accessed
# * 'Location': Location attack came from (Probably useless because the hackers used VPNs)
# * 'WPM_Typing_Speed': Their estimated typing speed based on session logs.
#
#
# The technology firm has 3 potential hackers that perpetrated the attack. Their certain of the first two hackers but they aren't very sure if the third hacker was involved or not. They have requested your help! Can you help figure out whether or not the third suspect had anything to do with the attacks, or was it just two hackers? It's probably not possible to know for sure, but maybe what you've just learned about Clustering can help!
#
# **One last key fact, the forensic engineer knows that the hackers trade off attacks. Meaning they should each have roughly the same amount of attacks. For example if there were 100 total attacks, then in a 2 hacker situation each should have about 50 hacks, in a three hacker situation each would have about 33 hacks. The engineer believes this is the key element to solving this, but doesn't know how to distinguish this unlabeled data into groups of hackers.**
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('NumHackers').getOrCreate()

# FIX: the original cell ended with a bare `df =`, which is a syntax error.
# Load the hack-session data described above with a header row and inferred
# column types.
# TODO(review): confirm the CSV filename/path used for this project's data.
df = spark.read.csv('hack_data.csv', header=True, inferSchema=True)
| Num of Hackers/.ipynb_checkpoints/Clustering_Consulting_Project-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Astronomical Source Detection
# Import numpy and sep as usual.
# The .fits files needed for this repository can be found at the following site:
#
# https://archive.stsci.edu/pub/hlsp/hudf12/
#
# Alternatively, you can get them from the following Google drive:
#
# https://bit.ly/2Pz9qjK
#
# For further clarification as to which files are needed, see additional_info.txt located in Google Drive
# .
# Import numpy and sep as usual.
import numpy as np
import sep
# Importing the additional setup for reading the test image and displaying plots.
# +
from astropy.io import fits
import matplotlib.pyplot as plt
from matplotlib import rcParams
#Ellipse will be used later in the code for object detection
from matplotlib.patches import Ellipse
# %matplotlib inline
rcParams['figure.figsize'] = [10., 10.]
# -
# Adding a color class to change font colors and styles.
# +
#use colors.subclass(or command; e.g bold).colorname to print
#examples: print(colors.bold, colors.fg.blue, "this will be bold and blue")
#everything after this will have that format until the following command
#is given: print(colors.reset, "now, this text will be normal")
class colors:
    """ANSI escape sequences for styling terminal output.

    Use ``colors.bold``/``colors.underline`` etc. for text styles,
    ``colors.fg.<name>`` for foreground colors and ``colors.bg.<name>``
    for background colors inside a ``print`` call, then finish with
    ``colors.reset`` to restore the terminal's defaults.
    """
    reset='\033[0m' #reset all colors with colors.reset
    bold='\033[01m'
    underline='\033[04m'
    strikethrough='\033[09m'
    reverse='\033[07m'  # reverse video: swap foreground/background
    class fg: #foreground subclass
        black='\033[30m'
        red='\033[31m'
        green='\033[32m'
        orange='\033[33m'
        blue='\033[34m'
        purple='\033[35m'
        cyan='\033[36m'
        lightgrey='\033[37m'
        darkgrey='\033[90m'
        lightred='\033[91m'
        lightgreen='\033[92m'
        yellow='\033[93m'
        lightblue='\033[94m'
        pink='\033[95m'
        lightcyan='\033[96m'
    class bg: #background subclass
        black='\033[40m'
        red='\033[41m'
        green='\033[42m'
        orange='\033[43m'
        blue='\033[44m'
        purple='\033[45m'
        cyan='\033[46m'
        lightgrey='\033[47m'
# -
# Getting data from fits image.
# +
data = fits.getdata("hlsp_hudf12_hst_wfc3ir_udfmain_f105w_v1.0_drz.fits")
#keep in mind to replace the directory prior to running on personal terminal
#(e.g: ""/Users/***/Desktop/image.fits")
#use the following if an error arises in this cell:
data = data.byteswap(inplace=True).newbyteorder()
# -
# Printing the image and saving as a .png
# +
m, s = np.mean(data), np.std(data)
plt.imshow(data, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower')
plt.colorbar()
#save as .png
plt.savefig("f105w.png")
# -
# Measuring a spatially varying background on the image
# +
bkg = sep.Background(data)
#You may also mask pixels with the following code:
bkg = sep.Background(data, bw=64, bh=64, fw=3, fh=3)
# -
# Printing the data type and shape.
print(type(data))
print(data.shape)
# Getting a "global" mean and noise of the image background.
print(bkg.globalback)
print(bkg.globalrms)
# Evaluating background as 2D array with same size as original image, showing it, and saving it as .png
# +
bkg_image = bkg.back()
#alternate way to get background evaluation:
#bkg_image=np.array(bkg)
plt.imshow(bkg_image, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar()
#saving as a .png
plt.savefig("f105w_backgroundsubtraction1.png")
# -
# Now, doing the same as above, but with background noise.
# +
bkg_rms = bkg.rms()
plt.imshow(bkg_rms, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar()
#saving as a .png
plt.savefig("f105w_backgroundsubtraction_noise.png")
# -
# Now, we subtract the background
# +
data_sub = data - bkg
#It is also possible to subtract the background using:
#data_sub = bkg.subfrom(data)
# -
# ## Object Detection
# Now that we have subtacted the background, we can begin object detection
# We're setting the threshold to be a constant of 15σ where σ=bkg.globalrms
# +
objects = sep.extract(data_sub, 15, err=bkg.globalrms)
#printing the number of detected objects
print("Objects detected: %d" % len(objects))
# -
# Using objects['x'] and objects['y'], we can get the centroid coordinates of the objects.
#
# To do a visual check of where the objects are, we will over-plot the object coords with basic shape parameters.
#
# To do this we will used the imported Ellipse from matplotlib.patches, plot background subtracted images(comment 1), plot an ellipse for each object(comment 2), and saving image as a PNG(comment 3).
# +
#1
fig, ax = plt.subplots()
m, s = np.mean(data_sub), np.std(data_sub)
im = ax.imshow(data_sub, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower')
#2
for i in range(len(objects)):
e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
width = 6*objects['a'][i],
height = 6*objects['b'][i],
angle = objects['theta'][i] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax.add_artist(e)
#3
plt.savefig("fl05w_objectdetection.png")
# -
# There are many fields for objects that give various information, below is an output of the available fields
objects.dtype.names
# ## Aperture Photometry
#
# Performing a simple circular aperture photometry with a 3 pixel radius at the locations of the objects.
#
# flux, fluxerr, and flag are all 1D arrays with one entry per object; we will print the first 10 results as well.
# +
flux,fluxerr,flag = sep.sum_circle(data_sub,objects['x'],objects['y'],3.0,err=bkg.globalrms,gain=1.0)
#printing the first ten results
for i in range(10):
print(colors.bold,colors.fg.purple,"object {:d}: flux = {:f} +/- {:f}".format(i, flux[i], fluxerr[i]),colors.reset)
# -
# Printing the flattened data's type.
print(type(data.flatten()))
print(type(data.flat))
# Showing the histogram of the data and saving.
# +
#fluxhist = flux, fluxerr, flag
#NBINS = 1000
histogram = plt.hist(flux.flatten(),bins='auto',log=True,range=[-5.,100.])
#saving as a .png
plt.savefig("histogram_f105w.png")
| astr119project3_f105w.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: graco
# language: python
# name: graco
# ---
# +
from pyclustering.cluster.kmedoids import kmedoids
from collections import defaultdict
from sklearn.cluster import KMeans
from scipy.stats import hypergeom
from itertools import islice, product
from functools import partial
import time
import random
import numpy as np
import pandas as pd
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
# +
# %matplotlib inline
sns.set()  # apply seaborn's default plot styling globally

# Root of the project's data tree; all other paths are derived from it.
DATA_DIRECTORY = "/home/clusterduck123/Desktop/git/supplements/data"
RAW_DATA_DIRECTORY = f"{DATA_DIRECTORY}/raw_data"
PPI_DIRECTORY = f"{DATA_DIRECTORY}/PPI"
ANNOTATIONS_DIRECTORY = f"{DATA_DIRECTORY}/annotations"
MATRIX_DIRECTORY = f"{DATA_DIRECTORY}/matrix"
CLUSTERS_DIRECTORY = f"{DATA_DIRECTORY}/clusters"

# Map internal distance-measure identifiers to the LaTeX strings used in
# plot legends (rendered between $...$ by matplotlib's mathtext).
name2string = {'tvd0':'TVD_0', 'tvd1':'TVD_1', 'tvd2':'TVD_2', 'tvd3':'TVD_3',
               'tvd0123':'TVD_{0123}', 'newtvd':'TVD_{new}', 'tijana':'Tijana',
               'GDV_euclidean' :'GDV_{eucl}'  , 'GDV_zscore_euclidean'  :'GDV_{z-eucl}'  ,
               'GDV_cityblock' :'GDV_{city}'  , 'GDV_zscore_cityblock'  :'GDV_{z-city}'  ,
               'GDV_seuclidean' :'GDV_{seucl}' , 'GDV_zscore_seuclidean' :'GDV_{z-seucl}' ,
               'GDV_cosine'    :'GDV_{cos}'   , 'GDV_zscore_cosine'     :'GDV_{z-cos}'   ,
               'GDV_correlation':'GDV_{cor}'  , 'GDV_zscore_correlation':'GDV_{z-cor}'   ,
               'GDV_sqeuclidean':'GDV_{eucl^2}', 'GDV_zscore_sqeuclidean':'GDV_{z-eucl^2}',
               'GDV_chebyshev' :'GDV_{cheby}' , 'GDV_zscore_chebyshev'  :'GDV_{z-cheby}' ,
               'GDV_canberra'  :'GDV_{can}'   , 'GDV_zscore_canberra'   :'GDV_{z-can}'   ,
               'GDV_braycurtis' :'GDV_{bray}'  , 'GDV_zscore_braycurtis' :'GDV_{z-bray}'  ,
               'GDV_mahalanobis':'GDV_{mahala}', 'GDV_zscore_mahalanobis':'GDV_{z-mahala}',
               'GCV_euclidean' :'GCV_{eucl}'  , 'GCV_zscore_euclidean'  :'GCV_{z-eucl}'  ,
               'GCV_cityblock' :'GCV_{city}'  , 'GCV_zscore_cityblock'  :'GCV_{z-city}'  ,
               'GCV_seuclidean' :'GCV_{seucl}' , 'GCV_zscore_seuclidean' :'GCV_{z-seucl}' ,
               'GCV_cosine'    :'GCV_{cos}'   , 'GCV_zscore_cosine'     :'GCV_{z-cos}'   ,
               'GCV_correlation':'GCV_{cor}'  , 'GCV_zscore_correlation':'GCV_{z-cor}'   ,
               'GCV_sqeuclidean':'GCV_{eucl^2}', 'GCV_zscore_sqeuclidean':'GCV_{z-eucl^2}',
               'GCV_chebyshev' :'GCV_{cheby}' , 'GCV_zscore_chebyshev'  :'GCV_{z-cheby}' ,
               'GCV_canberra'  :'GCV_{can}'   , 'GCV_zscore_canberra'   :'GCV_{z-can}'   ,
               'GCV_braycurtis' :'GCV_{bray}'  , 'GCV_zscore_braycurtis' :'GCV_{z-bray}'  ,
               'GCV_mahalanobis':'GCV_{mahala}', 'GCV_zscore_mahalanobis':'GCV_{z-mahala}',}
# -
# # Cluster independent variables
# +
# Load gene -> GO-term (cellular component) annotations for S. cerevisiae.
annotation_df = pd.read_csv(f"{ANNOTATIONS_DIRECTORY}/BioGRID-SGD_CC_sc.csv")
# Keep only GO terms annotated to between 5 and 500 genes — a standard filter
# that drops terms too small to test and terms too broad to be informative.
GO_population = {go_id for go_id in set(annotation_df.GO_ID)
                 if 5 <= len(annotation_df[annotation_df.GO_ID == go_id]) <= 500}
annotation_df = annotation_df[annotation_df.GO_ID.isin(GO_population)]
# annotation_df = annotation_df[annotation_df.Level > -1]
# GO_population = set(annotation_df.GO_ID)
# Conversion dictionaries
int2GO = dict(enumerate(GO_population))
GO2int = dict(zip(int2GO.values(), int2GO.keys()))
# GO term -> set of annotated genes, and gene -> set of its GO terms.
GO2genes = {go_id:set(annotation_df.Systematic_ID[annotation_df.GO_ID == go_id])
            for go_id in GO_population}
gene2GO = {gene :set(annotation_df.GO_ID[annotation_df.Systematic_ID == gene])
           for gene in set(annotation_df.Systematic_ID)}
# -
# ### Preparation
# Let $N$ be the number of genes in the PPI.
# Each GO-term defines a 'state' in which $K$ proteins are annotated with this term; these are seen as _successes_.
# A given cluster defines an 'experiment', in which the number of draws, $n$, corresponds to the length of the cluster.
# The number of _successful draws_ $k$ corresponds to the number of annotated genes in the given cluster.
# +
# List of success states: for each GO term, the set of genes annotated with it.
list_of_success_states = list(GO2genes.values())
# This will be our K (successes in the population), see below. Reshaped to a
# column vector so it broadcasts against the (GO-term x cluster) matrix k
# ('array_of_observed_successes').
array_of_total_successes = np.array(list(map(len,list_of_success_states))).reshape(-1,1)
# -
# ### Here we GO
# +
MIN_CLUSTERS = 2
MAX_CLUSTERS = 20
# Significance levels: high / medium / low confidence thresholds.
alpha = [0.01, 0.05, 0.1]
# One array slot per cluster count in [MIN_CLUSTERS, MAX_CLUSTERS).
# NOTE(review): partial(np.ndarray, ...) allocates UNINITIALISED arrays —
# every slot must be overwritten by the main loop below before plotting.
hc_cluster_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
mc_cluster_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
lc_cluster_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
hc_GO_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
mc_GO_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
lc_GO_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
hc_gene_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
mc_gene_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
lc_gene_coverage = defaultdict(partial(np.ndarray, MAX_CLUSTERS-MIN_CLUSTERS))
# -
# All scipy-style distance measures used, and the full list of graphlet-vector
# matrix names (GDV/GCV, raw and z-scored) derived from them.
all_distances = ['euclidean', 'cityblock', 'seuclidean', 'sqeuclidean',
                 'cosine', 'correlation', 'chebyshev', 'canberra',
                 'braycurtis', 'mahalanobis']
ALL_distances = [A+B for (A,B) in product(['GDV_', 'GDV_zscore_', 'GCV_', 'GCV_zscore_'], all_distances)]
# +
METHOD = "kmedoids"
# For each distance measure and each cluster count, run a hypergeometric
# GO-enrichment test per (GO term, cluster) pair, Bonferroni-correct the
# p-values, and record cluster / GO-term / gene coverage.
for distance in ['tvd0123']:
    print(distance)
    MATRIX_NAME = f"sc_BioGRID_{distance}"
    t1 = time.time()
    for i, n_clusters in enumerate(range(2, MAX_CLUSTERS)):
        # Cluster files contain one cluster per line as whitespace-separated gene IDs.
        with open(f"{CLUSTERS_DIRECTORY}/{METHOD}/{MATRIX_NAME}_{n_clusters}.txt", 'r') as f:
            clusters = list(map(str.split, f))
        list_of_experiments = list(map(set,clusters))
        # For each GO term and cluster we get an experiment:
        # k[term, cluster] = number of annotated genes drawn into that cluster.
        array_of_observed_successes = np.array([[len(draws & success_states) for draws in list_of_experiments]
                                                for success_states in list_of_success_states])
        N = sum(map(len,clusters)) # PPI size, i.e. number of all genes that appear in a cluster
        K = array_of_total_successes # defined in section 'Preparation'
        n = list(map(len, clusters)) # cluster lengths
        k = array_of_observed_successes
        # Upper-tail p-value P(X >= k). Beware scipy's parameter naming, which
        # differs from the usual convention: M=population size, n=successes in
        # the population, N=number of draws.
        p_values_array = 1-hypergeom.cdf(k=k-1, M=N, N=n, n=K)
        # NOTE(review): passing a set as `index` raises TypeError on recent
        # pandas ("Set type is unordered") — verify the pandas version, or use
        # a list with a fixed order.
        p_values_df = pd.DataFrame(p_values_array, index=GO_population)
        GO_index = p_values_df.index
        # Bonferroni correction over all (GO term, cluster) tests.
        m = p_values_array.size
        hc_enrichment_df = p_values_df < alpha[0]/m
        mc_enrichment_df = p_values_df < alpha[1]/m
        lc_enrichment_df = p_values_df < alpha[2]/m
        # Cluster coverage: fraction of clusters enriched in at least one GO term.
        hc_cluster_coverage[distance][i] = sum(hc_enrichment_df.any())/n_clusters
        mc_cluster_coverage[distance][i] = sum(mc_enrichment_df.any())/n_clusters
        lc_cluster_coverage[distance][i] = sum(lc_enrichment_df.any())/n_clusters
        # GO-term coverage: fraction of GO terms enriched in at least one cluster.
        hc_GO_coverage[distance][i] = sum(hc_enrichment_df.any(axis=1))/len(GO_population)
        mc_GO_coverage[distance][i] = sum(mc_enrichment_df.any(axis=1))/len(GO_population)
        lc_GO_coverage[distance][i] = sum(lc_enrichment_df.any(axis=1))/len(GO_population)
        # Gene coverage: fraction of genes whose own annotations intersect an
        # enriched GO term of their cluster. (The generator-expression `i` is
        # local to the expression and indexes clusters, not cluster counts.)
        hc_gene_coverage[distance][i] = sum(1 for (i, cluster) in enumerate(clusters) for gene in cluster
                                            if gene2GO.get(gene, set()) & set(GO_index[hc_enrichment_df[i]]))/N
        mc_gene_coverage[distance][i] = sum(1 for (i, cluster) in enumerate(clusters) for gene in cluster
                                            if gene2GO.get(gene, set()) & set(GO_index[mc_enrichment_df[i]]))/N
        lc_gene_coverage[distance][i] = sum(1 for (i, cluster) in enumerate(clusters) for gene in cluster
                                            if gene2GO.get(gene, set()) & set(GO_index[lc_enrichment_df[i]]))/N
        t2 = time.time()
        print(f'{n_clusters}: {t2-t1:.2f}sec', end='\r')
# -
# NOTE(review): both lines inspect the same `clusters` variable (whichever
# distance ran last); the #tijana / #tvd0123 labels only make sense when the
# loop above was re-run interactively with different distances.
list(map(len,clusters)) #tijana
list(map(len,clusters)) #tvd0123
# Distance measures to draw in the plots below.
plot_distances = ['tijana', 'tvd0123']
# +
# Cluster coverage plot: solid line = medium-confidence coverage, shaded band
# spans the high- and low-confidence thresholds.
fig, ax = plt.subplots(figsize=(12,9))
fig.patch.set_alpha(0)
fig.subplots_adjust(hspace = 0.4)
# NOTE(review): the palette iterators are only consumed for GDV*/GCV* names,
# and the color= arguments are commented out below, so they are currently
# unused; `color` would be unbound for other names if those lines were
# re-enabled without a fallback branch.
Blues = iter(sns.color_palette("Blues",6)[::-1])
Reds = iter(sns.color_palette("Reds", 6)[::-1])
for distance in plot_distances:
    if distance.startswith('GDV'):
        color = next(Reds)
    elif distance.startswith('GCV'):
        color = next(Blues)
    ax.plot(range(2,MAX_CLUSTERS), mc_cluster_coverage[distance],
            label=f'${name2string[distance]}$',
            linewidth=2.5,
#            color=color,
            alpha=0.75
            );
    ax.fill_between(range(2,MAX_CLUSTERS),
                    hc_cluster_coverage[distance],
                    lc_cluster_coverage[distance],
                    alpha=0.1,
#                    color=color
                    );
ax.set_title('Cluster coverage', fontsize=28)
ax.patch.set_alpha(0)
ax.set_xlabel('# clusters', fontsize=24)
ax.set_ylabel('% enriched', fontsize=24)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.spines['left'].set_linewidth(2.5)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_linewidth(2.5)
ax.spines['bottom'].set_color('black')
ax.legend(fontsize=18, shadow=True, facecolor=[0.95, 0.95, 0.95, 0]);
fig.savefig(f"{DATA_DIRECTORY}/plots/dummy1.png")
# +
# GO-term coverage plot (the original leading comment said "Cluster coverage",
# which did not match the data actually plotted here).
fig, ax = plt.subplots(figsize=(12,9))
fig.patch.set_alpha(0)
fig.subplots_adjust(hspace = 0.4)
for distance in plot_distances:
    ax.plot(range(2,MAX_CLUSTERS), mc_GO_coverage[distance],
            label=f'${name2string[distance]}$',
            linewidth=2.5);
    ax.fill_between(range(2,MAX_CLUSTERS),
                    hc_GO_coverage[distance],
                    lc_GO_coverage[distance],
                    alpha=0.1);
ax.set_title('GO-term coverage', fontsize=28)
ax.patch.set_alpha(0)
ax.set_xlabel('# clusters', fontsize=24)
ax.set_ylabel('% enriched', fontsize=24)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.spines['left'].set_linewidth(2.5)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_linewidth(2.5)
ax.spines['bottom'].set_color('black')
ax.legend(fontsize=18, shadow=True, facecolor=[0.95, 0.95, 0.95, 0]);
fig.savefig(f"{DATA_DIRECTORY}/plots/dummy2.png")
# +
# Gene coverage plot (the original leading comment said "Cluster coverage",
# which did not match the data actually plotted here).
fig, ax = plt.subplots(figsize=(12,9))
fig.patch.set_alpha(0)
fig.subplots_adjust(hspace = 0.4)
for distance in plot_distances:
    ax.plot(range(2,MAX_CLUSTERS), mc_gene_coverage[distance],
            label=f'${name2string[distance]}$',
            linewidth=2.5);
    ax.fill_between(range(2,MAX_CLUSTERS),
                    hc_gene_coverage[distance],
                    lc_gene_coverage[distance],
                    alpha=0.1);
ax.set_title('gene-term coverage', fontsize=28)
ax.patch.set_alpha(0)
ax.set_xlabel('# clusters', fontsize=24)
ax.set_ylabel('% enriched', fontsize=24)
ax.tick_params(axis='both', which='major', labelsize=24)
ax.spines['left'].set_linewidth(2.5)
ax.spines['left'].set_color('black')
ax.spines['bottom'].set_linewidth(2.5)
ax.spines['bottom'].set_color('black')
ax.legend(fontsize=18, shadow=True, facecolor=[0.95, 0.95, 0.95, 0]);
fig.savefig(f"{DATA_DIRECTORY}/plots/dummy3.png")
# -
| enrichment_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tuning Deep Feature Synthesis
#
# There are several parameters that can be tuned to change the output of DFS. We'll explore these parameters using the following `transactions` EntitySet.
import featuretools as ft
# Build the demo "mock customer" EntitySet (customers / sessions / transactions).
es = ft.demo.load_mock_customer(return_entityset=True)
es  # display the EntitySet summary
# ## Using "Seed Features"
#
# Seed features are manually defined and problem specific features that a user provides to DFS. Deep Feature Synthesis will then automatically stack new features on top of these features when it can.
#
# By using seed features, we can include domain specific knowledge in feature engineering automation. For the seed feature below, the domain knowledge may be that, for a specific retailer, a transaction above $125 would be considered an expensive purchase.
# +
# Seed feature: a hand-crafted boolean flagging transactions over $125.
expensive_purchase = ft.Feature(es["transactions"].ww["amount"]) > 125

# DFS stacks PERCENT_TRUE on top of the boolean seed feature automatically.
feature_matrix, feature_defs = ft.dfs(entityset=es,
                                      target_dataframe_name="customers",
                                      agg_primitives=["percent_true"],
                                      seed_features=[expensive_purchase])
# Share of each customer's transactions that qualify as "expensive".
feature_matrix[['PERCENT_TRUE(transactions.amount > 125)']]
# -
# We can now see that the ``PERCENT_TRUE`` primitive was automatically applied to the boolean `expensive_purchase` feature from the `transactions` table. The feature produced as a result can be understood as the percentage of transactions for a customer that are considered expensive.
#
# ## Add "interesting" values to columns
#
# Sometimes we want to create features that are conditioned on a second value before calculations are performed. We call this extra filter a "where clause". Where clauses are used in Deep Feature Synthesis by including primitives in the `where_primitives` parameter to DFS.
#
# By default, where clauses are built using the ``interesting_values`` of a column.
#
# Interesting values can be automatically determined and added for each DataFrame in a pandas EntitySet by calling `es.add_interesting_values()`.
#
# Note that Dask and Koalas EntitySets cannot have interesting values determined automatically for their DataFrames. For those EntitySets, or when interesting values are already known for columns, the `dataframe_name` and `values` parameters can be used to set interesting values for individual columns in a DataFrame in an EntitySet.
# Manually register the interesting values for the sessions.device column.
values_dict = {'device': ["desktop", "mobile", "tablet"]}
es.add_interesting_values(dataframe_name='sessions', values=values_dict)
# Interesting values are stored in the DataFrame's Woodwork typing information.
es['sessions'].ww.columns['device'].metadata
# Now that interesting values are set for the `device` column in the `sessions` table, we can specify the aggregation primitives for which we want where clauses using the ``where_primitives`` parameter to DFS.
# Apply COUNT and AVG_TIME_BETWEEN both plainly and inside "where" clauses
# derived from the interesting values registered above.
feature_matrix, feature_defs = ft.dfs(entityset=es,
                                      target_dataframe_name="customers",
                                      agg_primitives=["count", "avg_time_between"],
                                      where_primitives=["count", "avg_time_between"],
                                      trans_primitives=[])
feature_matrix
# Now, we have several new potentially useful features. Here are two of them that are built off of the where clause "where the device used was a tablet":
feature_matrix[["COUNT(sessions WHERE device = tablet)", "AVG_TIME_BETWEEN(sessions.session_start WHERE device = tablet)"]]
# The first feature, `COUNT(sessions WHERE device = tablet)`, can be understood as indicating *how many sessions a customer completed on a tablet*.
#
# The second feature, `AVG_TIME_BETWEEN(sessions.session_start WHERE device = tablet)`, calculates *the time between those sessions*.
#
# We can see that customers who had only 0 or 1 sessions on a tablet have ``NaN`` values for the average time between such sessions.
#
#
# ## Encoding categorical features
#
# Machine learning algorithms typically expect all numeric data or data that has defined numeric representations, like boolean values corresponding to `0` and `1`. When Deep Feature Synthesis generates categorical features, we can encode them using Featuretools.
# +
# Generate a feature matrix that contains categorical outputs
# (zip_code and MODE(sessions.device)) for the encoding example below.
feature_matrix, feature_defs = ft.dfs(entityset=es,
                                      target_dataframe_name="customers",
                                      agg_primitives=["mode"],
                                      trans_primitives=['time_since'],
                                      max_depth=1)
feature_matrix
# -
# This feature matrix contains 2 columns that are categorical in nature, ``zip_code`` and ``MODE(sessions.device)``. We can use the feature matrix and feature definitions to encode these categorical values into boolean values. Featuretools offers functionality to apply one hot encoding to the output of DFS.
# One-hot encode the categorical features; numeric columns pass through untouched.
feature_matrix_enc, features_enc = ft.encode_features(feature_matrix, feature_defs)
feature_matrix_enc
# The returned feature matrix is now encoded in a way that is interpretable to machine learning algorithms. Notice how the columns that did not need encoding are still included. Additionally, we get a new set of feature definitions that contain the encoded values.
features_enc
# These features can be used to calculate the same encoded values on new data. For more information on feature engineering in production, read the [Deployment](deployment.ipynb) guide.
| docs/source/guides/tuning_dfs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import relevant packages
import numpy as np
import pandas as pd
import altair as alt
# +
# Load the per-state visits data and parse the date column.
data = pd.read_csv("data/alldomains_states_data.csv")
data['visit_date'] = pd.to_datetime(data['visit_date'])
# Drop rows for the Jervis Bay Territory and for '(not set)' regions, then
# renumber rows 0..n-1. NOTE(review): reset_index() keeps the old index as a
# new 'index' column; pass drop=True if that column is unwanted.
data = data[data.region != '(not set)']
data = data[data.region != 'Jervis Bay Territory']
data = data.reset_index()
data.head()  # quick visual sanity check of the cleaned frame
# +
# Scale raw visit counts to visits per 10,000 residents of each state.
populations = {"New South Wales" : 7317500,
               "Queensland" : 4599400,
               "South Australia" : 1659800,
               "Northern Territory" : 231200,
               "Victoria" : 5640900,
               "Tasmania" : 511000,
               "Western Australia" : 2366900,
               "Australian Capital Territory" : 366900}
# Vectorised replacement for the original row-by-row loop. The loop used
# chained indexed assignment (data["total_visits"][row] = ...), which raises
# SettingWithCopyWarning and is not guaranteed to write back into `data`
# (and silently depends on the column's dtype). Series.map looks up each
# row's state population; the arithmetic then broadcasts over the column.
data["total_visits"] = data["total_visits"] / data["region"].map(populations) * 10000
# -
data
# +
# Streamgraph: stack='center' centres the stacked areas around zero and
# axis=None hides the (now meaningless) absolute y-axis.
chart = alt.Chart(data).mark_area().encode(
    alt.X('visit_date:T',title='Date'),
    alt.Y('sum(total_visits):Q', stack='center', axis = None, title='Total Visits'),
    alt.Color('region:N',
              scale=alt.Scale(scheme='category20')
    )
).properties(
    width=700,
    height=600,
    title = 'Users by state over a year scaled by state population'
).interactive()
# configure_header returns a new, styled copy of the chart; being the last
# expression in the cell, it is what the notebook renders.
chart.configure_header(
    titleColor='grey',
    titleFontSize=22,
)
# -
| state_data_streamgraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="CFQicrAKNKp-"
def create_cubes(n):
    """Return a list of the cubes of 0..n-1 (every value is stored in memory)."""
    return [value ** 3 for value in range(n)]
# + colab={"base_uri": "https://localhost:8080/"} id="WPI2fsNVPONP" executionInfo={"status": "ok", "timestamp": 1616790776596, "user_tz": 180, "elapsed": 558, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="c27771fd-2e23-48d8-8826-fb899d4b415d"
create_cubes(10) #CREATING A LIST(Storing all the list values!!!)
# + colab={"base_uri": "https://localhost:8080/"} id="LUwUGDqnPXUK" executionInfo={"status": "ok", "timestamp": 1616790794183, "user_tz": 180, "elapsed": 621, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="8dcbf3ae-8107-4cb1-b3e0-ad0f55a44d90"
for x in create_cubes(10):  # the full list is still built before the loop starts
    print(x)
# + id="QVRZXlLlPoym"
def create_cubes_generated(m):
    """Lazily yield the cubes of 0..m-1; no intermediate list is stored."""
    # `yield` produces one value per request instead of materialising a list.
    yield from (number ** 3 for number in range(m))
# + colab={"base_uri": "https://localhost:8080/"} id="wedNDPQBP9Gw" executionInfo={"status": "ok", "timestamp": 1616791025856, "user_tz": 180, "elapsed": 694, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="4d81e13b-4943-4f5b-ea1f-7ffc5ffac546"
for z in create_cubes_generated(10):  # generators do not store the whole sequence in memory
    print(z)
# + id="yEwA4TeuvjOM" executionInfo={"status": "ok", "timestamp": 1617302664476, "user_tz": 180, "elapsed": 634, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}}
#Fibonacci Generation
def gen_fibon(n):
a=1
b=1
for i in range(n):
yield a
a,b = b,a+b #generating tuple
# + colab={"base_uri": "https://localhost:8080/"} id="_5zWtACAv7mh" executionInfo={"status": "ok", "timestamp": 1617302665559, "user_tz": 180, "elapsed": 486, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="8ff52921-de07-4c81-d17b-bacfd10a2435"
for number in gen_fibon(10):  # consume the generator with a for loop
    print(number)
# + id="EWOVY-FvwWAi"
# List-based version, for comparison with the generator above.
def fibon(n):
    """Return a list of the first n Fibonacci numbers (1, 1, 2, 3, 5, ...).

    Unlike gen_fibon, this stores every value in memory before returning.
    """
    a = 1
    b = 1
    list1 = []
    for i in range(n):
        list1.append(a)
        a, b = b, a + b
    # Fixed: the original ended with `return list1[]`, which is a SyntaxError.
    return list1
# + id="rEIYF0Fyw3bG" executionInfo={"status": "ok", "timestamp": 1617303065660, "user_tz": 180, "elapsed": 689, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}}
#NEXT function
def simple_gen():
    """Yield 0, 1 and 2 — a minimal generator used to demonstrate next()."""
    yield 0
    yield 1
    yield 2
# + colab={"base_uri": "https://localhost:8080/"} id="DRVQ-oPUxlXr" executionInfo={"status": "ok", "timestamp": 1617303115514, "user_tz": 180, "elapsed": 611, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="eff05f8d-18b5-4d86-fbba-26dcdd25bbca"
for number in simple_gen():  # a for loop exhausts the generator automatically
    print(number)
# + colab={"base_uri": "https://localhost:8080/"} id="YX9u5OVYxzgy" executionInfo={"status": "ok", "timestamp": 1617303162751, "user_tz": 180, "elapsed": 622, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="a1536383-7f0a-45f2-a08d-f0e7a13d1e89"
g = simple_gen()
print(next(g)) # next() resumes the generator and returns its next yielded value
print(next(g))
# + colab={"base_uri": "https://localhost:8080/"} id="W5wlHS80ygTr" executionInfo={"status": "ok", "timestamp": 1617303588515, "user_tz": 180, "elapsed": 650, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="6f70cec4-837a-438b-b81d-afc1d549d0d2"
#ITER function
#A string is iterable (works in a for loop) but is not itself an iterator:
#calling next(s) directly raises TypeError until iter() converts it.
s = "hello"
for letter in s:
    print(letter)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="NH-Nt8HszY0w" executionInfo={"status": "ok", "timestamp": 1617303590606, "user_tz": 180, "elapsed": 808, "user": {"displayName": "<NAME>\u00e7alves", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiJHEOifHSFrMCFSRzkvII3hRu6pdUTDDpI_GipLjU=s64", "userId": "16114882800029312634"}} outputId="039fe66e-65fa-42a1-cefc-8c52e984a71c"
s_iterated = iter(s) # wrap the string in an iterator so next() works
next(s_iterated)  # returns the first character, 'h'
| Python Notebooks/Generators - Bootcamp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["header"]
# <table width="100%">
# <tr style="border-bottom:solid 2pt #009EE3">
# <td class="header_buttons">
# <a href="unit_conversion_ecg.zip" download><img src="../../images/icons/download.png" alt="biosignalsnotebooks | download button"></a>
# </td>
# <td class="header_buttons">
# <a href="https://mybinder.org/v2/gh/biosignalsplux/biosignalsnotebooks/mybinder_complete?filepath=biosignalsnotebooks_environment%2Fcategories%2FPre-Process%2Funit_conversion_ecg.dwipynb" target="_blank"><img src="../../images/icons/program.png" alt="biosignalsnotebooks | binder server" title="Be creative and test your solutions !"></a>
# </td>
# <td></td>
# <td class="header_icons">
# <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png" alt="biosignalsnotebooks | home button"></a>
# </td>
# <td class="header_icons">
# <a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png" alt="biosignalsnotebooks | contacts button"></a>
# </td>
# <td class="header_icons">
# <a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png" alt="biosignalsnotebooks | github button"></a>
# </td>
# <td class="header_logo">
# <img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo">
# </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_title"]
# <link rel="stylesheet" href="../../styles/theme_style.css">
# <!--link rel="stylesheet" href="../../styles/header_style.css"-->
# <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
#
# <table width="100%">
# <tr>
# <td id="image_td" width="15%" class="header_image_color_4"><div id="image_img" class="header_image_4"></div></td>
# <td class="header_text">ECG Sensor - Unit Conversion </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_tags"]
# <div id="flex-container">
# <div id="diff_level" class="flex-item">
# <strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# </div>
# <div id="tag" class="flex-item-tag">
# <span id="tag_list">
# <table id="tag_list_table">
# <tr>
# <td class="shield_left">Tags</td>
# <td class="shield_right" id="tags">pre-process☁ecg☁conversion</td>
# </tr>
# </table>
# </span>
# <!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
# </div>
# </div>
# -
# The <strong><span class="color2">OpenSignals</span></strong> outputted file formats contain raw data, so each sample has a digital unit.
#
# In scientific terms it is recommended the use of specific units, like electric tension (V) or electric current (A).
# Each sensor that <strong>PLUX</strong> commercialises has a datasheet in which a transfer function is given so that unit conversion can be done.
#
# The next lines are intended to explain how this conversion can be programmatically done.
#
# Although the unit conversion procedure has some steps common to all sensors, the current <strong><span class="color5">Jupyter Notebook</span></strong> is dedicated to the unit conversion procedure of signals acquired with the ECG sensor.
# <hr>
# <p class="steps">1 - Importation of the needed packages</p>
# + tags=["hide_out"]
# biosignalsnotebooks Python package with useful functions that support and complement the available Notebooks
import biosignalsnotebooks as bsnb
# Function used for creating a numpy array, where a mathematical operation can be applied to each entry in
# an easy and automatic way. On the other side, linspace, here will be used for generation of a time-axis.
from numpy import array, linspace
# -
# <p class="steps">2 - Download of the sensor datasheet (from <a href="https://www.biosignalsplux.com/index.php/learn/documentation">https://www.biosignalsplux.com/index.php/learn/documentation</a>).</p>
# In this case we are working with ECG, being our file located at <a href="http://www.biosignalsplux.com/datasheets/ECG_Sensor_Datasheet.pdf">http://www.biosignalsplux.com/datasheets/ECG_Sensor_Datasheet.pdf</a>
# + tags=["hide_in"]
# Embedding of .pdf file
from IPython.display import IFrame
IFrame(src="../../images/pre-process/unit_conversion_ecg/ECG_Sensor_Datasheet.pdf#page=2", width="100%", height="350")
# -
# <p class="steps">3 - Extraction of the transfer function from the beginning of the second page</p>
# \begin{equation}
# ECG_{mV} = \frac{(\frac{ADC}{2^n} - \frac{1}{2}).VCC}{G_{ECG}}
# \end{equation}
#
# <table width="100%">
# <tr>
# <td width="33%" style="text-align:left;vertical-align:top">$ECG_{mV}$ - ECG value in mV</td>
# <td width="33%" style="text-align:left;vertical-align:top">$ADC$ - Digital value sampled from the channel (initialism of "Analog to Digital Converter")</td>
# <td width="33%" style="text-align:left;vertical-align:top">$n$ - Number of bits of the channel (dependent on the chosen resolution specified on <span class="color2">OpenSignals</span> previously to the acquisition stage [8, 12 or 16 bits])</td>
# </tr>
# <tr>
# <td style="text-align:left;vertical-align:top">$VCC$ - 3000 mV</td>
# <td style="text-align:left;vertical-align:top">$G_{ECG}$ - 1000</td>
# <td></td>
# </tr>
# </table>
# <p class="steps">4 - Loading of data stored in <strong><span class="color2">biosignalsnotebooks</span></strong> own signal library</p>
# Data loading: fetch a packaged ECG sample; `header` carries the acquisition metadata.
data, header = bsnb.load_signal("ecg_sample", get_header=True)
# In the following cell, some relevant information is stored inside variables. This relevant information includes the mac-address of the device, channel number and signal acquisition parameters such as resolution and sampling rate.
#
# For a detailed explanation of how to access this info, the <a href="../Load/signal_loading_preparatory_steps.ipynb" target="_blank">"Signal Loading - Working with File Header"<img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> Notebook should be consulted.
ch = "CH1" # Channel the ECG sensor was connected to
sr = 200 # Sampling rate (Hz)
resolution = 16 # Resolution (number of available ADC bits), i.e. n in the transfer function
# Access to acquired signal samples and storage inside a new variable.
signal = data[ch]
# <p class="steps">5 - Final unit conversion (to <span class="color5">mV</span>) by applying the transfer function sample by sample</p>
# Definition of $VCC$ and $G_{ECG}$ constants (values from the sensor datasheet)
vcc = 3000 # mV
gain = 1000  # dimensionless sensor gain G_ECG
# Datasheet transfer function: ECG(mV) = ((ADC / 2^n - 1/2) * VCC) / G_ECG
signal_mv = (((array(signal) / 2**resolution) - 0.5) * vcc) / gain
# <p class="steps">6 - Time axis generation</p>
time = bsnb.generate_time(signal_mv, sr)
# Comparison between RAW and mV signal.
# + tags=["hide_in"]
# Side-by-side comparison: raw ADC units (left) vs converted millivolts (right).
bsnb.plot([time, time], [signal, signal_mv], y_axis_label=["Raw Data", "Electric Voltage (mV)"],
          grid_lines=1, grid_columns=2, grid_plot=True)
# -
# <span class="color2">Similar Notebooks</span>, dedicated to the unit conversion of other sensors, are available in the following "conversion" section of <a href="../MainFiles/by_tag.ipynb">Notebooks Grouped by Tag page <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>
#
# A list of PLUX's available sensors (and the respective datasheets) can be accessed at <a href="http://www.biosignalsplux.com/en/products/sensors">Sensors page <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a>.
#
# <strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
# + [markdown] tags=["footer"]
# <hr>
# <table width="100%">
# <tr>
# <td class="footer_logo">
# <img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo [footer]">
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
# <br>
# <a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
# <br>
# <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
# <br>
# <a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/signal_samples.ipynb">☌ Signal Library</a>
# </td>
# <td width="40%" style="text-align:left">
# <a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
# <br>
# <a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
# <br>
# <a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
# <br>
# <a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
# </td>
# </tr>
# </table>
# + tags=["hide_both"]
# Apply the biosignalsnotebooks CSS theme to this notebook's rendered output.
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
# + tags=["hide_both"] language="html"
# <script>
# // AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
# require(
# ['base/js/namespace', 'jquery'],
# function(jupyter, $) {
# $(jupyter.events).on("kernel_ready.Kernel", function () {
# console.log("Auto-running all cells-below...");
# jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# jupyter.actions.call('jupyter-notebook:save-notebook');
# });
# }
# );
# </script>
| notebookToHtml/biosignalsnotebooks_html_publish/Categories/Pre-Process/unit_conversion_ecg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Kaggle Competition for House Prices: Advanced Regression Techniques
# This code is from <NAME> GitHub
# +
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# to add plots to your Jupyter notebook we use the next command
# # %matplotlib inline
pd.set_option('display.max_rows', 90) # by default is 10, if change to None print ALL
pd.set_option('display.max_columns', 90) # by default is 10, if change to None print ALL
# Remember in Mac Esc+a Esc+b Esc+d+d
# +
# More libraries (NOTE: pandas/numpy/matplotlib/seaborn are re-imported here;
# harmless but redundant with the cell above)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.offline as py
from plotly import tools
import plotly.figure_factory as ff
py.init_notebook_mode(connected=True)
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.linear_model import Ridge
import lightgbm as lgb
import warnings
warnings.filterwarnings('ignore')
plt.style.use('ggplot')
# Fixed seed value (NOTE: defined but never passed to any estimator below).
seed = 4432
# -
# Load the Kaggle training data (expects train.csv in the working directory).
df=pd.read_csv('train.csv')
# df.shape
df.head()
df.isnull().sum()
# `train` is just an alias for `df`, not an independent copy.
train = df
df['MSZoning'].value_counts()
# Heat map of all the features and their number of missing values
sns.heatmap(df.isnull(),yticklabels=False,cbar=False)
# +
# Another way to see the null values: split columns by missing-value count.
no_missing_col = [c for c in train.columns if train[c].isnull().sum() ==0]
missing_col = [c for c in train.columns if train[c].isnull().sum() >0]
print(f'Missing value in {len(missing_col)} columns and no missing value in {len(no_missing_col)} columns')
missing = train[missing_col].isnull().sum()
plt.figure(figsize=(14,6))
sns.barplot(x = missing.index, y = missing.values)
plt.xticks(rotation=90);
# -
df.info()
# +
# NOTE(review): `test` is never defined in this file (only `test_df`, later);
# this cell raises NameError as written — confirm which frame was intended.
no_missing_col = [c for c in test.columns if test[c].isnull().sum() ==0]
missing_col = [c for c in test.columns if test[c].isnull().sum() >0]
print(f'Missing value in {len(missing_col)} columns and no missing value in {len(no_missing_col)} columns')
missing = test[missing_col].isnull().sum()
plt.figure(figsize=(14,6))
sns.barplot(x = missing.index, y = missing.values)
plt.xticks(rotation=90);
# -
# # Missing values
# Numeric: impute LotFrontage with the column mean.
df['LotFrontage']=df['LotFrontage'].fillna(df['LotFrontage'].mean())
# Drop columns that are mostly missing.
df.drop(['Alley'],axis=1,inplace=True)
# Categorical: impute with the most frequent value (mode).
df['BsmtCond']=df['BsmtCond'].fillna(df['BsmtCond'].mode()[0])
df['BsmtQual']=df['BsmtQual'].fillna(df['BsmtQual'].mode()[0])
df['FireplaceQu']=df['FireplaceQu'].fillna(df['FireplaceQu'].mode()[0])
df['GarageType']=df['GarageType'].fillna(df['GarageType'].mode()[0])
df.drop(['GarageYrBlt'],axis=1,inplace=True)
df['GarageFinish']=df['GarageFinish'].fillna(df['GarageFinish'].mode()[0])
df['GarageQual']=df['GarageQual'].fillna(df['GarageQual'].mode()[0])
df['GarageCond']=df['GarageCond'].fillna(df['GarageCond'].mode()[0])
df.drop(['PoolQC','Fence','MiscFeature'],axis=1,inplace=True)
df.shape
# Id carries no predictive information.
df.drop(['Id'],axis=1,inplace=True)
df.isnull().sum()
df['MasVnrType']=df['MasVnrType'].fillna(df['MasVnrType'].mode()[0])
# NOTE(review): MasVnrArea is numeric but is imputed with the mode here,
# unlike LotFrontage (mean) — confirm this was intentional.
df['MasVnrArea']=df['MasVnrArea'].fillna(df['MasVnrArea'].mode()[0])
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='coolwarm')
df['BsmtExposure']=df['BsmtExposure'].fillna(df['BsmtExposure'].mode()[0])
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='YlGnBu')
df['BsmtFinType2']=df['BsmtFinType2'].fillna(df['BsmtFinType2'].mode()[0])
# Drop any rows that still contain missing values.
df.dropna(inplace=True)
df.shape
df.head()
df.isnull().sum()
# +
## Handle Categorical Features
# -
# Categorical columns to one-hot encode later.
columns=['MSZoning','Street','LotShape','LandContour','Utilities','LotConfig','LandSlope','Neighborhood',
         'Condition2','BldgType','Condition1','HouseStyle','SaleType',
         'SaleCondition','ExterCond',
         'ExterQual','Foundation','BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2',
         'RoofStyle','RoofMatl','Exterior1st','Exterior2nd','MasVnrType','Heating','HeatingQC',
         'CentralAir',
         'Electrical','KitchenQual','Functional',
         'FireplaceQu','GarageType','GarageFinish','GarageQual','GarageCond','PavedDrive']
len(columns)
def category_onehot_multcols(multcolumns, source_df=None):
    """One-hot encode the given categorical columns of a DataFrame.

    Each listed column is replaced by its ``pd.get_dummies`` encoding with the
    first level dropped (to avoid the dummy-variable trap).

    Parameters
    ----------
    multcolumns : iterable of str
        Names of the categorical columns to encode.
    source_df : pandas.DataFrame, optional
        Frame to encode. Defaults to the module-level ``final_df`` (the
        original behaviour of this notebook). NOTE: the frame is mutated —
        each encoded column is dropped from it in place.

    Returns
    -------
    pandas.DataFrame
        ``source_df`` (minus the encoded columns) concatenated with all the
        dummy columns, in encoding order. If ``multcolumns`` is empty the
        frame is returned unchanged (the original code concatenated the
        frame with itself in that case).
    """
    if source_df is None:
        source_df = final_df  # backward-compatible fallback to the notebook global
    dummy_frames = []
    for field in multcolumns:
        print(field)  # progress feedback, kept from the original
        dummy_frames.append(pd.get_dummies(source_df[field], drop_first=True))
        source_df.drop([field], axis=1, inplace=True)
    if not dummy_frames:
        return source_df
    return pd.concat([source_df] + dummy_frames, axis=1)
# Keep an untouched copy of the cleaned training frame.
main_df=df.copy()
# +
## Combine Test Data
test_df=pd.read_csv('formulatedtest.csv')
# -
test_df.shape
test_df.head()
# Stack train and test so both get identical one-hot columns.
# NOTE(review): encoding on the combined frame leaks test-set category
# information into training — acceptable for a Kaggle demo, not production.
final_df=pd.concat([df,test_df],axis=0)
final_df['SalePrice']
final_df.shape
final_df=category_onehot_multcols(columns)
final_df.shape
# Drop duplicate columns created when different features produce dummy
# columns with the same name.
final_df =final_df.loc[:,~final_df.columns.duplicated()]
final_df.shape
final_df
# Re-split: the first 1422 rows are the (cleaned) training set.
# NOTE(review): 1422 is hard-coded; it must equal len(df) after dropna above.
df_Train=final_df.iloc[:1422,:]
df_Test=final_df.iloc[1422:,:]
df_Train.head()
df_Test.head()
df_Train.shape
# The test split has no real SalePrice (it came in as NaN) — drop it.
df_Test.drop(['SalePrice'],axis=1,inplace=True)
X_train=df_Train.drop(['SalePrice'],axis=1)
y_train=df_Train['SalePrice']
# # Ridge Regression
# +
# Grid-search the Ridge regularisation strength alpha with 5-fold CV.
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV
ridge=Ridge()
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100]}
ridge_regressor = GridSearchCV(ridge,parameters,scoring='neg_mean_squared_error',cv=5)
ridge_regressor.fit(X_train,y_train)
# -
print(ridge_regressor.best_params_)
print(ridge_regressor.best_score_)
# Convert the results of CV into a dataframe
results = pd.DataFrame(ridge_regressor.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
# # Lasso Regression
# +
# Grid-search the Lasso regularisation strength alpha with 5-fold CV.
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
lasso=Lasso()
parameters={'alpha':[1e-15,1e-10,1e-8,1e-3,1e-2,1,5,10,20,30,35,40,45,50,55,100,200,300,400,1000]}
lasso_regressor=GridSearchCV(lasso,parameters,scoring='neg_mean_squared_error',cv=5)
lasso_regressor.fit(X_train,y_train)
print(lasso_regressor.best_params_)
print(lasso_regressor.best_score_)
# -
# Convert the results of CV into a dataframe
results = pd.DataFrame(lasso_regressor.cv_results_)[['params', 'mean_test_score', 'rank_test_score']]
results.sort_values('rank_test_score')
# BUG FIX: `metrics` was used below without ever being imported.
from sklearn import metrics

# NOTE(review): `y_test` and `y_pred` are not defined at this point in the
# file (y_pred only appears much later) — confirm where these were meant to
# come from before relying on this cell.
print('Predictions with Polynomial Regression')
print('MAE:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# ## Prediciton and selecting the Algorithm
import xgboost
# BUG FIX: RandomizedSearchCV is used below but was never imported.
from sklearn.model_selection import RandomizedSearchCV

# NOTE(review): `classifier` is created but never fitted; it is the object
# (mistakenly) pickled later in this notebook.
classifier=xgboost.XGBRegressor()
regressor=xgboost.XGBRegressor()

# Candidate hyper-parameter values for the random search.
booster=['gbtree','gblinear']
base_score=[0.25,0.5,0.75,1]
# +
## Hyper Parameter Optimization
n_estimators = [100, 500, 900, 1100, 1500]
max_depth = [2, 3, 5, 10, 15]
learning_rate=[0.05,0.1,0.15,0.20]
min_child_weight=[1,2,3,4]

# Define the grid of hyperparameters to search
hyperparameter_grid = {
    'n_estimators': n_estimators,
    'max_depth':max_depth,
    'learning_rate':learning_rate,
    'min_child_weight':min_child_weight,
    'booster':booster,
    'base_score':base_score
}
# -
# Random search with 5-fold cross validation over 50 sampled settings
# (the original comment said "4-fold" but cv=5 is what the code does).
random_cv = RandomizedSearchCV(estimator=regressor,
            param_distributions=hyperparameter_grid,
            cv=5, n_iter=50,
            scoring = 'neg_mean_absolute_error',n_jobs = 4,
            verbose = 5,
            return_train_score = True,
            random_state=42)
random_cv.fit(X_train,y_train)
random_cv.best_estimator_
# Refit a fresh regressor with the best hyper-parameters found above.
regressor=xgboost.XGBRegressor(base_score=0.25, booster='gbtree', colsample_bylevel=1,
       colsample_bytree=1, gamma=0, learning_rate=0.1, max_delta_step=0,
       max_depth=2, min_child_weight=1, missing=None, n_estimators=900,
       n_jobs=1, nthread=None, objective='reg:linear', random_state=0,
       reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
       silent=True, subsample=1)
regressor.fit(X_train,y_train)
import pickle

# Persist the *trained* model. BUG FIX: the original dumped `classifier`, an
# XGBRegressor that was never fitted; the fitted model is `regressor`. Also
# use a context manager so the file handle is closed (the original leaked it).
filename = 'finalized_model.pkl'
with open(filename, 'wb') as model_file:
    pickle.dump(regressor, model_file)

# BUG FIX: 'SalePrice' was already dropped from df_Test earlier in the
# notebook, so a plain drop() here raises KeyError; errors='ignore' makes
# this cell safe to (re-)run.
df_Test.drop(['SalePrice'], axis=1, inplace=True, errors='ignore')
df_Test.shape
df_Test.head()
# Predict sale prices for the test split (SalePrice already removed above).
y_pred = regressor.predict(df_Test)
y_pred

## Create Sample Submission file and Submit using ANN
# NOTE(review): `ann_pred` is produced by the ANN cell that appears *below*
# in this file — run that cell first, or this line raises NameError.
pred=pd.DataFrame(ann_pred)
sub_df=pd.read_csv('sample_submission.csv')
datasets=pd.concat([sub_df['Id'],pred],axis=1)
datasets.columns=['Id','SalePrice']
datasets.to_csv('sample_submission.csv',index=False)
# ## Step2
# Re-label the ANN predictions frame and append the pseudo-labelled test set
# to the training data (self-training style augmentation).
pred.columns=['SalePrice']
temp_df=df_Train['SalePrice'].copy()
# NOTE(review): `.column` (no "s") is a typo and `temp_df` is a Series anyway;
# this line only sets a stray attribute and has no effect.
temp_df.column=['SalePrice']
df_Train.drop(['SalePrice'],axis=1,inplace=True)
df_Train=pd.concat([df_Train,temp_df],axis=1)
df_Test.head()
df_Test=pd.concat([df_Test,pred],axis=1)
df_Train=pd.concat([df_Train,df_Test],axis=0)
df_Train.shape
X_train=df_Train.drop(['SalePrice'],axis=1)
y_train=df_Train['SalePrice']
# ## Artificial Neural Network Implementation
# +
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LeakyReLU,PReLU,ELU
from keras.layers import Dropout
from keras import backend as K

# Custom RMSE loss on backend tensors.
# BUG FIX: this was defined *after* the classifier.compile() call below,
# which raises NameError when the file is executed top-to-bottom; it must
# exist before it is referenced.
def root_mean_squared_error(y_true, y_pred):
    """Root-mean-squared-error loss computed with Keras backend ops."""
    return K.sqrt(K.mean(K.square(y_pred - y_true)))

# Initialising the ANN (note: this rebinds the name `classifier`, which
# earlier held an XGBRegressor).
classifier = Sequential()

# Adding the input layer and the first hidden layer
# (174 = number of features after one-hot encoding)
classifier.add(Dense(output_dim = 50, init = 'he_uniform',activation='relu',input_dim = 174))
# Adding the second hidden layer
classifier.add(Dense(output_dim = 25, init = 'he_uniform',activation='relu'))
# Adding the third hidden layer
classifier.add(Dense(output_dim = 50, init = 'he_uniform',activation='relu'))
# Adding the output layer: one linear unit for the regression target
classifier.add(Dense(output_dim = 1, init = 'he_uniform'))

# Compiling the ANN with the custom RMSE loss
classifier.compile(loss=root_mean_squared_error, optimizer='Adamax')

# Fitting the ANN to the Training set
model_history=classifier.fit(X_train.values, y_train.values,validation_split=0.20, batch_size = 10, nb_epoch = 1000)
# -
# Predict on the test split (SalePrice column removed for inference).
ann_pred=classifier.predict(df_Test.drop(['SalePrice'],axis=1).values)
| 3. Linear Non-linear regression and its generalizations/Linear Regression/Kaggle-Competitions-master/Advance House PRice PRediction/Final Projects Kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python3
# ---
# ## Geometry Optimization for `rdkit`
# ### Demonstrates the capacity to optimize the geometry of a molecule.
# - Remember to have PyMol running in the background
# RDKit core + drawing helpers.
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
# Build ibuprofen from its SMILES string.
ibu = Chem.MolFromSmiles('CC(C)Cc1ccc(cc1)C(C)C(=O)O')
ibu
# Add explicit hydrogens (needed for a sensible 3D embedding).
ibuH = Chem.AddHs(ibu)
ibuH
# Generate an initial 3D conformer.
AllChem.EmbedMolecule(ibuH)
ibuH
# Requires a PyMol instance running in server mode in the background.
from rdkit.Chem import PyMol
v = PyMol.MolViewer()
v.ShowMol(ibuH)
v.server.do('ray')
v.GetPNG()
# MMFF force-field geometry optimisation, then re-render the result.
AllChem.MMFFOptimizeMolecule(ibuH)
v.ShowMol(ibuH,name='optimized',showOnly=True);
v.server.do('ray')
v.GetPNG()
| Notebooks/RDkit_PyMol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
# Let's look at some iron ore data
# +
import pandas
import seaborn
import matplotlib.pyplot as plt
import os
import numpy as np
seaborn.set()
df = pandas.read_csv(os.path.abspath('iron_ore_study.csv'))
# Splits from oscar Fe>60%, SiO2<9, Al2O3<2, P<0.08
split_points = [
    ('FE', 60, [False, True]),
    ('SIO2', 9, [True, False]),
    ('AL2O3', 2, [True, False]),
    ('P', 0.08, [True, False]),
]
# It's ore if everything is True: each pandas.cut labels a sample True when
# it falls on the "ore" side of that element's threshold; summing the four
# boolean label columns and comparing to 4 requires every test to pass.
df['is_ore'] = np.vstack([
    pandas.cut(df[elem], bins=[0, split, 100], labels=is_ore)
    for elem, split, is_ore in split_points
]).sum(axis=0) == 4
# Take a look (every 5th row, coloured by the ore/waste label)
seaborn.pairplot(df.iloc[::5], hue='is_ore', plot_kws={'alpha': 0.5})
# -
seaborn.countplot('is_ore', data=df)
# Plot our x data
seaborn.jointplot('SIO2', 'FE', df, joint_kws={'alpha': 0.4, 'marker': '.'})
seaborn.jointplot('P', 'FE', df, joint_kws={'alpha': 0.4, 'marker': '.'})
# ## Logistic regression
#
# You can think of the logistic function as a function that takes a real number (as comes out of the linear regression) and 'squashes' it into a 0, 1 label. It's defined as
#
# $$
# g(z) = \frac{1}{1 + e^{-z}}
# $$
# +
def logistic(z):
    """Logistic (sigmoid) function, mapping any real z into (0, 1).

    Numerically stable version: the exponent handed to np.exp is always <= 0,
    so large |z| cannot overflow (the plain 1/(1+exp(-z)) form emits overflow
    RuntimeWarnings for very negative z). Accepts scalars or numpy arrays.
    """
    z = np.asarray(z, dtype=float)
    positive = z >= 0
    # Select the sign first, *then* exponentiate: exponent is always <= 0.
    ez = np.exp(np.where(positive, -z, z))
    # Two algebraically-equal forms of the sigmoid, picked per element.
    return np.where(positive, 1.0 / (1.0 + ez), ez / (1.0 + ez))
# Sample the logistic curve over [-10, 10] and plot it.
zs = np.linspace(-10, 10)
logistic_data = pandas.DataFrame(
    {'z': zs, 'logistic': logistic(zs)}
)
logistic_data.plot('z', 'logistic')
# -
# If we can generate a linear model fit with one parameter - call it $f$:
#
# $$
# z = f(x) = a + b x
# $$
#
# where $a$ is the intercept, $b$ the coefficient and $x$ is the input features. Then we get label predictions
#
# $$
# \mathrm{label} = g(f(x)) > threshold
# $$
# ## Problem
#
# Let's pretend we have a crappy sensor which only measures Al. Can we still make good predictions of ore/not ore using just this feature?
#
# We should look at transforming our aluminium data so that we go from (0, inf) -> (-inf, inf). We'll do this in a hacky sense by using a log function but we should really use a log-ratio transform here!
seaborn.distplot(df['AL2O3'])
# Log-transforming makes the skewed Al2O3 distribution more gaussian.
seaborn.distplot(np.log(df['AL2O3']))
# We'll do this using a scikit-learn pipeline - this lets us chain transformations and predictions into one object which makes life a lot easier.
# +
from sklearn import preprocessing, pipeline, linear_model
# Make up our pipeline where we transform the aluminium first to make it more gaussian!
regressor = pipeline.Pipeline([
    ('transform', preprocessing.FunctionTransformer(np.log, validate=True)),
    ('model', linear_model.LogisticRegression())
])
# -
# Next we map the data into the `y ~ f(X)` format that scikit-learn wants
X = df[['AL2O3']]
y = df['is_ore']
# Now fitting the model is as simple as
regressor.fit(X, y)
# Once we've fitted the model we can make predictions straight away
predict_df = pandas.DataFrame(
    {'test_al2o3_values': [0.5, 1, 2, 3, 4]} # Are these values ore?
)
regressor.predict(predict_df)
# To see what's going on in a bit more depth, we can pull the coefficients out of the scikit-learn pipeline
model = regressor.named_steps.model
model.intercept_, model.coef_
# and rewrite our logistic function to include the linear model
def logistic(regressor, X):
    """Evaluate the fitted logistic curve of a pipeline at the inputs X.

    Replays the pipeline's steps by hand so we can see the raw logistic
    output (a value in (0, 1)) rather than the hard 0/1 labels that
    ``regressor.predict(X)`` would give us.

    Parameters:
        regressor - a fitted logistic regression pipeline
        X - the values to evaluate the function at
    """
    steps = regressor.named_steps
    fitted_model = steps.model
    transform_step = steps.transform
    # Linear score: intercept + slope * (log-transformed feature)
    z = fitted_model.intercept_ + fitted_model.coef_[0][0] * transform_step.transform(X)
    # Squash the linear score through the logistic function and flatten
    return 1 / (1 + np.exp(-z)).ravel()
# Now we can evaulate our logistic function for our test values
# Now we can evaluate our logistic function for our test values
logistic(regressor, predict_df)
# With these in hand lets generate some plots
# +
f, ax = plt.subplots(1, 1)
# Some aluminium values to predict from
al_compositions = pandas.DataFrame(
    {'test_al2o3_values': np.linspace(0.1, 3)}
)
# An offset to stop everything plotting on top of everything else
offset = 0.02
# shows predictions given contents
predictions = regressor.predict(al_compositions)
ax.plot(al_compositions, predictions + offset, '.', alpha=0.7, label='predicted (+ offset)')
# shows measured values plus jitter
jitter = np.random.normal(scale=0.01, size=len(df))
ax.plot(df['AL2O3'], df['is_ore'] + jitter - offset, '.', alpha=0.1, label='measured (+ jitter - offset)')
# shows logistic function fitted from regressor
ax.plot(al_compositions, logistic(regressor, al_compositions), '--', label='fitted logistic function')
# Fix the y-axis so the 0/1 bands and the curve are all visible
ax.set_ylim(-0.1, 1.1)
ax.legend()
ax.set_title('Logistic regression with scikit-learn')
f.tight_layout()
# -
# Try this using one of the other variables!
# ## Measuring model performance
#
# We don't get everything right! How can we get a feeling for the model performance? What are some of the issues that we might need to take into account?
# +
from sklearn.model_selection import train_test_split
# Hold out a third of the data for testing.
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.33)
# -
# Now we can train on just the training set, predict on the test set and see how we do!
regressor.fit(X_train, y_train)
y_predict = regressor.predict(X_test)
# Side-by-side actual vs predicted labels; the constant 'count' column lets
# pivot_table tally each confusion-matrix cell below.
results = pandas.DataFrame({
    'AL2O3': X_test['AL2O3'],
    'is_ore_actual': y_test,
    'is_ore_predicted': y_predict,
    'count': 1
})
results.head()
# Now we can see the number of false positives and false negatives using pivot_table from last week
confusion = results.pivot_table(values='count', index='is_ore_actual', columns='is_ore_predicted', aggfunc='sum')
confusion
# and plot with seaborn
seaborn.heatmap(confusion, cmap='Reds')
# Alternatively we can unstack the array into true and false negatives and positives
confusion.unstack()
# And we can convert these values to fractions of the total
true_neg, false_neg, true_pos, false_pos = confusion.unstack() / confusion.unstack().sum()
true_neg, false_neg, true_pos, false_pos
# When are true positives and false positives important?
#
# ## Threshold
#
# We haven't done anything with the threshold yet - how should we pick the value for this?
#
# We've already got a confusion matrix - we can take the ratio of the true_positive vs the false_positive rates and compare the two
confusion
# Diagonal = correct classifications; column-rolled diagonal = mistakes.
correct = np.diag(confusion)
incorrect = np.diag(np.roll(confusion, 1, axis=1))
correct, incorrect
total_correct = correct.sum()
total_incorrect = incorrect.sum()
total_correct, total_incorrect
# And we can plot this for each threshold. We need a way of adjusting the class weights in the model. Scikit-learn doesn't let you specify a threshold directly but
def fit_with_class_threshold(threshold):
    """Fit a class-weighted logistic regression and summarise its confusion matrix.

    The "threshold" is expressed as a pair of class weights ({True: t,
    False: 1 - t}) because scikit-learn's LogisticRegression exposes no
    direct decision-threshold knob. Uses the module-level
    X_train/y_train/X_test/y_test splits.
    """
    # Turn the threshold into complementary class weights
    weights = {True: threshold, False: 1 - threshold}
    # Same log-transform -> logistic-regression pipeline as before,
    # but with the weighted classifier
    clf = pipeline.Pipeline([
        ('transform', preprocessing.FunctionTransformer(np.log, validate=True)),
        ('model', linear_model.LogisticRegression(class_weight=weights))
    ])
    clf.fit(X_train, y_train)
    # Score the held-out set and tabulate predicted vs actual labels
    outcome = pandas.DataFrame({
        'AL2O3': X_test['AL2O3'],
        'is_ore_actual': y_test,
        'is_ore_predicted': clf.predict(X_test),
        'count': 1
    })
    confusion = outcome.pivot_table(
        values='count',
        index='is_ore_actual',
        columns='is_ore_predicted',
        aggfunc='sum')
    true_neg, false_neg, true_pos, false_pos = confusion.unstack()
    # Diagonal = correct, column-rolled diagonal = incorrect
    correct = np.diag(confusion)
    incorrect = np.diag(np.roll(confusion, 1, axis=1))
    # Package everything up as one record for DataFrame.from_records
    return {
        'threshold': threshold,
        'total_correct': correct.sum(),
        'total_incorrect': incorrect.sum(),
        'true_negative': true_neg,
        'false_negative': false_neg,
        'true_positive': true_pos,
        'false_positive': false_pos
    }
# Now we can generate results using our metric
# Now we can generate results using our metric
fit_with_class_threshold(0.1)
fit_with_class_threshold(0.9)
# Sweep thresholds over (0.1, 0.9) and collect the summary records.
results = pandas.DataFrame.from_records(
    [fit_with_class_threshold(t) for t in np.linspace(0.1, 0.9)],
    index='threshold'
)
results.head()
# We'll generate a few plots
results.plot(y=['total_correct', 'total_incorrect'])
results.plot(y=['false_negative', 'true_negative'])
results.plot(y=['false_positive', 'true_positive'])
# ## Extension - modelling with statsmodels
#
# For what it's worth we can also generate these using statsmodels
import statsmodels.api as sm
from numpy import log
# We have to handle preprocessing ourselves
# +
from statsmodels.tools import add_constant
def preprocess(x):
    """Log-transform the Al2O3 values and prepend a constant (intercept) column."""
    logged = np.log(x)
    # statsmodels does not fit an intercept unless a constant column is present
    return add_constant(logged)
# -
# Statsmodels uses stats jargon
# - endog -> endogenous variable -> y
# - exog -> exogenous variable -> X
# Statsmodels jargon: endog -> y (labels), exog -> X (features).
endog = df.is_ore
exog = preprocess(df['AL2O3'])
# Fitting the model is pretty similar though
model = sm.Logit(endog, exog)
results = model.fit()
results.summary()
# NOTE(review): `test_al_values` is never defined in this file — the next few
# cells raise NameError as written; confirm the intended input (probably the
# earlier `predict_df` values).
preds = model.predict(results.params, preprocess(test_al_values))
preds
preds > 0.5
# and compare to scikit-learn
regressor.predict(test_al_values.reshape(-1, 1))
# We can also compare the logistic values get slightly different answers here - probably down to the solver used under the hood
print(' statsmodels:', model.predict(results.params, preprocess(test_al_values)))
print('scikit-learn:', logistic(regressor, test_al_values))
# We can generate the same plot again though
# +
f, ax = plt.subplots(1, 1)
# Some aluminium values to predict from
al_compositions = np.linspace(0.1, 3)
# An offset to stop everything plotting on top of everything else
offset = 0.02
# shows predictions given contents (hard 0/1 labels at threshold 0.5)
predictions = model.predict(results.params, preprocess(al_compositions)) > 0.5
ax.plot(al_compositions, predictions + offset, '.', alpha=0.7, label='predicted (+ offset)')
# shows measured values plus jitter
jitter = np.random.normal(scale=0.01, size=len(df))
ax.plot(df['AL2O3'], df['is_ore'] + jitter - offset, '.', alpha=0.1, label='measured (+ jitter - offset)')
# shows logistic function fitted from regressor
ax.plot(al_compositions, model.predict(results.params, preprocess(al_compositions)), '--', label='fitted logistic function')
# Fix the y-axis so labels and curve are visible together
ax.set_ylim(-0.1, 1.1)
ax.legend()
ax.set_title('Logistic regression with statsmodels')
f.tight_layout()
# -
| notebooks/logistic_regression_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# +
import pandas as pd
# %matplotlib notebook
# %matplotlib notebook
import matplotlib.pyplot as plt
import seaborn as sns
# -
def percent_ethinicity(inputcsv, column1, column2, column3=None, column4=None, column5=None, column6=None):
    """Summarise per-group percentages for a policing-stops CSV.

    (Name typo kept for backward compatibility with existing callers.)

    Parameters
    ----------
    inputcsv : str
        Path to the CSV file to load.
    column1 : str
        Grouping column (e.g. subject race).
    column2, column3, column5, column6 : str, optional
        Boolean-ish columns (0/1) to cast to int and average per group.
    column4 : str, optional
        Categorical column whose per-group value percentages are tabulated.

    Returns
    -------
    tuple (df, groupped_data, df_test)
        df            - raw frame with the boolean columns cast to int
        groupped_data - per-group mean of each boolean column, as a percentage
        df_test       - per-group percentage breakdown of column4's values
                        (NaN cells filled with 0), or None if column4 not given

    BUG FIX: the original unconditionally did ``df[column3]`` etc., so any
    optional column left at its default of None raised KeyError; optional
    columns are now simply skipped when not supplied.
    """
    df = pd.read_csv(inputcsv)
    # Only the columns the caller actually supplied, in the original order.
    bool_columns = [c for c in (column2, column3, column5, column6) if c is not None]
    for col in bool_columns:
        df[col] = df[col].astype(int)
    if column4 is not None:
        # Percentage breakdown of column4's values within each group.
        df_test = (df.groupby([column1])[column4]
                     .value_counts(normalize=True)
                     .mul(100)
                     .unstack(column4)
                     .fillna(0))
    else:
        df_test = None
    subset = df[[column1] + bool_columns]
    groupped_data = subset.groupby([column1], as_index=False).agg({col: 'mean' for col in bool_columns})
    # Means of 0/1 columns are rates; scale to percentages.
    for col in bool_columns:
        groupped_data[col] = 100 * groupped_data[col]
    return df, groupped_data, df_test
# Column names from the Stanford Open Policing Austin dataset.
racecol = "subject_race"
column2 = "search_person"
column3 = "search_vehicle"
datecolumn = "date"
column4 = "raw_vehicle_search_search_based_on"
column5 = "search_conducted"
column6 = "frisk_performed"
# NOTE(review): os.path.join with a single argument is a no-op — a plain
# string (or os.path.abspath) was probably intended.
inputcsv = os.path.join("tx_austin_2020_04_01.csv")
orig_df, grouped_data, df_test = percent_ethinicity(inputcsv, racecol, column2, column3, column4, column5, column6)
grouped_data
df_test
orig_df[1:20]
# +
def plot_perc(dataframe, col1):
    """Bar-plot each percentage column of `dataframe`, grouped by `col1`.

    Saves the figure next to the module-level `inputcsv` (same basename,
    with a .png extension).
    """
    indexed = dataframe.set_index(col1)
    indexed.plot(kind = "bar",sort_columns=True)
    plt.title("search pecentage across race")  # sic: title text kept from original
    plt.xticks(fontsize=10)
    plt.tight_layout()
    # relies on the module-level `inputcsv` path for the output filename
    plt.savefig(inputcsv[:-4] + ".png")
# -
# Bare reference (just displays the function object in a notebook);
# the call below draws and saves the grouped percentage bar chart.
plot_perc
plot_perc(grouped_data, racecol)
orig_df["raw_vehicle_search_search_based_on"].value_counts()
#df_test = df_test.set_index(racecol)
df_test.plot(kind = "bar",sort_columns=True)
plt.title("vehicle search based on")
plt.xticks(fontsize=10)
plt.tight_layout()
plt.savefig(inputcsv[:-4]+"_violation" + ".png")
# Drop rows with no recorded search basis (mutates orig_df in place).
orig_df.dropna(axis=0,how="any", subset = ["raw_vehicle_search_search_based_on"], inplace =True)
| usteam2/src/search_percentage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Batch Normalization – Solutions
# Batch normalization is most useful when building deep neural networks. To demonstrate this, we'll create a convolutional neural network with 20 convolutional layers, followed by a fully connected layer. We'll use it to classify handwritten digits in the MNIST dataset, which should be familiar to you by now.
#
# This is **not** a good network for classfying MNIST digits. You could create a _much_ simpler network and get _better_ results. However, to give you hands-on experience with batch normalization, we had to make an example that was:
# 1. Complicated enough that training would benefit from batch normalization.
# 2. Simple enough that it would train quickly, since this is meant to be a short exercise just to give you some practice adding batch normalization.
# 3. Simple enough that the architecture would be easy to understand without additional resources.
# This notebook includes two versions of the network that you can edit. The first uses higher level functions from the `tf.layers` package. The second is the same network, but uses only lower level functions in the `tf.nn` package.
#
# 1. [Batch Normalization with `tf.layers.batch_normalization`](#example_1)
# 2. [Batch Normalization with `tf.nn.batch_normalization`](#example_2)
# The following cell loads TensorFlow, downloads the MNIST dataset if necessary, and loads it into an object named `mnist`. You'll need to run this cell before running anything else in the notebook.
# TensorFlow 1.x plus its bundled MNIST loader; downloads the dataset into
# MNIST_data/ on first run, with labels one-hot encoded and images kept 28x28x1.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
# # Batch Normalization using `tf.layers.batch_normalization`<a id="example_1"></a>
#
# This version of the network uses `tf.layers` for almost everything, and expects you to implement batch normalization using [`tf.layers.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization)
# We'll use the following function to create fully connected layers in our network. We'll create them with the specified number of neurons and a ReLU activation function.
#
# This version of the function does not include batch normalization.
"""
DO NOT MODIFY THIS CELL
"""
def fully_connected(prev_layer, num_units):
    """
    Create a fully connectd layer with the given layer as input and the given number of neurons.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns Tensor
        A new fully connected layer
    """
    # Baseline version: bias and ReLU are baked into the dense layer
    # (no batch normalization yet).
    layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
    return layer
# We'll use the following function to create convolutional layers in our network. They are very basic: we're always using a 3x3 kernel, ReLU activation functions, strides of 1x1 on layers with odd depths, and strides of 2x2 on layers with even depths. We aren't bothering with pooling layers at all in this network.
#
# This version of the function does not include batch normalization.
"""
DO NOT MODIFY THIS CELL
"""
def conv_layer(prev_layer, layer_depth):
    """
    Create a convolutional layer with the given layer as input.

    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns Tensor
        A new convolutional layer
    """
    # NOTE(review): stride 2 is used for every depth divisible by 3; the
    # markdown above says "even depths" — the code and the text disagree.
    strides = 2 if layer_depth % 3 == 0 else 1
    # 3x3 kernel, feature maps scale with depth, 'same' padding, ReLU baked in.
    conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
    return conv_layer
# **Run the following cell**, along with the earlier cells (to load the dataset and define the necessary functions).
#
# This cell builds the network **without** batch normalization, then trains it on the MNIST dataset. It displays loss and accuracy data periodically while training.
# +
"""
DO NOT MODIFY THIS CELL
"""
def train(num_batches, batch_size, learning_rate):
    """Build the 20-conv-layer network (no batch norm), train it on MNIST,
    and periodically print loss/accuracy; finally score validation, test,
    and 100 individual test samples."""
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i)
    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
    # Add one fully connected layer
    layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class (10 digits)
    logits = tf.layers.dense(layer, 10)
    # Define loss, optimizer and the accuracy metric.
    # NOTE(review): sigmoid cross-entropy treats each of the 10 outputs
    # independently even though the labels are one-hot; softmax cross-entropy
    # is the conventional choice — left as-is (cell is marked DO NOT MODIFY).
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # train this batch
            sess.run(train_opt, {inputs: batch_xs,
                                 labels: batch_ys})
            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels})
        print('Final test accuracy: {:>3.5f}'.format(acc))
        # Score the first 100 test images individually, just to make sure batch normalization really worked
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]]})
        print("Accuracy on 100 samples:", correct/100)
# Hyper-parameters for the no-batch-norm baseline run.
num_batches = 800
batch_size = 64
learning_rate = 0.002
# Start from a clean graph so re-running the cell doesn't accumulate ops.
tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
# -
# With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
#
# Using batch normalization, you'll be able to train this same network to over 90% in that same number of batches.
#
# # Add batch normalization
#
# To add batch normalization to the layers created by `fully_connected`, we did the following:
# 1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.
# 2. Removed the bias and activation function from the `dense` layer.
# 3. Used `tf.layers.batch_normalization` to normalize the layer's output. Notice we pass `is_training` to this layer to ensure the network updates its population statistics appropriately.
# 4. Passed the normalized values into a ReLU activation function.
def fully_connected(prev_layer, num_units, is_training):
    """
    Create a fully connected layer followed by batch normalization and a ReLU.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool or Tensor
        Indicates whether or not the network is currently training, which tells the batch
        normalization layer whether it should update or use its population statistics.
    :returns Tensor
        A new fully connected layer
    """
    # The dense layer gets no bias and no activation: batch normalization's
    # beta term replaces the bias, and the ReLU is applied after normalization.
    dense = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
    normalized = tf.layers.batch_normalization(dense, training=is_training)
    return tf.nn.relu(normalized)
# To add batch normalization to the layers created by `conv_layer`, we did the following:
# 1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.
# 2. Removed the bias and activation function from the `conv2d` layer.
# 3. Used `tf.layers.batch_normalization` to normalize the convolutional layer's output. Notice we pass `is_training` to this layer to ensure the network updates its population statistics appropriately.
# 4. Passed the normalized values into a ReLU activation function.
#
# If you compare this function to `fully_connected`, you'll see that – when using `tf.layers` – there really isn't any difference between normalizing a fully connected layer and a convolutional layer. However, if you look at the second example in this notebook, where we restrict ourselves to the `tf.nn` package, you'll see a small difference.
def conv_layer(prev_layer, layer_depth, is_training):
    """
    Create a convolutional layer followed by batch normalization and a ReLU.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        Depth of the layer in the network; determines the stride and the number of
        feature maps. This is *not* a good way to design a CNN — it just keeps the
        example short.
    :param is_training: bool or Tensor
        Indicates whether or not the network is currently training, which tells the batch
        normalization layer whether it should update or use its population statistics.
    :returns Tensor
        A new convolutional layer
    """
    # Downsample (stride 2) on every third layer, otherwise keep spatial size.
    stride = 1 if layer_depth % 3 else 2
    # No bias and no activation on the convolution: batch normalization's beta
    # replaces the bias, and the ReLU is applied after normalization.
    conv = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, stride, 'same',
                            use_bias=False, activation=None)
    normalized = tf.layers.batch_normalization(conv, training=is_training)
    return tf.nn.relu(normalized)
# Batch normalization is still a new enough idea that researchers are still discovering how best to use it. In general, people seem to agree to remove the layer's bias (because the batch normalization already has terms for scaling and shifting) and add batch normalization _before_ the layer's non-linear activation function. However, for some networks it will work well in other ways, too.
#
# Just to demonstrate this point, the following three versions of `conv_layer` show other ways to implement batch normalization. If you try running with any of these versions of the function, they should all still work fine (although some versions may still work better than others).
#
# **Alternate solution that uses bias in the convolutional layer but still adds batch normalization before the ReLU activation function.**
def conv_layer(prev_layer, layer_num, is_training):
    """Alternate version: keeps the convolution's bias (redundant with batch
    normalization's beta) but still normalizes before the ReLU activation."""
    stride = 1 if layer_num % 3 else 2
    out = tf.layers.conv2d(prev_layer, layer_num * 4, 3, stride, 'same',
                           use_bias=True, activation=None)
    out = tf.layers.batch_normalization(out, training=is_training)
    return tf.nn.relu(out)
# **Alternate solution that uses a bias and ReLU activation function _before_ batch normalization.**
def conv_layer(prev_layer, layer_num, is_training):
    """Alternate version: bias and ReLU are applied inside the convolution,
    *before* batch normalization."""
    stride = 1 if layer_num % 3 else 2
    activated = tf.layers.conv2d(prev_layer, layer_num * 4, 3, stride, 'same',
                                 use_bias=True, activation=tf.nn.relu)
    return tf.layers.batch_normalization(activated, training=is_training)
# **Alternate solution that uses a ReLU activation function _before_ normalization, but no bias.**
def conv_layer(prev_layer, layer_num, is_training):
    """Alternate version: no bias, with the ReLU applied *before* batch
    normalization."""
    stride = 1 if layer_num % 3 else 2
    activated = tf.layers.conv2d(prev_layer, layer_num * 4, 3, stride, 'same',
                                 use_bias=False, activation=tf.nn.relu)
    return tf.layers.batch_normalization(activated, training=is_training)
# To modify `train`, we did the following:
# 1. Added `is_training`, a placeholder to store a boolean value indicating whether or not the network is training.
# 2. Passed `is_training` to the `conv_layer` and `fully_connected` functions.
# 3. Each time we call `run` on the session, we added to `feed_dict` the appropriate value for `is_training`.
# 4. Moved the creation of `train_opt` inside a `with tf.control_dependencies...` statement. This is necessary to get the normalization layers created with `tf.layers.batch_normalization` to update their population statistics, which we need when performing inference.
# +
def train(num_batches, batch_size, learning_rate):
    """Build and train the 20-layer batch-normalized CNN (tf.layers version) on MNIST.

    :param num_batches: int
        Number of mini-batches to train on.
    :param batch_size: int
        Number of samples per mini-batch.
    :param learning_rate: float
        Learning rate for the Adam optimizer.
    """
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Add placeholder to indicate whether or not we're training the model
    is_training = tf.placeholder(tf.bool)
    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)
    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)
    # Create the output layer with 1 node for each
    logits = tf.layers.dense(layer, 10)
    # Define loss and training operations
    # NOTE(review): sigmoid cross-entropy treats the 10 outputs as independent;
    # softmax cross-entropy is the usual choice for one-hot labels — confirm intent.
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    # Tell TensorFlow to update the population statistics while training
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # train this batch; is_training=True so batch statistics are used
            # and population statistics are updated
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training: False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training: False})
        print('Final test accuracy: {:>3.5f}'.format(acc))
        # Score the first 100 test images individually, just to make sure batch normalization really worked
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training: False})
        print("Accuracy on 100 samples:", correct/100)
# Hyperparameters for this training run.
num_batches = 800
batch_size = 64
learning_rate = 0.002
# Build and run the model in a fresh graph so repeated notebook runs don't collide.
tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
# -
# With batch normalization, we now get excellent performance. In fact, validation accuracy is almost 94% after only 500 batches. Notice also the last line of the output: `Accuracy on 100 samples`. If this value is low while everything else looks good, that means you did not implement batch normalization correctly. Specifically, it means you either did not calculate the population mean and variance while training, or you are not using those values during inference.
#
# # Batch Normalization using `tf.nn.batch_normalization`<a id="example_2"></a>
#
# Most of the time you will be able to use higher level functions exclusively, but sometimes you may want to work at a lower level. For example, if you ever want to implement a new feature – something new enough that TensorFlow does not already include a high-level implementation of it, like batch normalization in an LSTM – then you may need to know these sorts of things.
#
# This version of the network uses `tf.nn` for almost everything, and expects you to implement batch normalization using [`tf.nn.batch_normalization`](https://www.tensorflow.org/api_docs/python/tf/nn/batch_normalization).
#
# This implementation of `fully_connected` is much more involved than the one that uses `tf.layers`. However, if you went through the `Batch_Normalization_Lesson` notebook, things should look pretty familiar. To add batch normalization, we did the following:
# 1. Added the `is_training` parameter to the function signature so we can pass that information to the batch normalization layer.
# 2. Removed the bias and activation function from the `dense` layer.
# 3. Added `gamma`, `beta`, `pop_mean`, and `pop_variance` variables.
# 4. Used `tf.cond` to make handle training and inference differently.
# 5. When training, we use `tf.nn.moments` to calculate the batch mean and variance. Then we update the population statistics and use `tf.nn.batch_normalization` to normalize the layer's output using the batch statistics. Notice the `with tf.control_dependencies...` statement - this is required to force TensorFlow to run the operations that update the population statistics.
# 6. During inference (i.e. when not training), we use `tf.nn.batch_normalization` to normalize the layer's output using the population statistics we calculated during training.
# 7. Passed the normalized values into a ReLU activation function.
#
# If any of this code is unclear, it is almost identical to what we showed in the `fully_connected` function in the `Batch_Normalization_Lesson` notebook. Please see that for extensive comments.
def fully_connected(prev_layer, num_units, is_training):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :param is_training: bool or Tensor
        Indicates whether or not the network is currently training, which tells the batch normalization
        layer whether or not it should update or use its population statistics.
    :returns Tensor
        A new fully connected layer
    """
    # No bias/activation on the dense layer: batch normalization's beta replaces
    # the bias, and the ReLU is applied after normalization below.
    layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
    # Learned per-unit scale (gamma) and shift (beta) for the normalized output.
    gamma = tf.Variable(tf.ones([num_units]))
    beta = tf.Variable(tf.zeros([num_units]))
    # Population statistics: updated via a running average during training and
    # used at inference. Not trainable — maintained by the tf.assign ops below.
    pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
    pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
    # Small constant to avoid division by zero during normalization.
    epsilon = 1e-3
    def batch_norm_training():
        # Mean/variance over the batch dimension (axis 0), one value per unit.
        batch_mean, batch_variance = tf.nn.moments(layer, [0])
        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
        # Force the population-statistic updates to run before normalization.
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
    def batch_norm_inference():
        # Inference normalizes with the population statistics accumulated during training.
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
    batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
    return tf.nn.relu(batch_normalized_output)
# The changes we made to `conv_layer` are _almost_ exactly the same as the ones we made to `fully_connected`. However, there is an important difference. Convolutional layers have multiple feature maps, and each feature map uses shared weights. So we need to make sure we calculate our batch and population statistics **per feature map** instead of per node in the layer.
#
# To accomplish this, we do **the same things** that we did in `fully_connected`, with two exceptions:
# 1. The sizes of `gamma`, `beta`, `pop_mean` and `pop_variance` are set to the number of feature maps (output channels) instead of the number of output nodes.
# 2. We change the parameters we pass to `tf.nn.moments` to make sure it calculates the mean and variance for the correct dimensions.
def conv_layer(prev_layer, layer_depth, is_training):
    """
    Create a convolutional layer with the given layer as input.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :param is_training: bool or Tensor
        Indicates whether or not the network is currently training, which tells the batch normalization
        layer whether or not it should update or use its population statistics.
    :returns Tensor
        A new convolutional layer
    """
    # Downsample (stride 2) on every third layer.
    strides = 2 if layer_depth % 3 == 0 else 1
    in_channels = prev_layer.get_shape().as_list()[3]
    out_channels = layer_depth*4
    # No bias on the convolution: batch normalization's beta replaces it.
    weights = tf.Variable(
        tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
    layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
    # Scale (gamma), shift (beta) and population statistics are sized per
    # feature map (out_channels), not per node — feature maps share weights.
    gamma = tf.Variable(tf.ones([out_channels]))
    beta = tf.Variable(tf.zeros([out_channels]))
    pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
    pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)
    # Small constant to avoid division by zero during normalization.
    epsilon = 1e-3
    def batch_norm_training():
        # Important to use the correct dimensions here to ensure the mean and variance are calculated
        # per feature map instead of for the entire layer
        batch_mean, batch_variance = tf.nn.moments(layer, [0,1,2], keep_dims=False)
        decay = 0.99
        train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
        # Force the population-statistic updates to run before normalization.
        with tf.control_dependencies([train_mean, train_variance]):
            return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
    def batch_norm_inference():
        # Inference normalizes with the population statistics accumulated during training.
        return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
    batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
    return tf.nn.relu(batch_normalized_output)
# To modify `train`, we did the following:
# 1. Added `is_training`, a placeholder to store a boolean value indicating whether or not the network is training.
# 2. Each time we call `run` on the session, we added to `feed_dict` the appropriate value for `is_training`.
# 3. We did **not** need to add the `with tf.control_dependencies...` statement that we added in the network that used `tf.layers.batch_normalization` because we handled updating the population statistics ourselves in `conv_layer` and `fully_connected`.
# +
def train(num_batches, batch_size, learning_rate):
    """Build and train the 20-layer batch-normalized CNN (tf.nn version) on MNIST.

    Unlike the tf.layers version, no control-dependency wrapper is needed around
    the optimizer: the population statistics are updated explicitly inside
    conv_layer and fully_connected.

    :param num_batches: int
        Number of mini-batches to train on.
    :param batch_size: int
        Number of samples per mini-batch.
    :param learning_rate: float
        Learning rate for the Adam optimizer.
    """
    # Build placeholders for the input samples and labels
    inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
    labels = tf.placeholder(tf.float32, [None, 10])
    # Add placeholder to indicate whether or not we're training the model
    is_training = tf.placeholder(tf.bool)
    # Feed the inputs into a series of 20 convolutional layers
    layer = inputs
    for layer_i in range(1, 20):
        layer = conv_layer(layer, layer_i, is_training)
    # Flatten the output from the convolutional layers
    orig_shape = layer.get_shape().as_list()
    layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
    # Add one fully connected layer
    layer = fully_connected(layer, 100, is_training)
    # Create the output layer with 1 node for each
    logits = tf.layers.dense(layer, 10)
    # Define loss and training operations
    # NOTE(review): sigmoid cross-entropy treats the 10 outputs as independent;
    # softmax cross-entropy is the usual choice for one-hot labels — confirm intent.
    model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
    train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
    # Create operations to test accuracy
    correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # Train and test the network
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for batch_i in range(num_batches):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # train this batch; is_training=True so batch statistics are used
            # and population statistics are updated
            sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
            # Periodically check the validation or training loss and accuracy
            if batch_i % 100 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
                                                              labels: mnist.validation.labels,
                                                              is_training: False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
            elif batch_i % 25 == 0:
                loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
        # At the end, score the final accuracy for both the validation and test sets
        acc = sess.run(accuracy, {inputs: mnist.validation.images,
                                  labels: mnist.validation.labels,
                                  is_training: False})
        print('Final validation accuracy: {:>3.5f}'.format(acc))
        acc = sess.run(accuracy, {inputs: mnist.test.images,
                                  labels: mnist.test.labels,
                                  is_training: False})
        print('Final test accuracy: {:>3.5f}'.format(acc))
        # Score the first 100 test images individually, just to make sure batch normalization really worked
        correct = 0
        for i in range(100):
            correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
                                                    labels: [mnist.test.labels[i]],
                                                    is_training: False})
        print("Accuracy on 100 samples:", correct/100)
# Hyperparameters for this training run.
num_batches = 800
batch_size = 64
learning_rate = 0.002
# Build and run the model in a fresh graph so repeated notebook runs don't collide.
tf.reset_default_graph()
with tf.Graph().as_default():
    train(num_batches, batch_size, learning_rate)
# -
# Once again, the model with batch normalization quickly reaches a high accuracy. But in our run, notice that it doesn't seem to learn anything for the first 250 batches, then the accuracy starts to climb. That just goes to show - even with batch normalization, it's important to give your network a bit of time to learn before you decide it isn't working.
| batch-norm/Batch_Normalization_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-block alert-success">
# <b><center>Elasticsearch 7.7</center></b>
# <b><center>데이터 모델링</center></b>
# </div>
from elasticsearch import Elasticsearch, helpers
# # Connect and check cluster health
# Connect to the Elasticsearch cluster and verify it is reachable.
es = Elasticsearch(hosts="elastic.rsnet", port=80)
es.cluster.health()
# # Create/delete indices
INDEX_NAME = "movie_search"
# Index settings and field mappings for the movie-search index:
# full-text "text" fields for titles, exact-match "keyword" fields for codes
# and categories, and nested object properties for companies/directors.
index_movie = {
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 1
    },
    "mappings": {
        "properties": {
            "movieCd": {"type": "keyword"},
            "movieNm": {"type": "text", "analyzer": "standard"},
            "movieNmEn": {"type": "text", "analyzer": "standard"},
            "prdtYear": {"type": "integer"},
            "openDt": {"type": "integer"},
            "typeNm": {"type": "keyword"},
            "prdtStatNm": {"type": "keyword"},
            "nationAlt": {"type": "keyword"},
            "genreAlt": {"type": "keyword"},
            "repNationNm": {"type": "keyword"},
            "repGenreNm": {"type": "keyword"},
            "companies": {
                "properties": {
                    "companyCd": {"type": "keyword"},
                    "companyNm": {"type": "keyword"}
                }
            },
            "directors": {
                "properties": {
                    "peopleNm": {"type": "keyword"}
                }
            }
        }
    }
}
# Recreate the index from scratch; ignore=400 suppresses "already exists" errors.
if es.indices.exists(INDEX_NAME):
    es.indices.delete(INDEX_NAME)
es.indices.create(INDEX_NAME, index_movie, ignore=400)
# ## Check the mapping
es.indices.get_mapping(INDEX_NAME)
# # Create a document
# Sample movie record (Korean-language field values are data, left as-is).
movie_data_1 = {
    "movieCd": "20173732",
    "movieNm": "살아남은 아이",
    "movieNmEn": "Last Child",
    "prdtYear": "2017",
    "openDt": "",
    "typeNm": "장편",
    "prdtStatNm": "기타",
    "nationAlt": "한국",
    "genreAlt": "드라마,가족",
    "repNationNm": "한국",
    "repGenreNm": "드라마"
}
# Index the document with an explicit document id of 1.
es.index(INDEX_NAME, movie_data_1, id=1)
# # Searching documents
# In elasticsearch-py 7.x the positional order is search(body, index).

# Match all documents in the index.
es.search({"query" : {
    "match_all": {}
}}, INDEX_NAME)
# Full-text match on the typeNm field.
es.search({"query" : {
    "match": {
        "typeNm": "단편"
    }
}}, INDEX_NAME)
# Aggregation: document counts per index (size=0 suppresses the hit list).
es.search({
    "size":0,
    "aggs": {
        "indices": {
            "terms": {
                "field": "_index",
                "size": 10
            }
        }
    }
}, INDEX_NAME)
# Fix: reindex() requires a body naming the source and destination indices —
# calling it with no arguments raises a TypeError before any request is sent.
es.reindex({
    "source": {"index": INDEX_NAME},
    "dest": {"index": INDEX_NAME + "_copy"}
})
| lecture_source/big_data/elk02_modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import *
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import boto3
from sagemaker import get_execution_role
import sagemaker
from sagemaker.predictor import csv_serializer, json_deserializer
# -
# # Load data
# Validation set from S3: column 0 is the true label, the rest are features.
df_teste = pd.read_csv("s3://workshop-pucminas-wcdbda/datasets/validation/data.csv", header=None)
df_teste.shape
df_teste[0].value_counts()
# Split into feature matrix and label vector.
X_test = df_teste.iloc[:,1:]
y_test_true = df_teste.iloc[:,0].values
# # Test pipeline
def get_predictions(X_test, batch=100):
    """Score ``X_test`` against the module-level ``predictor`` endpoint.

    Splits the data into ``batch`` chunks, sends each chunk to the endpoint,
    and parses the comma-separated CSV response into floats.

    :param X_test: DataFrame of feature rows to score.
    :param batch: number of chunks to split the data into (not the chunk size).
    :returns: list of float predictions, in input order.
    """
    scores = []
    for chunk in np.array_split(X_test, batch):
        raw = predictor.predict(chunk.values).decode("utf-8")
        scores.extend(float(value) for value in raw.split(','))
    return scores
def metricas_classificacao(y_true, y_pred, classes, normalize=False, title="Matriz de confusão"):
    """Compute standard binary-classification metrics.

    :param y_true: true labels.
    :param y_pred: predicted labels.
    :param classes: class labels (unused; kept for interface compatibility).
    :param normalize: unused; kept for interface compatibility.
    :param title: unused; kept for interface compatibility.
    :returns: dict mapping metric names to values; scores rounded to 4 digits.
    """
    resultados = {'matriz confusão': confusion_matrix(y_true, y_pred)}
    scorers = {
        'acurácia': accuracy_score,
        'f1 score': f1_score,
        'precision': precision_score,
        'recall': recall_score,
        'roc auc': roc_auc_score,
    }
    for nome, scorer in scorers.items():
        resultados[nome] = round(scorer(y_true, y_pred), 4)
    return resultados
def plot_confusion_matrix(y_test_true, predictions):
    """Plot the confusion matrix of true vs. predicted labels as a heatmap.

    :param y_test_true: array-like of true class labels.
    :param predictions: array-like of predicted class labels.
    """
    # Fix: create the figure *before* drawing. Previously plt.figure(figsize=...)
    # was called after the heatmap and before show(), so the figsize never
    # applied and an extra empty figure was displayed.
    plt.figure(figsize=(8,8))
    sns.heatmap(confusion_matrix(y_test_true, predictions),annot=True,cbar=False, cmap='Blues', fmt='g')
    plt.ylabel("True Values")
    plt.xlabel("Predicted Values")
    plt.title("CONFUSSION MATRIX VISUALIZATION")
    plt.show()
# Compare the three deployed XGBoost endpoints on the same validation data.
endpoints_names = ["xgboost-data-default", "xgboost-data-under", "xgboost-data-smote"]
results = {}
for endpoint in endpoints_names:
    print("ENDPOINT ", endpoint)
    print("Loading endpoint...")
    # Note: this rebinds the module-level `predictor` that get_predictions() uses.
    predictor = sagemaker.predictor.RealTimePredictor(endpoint=endpoint)
    predictor.content_type = 'text/csv'
    predictor.serializer = csv_serializer
    # The raw CSV response is parsed manually in get_predictions().
    predictor.deserializer = None
    print("Predictions...")
    predictions = get_predictions(X_test)
    print("Metrics...")
    resultados_metricas = metricas_classificacao(y_test_true, predictions, [0,1])
    results[endpoint] = {"predictions": predictions, "metricas":resultados_metricas}
    plot_confusion_matrix(y_test_true, predictions)
# Print every metric collected for each endpoint/model.
for model_name, model_results in results.items():
    print(model_name)
    for metric_name, metric_value in model_results["metricas"].items():
        print(metric_name, "=", metric_value)
    print("-------------")
| src/Loading and Testings EndPoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import window, col
from pyspark.sql.types import StructType, StructField, LongType, StringType, DoubleType
from time import sleep
# +
# Configure a Spark session pointed at the standalone cluster master,
# with modest driver/executor resource limits for the lab environment.
spark_conf = SparkConf()
spark_conf.setMaster("spark://master:7077")
spark_conf.setAppName("Lab7_5")
spark_conf.set("spark.driver.memory", "2g")
spark_conf.set("spark.executor.cores", "1")
spark_conf.set("spark.driver.cores", "1")
# Create the spark session, which is the entry point to Spark SQL engine.
spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()
# -
# Explicit schema for the activity-tracking JSON records (streaming sources
# require a schema up front; it cannot be inferred per micro-batch).
data_schema = StructType([
    StructField("Arrival_Time", LongType(), True),
    StructField("Creation_Time", LongType(), True),
    StructField("Device", StringType(), True),
    StructField("Index", LongType(), True),
    StructField("Model", StringType(), True),
    StructField("User", StringType(), True),
    StructField("gt", StringType(), True),
    StructField("x", DoubleType(), True),
    StructField("y", DoubleType(), True),
    StructField("z", DoubleType(), True),
])
# Read from a source
# maxFilesPerTrigger=1 simulates a live stream by ingesting one file per micro-batch.
sdf = spark.readStream.schema(data_schema).option("maxFilesPerTrigger", 1).json("../data/activity")
# +
# Create the event time column
# Creation_Time is in nanoseconds; divide by 1e9 to get seconds before the timestamp cast.
with_event_time_df = sdf.selectExpr("*", "cast(cast(Creation_Time as double)/1000000000 as timestamp) as event_time")
with_event_time_df.printSchema()
# Count events per (10-minute event-time window, user, activity) and expose the
# running result as an in-memory table named "user_activity_events_per_window".
with_event_time_df.groupBy(window(col("event_time"), "10 minutes"), "User", "gt").count().writeStream.queryName("user_activity_events_per_window").format("memory").outputMode("complete").start()
# -
# Poll the in-memory results table every 10 seconds, 10 times, to watch the
# windowed counts grow as micro-batches arrive.
for x in range(10):
    spark.sql("SELECT * FROM user_activity_events_per_window").show()
    sleep(10)
# Stop the spark context
spark.stop()
| labs/lab-7/notebooks/lab7_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# Gaussian discriminant analysis con diversa matrice di covarianza per le distribuzioni delle due classi e conseguente separatore lineare.
# +
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# -
import pandas as pd
import numpy as np
import scipy.stats as st
# +
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
# Global plotting style: fivethirtyeight theme with custom fonts and sizes.
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (16, 8)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
# Custom palette (the 10 base colors are repeated so long category lists wrap)
# and a blue-dominant colormap built from selected palette entries.
colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c',
          '#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b',
          '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09']
cmap = mcolors.LinearSegmentedColormap.from_list("", ["#82cafc", "#069af3", "#0485d1", colors[0], colors[8]])
# -
# Read the data from a csv file into a pandas dataframe. Each record has 3 values: the first two are the features, assigned to columns x1 and x2 of the dataframe; the third is the target, assigned to column t. A feature matrix X and a target vector t are then derived.
# +
# read the data into a pandas dataframe
data = pd.read_csv("../../data/ex2data1.txt", header=0, delimiter=',', names=['x1','x2','t'])
# dataset size: total count and number of class-0 elements
n = len(data)
n0 = len(data[data.t==0])
# feature dimensionality (all columns except the target)
nfeatures = len(data.columns)-1
X = np.array(data[['x1','x2']])
t = np.array(data['t'])
# -
# Plot the dataset.
fig = plt.figure(figsize=(16,8))
ax = fig.gca()
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, color=colors[0], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40,c=colors[1], alpha=.7)
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Dataset', fontsize=12)
plt.show()
# Compute the means of the two class distributions.
mu0=np.array(np.mean(data[data.t==0][['x1','x2']]))
mu1=np.array(np.mean(data[data.t==1][['x1','x2']]))
# Use a different covariance matrix for each class distribution, estimated from the elements of the two classes.
# +
# center the elements of each class around the corresponding class mean
X0=np.array(data[data.t==0][['x1','x2']])-mu0
X1=np.array(data[data.t==1][['x1','x2']])-mu1
# compute the covariance matrices of the C0 and C1 distributions
sigma0=np.cov(X0.T)
sigma1=np.cov(X1.T)
# -
# Estimate the prior probability of class C0 as the fraction of dataset elements belonging to that class.
prior=float(n0)/n
# Define the 100x100 grid used for visualizing the various distributions.
# x coordinates of the grid points
u = np.linspace(min(X[:,0]), max(X[:,0]), 100)
# y coordinates of the grid points
v = np.linspace(min(X[:,1]), max(X[:,1]), 100)
# build the grid: the point at position i,j has x coordinate U(i,j) and y coordinate V(i,j)
U, V = np.meshgrid(u, v)
# Evaluate on the grid points the class-conditional densities $p(x|C_0), p(x|C_1)$ and the posterior class probabilities $p(C_0|x), p(C_1|x)$
# +
# vectorized functions evaluating the gaussian densities of the two classes
vf0=np.vectorize(lambda x,y:st.multivariate_normal.pdf([x,y],mu0,sigma0))
vf1=np.vectorize(lambda x,y:st.multivariate_normal.pdf([x,y],mu1,sigma1))
# evaluate the two densities on the grid
p0=vf0(U,V)
p1=vf1(U,V)
# -
# Visualization of the $p(x|C_0)$ distribution.
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
# render the class C0 conditional density as a heatmap
imshow_handle = plt.imshow(p0, origin='lower', extent=(min(X[:,0]), max(X[:,0]), min(X[:,1]), max(X[:,1])), alpha=.7)
plt.contour(U, V, p0, linewidths=[.7], colors=[colors[6]])
# plot the dataset points
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c=colors[0], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40,c=colors[1], alpha=.7)
# mark the mean of the distribution
ax.scatter(mu0[0], mu0[1], s=150,c=colors[3], marker='*', alpha=.6)
# titles, labels, axis limits
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlim(u.min(), u.max())
plt.ylim(v.min(), v.max())
plt.title('Distribuzione di $p(x|C_0)$', fontsize=12)
plt.show()
# Visualization of the $p(x|C_1)$ distribution.
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
# render the class C1 conditional density as a heatmap
imshow_handle = plt.imshow(p1, origin='lower', extent=(min(X[:,0]), max(X[:,0]), min(X[:,1]), max(X[:,1])), alpha=.7)
plt.contour(U, V, p1, linewidths=[.7], colors=[colors[6]])
# plot the dataset points
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c=colors[0], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40,c=colors[1], alpha=.7)
# mark the mean of the distribution
ax.scatter(mu1[0], mu1[1], s=150,c=colors[3], marker='*', alpha=.6)
# titles, labels, axis limits
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlim(u.min(), u.max())
plt.ylim(v.min(), v.max())
plt.title('Distribuzione di $p(x|C_1)$', fontsize=12)
plt.show()
# Compute the posterior distributions of classes $C_0$ and $C_1$ for all grid points by applying Bayes' rule
# +
# likelihood ratio between the two classes for all grid points
z=p0/p1
# posterior-probability ratio between the two classes for all grid points
zbayes=p0*prior/(p1*(1-prior))
# evidence of the data
ev = p0*prior+p1*(1-prior)
# posterior probabilities of C0 and C1
pp0 = p0*prior/ev
pp1 = p1*(1-prior)/ev
# -
# Visualization of $p(C_0|x)$
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
imshow_handle = plt.imshow(pp0, origin='lower', extent=(min(X[:,0]), max(X[:,0]), min(X[:,1]), max(X[:,1])), alpha=.5)
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c=colors[1], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, c=colors[1], alpha=.7)
# solid contour: decision boundary from the posterior ratio; dashed: likelihood ratio only
plt.contour(U, V, zbayes, [1.0], colors=[colors[7]],linewidths=[1])
plt.contour(U, V, z, [1.0], colors=[colors[7]],linewidths=[1], linestyles='dashed')
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlim(u.min(), u.max())
plt.ylim(v.min(), v.max())
plt.title("Distribuzione di $p(C_0|x)$", fontsize=12)
plt.show()
# Visualization of $p(C_1|x)$
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
imshow_handle = plt.imshow(pp1, origin='lower', extent=(min(X[:,0]), max(X[:,0]), min(X[:,1]), max(X[:,1])), alpha=.5)
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c=colors[0], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, c=colors[1], alpha=.7)
# solid contour: decision boundary from the posterior ratio; dashed: likelihood ratio only
plt.contour(U, V, zbayes, [1.0], colors=[colors[7]],linewidths=[1])
plt.contour(U, V, z, [1.0], colors=[colors[7]],linewidths=[1], linestyles='dashed')
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.xlim(u.min(), u.max())
plt.ylim(v.min(), v.max())
plt.title("Distribuzione di $p(C_1|x)$", fontsize=12)
plt.show()
# Make predictions on the dataset elements.
# density of each element under the C0 distribution
p0_d = vf0(X[:,0],X[:,1])
# density of each element under the C1 distribution
p1_d = vf1(X[:,0],X[:,1])
# posterior-probability ratio between C0 and C1 membership
z_d = p0_d*prior/(p1_d*(1-prior))
# model predictions: class 1 where the ratio favors C1
pred = np.where(z_d<1, 1, 0)
# number of misclassified elements
nmc = abs(pred-t).sum()
# accuracy
acc = 1-float(nmc)/n
print("Accuracy: {0:5.4f}".format(acc))
| codici/.ipynb_checkpoints/gdaquad-checkpoint.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Octave
% language: octave
% name: octave
% ---
% The following does the install as required to use `findpeaks`. It may produce the following messages initially:
% ```
% warning: creating installation directory /home/terry/octave
% warning: called from
% install at line 30 column 5
% pkg at line 394 column 9
% ```
% It produces the following messages:
% ```
% For information about changes from previous versions of the control package, run 'news control'.
% For information about changes from previous versions of the signal package, run 'news signal'.
% ```
%
% Note that this just needs to be done once for a given Jupyter lab invocation. Subsequent `Octave` kernels only need to invoke the following:
% ```
% pkg load signal
% ```
% One-time setup: fetch and install the control and signal packages from
% Octave Forge (subsequent sessions only need `pkg load signal`).
pkg install -forge control
pkg install -forge signal
| legacy/findpeaks_install.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Data Scraping in Python</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>The Data Story Cycle</center></h1>
#
# <img src='data_cycle2.png' height="100%" width="100%">
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Software for Navigating the Data Cycle in Python</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Data Extration <u>(BeautifulSoup, Selenium and Scrapy)</u></li>
# <li>Data Transformation <u>(Numpy and Pandas)</u></li>
# <li>Data Loading <u>(SQL, Flask Restful API, and Pandas)</u></li>
# <li>Data Exploration <u>(Pandas, Numpy and Matplotlib)</u></li>
# <li>Data Mining <u>(Pandas, NumPy, Scikit-learn, NLTK and Tensor Flow)</u></li>
# <li>Data Visualization & Interaction <u>(Matplotlib, D3 and MplD3)</u></li>
# </ul>
# </div>
# -
# <h1><center>Software (no programming needed) for Navigating the Data Cycle</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Data Extration <u>(ParseHub, Knime and Excel/Google Sheet)</u></li>
# <li>Data Transformation <u>(Excel/Google Sheet, Knime and OpenRefine)</u></li>
# <li>Data Loading <u>(Excel/Google Sheet, Knime and Airtable)</u></li>
# <li>Data Exploration <u>(Excel/Google Sheet, Tableau, Knime and Google Data Studio)</u></li>
# <li>Data Mining <u>(Excel/Google Sheet, Knime)</u></li>
# <li>Data Visualization & Interaction <u>(Excel/Google Sheet, Tableau, Knime and Google Data Studio)</u></li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Why Python?</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Simple syntax designed as a teaching language for learning Computer Science</li>
# <li>Great language for learning about object-oriented programming, a modern programming approach</li>
# <li>Vast support of programming libraries, especially for AI and data science works</li>
# <li>Commercial adoptions have increased rapidly, providing plenty of job opportunities</li>
# <li>Used by YouTube, Dropbox, Google, Instagram, Spotify, Reddit, Yahoo Maps, Pinterest, Washington Post, NASA, etc.</li>
# <li>For web programming, like PHP and C#, Python is usually used for back-end processing.</li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Revisit Front-end Processing</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>For web development, most projects are divided by front-end and back-end processing.</li>
# <li>Front-end is used for handling user interface</li>
# <li>Back-end is used for handling data model computation and database processing</li>
# <li>HTML,CSS, and JavaScript make up the <b>Big Three</b> of front-end web development</li>
# <li>Web scraping depends on a deep understanding of front-end web publishing in HTML/CSS/JS for reversing the publishing flow to extract data from the published web pages</li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Big Three in Web Publishing</center></h1>
# <img width='100%' height='100%'src='git_html_css.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='html_as_a_noun.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='dom.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='html_code_view.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>The Basic Grammar of HTML</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Starts with a start tag</li>
# <li>End with an end tag</li>
# <li>Content of element is everything between the start and end tags</li>
# <li>Some elements have empty content and no end tag</li>
# <li>Most elements have attributes</li>
# <li>HTML adopts a tree structure called DOM</li>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='css_as_adjective.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='70%' height='70%' src='box_model.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>CSS Inspection with Chrome</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Use right mouse button to trigger inspector</li>
# <li>Select an element in the page to inspect</li>
# <li>Traverse the DOM tree to inspect the hierarchy of elments</li>
# <li>Observe the styles under the style tab at the lower right corner of the browser</li>
# <li>May change the properties to observe impact on the layout</li>
# <li>Changes will not be stored after the screen is refreshed</li>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='custom_selector.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='inline_css.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='internal_css.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='external_css_1.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='external_css_2.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>The Basic Grammar of CSS</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Styles define how to display HTML elements</li>
# <li>Each style description is made up of Selector and Declaration</li>
# <li>Selector defines which HTML element should be used for display and the declaration defines how
# each declaration contains properties and values</li>
# <li>There are base and custom selectors (ID and CLASS are customer selectors)
# <li>Style definition can be placed inline, in the head section or in an external file (e.g. style.css) </li>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='js_as_verb.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='internal_js.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <img width='100%' height='100%' src='external_js.png'>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Python Development with Anaconda and Jupyter Notebook</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Why Anaconda?</li>
# <li>Most of the popular Python libraries such as BeautifulSoup, NumPy, Pandas, Mathplotlib are preinstalled</li>
# <li>Other additional software tools such as R Studio, JupyterLab, Jupyter Notebook are also preinstalled</li>
# <li>Cross-platform support on Mac, Windows, and Linux</li>
# <li>Jupyter Notebook is a great tool for learning Python</li>
# <li>Jupyter Notebook supports HTML/CSS and LaTex markup syntax</li>
# <li>Jupyter Notebook supports slideshow presentation</li>
# <li>Github support for Jupyter notebook document</li>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>HTML/CSS Demonstration</center></h1>
# <div style="margin-left:100px;">
# To show the following effect, make sure "markdown" is chosen for rendering in Jupyter Notebook and then type:<br><br>
# <div style="margin-left:100px;">
# <div style="margin-left:100px;"><div style="color:red;">Word in Red</div>, then you see:</div><br>
# <div style="margin-left:100px;"><div style="color:red;">Word in Red</div></div>
# <div style="margin-left:100px;"></div>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Latex Demonstration</center></h1>
# <div style="margin-left:100px;">
# To show the following formula, make sure "markdown" is chosen for rendering in and then type:<br><br>
# <div style="margin-left:100px;">\begin{equation*}</div>
# <div style="margin-left:100px;">s = \sqrt \frac{\sum_{k=1}^n\left(x-\overline{x}\right)^2}{n}</div>
# <div style="margin-left:100px;">\end{equation*}</div>
# </div>
#
# \begin{equation*}
# s = \sqrt \frac{\sum_{k=1}^n\left(x-\overline{x}\right)^2}{n}
# \end{equation*}
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Running Jupyter Notebook in Slideshow Mode</center></h1>
# <div style="margin-left:100px;">
# At the command prompt, type the following:<br><br><br>
# <div style="font-size:20px;"><center>jupyter nbconvert <your Jupyter Notebook file name>.ipynb --to slides --post serve</center></div>
# <br><br><br>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Basic Data Types in Python</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Integer</li>
# <li>Float</li>
# <li>String</li>
# <li>Boolean</li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Integer</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter 123</li>
# <li>Hit "Run" on the button toolbar to execute your code.</li>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter type(123)</li>
# <li>Hit "Run" on the button toolbar to execute your code</li>
# <li>See what you've got</li>
# </ol>
# </div>
# -
# A quoted numeric value is a string: type() reports str regardless of the digits inside.
type('123.45')
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Float</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter 123.45</li>
# <li>Hit "Run" on the button toolbar to execute your code.</li>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter type(123.45)</li>
# <li>Hit "Run" on the button toolbar to execute your code</li>
# <li>See what you've got</li>
# </ol>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>String</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter '123.45'</li>
# <li>Hit "Run" on the button toolbar to execute your code.</li>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter type('123.45')</li>
# <li>Hit "Run" on the button toolbar to execute your code</li>
# <li>See what you've got</li>
# </ol>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Assign Data Type to Variables</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter 'x = 123'</li>
# <li>Enter 'y = 123.45</li>
# <li>Enter 'z = '123.45'</li>
# <li>Enter 'print(x,y,z)</li>
# <li>By the way, the command "print" is a built-in function in Python</li>
# <li>Enter 'print(type(x),type(y),type(z))'. </li>
# <li>Hit "Run" on the button toolbar to execute your code</li>
# <li>See what you've got</li>
# </ol>
# </div>
# -
# Assign an int, a float and a string, then inspect the values and their types.
x = 123
y = 123.45
z = '123.45'
print(x,y,z)
print(type(x),type(y),type(z))
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Boolean</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Press the "+" button on the button toolbar to create a new In-box</li>
# <li>Enter 'sign_up = True'</li>
# <li>Enter type(type(sign_up)</li>
# <li>Hit "Run" on the button toolbar to execute your code</li>
# <li>See what you've got</li>
# </ol>
# </div>
# -
# The boolean literals True/False have type bool.
sign_up = True
print(type(sign_up))
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>More on Expressions and Variables</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>Expressions are made up of mathematical operations</li>
# <ul>
# <li>Addition +</li>
# <li>Subtraction -</li>
# <li>Multiplication *</li>
# <li>Division /</li>
# <li>Rounded division //</li>
# <li>Exponent **</li>
# <li>Modulus % (division returns remainder)</li>
# </ul>
# <li>Variables (used for storing values)</li>
# <ul>
# <li>x = 123</li>
# <li>y = 123.45</li>
# <li>z = '123.45'</li>
# <li>What is the result of adding x + y
# </ul>
# </ol>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
#
# + slideshow={"slide_type": "slide"}
# Mixing numbers into string concatenation requires explicit str() conversion.
x = 123
y = 123.45
z = 'The result of ' + str(x) + " + " + str(y) + " = " + str(x+y)
print(type(str(y)))
print(z)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>More on String</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>The concept of an "index": position of each character in the string</li>
# <li>The first position is always "0"</li>
# <li>Notation of the index is expressed as:string_var[beginning position:ending position]</li>
# <li>The ending position is not included
# <li>The len(string_var) function will return the length of the string</li>
# <li>Let's say z = "I am from CUHK."</li>
# <li>What will the command print(z[1:4]) return?</li>
# <li>How can you print out the entire string using the index</li>
# <li>Negative index starts from the end of the string e.g. name[-1:] </li>
# </ol>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# -
# String slicing: z[a:b] takes characters from index a up to (not including) b.
z = "I am from CUHK."
print(z[0:4])
print(z[0:15])
# negative indices count back from the end of the string
print (z[-3:])
# + slideshow={"slide_type": "slide"}
z = "I am from CUHK."
print(len(z))
print(z[0:14])
print(z[-3:])
print(z[10:14])
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Useful String Functions</center></h1>
# <div style="margin-left:100px;">
# To show the result of your code, make sure "code" display option is chosen instead of the "markdown" option for rendering in Jupyter Notebook and then type:<br>
# <br>
# <ol>
# <li>name.upper()</li>
# <li>name.replace(destination,source)</li>
# <li>name.find(destination)</li>
# <li>name.split()</li>
# <li>name.count(target)</li>
# </ol>
# </div>
# -
# replace() returns a new string; the original is left unchanged.
famous_person = "<NAME>"
print(famous_person)
famous_person2 = famous_person.replace("Charles","Harry")
print(famous_person, famous_person2)
# split() breaks on whitespace; count() tallies occurrences of a substring
print(famous_person.split())
print(famous_person.count("e"))
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + slideshow={"slide_type": "slide"}
# upper()/replace() return new strings; find() returns -1 when the substring is absent.
name = "<NAME>"
print(name)
name = name.replace("Charles","William")
name = name.upper()
print(name)
print(name.find("Charles"))
print(name.find("WILLIAM"))
# -
# find() with a start offset locates the second occurrence of a substring.
names = "<NAME> and <NAME>"
first_prince_position = names.find("Prince")
print("1st Prince Position:"+str(first_prince_position))
second_prince_position = names.find("Prince",first_prince_position+1)
print("2nd Prince Position:"+str(second_prince_position))
son_of_charles = names[second_prince_position:]
print(son_of_charles)
# str(x) function will convert integer and float into a string
x = 3.14
y = str(x)
print(y)
type(y)
# float(x) function will convert a string with decimal point into a float
x = 3.14
y = str(x)
z1 = float(y)
print(z1)
# int() cannot parse "3.14" directly, hence the slice up to the decimal point
z2 = int(y[0:y.find(".")])
print(z2)
print(type(z1))
print(type(z2))
# Create a rounding function
# NOTE(review): slicing to pos-1 assumes rounding does not add a digit to the
# integer part (e.g. 9.6 would be mishandled) -- fine for this demo value.
x = 273.87127
pos = str(x).find(".")+1
val = str(x)[pos:pos+1]
if (int(val) >= 5):
    print(str(x+1)[0:pos-1])
else:
    print(str(x)[0:pos-1])
# Another way to implement a rounding function: inspect the first decimal
# digit and bump x up by one when it is 5 or greater.
x = 273.47127
# BUG FIX: the original referenced an undefined name `str_x` (NameError).
pos = str(x).find('.')+1
# the first digit after the decimal point, as a one-character string
val = str(x)[pos:pos+1]
# BUG FIX: "0" was missing from the keep-set, so fractions like .0x rounded up.
if (val in "01234"):
    x = x
else:
    x = x + 1
# drop the fractional part of the (possibly bumped) value
print(str(x)[0:pos-1])
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Basic Data Structures in Python</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>List and Tuple</li>
# <li>Dictionary and Set</li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Tuple and List</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Tuples are arrays enclosed with round brackets for storing multiple variables</li>
# <li>Variables with data types can be stored into a Tuple</li>
# <li>A Tuple operates like a string and therefore can be indexed from the beginning (positive) and the end (negative)</li>
# <li>Tuples are immutable</li>
# <li>Inorder to manipulate a tuple, a new one has to be created</li>
# <li>Tuples can be nested e.g. Tuple2 = (1,2,(3,4),5)</li>
# <li>Due to its rigidity, List is more commonly used than Tuple.</li>
# <li>Lists are like Tuples but are mutable and enclosed with square brackets</li>
# <li>List, similar to a Tuple, operates like a string when it comes to access individual element within the data structure</li>
# </ul>
# </div>
# -
# Tuples use parentheses and are immutable; lists use brackets and are mutable.
tuple1 = (1,1.3,2,'CUHK')
print(tuple1)
list1 = [1,1.3,2,'CUHK']
print(list1)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# -
# sorted() returns a new sorted list without touching the original.
my_list = [1,2,3,4,5,2.5]
print(sorted(my_list))
print(my_list)
# "+" concatenates element by element; append() adds its argument as ONE item
my_list = my_list + [6,7,8]
print(my_list)
my_list.append([9,10,11])
print(my_list)
# del removes the nested list that append() added at index 9
del(my_list[9])
print(my_list)
score_list = [100,95,85,60,60.5,70]
print(score_list)
print(score_list[3])
score_list.append([50,45,65,55])
print(score_list)
del(score_list[6])
print(score_list[3:7])
print(score_list)
# lists are heterogeneous: numbers, strings and booleans can be mixed
score_list.append([1,2,3,"I am from CUHK.",True])
print(score_list)
second_list = [11,22,33]
second_list.append([44,55,66])
score_list.append(second_list)
print(score_list)
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Set and Dictionary</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Similar to lists and tuples, sets support different Python types</li>
# <li>Sets use {} (braces) to embed values/elements</li>
# <li>Sets do not allow duplicates</li>
# <li>Lists can be converted into sets with the set function</li>
# <li>Dictionary store data in an array of key-value pairs in braces</li>
# <li>For instance, here is a dictionary instance: dict = {“key1”:1,”key2”:2,”key3”:3}
# </li>
# <li>1st column representing the key and 2nd column representing the value</li>
# <li><dict_name>.keys() returns all the keys</li>
# <li><dict_name>.values() returns all the values</li>
# </ul>
# </div>
# -
# Dictionaries map keys to values; keys()/values() expose each side.
dict1 = {'Peter':80,"David":90,"Mary":100}
print(dict1)
print(dict1.keys())
print(dict1.values())
print(dict1["David"])
# dictionaries can be stored inside lists
scorelist = []
scorelist.append(dict1)
print(scorelist)
test1 = {"Peter":50,"David":60,"Mary":70}
assignment1 = {"Peter":50,"David":70,"Mary":80}
score_list = []
score_list.append(test1)
score_list.append(assignment1)
print(score_list)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Basic Operations in Python</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Variable Assignment</li>
# <li>Mathematical Operations</li>
# <li>Functional Decomposition and Abstraction</li>
# <li>Logical Operations</li>
# <li>Looping Operations</li>
# </ul>
# </div>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# -
# A list of dicts: one dict per test, keyed by student name.
scorelist = []
test1 = {"Peter":50,"David":60,"Mary":65,"Harry":80}
test2 = {"Peter":90,"David":90,"Mary":85,"Harry":70}
scorelist.append(test1)
scorelist.append(test2)
print(scorelist)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Looping Operations</center></h1>
# <div style="margin-left:100px;">
# -
# range(1,10) yields 1..9; the for statement rebinds i on every iteration,
# so the initial assignment below is immediately overwritten.
i = 1
for i in range(1,10):
    print(i)
# <h1><center>Try it out</center></h1>
# Version 1: iterate the names while advancing a manual index in parallel.
hrs_list = [30.0,40.0,50.0,60.0]
rate_list = [65.0,75.0,65.0,75.0]
name_list = ['John',"Mike","Mary","Jane"]
index = 0
fee_list = []
for number in name_list:
    hrs = hrs_list[index]
    rate = rate_list[index]
    fee = hrs * rate
    fee_list.append(fee)
    money_made = name_list[index] + " makes " + str(fee) + "."
    print(money_made)
    index = index + 1
money_list = []
money_list.append(name_list)
money_list.append(fee_list)
print(money_list)
# <h1><center>Try it out</center></h1>
# Version 2: the same computation driven by range(len(...)) positions.
hrs_list = [30.0,40.0,50.0,60.0]
rate_list = [65.0,75.0,65.0,75.0]
name_list = ['John',"Mike","Mary","Jane"]
fee_list = []
for number in range(len(name_list)):
    hrs = hrs_list[number]
    rate = rate_list[number]
    fee = hrs * rate
    fee_list.append(fee)
    money_made = name_list[number] + " makes " + str(fee) + "."
    print(money_made)
money_list = []
money_list.append(name_list)
money_list.append(fee_list)
print(money_list)
# <h1><center>Try it out</center></h1>
# Version 3: the same computation as a while loop with an explicit counter.
hrs_list = [30.0,40.0,50.0,60.0]
rate_list = [65.0,75.0,65.0,75.0]
name_list = ['John',"Mike","Mary","Jane"]
fee_list = []
number = 0
while number < len(name_list):
    hrs = hrs_list[number]
    rate = rate_list[number]
    fee = hrs * rate
    fee_list.append(fee)
    money_made = name_list[number] + " makes " + str(fee) + "."
    print(money_made)
    number = number + 1
money_list = []
money_list.append(name_list)
money_list.append(fee_list)
print(money_list)
print(len(name_list))
# + slideshow={"slide_type": "slide"}
# Iterating a dict yields its keys; index back into it for the values.
d = {'a': 'apple', 'b': 'berry', 'c': 'cherry'}
for key in d:
    print(key, d[key])
# -
# Dict-based variant: look hours and rates up by name instead of by position.
hrs_list = [30.0,40.0,50.0,60.0]
rate_list = [65.0,75.0,65.0,75.0]
name_list = ['John',"Mike","Mary","Jane"]
hrs = {"John":30.0,"Mike":40,"Mary":50,"Jane":60}
rates = {"John":65.0,"Mike":75.0,"Mary":65.0,"Jane":75.0}
for i in name_list:
    fees = hrs[i]* rates[i]
    make_money = " makes " + str(fees) + "."
    print(i,make_money)
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Conditional Operations</center></h1>
# <div style="margin-left:100px;">
# -
# Walk the color list, printing a special message when 'blue' is found;
# each branch advances the manual index exactly once per iteration.
my_list = ['red','green','blue','orange','black']
index = 0
for i in my_list:
    if i == 'blue':
        print(index, "Blue is printed.")
        index += 1
    else:
        print(index, my_list[index])
        # index += 1
        index = index + 1
print("This is done.")
# Print each person's fee under a "Less than 3200" / "More than or equal
# to 3200" heading; the two flags ensure each heading appears only once.
hrs_list = [30.0,40.0,50.0,60.0]
rate_list = [65.0,75.0,65.0,75.0]
name_list = ['John',"Mike","Mary","Jane"]
hrs = {"John":30.0,"Mike":40,"Mary":50,"Jane":60}
rates = {"John":65.0,"Mike":75.0,"Mary":65.0,"Jane":75.0}
less_than_3200 = False
greater_equal_3200 = False
for i in name_list:
    fees = hrs[i]* rates[i]
    make_money = " makes " + str(fees) + "."
    if (fees < 3200.0):
        if (less_than_3200 == False):
            print("Less than 3200")
            less_than_3200 = True
        print(" ",i,make_money)
    else:
        if (greater_equal_3200 == False):
            print("More than or equal to 3200")
            greater_equal_3200 = True
        print(" ",i,make_money)
# <h1><center>Decomposition and Abstraction of Computational Problems Using Function</center></h1>
# +
# Create a rounding function given rounded_to_int(x) and x can be any integer or float.
# so rounded_to_int(3.14) will return 3 and rounded_to_int(3.54) will return 4.
# Create a rounding function
def rounded_to_int(x):
    """Round x to the nearest integer (halves round up) and return it as a string.

    The previous string-slicing implementation broke whenever the rounded-up
    value had a different number of digits than x (e.g. 9.6 became "1");
    plain arithmetic handles any magnitude.
    """
    whole = int(x // 1)        # floor of x (works for negative inputs too)
    if x - whole >= 0.5:       # round half up
        whole += 1
    return str(whole)
print(rounded_to_int(3.14))
print(rounded_to_int(3.54))
# -
# <h1><center>A Tuition Fee Computation Example</center></h1>
def compute_ta_fees(hrs, rate):
    """Return the pay earned for `hrs` hours of work at `rate` per hour."""
    return hrs * rate
ta_fees = compute_ta_fees(30, 60.40)
print(ta_fees)
def compute_ta_fees(hrs, rate, name):
    """Return a sentence reporting how much `name` earned (`hrs` * `rate`)."""
    return f"{name} has received ${hrs * rate}."
ta_fees = compute_ta_fees(30, 60.40, 'Bernard')
print(ta_fees)
def compute_ta_fees(hrs, rate, name):
    """Report the 20% share of `name`'s pay (hrs * rate) that goes to mom."""
    money_to_mom = hrs * rate * .2
    return f"{name}'s mom has received ${money_to_mom}."
ta_fees = compute_ta_fees(36, 67.30, 'Mary')
print(ta_fees)
def compute_ta_fees(hrs, rate, name):
    """Return a sentence reporting the 20% cut of `name`'s pay for mom."""
    money_to_mom = hrs * rate * .2
    return name + "'s mom has received $" + str(money_to_mom) + "."
# print ta_fees
hrs_list = [30.0, 40.0, 50.0, 60.0]
rate_list = [65.0, 75.0, 65.0, 75.0]
name_list = ['John', "Mike", "Mary", "Jane"]
output_list = []
# Walk the three parallel lists in lockstep and collect one sentence per TA.
for hrs, rate, name in zip(hrs_list, rate_list, name_list):
    ta_fees = compute_ta_fees(hrs, rate, name)
    print(ta_fees)
    output_list.append(ta_fees)
print(output_list)
# +
# Items the customer wants to buy, current stock counts, and unit prices.
purchase_list = ["ABC 2T HD", "MS wireless mouse", "TS Wireless keyboard"]
inventory = {
    "ABC 2T HD": 60,
    "MS wireless mouse": 0,
    "TS Wireless keyboard": 32,
    "CC 500G USB Drive": 25
}
prices = {
    "ABC 2T HD": 800,
    "MS wireless mouse": 200,
    "TS Wireless keyboard": 120,
    "CC 500G USB Drive": 450
}
# Write your code below!
def compute_bill(part_list):
    """Total the price of every in-stock item, decrementing inventory as sold."""
    total = 0
    for item in part_list:
        if inventory[item] > 0:
            total += prices[item]
            inventory[item] -= 1
            print(item, ":", inventory[item])
    return total
print("Total:", compute_bill(purchase_list))
# +
# Gradebook records: one dict per student holding raw scores per category.
john = {
    "name": "John",
    "homework": [90.0, 97.0, 75.0, 92.0],
    "quizzes": [88.0, 40.0, 94.0],
    "tests": [75.0, 90.0]
}
alan = {
    "name": "Alan",
    "homework": [100.0, 92.0, 98.0, 100.0],
    "quizzes": [82.0, 83.0, 91.0],
    "tests": [89.0, 97.0]
}
tom = {
    "name": "Tom",
    "homework": [0.0, 87.0, 75.0, 22.0],
    "quizzes": [0.0, 75.0, 78.0],
    "tests": [100.0, 100.0]
}
def average(numbers): # Step 4
    """Return the arithmetic mean of a non-empty sequence as a float."""
    return float(sum(numbers)) / len(numbers)
def get_average(student): # Step 3
    """Return the weighted overall grade: 10% homework, 30% quizzes, 60% tests."""
    hw = average(student["homework"]) # Step 4
    qz = average(student["quizzes"])
    ts = average(student["tests"])
    return 0.1 * hw + 0.3 * qz + 0.6 * ts
def get_letter_grade(score): # Step 5
    """Map a numeric score to a letter on the standard 90/80/70/60 scale."""
    cutoffs = [(90, "A"), (80, "B"), (70, "C"), (60, "D")]
    for cutoff, letter in cutoffs:
        if score >= cutoff:
            return letter
    return "F"
def get_class_average(class_list): # Step 2
    """Return the mean of every student's weighted overall grade."""
    return average([get_average(student) for student in class_list])
# Drive the gradebook pipeline end to end and report the class letter grade.
students = [john, alan, tom] # Step 1 - students is a list of dict objects with each dict representing the
# records of the student's scores in homeworks, quizzes, and tests
class_avg = get_class_average(students)# Step 2
print(class_avg)
print(get_letter_grade(class_avg)) # Step 5
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Try it out</center></h1>
# -
# <h1><center>From Spreadsheet Table to Pandas Table</center></h1>
# <div style="margin-left:100px;">
# <h1><center>Introduction to Pandas</center></h1>
# <div style="margin-left:100px;">
# <ul>
# <li>Pandas allows us to deal with 2 data structures: series and data frame</li>
# <li>In our workshop, we'll only concentrate on data frame as it is more commonly used</li>
# <li>A data frame consists of rows and columns. It can be made from Python list and dictionary objects</li>
# <li>Pandas can put a dictionary of list into a data frame</li>
# <li>There are built-in Pandas functions for reading (e.g. df=pd.read_csv('<file name>')) and writing to CSV files (e.g df.to_csv('<file name>', sep='\t', encoding='utf-8'))</li>
# </ul>
# </div>
import pandas as pd
# Build a small product table directly from a dict of equal-length columns.
df1 = pd.DataFrame({
    'Product ID': [1, 2, 3, 4],
    'Product Name': ['t-shirt', 'jeans', 'shirt', 'skirt'],
    'Color': ['blue', 'green', 'red', 'black'],
    'Units Sold': [250, 300, 180, 200],
})
print(df1)
# <h1><center>With a little bit of background in HTML/CSS/JS and Python, <br>it's now time to scrape!</center></h1>
# <div style="margin-left:100px;">
# + [markdown] slideshow={"slide_type": "slide"}
# <h1><center>Scraping Data with Beautiful Soup and Storing the Result as CSV File<br>
# using Startup Beat as Example</center></h1>
# <div style="margin-left:100px;">
# +
import requests
import csv
from bs4 import BeautifulSoup
# Fetch one listing page of fintech-tagged articles and parse it into a DOM tree.
quote_page = requests.get('http://startupbeat.hkej.com/?tag=fintech&paged=1')
soup = BeautifulSoup(quote_page.content,'html.parser')
data = []
# Each article teaser lives in a <div class="archive-text">; collect its link and date.
for article in soup.find_all('div',class_='archive-text'):
    url = article.a.get('href')
    post_date = article.div.ul.li.text  # first <li> of the article's meta list
    data.append((url, post_date))
# Persist the scraped (url, post date) pairs as a two-column CSV.
with open('startup_beat_demo.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    header = ['url','post date']
    writer.writerow(header)
    for url, post_date in data:
        writer.writerow([url, post_date])
# -
# <h1><center>Try it out</center></h1>
# <h1><center>Scraping with Beautiful Soup and Saving Result in CSV with Pandas<br>
# Using Startup Beat as Example</center></h1>
# +
# %%time
import requests
import csv
import pandas as pd
from bs4 import BeautifulSoup
from time import sleep
from random import randint
from time import time
# quote_page = requests.get('http://startupbeat.hkej.com/?tag=fintech&paged=1')
# soup = BeautifulSoup(quote_page.text,'html.parser')
# Output column names for the scraped rows.
header = ['page #','title','url','details','post date']
data = []
# Display and store away 2 pages of scrapped data from startupbeat.hkej.com
for i in range(1,3):
    quote_page = requests.get('http://startupbeat.hkej.com/?tag=fintech&paged='+str(i))
    sleep(randint(8,15))  # polite random delay between page requests
    print("\n***** Page " + str(i) +" in action *****")
    soup = BeautifulSoup(quote_page.content,'html.parser')
    for article in soup.find_all("div", attrs={"class":"archive-text"}):
#    for article in soup.find_all('div',class_='archive-text'):
        page_no = str(i)
        # encode/decode round-trip strips whitespace while keeping UTF-8 text intact
        title = article.a.text.encode('utf-8').strip()
        decoded_title = title.decode('utf-8')
        url = article.a.get('href')
        details = article.p.text.encode('utf-8').strip()
        decoded_details = details.decode('utf-8')
        post_date = article.div.ul.li.text
        print(decoded_title)
        print(url)
        print(decoded_details)
        print(post_date)
        data.append((page_no, decoded_title, url, decoded_details, post_date))
# Collect all scraped rows into a DataFrame and save as tab-separated CSV.
df = pd.DataFrame(data,
                  columns = header
                 )
df.to_csv('projects/startup_beat_data_1.csv', sep='\t', encoding='utf-8')
# -
# <h1><center>Try it out</center></h1>
# <h1><center>Job Listing Scraping Example (original source: https://medium.com/@msalmon00/web-scraping-job-postings-from-indeed-96bd588dcb4b by <NAME>
# </center></h1>
# +
import requests
import bs4
from bs4 import BeautifulSoup
import pandas as pd
import time
# One Indeed search-results page (data scientist, $20,000+, New York, offset 10).
URL = "https://www.indeed.com/jobs?q=data+scientist+%2420%2C000&l=New+York&start=10"
#conducting a request of the stated URL above:
page = requests.get(URL)
#specifying a desired format of “page” using the html parser - this allows python to read the various components of the page, rather than treating it as one long string.
soup = BeautifulSoup(page.text,"html.parser")
#printing soup in a more structured tree format that makes for easier reading
# print(soup.prettify())
jobs = []
def extract_job_title_from_result(soup):
    """Append every job title found in `soup` to the module-level `jobs` list and return it."""
    for div in soup.find_all(name="div"):
        # job titles are carried on <a data-tn-element="jobTitle" title="..."> anchors
        for a in div.find_all(name="a", attrs={"data-tn-element":"jobTitle"}):
            jobs.append(a["title"])
    return(jobs)
extract_job_title_from_result(soup)
# +
companies = []
def extract_company_from_result(soup):
    """Append every company name found in `soup` to the module-level `companies` list and return it."""
    for div in soup.find_all(name="div", attrs={"class":"row"}):
        company = div.find_all(name="span", attrs={"class":"company"})
        if len(company) > 0:
            for b in company:
                companies.append(b.text.strip())
        else:
            # fallback: some results carry the name in a "result-link-source" span
            sec_try = div.find_all(name="span", attrs={"class":"result-link-source"})
            for span in sec_try:
                companies.append(span.text.strip())
    return(companies)
extract_company_from_result(soup)
# -
jobs
companies
#import pandas to convert list to data frame
import pandas as pd
df=pd.DataFrame(jobs,columns=['Jobs'])
df['Companies']=companies
df
# <h1><center>Try it out</center></h1>
# <h1><center>Scraping NBA Data
# (<a href='http://savvastjortjoglou.com/nba-draft-part01-scraping.html' target='_blank'>Original Source:Scraping and Cleaning the NBA Draft by
# <NAME>)</a></center></h1>
# +
from urllib.request import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import csv
# Scrape the 2014 NBA draft table from basketball-reference.com.
url = "http://www.basketball-reference.com/draft/NBA_2014.html"
html = urlopen(url)
soup = BeautifulSoup(html,'html.parser')
column_headers = []
# Column names live in the <th> cells of the second header row.
for th in soup.findAll('tr',limit=2)[1].findAll('th'):
    column_headers.append(th.getText())
data_rows = soup.findAll('tr')[2:]  # skip the first 2 header rows
player_data = []
for i in range(len(data_rows)):  # for each table row
    player_row = []
    # for each table data element from each table row
    for th in data_rows[i].findAll('th',limit=1):
        player_row.append(th.getText())
    for td in data_rows[i].findAll('td'):
        # get the text content and append to the player_row
        player_row.append(td.getText())
    # then append each pick/player to the player_data matrix
    player_data.append(player_row)
df = pd.DataFrame(player_data, columns=column_headers)
df.to_csv('projects/nba.csv', sep='\t', encoding='utf-8')
df.head()
# -
# <h1><center>Try it out</center></h1>
# <h1><center>Scraping, Transforming, and Visualizing with Beautiful Soup,Pandas, and Mathplotlib</center></h1>
# +
from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
import matplotlib.pyplot as plt
# Fetch URL
html_page = requests.get('https://www.travelchinaguide.com/climate/air-pollution.htm')
# Obtain the entire HTML page
soup = BeautifulSoup(html_page.content,'html.parser')
# Find all the HTML tables
tables = soup.find_all(class_="c_tableX")
# Access the second table (index 1), which contains the Air Quality information (header + data)
table = tables[1]
# Obtain air quality column header descriptions from that table
table_header = table.find(class_="c_tableX_th")
# Extract the header names from the 'td' elements of the header table and store in the variable 'tds'
tds = table_header.find_all('td')
# Create two empty lists for holding the air quality header names and air quality data
header = []
data = []
# Loop through the table cells (i.e. 'tds') to extract header names and append to the list
for i in tds:
#    print(i.text)
    header.append(i.text)
# print header
all_rows = table.find_all("tr")
# print data_rows.text
# Walk the remaining rows; enumerate from 1 so row 0 (the header) is skipped.
for i, row in enumerate(all_rows,1):
#    print row.text
    if (i < len(all_rows)):
        tds = all_rows[i].find_all("td")
        # Columns are positional: 1=rank, 2=city, 3=province, 4=AQI,
        # 5=quality level, 6=PM2.5, 7=PM10.
        for j, td in enumerate(tds,1):
#            print j,td.text
            if j==1:
                rank = td.text
            if j==2:
                city = td.text
            if j==3:
                province = td.text
            if j==4:
                aqi = td.text
            if j==5:
                air_quality = td.text
            if j==6:
                pm2_5 = td.text
            if j==7:
                pm10 = td.text
        data.append([rank,city,province,aqi,air_quality,pm2_5,pm10])
# Assign row data and column headers to dataframe
df = pd.DataFrame(data,
                  columns = header
                 )
# Save dataframe to external csv file
df.to_csv('projects/china_air_quality.csv', sep='\t', encoding='utf-8')
# open csv file and read csv data into Pandas dataframe
df = pd.read_csv("projects/china_air_quality.csv",sep='\t', encoding='utf-8')
# Set column headings for entire air quality table and print out the entire table
air_quality_ranking = df[['Rank','City','AQI','Air Quality Level','PM2.5','PM10']]
# ////////////////////////////////////////////////////////////////////
# Display City Ranking by Pollution Level
# ////////////////////////////////////////////////////////////////////
print("Air Quality Ranking\n")
print(air_quality_ranking)
# Extract cities that are polluted
lightly_polluted = df[df['Air Quality Level'] == 'Lightly Polluted']
heavily_polluted = df[df['Air Quality Level'] == 'Heavily Polluted']
# Combine the cities of different pollution level into one table.
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat stacks the two frames the same way.
selected = pd.concat([lightly_polluted, heavily_polluted])
pc = selected[['Rank','City','AQI','Air Quality Level','PM2.5','PM10']]
# ////////////////////////////////////////////////////////////////////
# Display Cities with Pollution
# ////////////////////////////////////////////////////////////////////
print("\nCities with Pollution\n")
print(pc)
# Bar-chart the AQI level of each polluted city.
cities = pc['City'].tolist()
aqi_lvl = pc['AQI'].tolist()
ax = plt.subplot()
plt.bar(range(len(cities)),aqi_lvl)
# One tick per bar; list(range(...)) replaces the manual while-loop counter
# that built the same [0, 1, ..., n-1] list by hand.
ax.set_xticks(list(range(len(cities))))
plt.xlabel('Cities')
plt.ylabel('AQI Levels')
plt.title('Cities with High AQI Levels')
ax.set_xticklabels(cities, rotation=70)
plt.show()
# -
# <h1><center>Try it out</center></h1>
| Learning_Python_Scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
import numpy as np
#construct an instance of ImageDataGenerator class
# Augmentation config: random rotations, shifts, shears, zooms, brightness
# changes and horizontal flips; gaps are filled with the nearest pixel.
datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        brightness_range=[0.2,2.0],
        horizontal_flip=True,
        fill_mode='nearest')
# +
#initializing the counter for break statement
i=0
#getting the iterator as 'batch' from the flow_from_directory()
# flow_from_directory yields endlessly, so the loop must break itself.
for batch in datagen.flow_from_directory('dataset/cat_dog_images',batch_size=1):
    #Only if want to view the image inline in jupyter
    #convert to unsigned integers
    image = batch[0].astype('uint8')
    #changing the image from multi-image 4D (batch, h, w, c) to single 3D (h, w, c)
    image = np.squeeze(image)
    plt.imshow(image)
    plt.show()
    i = i+1
    if i > 50:
        break #break the loop at the count of 50
# -
| .ipynb_checkpoints/keras_data_gen_flowdir_example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/Arrays/SpectralUnmixing.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Arrays/SpectralUnmixing.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=JavaScripts/Arrays/SpectralUnmixing.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/Arrays/SpectralUnmixing.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API and geemap
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
# The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
#
# **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
# +
# Installs geemap package
import subprocess
try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
# (Colab lacks ipyleaflet support, so fall back to the folium-based map there).
try:
    import google.colab
    import geemap.eefolium as emap
except:
    import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
    ee.Initialize()
except Exception as e:
    # First run on this machine: interactive auth, then retry initialization.
    ee.Authenticate()
    ee.Initialize()
# -
# ## Create an interactive map
# The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
# ## Add Earth Engine Python script
# +
# Add Earth Engine dataset
# Array-based spectral unmixing.
# Create a mosaic of Landsat 5 images from June through September, 2007.
allBandMosaic = ee.ImageCollection('LANDSAT/LT05/C01/T1') \
  .filterDate('2007-06-01', '2007-09-30') \
  .select('B[0-7]') \
  .median()
# Create some representative endmembers computed previously by sampling
# the Landsat 5 mosaic.
urbanEndmember = [88, 42, 48, 38, 86, 115, 59]
vegEndmember = [50, 21, 20, 35, 50, 110, 23]
waterEndmember = [51, 20, 14, 9, 7, 116, 4]
# Compute the 3x7 pseudo inverse.
endmembers = ee.Array([urbanEndmember, vegEndmember, waterEndmember])
inverse = ee.Image(endmembers.matrixPseudoInverse().transpose())
# Convert the bands to a 2D 7x1 array. The toArray() call concatenates
# pixels from each band along the default axis 0 into a 1D vector per
# pixel, and the toArray(1) call concatenates each band (in this case
# just the one band of 1D vectors) along axis 1, forming a 2D array.
inputValues = allBandMosaic.toArray().toArray(1)
# Matrix multiply the pseudo inverse of the endmembers by the pixels to
# get a 3x1 set of endmembers fractions from 0 to 1.
unmixed = inverse.matrixMultiply(inputValues)
# Create and show a colored image of the endmember fractions. Since we know
# the result has size 3x1, project down to 1D vectors at each pixel (since the
# second axis is pointless now), and then flatten back to a regular scalar
# image.
colored = unmixed \
  .arrayProject([0]) \
  .arrayFlatten([['urban', 'veg', 'water']])
Map.setCenter(-98.4, 19, 11)
# Load a hillshade to use as a backdrop.
Map.addLayer(ee.Algorithms.Terrain(ee.Image('CGIAR/SRTM90_V4')).select('hillshade'))
Map.addLayer(colored, {'min': 0, 'max': 1},
  'Unmixed (red=urban, green=veg, blue=water)')
# -
# ## Display Earth Engine data layers
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| JavaScripts/Arrays/SpectralUnmixing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h6>Input pipelines</h6>
# In a production system data is stored sequentially in a DFS in tf.record format.
# <p></p>
# <li>Use tf.placeholders for test, training, validation data</li>
# <li>Use threads</li>
#
| hinton/tf_inputpipelines_threads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="i6gI-NlUeW0i" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# + id="SW3ouvoFfZdP" colab_type="code" outputId="9389cb21-ba74-4ee1-94f2-cf6189d04a5d" executionInfo={"status": "ok", "timestamp": 1581615868124, "user_tz": -60, "elapsed": 749, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="Mj0DaiuZfhHN" colab_type="code" outputId="a7493d17-4279-4f08-a6f1-ec653aa01233" executionInfo={"status": "ok", "timestamp": 1581615935239, "user_tz": -60, "elapsed": 2880, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 72}
# Load the men's shoes dataset; low_memory=False reads the whole file at once
# so mixed-type columns get a consistent dtype.
df = pd.read_csv("men_shoes.csv" ,low_memory=False)
df.shape
# + id="guAB42I_gOqZ" colab_type="code" outputId="f03e97ac-f816-4da6-b5e4-71ff366ca8e2" executionInfo={"status": "ok", "timestamp": 1581615968118, "user_tz": -60, "elapsed": 573, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 237}
df.columns
# + id="l_GB-kWghHsE" colab_type="code" outputId="47f41f1b-6d5e-4511-a505-63893a8a09cc" executionInfo={"status": "ok", "timestamp": 1581616058900, "user_tz": -60, "elapsed": 711, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# Baseline statistic: mean listed minimum price across all shoes.
mean_price = np.mean (df['prices_amountmin'])
mean_price
# + id="BPqf1ffFwdUm" colab_type="code" colab={}
# Encode each categorical text column as integer codes for the tree model;
# factorize() returns (codes, uniques) and we keep only the codes.
df['brand_cat'] = df['brand'].factorize()[0]
# + id="fu43OSHyz9Sb" colab_type="code" colab={}
df['manufacturer_cat'] = df['manufacturer'].factorize()[0]
# + id="xDkGGa5e0956" colab_type="code" colab={}
df['prices_color_cat'] = df['prices_color'].factorize()[0]
# + id="vcoKf9VL1Xf7" colab_type="code" colab={}
df['categories_cat'] = df['categories'].factorize()[0]
# + id="hExMwHVhxoaF" colab_type="code" outputId="bbbd781c-b7c8-4ad3-9c55-8eb8eac2c109" executionInfo={"status": "ok", "timestamp": 1581620607006, "user_tz": -60, "elapsed": 1208, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# First model: predict minimum price from the brand code alone, scored by
# cross-validated negative mean absolute error.
feats = ['brand_cat']
x = df[ feats ].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, x, y, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="0yI0knybySwu" colab_type="code" colab={}
def run_model(feats):
    """Cross-validate a depth-5 decision tree on the given feature columns.

    Uses the module-level `df`; returns (mean, std) of the negative-MAE scores.
    """
    features = df[feats].values
    target = df['prices_amountmin'].values
    tree = DecisionTreeRegressor(max_depth=5)
    cv_scores = cross_val_score(tree, features, target, scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="PLZ3DdeD2H0q" colab_type="code" outputId="2127cfec-4c19-4b6e-ebb0-610d0900a1c5" executionInfo={"status": "ok", "timestamp": 1581620360912, "user_tz": -60, "elapsed": 656, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
run_model(['brand_cat'])
# + id="nuahZMMz1jNm" colab_type="code" outputId="68907121-a1e8-4b84-c5ba-92550d19e240" executionInfo={"status": "ok", "timestamp": 1581619518967, "user_tz": -60, "elapsed": 1057, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
run_model(['categories_cat'])
# + id="4XV8bhkNzU4J" colab_type="code" outputId="6723c421-1c0c-445c-fd1c-fa1ba82a25aa" executionInfo={"status": "ok", "timestamp": 1581620612745, "user_tz": -60, "elapsed": 420, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
run_model(['brand_cat','manufacturer_cat',])
# + id="d9xH5DLH6MQp" colab_type="code" outputId="38bd4a4a-de5a-4adf-a702-b6f5e2b9c2a3" executionInfo={"status": "ok", "timestamp": 1581624017409, "user_tz": -60, "elapsed": 1039, "user": {"displayName": "<NAME>.", "photoUrl": "", "userId": "00753812650686944921"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
run_model(['brand_cat','manufacturer_cat','categories_cat'])
| matrix_one/day4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
import sys
import os.path
import numpy as np
import pandas as pd
import wave
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from numba import jit
from matplotlib.backends.backend_pdf import PdfPages
from collections import OrderedDict
from scipy import signal
from scipy.optimize import curve_fit
from functools import wraps
# Get the absolute path of this notebook's directory
current_dir = os.path.dirname(os.path.abspath("__file__"))
# Add the directory containing our modules to the import path
sys.path.append( str(current_dir) + '/../../research_tools' )
get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib inline
plt.rcParams['font.family'] ='IPAPGothic' # font to use
plt.rcParams['xtick.direction'] = 'in' # x-axis ticks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['ytick.direction'] = 'in' # y-axis ticks point inward ('in'), outward ('out'), or both ('inout')
plt.rcParams['xtick.top'] = True # show tick marks on the top x-axis
plt.rcParams['ytick.right'] = True # show tick marks on the right y-axis
plt.rcParams['xtick.major.width'] = 1.0 # x-axis major tick line width
plt.rcParams['ytick.major.width'] = 1.0 # y-axis major tick line width
plt.rcParams['font.size'] = 11 # font size
plt.rcParams['axes.linewidth'] = 1.0 # axis edge line width (frame thickness)
plt.rcParams['figure.figsize'] = (7,5)
plt.rcParams['figure.dpi'] = 100 # dpi setting
plt.rcParams['figure.subplot.hspace'] = 0.3 # vertical spacing between subplots
plt.rcParams['figure.subplot.wspace'] = 0.3 # horizontal spacing between subplots
fig = plt.figure(figsize=(8, 11))
# + pycharm={"is_executing": false, "name": "#%%\n"}
import decorators
# import plot_tool
import plot_tools
import adaptive_filters
import adaptive_filters_v2
import wave_process
# + pycharm={"name": "#%%\n"}
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Load the drone recording for analysis.
wav = wave_process.wave_process("../../sample_wav/drone/th01.wav")
# + pycharm={"name": "#%%\n"}
# Fix: this result was assigned to `iav_row`, so the `wav_row.plot_all()`
# call below raised NameError. Assign to `wav_row`, the name actually used.
wav_row = plot_tools.PlotTolls(y=wav.data, fft_N=524288, stft_N=256, fs=wav.fs, window="hamming")
# + pycharm={"name": "#%%\n"}
wav_row.plot_all()
# -
#
#
| playground/fir_sofa_ipynb/filters/test/drone_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# ## CSV
# + Collapsed="false"
import pandas as pd
from tqdm import tqdm_notebook as tqdm
# + Collapsed="false"
# Manifest CSV has no header row, hence header=None.
seame_phaseII_val_manifest = pd.read_csv('seame_phaseII_val_manifest.csv',header = None)
# + Collapsed="false"
seame_phaseII_val_manifest.head()
# + Collapsed="false"
# Corpus root directory and the prefix used for the output CSV names.
path = '/media/volume1/Eason/Speech_Recognition/NER_Trs/'
name = 'ner'
# + Collapsed="false"
# `walk` and `join` were only imported in a later cell, so running this cell
# first (or the file as a script) raised NameError. Import them here so the
# cell is self-contained.
from os import walk
from os.path import join
mypath = path
wav = []
txt = []
# Recursively list the absolute paths of all files, keeping only 'Clean'
# .wav recordings and their .txt transcripts (skipping lexicon/DA files).
for root, dirs, files in walk(mypath):
    for f in files:
        if f[-4:] == '.wav':
            fullpath = join(root, f)
            if 'Clean' in fullpath:
                wav.append(fullpath)
        elif f != 'lexicon.txt' and f != 'DA.txt':
            if f[-4:] == '.txt':
                fullpath = join(root, f)
                if 'Clean' in fullpath:
                    txt.append(fullpath)
# + Collapsed="false"
from os import walk
from os.path import join
from sklearn.utils import shuffle
# Specify the directory whose files should all be listed
mypath = path
wav = []
txt = []
# Recursively list the absolute paths of all files
for root, dirs, files in walk(mypath):
    for f in files:
        if f[-4:] == '.wav':
            fullpath = join(root, f)
            if 'Clean' in fullpath:
                wav.append(fullpath)
        elif f != 'lexicon.txt' and f != 'DA.txt' :
            if f[-4:] == '.txt':
                fullpath = join(root, f)
                if 'Clean' in fullpath:
                    txt.append(fullpath)
# Sanity check: each transcript must pair with the wav of the same basename.
for i in tqdm(range(len(txt))):
    if wav[i].split('/')[-1].split('.')[0] != txt[i].split('/')[-1].split('.')[0]:
        print(wav[i])
        print(txt[i])
# Pair the lists into a two-column frame, shuffle, then 70/10/20 split.
NER = pd.DataFrame([wav,txt]).T
NER_shuffle = shuffle(NER).reset_index(drop=True)
L = len(NER_shuffle)
train = NER_shuffle[:int(L*0.7)].reset_index(drop=True)
val = NER_shuffle[int(L*0.7):int(L*0.8)].reset_index(drop=True)
test = NER_shuffle[int(L*0.8):].reset_index(drop=True)
train.to_csv(name + '_train.csv',index=False,header=None)
val.to_csv(name + '_val.csv',index=False,header=None)
test.to_csv(name + '_test.csv',index=False,header=None)
# + Collapsed="false"
# Sanity check: wav/transcript basenames must line up index by index.
for i in tqdm(range(len(txt))):
    if wav[i].split('/')[-1].split('.')[0] != txt[i].split('/')[-1].split('.')[0]:
        print(wav[i])
        print(txt[i])
# + Collapsed="false"
# Two-column frame: column 0 = wav paths, column 1 = transcript paths.
NER = pd.DataFrame([wav,txt]).T
# + Collapsed="false"
from sklearn.utils import shuffle
NER_shuffle = shuffle(NER).reset_index(drop=True)
# + Collapsed="false"
NER_shuffle[0][0]
# + Collapsed="false"
# 70/10/20 train/val/test split.
L = len(NER_shuffle)
train = NER_shuffle[:int(L*0.7)].reset_index(drop=True)
val = NER_shuffle[int(L*0.7):int(L*0.8)].reset_index(drop=True)
test = NER_shuffle[int(L*0.8):].reset_index(drop=True)
# + Collapsed="false"
train.to_csv('ner_v1tov10_train.csv',index=False,header=None)
val.to_csv('ner_v1tov10_val.csv',index=False,header=None)
test.to_csv('ner_v1tov10_test.csv',index=False,header=None)
# + Collapsed="false"
# Flag any test transcript whose lines contain the string '新制'.
for i in range(len(test)):
    with open(test[1][i],'r') as fp:
        all_lines = fp.readlines()
        if '新制' in all_lines:
            print(i)
# + Collapsed="false"
test[0][12]
# + [markdown] Collapsed="false"
# ## JSON
# + Collapsed="false"
# Read every transcript file into memory (one list of lines per file).
text = []
for i in tqdm(range(len(txt))):
    fp = open(txt[i], "r")
    lines = fp.readlines()
    fp.close()
    text.append(lines)
# + Collapsed="false"
# Concatenate all transcript lines into one big string.
text_str = ''
for i in tqdm(range(len(text))):
    for j in text[i]:
        text_str += j
# + Collapsed="false"
unique_word = ''.join(set(text_str))
# + Collapsed="false"
# The label set is every unique character observed in the transcripts.
ner_labels = []
for i in unique_word:
    ner_labels.append(i)
# + Collapsed="false"
import json
data = ner_labels
ret = json.dumps(data)
with open('../labels/ner_labels.json', 'w') as fp:
    fp.write(ret)
# + Collapsed="false"
# Verify every test wav actually loads with torchaudio; print paths that fail.
import torchaudio
for i in tqdm(range(len(test[0]))):
    try:
        path = test[0][i]
        sound, _ = torchaudio.load(path, normalization=True)
    except:
        print('='*50)
        print(path)
        print('='*50)
# + Collapsed="false"
| data/manifests/NER_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WA_DNR_GeoClaw notebooks
# - [topo](topo/Index.ipynb) Topography and bathymetry, topo files
# - [dtopo](dtopo/Index.ipynb) Earthquake sources and dtopo files
#
# #### Other topics:
#
# - Selecting fgmax points
# - AMR flagging regions
# - setrun
# - setplot
| Index.ipynb |