markdown stringlengths 0 1.02M | code stringlengths 0 832k | output stringlengths 0 1.02M | license stringlengths 3 36 | path stringlengths 6 265 | repo_name stringlengths 6 127 |
|---|---|---|---|---|---|
Determine ratio of positive ADE phrases compared to total dataset | df['label'].sum()/len(df) | _____no_output_____ | MIT | 1_data_prep.ipynb | marshmellow77/adverse-drug-effect-detection |
Initialise Sagemaker variables and create S3 bucket | from sagemaker.huggingface.processing import HuggingFaceProcessor
import sagemaker
from sagemaker import get_execution_role
sess = sagemaker.Session()
role = sagemaker.get_execution_role()
bucket = f"az-ade-{sess.account_id()}"
sess._create_s3_bucket_if_it_does_not_exist(bucket_name=bucket, region=sess._region_name) | _____no_output_____ | MIT | 1_data_prep.ipynb | marshmellow77/adverse-drug-effect-detection |
Save the name of the S3 bucket for later sessions | %store bucket | _____no_output_____ | MIT | 1_data_prep.ipynb | marshmellow77/adverse-drug-effect-detection |
Set up processing job | hf_processor = HuggingFaceProcessor(
role=role,
instance_type="ml.p3.2xlarge",
transformers_version='4.6',
base_job_name="az-ade",
pytorch_version='1.7',
instance_count=1,
)
from sagemaker.processing import ProcessingInput, ProcessingOutput
outputs=[
ProcessingOutput(output_name="train_data", source="/opt/ml/processing/training", destination=f"s3://{bucket}/processing_output/train_data"),
ProcessingOutput(output_name="validation_data", source="/opt/ml/processing/validation", destination=f"s3://{bucket}/processing_output/validation_data"),
ProcessingOutput(output_name="test_data", source="/opt/ml/processing/test", destination=f"s3://{bucket}/processing_output/test_data"),
]
arguments = ["--dataset-name", "ade_corpus_v2",
"--datasubset-name", "Ade_corpus_v2_classification",
"--model-name", "distilbert-base-uncased",
"--train-ratio", "0.7",
"--val-ratio", "0.15",]
hf_processor.run(
code="scripts/preprocess.py",
outputs=outputs,
arguments=arguments
)
preprocessing_job_description = hf_processor.jobs[-1].describe()
output_config = preprocessing_job_description['ProcessingOutputConfig']
for output in output_config['Outputs']:
print(output['S3Output']['S3Uri']) | _____no_output_____ | MIT | 1_data_prep.ipynb | marshmellow77/adverse-drug-effect-detection |
IllusTrip: Text to Video 3DPart of [Aphantasia](https://github.com/eps696/aphantasia) suite, made by Vadim Epstein [[eps696](https://github.com/eps696)] Based on [CLIP](https://github.com/openai/CLIP) + FFT/pixel ops from [Lucent](https://github.com/greentfrapp/lucent). 3D part by [deKxi](https://twitter.com/deKxi), based on [AdaBins](https://github.com/shariqfarooq123/AdaBins) depth. thanks to [Ryan Murdock](https://twitter.com/advadnoun), [Jonathan Fly](https://twitter.com/jonathanfly), [@eduwatch2](https://twitter.com/eduwatch2) for ideas. Features * continuously processes **multiple sentences** (e.g. illustrating lyrics or poems)* makes **videos**, evolving with pan/zoom/rotate motion* works with [inverse FFT](https://github.com/greentfrapp/lucent/blob/master/lucent/optvis/param/spatial.py) representation of the image or **directly with RGB** pixels (no GANs involved)* generates massive detailed textures (a la deepdream), **unlimited resolution*** optional **depth** processing for 3D look* various CLIP models* can start/resume from an image **Run the cell below after each session restart**Ensure that you're given Tesla T4/P4/P100 GPU, not K80! | #@title General setup
!pip install ftfy==5.8 transformers
!pip install gputil ffpb
try:
!pip3 install googletrans==3.1.0a0
from googletrans import Translator, constants
translator = Translator()
except: pass
# !apt-get -qq install ffmpeg
work_dir = '/content/illustrip'
import os
os.makedirs(work_dir, exist_ok=True)
%cd $work_dir
import os
import io
import time
import math
import random
import imageio
import numpy as np
import PIL
from base64 import b64encode
import shutil
from easydict import EasyDict as edict
a = edict()
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import transforms as T
from torch.autograd import Variable
from IPython.display import HTML, Image, display, clear_output
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import ipywidgets as ipy
from google.colab import output, files
import warnings
warnings.filterwarnings("ignore")
!pip install git+https://github.com/openai/CLIP.git --no-deps
import clip
!pip install sentence_transformers
from sentence_transformers import SentenceTransformer
!pip install kornia
import kornia
!pip install lpips
import lpips
!pip install PyWavelets==1.1.1
!pip install git+https://github.com/fbcotter/pytorch_wavelets
%cd /content
!rm -rf aphantasia
!git clone https://github.com/eps696/aphantasia
%cd aphantasia/
from clip_fft import to_valid_rgb, fft_image, rfft2d_freqs, img2fft, pixel_image, un_rgb
from utils import basename, file_list, img_list, img_read, txt_clean, plot_text, old_torch
from utils import slice_imgs, derivat, pad_up_to, slerp, checkout, sim_func, latent_anima
import transforms
import depth
from progress_bar import ProgressIPy as ProgressBar
shutil.copy('mask.jpg', work_dir)
depth_mask_file = os.path.join(work_dir, 'mask.jpg')
clear_output()
def save_img(img, fname=None):
    """Save a [c,h,w] float image (values expected in 0..1) as an 8-bit JPEG.

    Always writes a copy to 'result.jpg' (used for live preview in the
    notebook); additionally writes to `fname` when given.
    Fix: dropped the no-op `[:,:,:]` slice and the redundant re-wrapping
    of an already-converted ndarray in `np.array()` at save time.
    """
    img = np.transpose(np.array(img), (1, 2, 0))  # [c,h,w] -> [h,w,c]
    img = np.clip(img * 255, 0, 255).astype(np.uint8)
    if fname is not None:
        imageio.imsave(fname, img)
    imageio.imsave('result.jpg', img)
def makevid(seq_dir, size=None):
    """Encode the numbered .jpg sequence in `seq_dir` to an .mp4 and return
    an HTML <video> tag with the video inlined as a base64 data URL.

    NOTE: uses an IPython `!ffmpeg` shell magic, so this only runs inside
    a notebook. `size` (optional) sets both width and height attributes
    of the returned <video> element.
    """
    # infer zero-padding width from the first frame's filename length
    char_len = len(basename(img_list(seq_dir)[0]))
    out_sequence = seq_dir + '/%0{}d.jpg'.format(char_len)
    out_video = seq_dir + '.mp4'
    print('.. generating video ..')
    !ffmpeg -y -v warning -i $out_sequence -crf 18 $out_video
    # inline the whole file so the video survives notebook export
    data_url = "data:video/mp4;base64," + b64encode(open(out_video,'rb').read()).decode()
    wh = '' if size is None else 'width=%d height=%d' % (size, size)
    return """<video %s controls><source src="%s" type="video/mp4"></video>""" % (wh, data_url)
# Hardware check
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
import GPUtil as GPU
gpu = GPU.getGPUs()[0] # XXX: only one GPU on Colab and isn’t guaranteed
!nvidia-smi -L
print("GPU RAM {0:.0f}MB | Free {1:.0f}MB)".format(gpu.memoryTotal, gpu.memoryFree))
#@title Load inputs
#@markdown **Content** (either type a text string, or upload a text file):
content = "" #@param {type:"string"}
upload_texts = False #@param {type:"boolean"}
#@markdown **Style** (either type a text string, or upload a text file):
style = "" #@param {type:"string"}
upload_styles = False #@param {type:"boolean"}
#@markdown For non-English languages use Google translation:
translate = False #@param {type:"boolean"}
#@markdown Resume from the saved `.pt` snapshot, or from an image
#@markdown (resolution settings below will be ignored in this case):
if upload_texts:
print('Upload main text file')
uploaded = files.upload()
text_file = list(uploaded)[0]
texts = list(uploaded.values())[0].decode().split('\n')
texts = [tt.strip() for tt in texts if len(tt.strip())>0 and tt[0] != '#']
print(' main text:', text_file, len(texts), 'lines')
workname = txt_clean(basename(text_file))
else:
texts = [content]
workname = txt_clean(content)[:44]
if upload_styles:
print('Upload styles text file')
uploaded = files.upload()
text_file = list(uploaded)[0]
styles = list(uploaded.values())[0].decode().split('\n')
styles = [tt.strip() for tt in styles if len(tt.strip())>0 and tt[0] != '#']
print(' styles:', text_file, len(styles), 'lines')
else:
styles = [style]
resume = False #@param {type:"boolean"}
if resume:
print('Upload file to resume from')
resumed = files.upload()
resumed_filename = list(resumed)[0]
resumed_bytes = list(resumed.values())[0]
assert len(texts) > 0 and len(texts[0]) > 0, 'No input text[s] found!'
tempdir = os.path.join(work_dir, workname)
os.makedirs(tempdir, exist_ok=True)
print('main dir', tempdir) | _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
**`content`** (what to draw) is your primary input; **`style`** (how to draw) is optional, if you want to separate such descriptions. If you load text file[s], the imagery will interpolate from line to line (ensure equal line counts for content and style lists, for their accordance). | #@title Google Drive [optional]
#@markdown Run this cell, if you want to store results on your Google Drive.
using_GDrive = True#@param{type:"boolean"}
if using_GDrive:
import os
from google.colab import drive
if not os.path.isdir('/G/MyDrive'):
drive.mount('/G', force_remount=True)
gdir = '/G/MyDrive'
tempdir = os.path.join(gdir, 'illustrip', workname)
os.makedirs(tempdir, exist_ok=True)
print('main dir', tempdir)
#@title Main settings
sideX = 1280 #@param {type:"integer"}
sideY = 720 #@param {type:"integer"}
steps = 200 #@param {type:"integer"}
frame_step = 100 #@param {type:"integer"}
#@markdown > Config
method = 'RGB' #@param ['FFT', 'RGB']
model = 'ViT-B/32' #@param ['ViT-B/16', 'ViT-B/32', 'RN101', 'RN50x16', 'RN50x4', 'RN50']
# Default settings
if method == 'RGB':
align = 'overscan'
colors = 2
contrast = 1.2
sharpness = -1.
aug_noise = 0.
smooth = False
else:
align = 'uniform'
colors = 1.8
contrast = 1.1
sharpness = 1.
aug_noise = 2.
smooth = True
interpolate_topics = True
style_power = 1.
samples = 200
save_step = 1
learning_rate = 1.
aug_transform = 'custom'
similarity_function = 'cossim'
macro = 0.4
enforce = 0.
expand = 0.
zoom = 0.012
shift = 10
rotate = 0.8
distort = 0.3
animate_them = True
sample_decrease = 1.
DepthStrength = 0.
print(' loading CLIP model..')
model_clip, _ = clip.load(model, jit=old_torch())
modsize = model_clip.visual.input_resolution
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if model in xmem.keys():
sample_decrease *= xmem[model]
clear_output()
print(' using CLIP model', model) | _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
**`FFT`** method uses inverse FFT representation of the image. It allows flexible motion, but is either blurry (if smoothed) or noisy (if not). **`RGB`** method directly optimizes image pixels (without FFT parameterization). It's cleaner and more stable when zooming in. There are a few choices for CLIP `model` (results do vary!). I prefer ViT-B/32 for consistency, next best bet is ViT-B/16. **`steps`** defines the length of animation per text line (multiply it by the input's line count to get total video duration in frames). `frame_step` sets frequency of the changes in animation (how many frames between motion keypoints). Other settings [optional] | #@title Run this cell to override settings, if needed
#@markdown [to roll back defaults, run "Main settings" cell again]
style_power = 1. #@param {type:"number"}
overscan = True #@param {type:"boolean"}
align = 'overscan' if overscan else 'uniform'
interpolate_topics = True #@param {type:"boolean"}
#@markdown > Look
colors = 2 #@param {type:"number"}
contrast = 1.2 #@param {type:"number"}
sharpness = 0. #@param {type:"number"}
#@markdown > Training
samples = 200 #@param {type:"integer"}
save_step = 1 #@param {type:"integer"}
learning_rate = 1. #@param {type:"number"}
#@markdown > Tricks
aug_transform = 'custom' #@param ['elastic', 'custom', 'none']
aug_noise = 0. #@param {type:"number"}
macro = 0.4 #@param {type:"number"}
enforce = 0. #@param {type:"number"}
expand = 0. #@param {type:"number"}
similarity_function = 'cossim' #@param ['cossim', 'spherical', 'mixed', 'angular', 'dot']
#@markdown > Motion
zoom = 0.012 #@param {type:"number"}
shift = 10 #@param {type:"number"}
rotate = 0.8 #@param {type:"number"}
distort = 0.3 #@param {type:"number"}
animate_them = True #@param {type:"boolean"}
smooth = True #@param {type:"boolean"}
if method == 'RGB': smooth = False
| _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
`style_power` controls the strength of the style descriptions, compared to the main input. `overscan` provides better frame coverage (needed for RGB method). `interpolate_topics` changes the subjects smoothly, otherwise they're switched by cut, making sharper transitions. Decrease **`samples`** if you face OOM (it's the main RAM eater), or just to speed up the process (with the cost of quality). `save_step` defines how many optimization steps are taken between saved frames. Set it >1 for stronger image processing. Experimental tricks: `aug_transform` applies some augmentations, which quite radically change the output of this method (and slow down the process). Try yourself to see which is good for your case. `aug_noise` augmentation [FFT only!] seems to enhance optimization with transforms. `macro` boosts bigger forms. `enforce` adds more details by enforcing similarity between two parallel samples. `expand` boosts diversity (up to irrelevant) by enforcing difference between prev/next samples. Motion section: `shift` is in pixels, `rotate` in degrees. The values will be used as limits, if you mark `animate_them`. `smooth` reduces blinking, but induces motion blur with subtle screen-fixed patterns (valid only for FFT method, disabled for RGB). Add 3D depth [optional] | ### deKxi:: This whole cell contains most of what's needed,
# with just a few changes to hook it up via frame_transform
# (also glob_step now as global var)
# I highly recommend performing the frame transformations and depth *after* saving,
# (or just the depth warp if you prefer to keep the other affines as they are)
# from my testing it reduces any noticeable stretching and allows the new areas
# revealed from the changed perspective to be filled/detailed
# pretrained models: Nyu is much better but Kitti is an option too
depth_model = 'nyu' # @ param ["nyu","kitti"]
DepthStrength = 0.01 #@param{type:"number"}
MaskBlurAmt = 33 #@param{type:"integer"}
save_depth = False #@param{type:"boolean"}
size = (sideY,sideX)
#@markdown NB: depth computing may take up to ~3x more time. Read the comments inside for more info.
#@markdown Courtesy of [deKxi](https://twitter.com/deKxi)
if DepthStrength > 0:
if not os.path.exists("AdaBins_nyu.pt"):
!gdown https://drive.google.com/uc?id=1lvyZZbC9NLcS8a__YPcUP7rDiIpbRpoF
if not os.path.exists('AdaBins_nyu.pt'):
!wget https://www.dropbox.com/s/tayczpcydoco12s/AdaBins_nyu.pt
# if depth_model=='kitti' and not os.path.exists(os.path.join(workdir_depth, "pretrained/AdaBins_kitti.pt")):
# !gdown https://drive.google.com/uc?id=1HMgff-FV6qw1L0ywQZJ7ECa9VPq1bIoj
if save_depth:
depthdir = os.path.join(tempdir, 'depth')
os.makedirs(depthdir, exist_ok=True)
print('depth dir', depthdir)
else:
depthdir = None
depth_infer, depth_mask = depth.init_adabins(model_path='AdaBins_nyu.pt', mask_path='mask.jpg', size=size)
def depth_transform(img_t, img_np, depth_infer, depth_mask, size, depthX=0, scale=1., shift=[0,0], colors=1, depth_dir=None, save_num=0):
    """Apply an AdaBins-based depth warp ("3D look") to the current frame.

    img_t      : current frame as a torch tensor (warped and returned)
    img_np     : last saved frame as a uint8 HWC array, or None on the
                 very first call (then a preview is rendered from img_t)
    depth_infer, depth_mask : objects returned by depth.init_adabins()
    size       : (H, W) of the output frames
    depthX     : warp strength (DepthStrength setting)
    scale/shift: current motion params; NOTE(review): `scale` is indexed
                 as scale[0] below, so callers pass a sequence despite the
                 scalar-looking default of 1. — confirm before reuse.
    depth_dir  : if set, depth maps are saved there (save_num = frame index)
    """
    # dX/Y define the origin point of the depth warp, effectively a "3D pan
    # zoom", in [-1..1]: plus = look ahead, minus = look aside
    dX = 100. * shift[0] / size[1]
    dY = 100. * shift[1] / size[0]
    # dZ = movement direction: 1 away (zoom out), 0 towards (zoom in), 0.5 stay
    dZ = 0.5 + 23. * (scale[0]-1)
    # dZ += 0.5 * float(math.sin(((save_num % 70)/70) * math.pi * 2))
    if img_np is None:
        # first frame: render an RGB preview from the raw params tensor,
        # since no saved frame exists yet for depth inference
        img2 = img_t.clone().detach()
        par, imag, _ = pixel_image(img2.shape, resume=img2)
        img2 = to_valid_rgb(imag, colors=colors)()
        img2 = img2.detach().cpu().numpy()[0]
        img2 = (np.transpose(img2, (1,2,0))) # [h,w,c]
        img2 = np.clip(img2*255, 0, 255).astype(np.uint8)
        image_pil = T.ToPILImage()(img2)
        del img2
    else:
        image_pil = T.ToPILImage()(img_np)
    # depth is inferred at half resolution to save time/memory
    size2 = [s//2 for s in size]
    img = depth.depthwarp(img_t, image_pil, depth_infer, depth_mask, size2, depthX, [dX,dY], dZ, rescale=0.5, clip_range=2, save_path=depth_dir, save_num=save_num)
    return img
| _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
Generate | #@title Generate
if aug_transform == 'elastic':
trform_f = transforms.transforms_elastic
sample_decrease *= 0.95
elif aug_transform == 'custom':
trform_f = transforms.transforms_custom
sample_decrease *= 0.95
else:
trform_f = transforms.normalize()
if enforce != 0:
sample_decrease *= 0.5
samples = int(samples * sample_decrease)
print(' using %s method, %d samples' % (method, samples))
if translate:
translator = Translator()
def enc_text(txt):
    """Encode a text prompt into a CLIP embedding.

    If the global `translate` flag is on, the prompt is first translated
    to English via googletrans. Returns a detached copy of the embedding.
    """
    if translate:
        txt = translator.translate(txt, dest='en').text
    tokens = clip.tokenize(txt).cuda()[:77]
    embedding = model_clip.encode_text(tokens)
    return embedding.detach().clone()
# Encode inputs
count = 0 # max count of texts and styles
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
assert count > 0, "No inputs found!"
# !rm -rf $tempdir
# os.makedirs(tempdir, exist_ok=True)
# opt_steps = steps * save_step # for optimization
glob_steps = count * steps # saving
if glob_steps == frame_step: frame_step = glob_steps // 2 # otherwise no motion
outpic = ipy.Output()
outpic
if method == 'RGB':
if resume:
img_in = imageio.imread(resumed_bytes) / 255.
params_tmp = torch.Tensor(img_in).permute(2,0,1).unsqueeze(0).float().cuda()
params_tmp = un_rgb(params_tmp, colors=1.)
sideY, sideX = img_in.shape[0], img_in.shape[1]
else:
params_tmp = torch.randn(1, 3, sideY, sideX).cuda() # * 0.01
else: # FFT
if resume:
if os.path.splitext(resumed_filename)[1].lower()[1:] in ['jpg','png','tif','bmp']:
img_in = imageio.imread(resumed_bytes)
params_tmp = img2fft(img_in, 1.5, 1.) * 2.
else:
params_tmp = torch.load(io.BytesIO(resumed_bytes))
if isinstance(params_tmp, list): params_tmp = params_tmp[0]
params_tmp = params_tmp.cuda()
sideY, sideX = params_tmp.shape[2], (params_tmp.shape[3]-1)*2
else:
params_shape = [1, 3, sideY, sideX//2+1, 2]
params_tmp = torch.randn(*params_shape).cuda() * 0.01
params_tmp = params_tmp.detach()
# function() = torch.transformation(linear)
# animation controls
if animate_them:
if method == 'RGB':
m_scale = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[-0.3])
m_scale = 1 + (m_scale + 0.3) * zoom # only zoom in
else:
m_scale = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.6])
m_scale = 1 - (m_scale-0.6) * zoom # ping pong
m_shift = latent_anima([2], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5,0.5])
m_angle = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5])
m_shear = latent_anima([1], glob_steps, frame_step, uniform=True, cubic=True, start_lat=[0.5])
m_shift = (m_shift-0.5) * shift * abs(m_scale-1.) / zoom
m_angle = (m_angle-0.5) * rotate * abs(m_scale-1.) / zoom
m_shear = (m_shear-0.5) * distort * abs(m_scale-1.) / zoom
def get_encs(encs, num):
    """Return `steps` spherically-interpolated encodings between keyframe
    `num` and keyframe `num+1` (both clamped to the last available one,
    so the final topic simply holds). Empty input yields an empty list.
    """
    if not encs:
        return []
    last = len(encs) - 1
    start = encs[min(num, last)]
    end = encs[min(num + 1, last)]
    return slerp(start, end, steps)
def frame_transform(img, size, angle, shift, scale, shear):
    """Apply one motion step (rotate/translate/zoom/shear) to a frame tensor,
    then crop/pad back to `size` (H, W).

    Branches on the installed torch/torchvision version because the affine
    API changed: 0.8.x uses `fillcolor`/`resample`, 0.9+ uses
    `fill`/`interpolation` (and center_crop pads short sides itself).
    """
    if old_torch(): # 1.7.1
        img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
        img = T.functional.center_crop(img, size)
        img = pad_up_to(img, size)
    else: # 1.8+
        img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
        img = T.functional.center_crop(img, size) # on 1.8+ also pads
    return img
global img_np
img_np = None
prev_enc = 0
def process(num):
    """Generate and save `steps` frames for topic number `num`.

    Each iteration: (1) warps the running image parameters with the current
    motion (and optional depth) transform, (2) rebuilds the differentiable
    image + optimizer, (3) runs `save_step` CLIP-guided optimization steps,
    (4) saves the frame to `tempdir` and updates the notebook preview.
    Relies heavily on notebook-level globals (settings, CLIP model, motion
    curves m_*, tempdir, etc.).
    """
    global params_tmp, img_np, opt_state, params, image_f, optimizer, pbar

    # per-frame text/style encodings: either slerp between neighbouring
    # topics, or hold the current topic constant for all `steps` frames
    if interpolate_topics:
        txt_encs = get_encs(key_txt_encs, num)
        styl_encs = get_encs(key_styl_encs, num)
    else:
        txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * steps if len(key_txt_encs) > 0 else []
        styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * steps if len(key_styl_encs) > 0 else []

    if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])
    if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])

    for ii in range(steps):
        glob_step = num * steps + ii # saving/transforming

        ### animation: transform frame, reload params
        h, w = sideY, sideX

        # transform frame for motion: pick animated values from the
        # precomputed curves, or fixed constants when not animating
        scale = m_scale[glob_step] if animate_them else 1-zoom
        trans = tuple(m_shift[glob_step]) if animate_them else [0, shift]
        angle = m_angle[glob_step][0] if animate_them else rotate
        shear = m_shear[glob_step][0] if animate_them else distort

        if method == 'RGB':
            # RGB params are already pixels; warp them directly
            if DepthStrength > 0:
                params_tmp = depth_transform(params_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step)
            params_tmp = frame_transform(params_tmp, (h,w), angle, trans, scale, shear)
            params, image_f, _ = pixel_image([1,3,h,w], resume=params_tmp)
            img_tmp = None
        else: # FFT
            # FFT params must be converted to image space, warped, and
            # converted back; the API differs across torch versions
            if old_torch(): # 1.7.1
                img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=(h,w))
                if DepthStrength > 0:
                    img_tmp = depth_transform(img_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step)
                img_tmp = frame_transform(img_tmp, (h,w), angle, trans, scale, shear)
                params_tmp = torch.rfft(img_tmp, 2, normalized=True)
            else: # 1.8+
                if type(params_tmp) is not torch.complex64:
                    params_tmp = torch.view_as_complex(params_tmp)
                img_tmp = torch.fft.irfftn(params_tmp, s=(h,w), norm='ortho')
                if DepthStrength > 0:
                    img_tmp = depth_transform(img_tmp, img_np, depth_infer, depth_mask, size, DepthStrength, scale, trans, colors, depthdir, glob_step)
                img_tmp = frame_transform(img_tmp, (h,w), angle, trans, scale, shear)
                params_tmp = torch.fft.rfftn(img_tmp, s=[h,w], dim=[2,3], norm='ortho')
                params_tmp = torch.view_as_real(params_tmp)
            params, image_f, _ = fft_image([1,3,h,w], resume=params_tmp, sd=1.)

        image_f = to_valid_rgb(image_f, colors=colors)
        del img_tmp

        # fresh optimizer each frame; optionally resume its state for
        # smoother (less blinking) transitions
        optimizer = torch.optim.Adam(params, learning_rate)
        # optimizer = torch.optim.AdamW(params, learning_rate, weight_decay=0.01, amsgrad=True)
        if smooth is True and num + ii > 0:
            optimizer.load_state_dict(opt_state)

        # get encoded inputs for this frame
        txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None
        styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None

        ### optimization
        for ss in range(save_step):
            loss = 0

            # optional noise augmentation (FFT method) before rendering
            noise = aug_noise * (torch.rand(1, 1, *params[0].shape[2:4], 1)-0.5).cuda() if aug_noise > 0 else 0.
            img_out = image_f(noise)
            img_sliced = slice_imgs([img_out], samples, modsize, trform_f, align, macro)[0]
            out_enc = model_clip.encode_image(img_sliced)

            if method == 'RGB': # empirical hack
                loss += 1.5 * abs(img_out.mean((2,3)) - 0.45).mean() # fix brightness
                loss += 1.5 * abs(img_out.std((2,3)) - 0.17).sum() # fix contrast

            # maximize similarity to the text/style prompts
            if txt_enc is not None:
                loss -= sim_func(txt_enc, out_enc, similarity_function)
            if styl_enc is not None:
                loss -= style_power * sim_func(styl_enc, out_enc, similarity_function)
            if sharpness != 0: # mode = scharr|sobel|naive
                loss -= sharpness * derivat(img_out, mode='naive')
                # loss -= sharpness * derivat(img_sliced, mode='scharr')
            if enforce != 0:
                # detail boost: enforce similarity between two parallel samples
                img_sliced = slice_imgs([image_f(noise)], samples, modsize, trform_f, align, macro)[0]
                out_enc2 = model_clip.encode_image(img_sliced)
                loss -= enforce * sim_func(out_enc, out_enc2, similarity_function)
                del out_enc2; torch.cuda.empty_cache()
            if expand > 0:
                # diversity boost: penalize similarity to the previous frame
                global prev_enc
                if ii > 0:
                    loss += expand * sim_func(prev_enc, out_enc, similarity_function)
                prev_enc = out_enc.detach().clone()
            del img_out, img_sliced, out_enc; torch.cuda.empty_cache()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        ### save params & frame
        params_tmp = params[0].detach().clone()
        if smooth is True:
            opt_state = optimizer.state_dict()

        with torch.no_grad():
            img_t = image_f(contrast=contrast)[0].permute(1,2,0)
            img_np = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)
        imageio.imsave(os.path.join(tempdir, '%05d.jpg' % glob_step), img_np, quality=95)
        shutil.copy(os.path.join(tempdir, '%05d.jpg' % glob_step), 'result.jpg')
        outpic.clear_output()
        with outpic:
            display(Image('result.jpg'))
        del img_t
        pbar.upd()
params_tmp = params[0].detach().clone()
outpic = ipy.Output()
outpic
pbar = ProgressBar(glob_steps)
for i in range(count):
process(i)
HTML(makevid(tempdir))
files.download(tempdir + '.mp4')
## deKxi: downloading depth video
if save_depth and DepthStrength > 0:
HTML(makevid(depthdir))
files.download(depthdir + '.mp4') | _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
If video is not auto-downloaded after generation (for whatever reason), run this cell to do that: | files.download(tempdir + '.mp4')
if save_depth and DepthStrength > 0:
files.download(depthdir + '.mp4') | _____no_output_____ | MIT | IllusTrip3D.ipynb | z-tasker/aphantasia |
1) Get data in a pandas.DataFrame and plot it using matplotlib.pyplot | # Get data
# 1) directement sous forme de list python
GPU = [2048,2048,4096,4096,3072,6144,6144,8192,8192,8192,8192,11264,11264]
prix = [139.96,149.95,184.96,194.95,299.95,332.95,359.95,459.95,534.95,569.95,699.95,829.96,929.95]
data = pd.DataFrame({'x1':GPU,'y':prix})
# Remarque: On peut également enregistrer des données structurées (dataFrame) en .csv
data.to_csv('graphicCardsData.csv',index=False)
# 2) En utilisant la fonction .read_csv() de pandas pour importer des données extérieure sous form .csv
# directement dans un pandas.DataFrame
data = pd.read_csv('graphicCards.csv')
data.head()
data = data[['memory (Go)', 'price (euros)']]
data = data.rename(columns={"memory (Go)": 'x1', 'price (euros)': 'y'})
data['x1'] = data['x1'] * 1000
#PLot data
plt.plot(data.x1,data.y,'o')
plt.xlabel('GPU (Mo)')
plt.ylabel('prix (€)')
plt.show(); | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
2) Contruire un modéle pour nos données | # Définir notre hypothèse (fonction)
def hypothesis(x, theta):
    """Linear model prediction h(x) = x . theta (dot product, so it works
    for a single scalar feature as well as for vectors/matrices)."""
    prediction = np.dot(x, theta)
    return prediction
# On génére aléatoirement une valeur de départ pour le paramètre theta1 de notre modèle
theta = np.random.rand()
# Fonction pour générer la droite représentant notre modèle
def getHypothesisForPLot(theta):
    """Sample the model line h(x) = theta * x over 0..12000 Mo (step 100)
    and return it as a DataFrame with columns 'x' and 'y', for plotting."""
    xs = np.arange(0, 12000, 100)
    ys = [hypothesis(x, theta) for x in xs]
    return pd.DataFrame({'x': xs, 'y': ys})
# On plot les données avec notre hypothèse ...
plt.plot(data.x1,data.y,'o',label='data')
plt.plot(getHypothesisForPLot(theta).x,getHypothesisForPLot(theta).y ,'r',label='hypothèse')
plt.xlabel('GPU (Mo)')
plt.ylabel('prix (€)')
plt.title("C'est pas ça ....")
plt.legend()
plt.show();
print("theta = %f" % theta) | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
3) Tester la pertinence de notre modèle: la fonction de coût | data.shape
# On définit notre fonction de coût: somme quadratique (eg: on somme les carré)
def costFunction(y, yhat):
    """Quadratic cost J = (2/m) * sum((yhat - y)^2) between true values `y`
    and predictions `yhat` (m = number of examples)."""
    m = y.shape[0]
    residuals = yhat - y
    return (2 / m) * np.sum(np.square(residuals))
# Prix prédis par notre modèle (avec un theta choisi pour illustrer) pour chaque exemple
theta = 0.07
yhat = hypothesis(data.x1,theta)
#Comment fonctionne la fonction de coût: on somme le carré de toute les barre noire
plt.plot(data.x1,data.y,'o',label='data')
plt.plot(getHypothesisForPLot(theta).x,getHypothesisForPLot(theta).y,'r',label='hypothèse')
for i in range(data.shape[0]):
plt.plot((data.x1[i],data.x1[i]), (min(data.y[i],yhat[i]),max(data.y[i],yhat[i])), 'k-')
plt.xlabel('GPU (Mo)')
plt.ylabel('prix (€)')
plt.legend()
plt.show();
print("theta = %f" % theta)
print("J(theta) = %f" % costFunction(data.y,yhat)) | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
4) À quoi ressemble J(theta) en fonction de theta1 | # Calculons (brutalement) la valeur de J(theta) dans un intervale de valeur de theta1
# pour observer la forme de notre fonction de coût que nous allons chercher à minimiser
thetaRange = np.arange(-0.8,1,0.01)
costFctEvol = pd.DataFrame({'theta':thetaRange,
'cost':[costFunction(data.y,hypothesis(data.x1,theta))
for theta in thetaRange]})
plt.plot(costFctEvol.theta,costFctEvol.cost)
plt.xlabel('theta')
plt.ylabel('J(theta)')
plt.show; | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
5) La descente de Gradient | # La descente de gradient utilise la notion de dérivée,
# illustrée ici avec la fonction carré (qui doit nous en rappeler une autre!)
def fct(x):
    """The square function f(x) = x^2, used to illustrate derivatives."""
    return x ** 2
def fctDeriv(x):
    """Derivative of the square function: f'(x) = 2x."""
    slope = 2 * x
    return slope
fctCarre = pd.DataFrame({'x':np.arange(-10,10,0.1),'y':[fct(x) for x in np.arange(-10,10,0.1)]})
fctCarreD = pd.DataFrame({'x':np.arange(-10,10,0.1),
'y':[fctDeriv(x) for x in np.arange(-10,10,0.1)]})
plt.plot(fctCarre.x,fctCarre.y,label='f(x)')
plt.plot(fctCarreD.x,fctCarreD.y,label="f'(x)")
plt.legend();
# La descente de gradient utilise la dérivé de la fonction de coût
# par rapport au paramètre theta1
def costFctDeriv(x, y, yhat):
    """Derivative of the quadratic cost with respect to theta:
    dJ/dtheta = sum((yhat - y) * x) / m. The double sum matches the
    original pandas-friendly reduction (second sum on a scalar is a no-op).
    """
    m = y.shape[0]
    errors = yhat - y
    return np.sum(np.sum(errors * x.T)) / m
# À chaque étape de la descente de gradient (jusqu'à la convergence),
# on incremente la valeur de theta1 par ce résultat.
# Alpha est le learning rate
def gradDescent(x, y, yhat, alpha):
    """One gradient-descent increment for theta: step against the gradient
    of the cost, scaled by the learning rate `alpha`."""
    step = alpha * costFctDeriv(x, y, yhat)
    return -step
# on plot les données avec l'hypothèse correpondant à la valeur de theta
# ainsi que l'évolution dans la courbe de J(theta) en fonction de theta
# On rajoute également la valeur de J(theta) en fonction du temps qui va nous servir à
# débuger notre algorithme
def plotData(ax,data,theta,yhat,gradDescentEvol, title=''):
    """Plot the dataset, the current hypothesis line, and one vertical
    residual bar per example (the quantities squared by the cost function)
    on axes `ax`. `yhat` holds the model's predictions for `data.x1`."""
    ax.plot(data.x1,data.y,'o',label='data')
    ax.plot(getHypothesisForPLot(theta).x,getHypothesisForPLot(theta).y,'r',label='hypothèse')
    # one black bar per example, from the data point to the prediction
    for i in range(data.shape[0]):
        ax.plot((data.x1[i],data.x1[i]), (min(data.y[i],yhat[i]),max(data.y[i],yhat[i])), 'k-')
    ax.set_xlabel('iteration step')
    if title != "":
        ax.set_title(title)
    ax.legend()
def plotCostFunction(ax,data,theta,gradDescentEvol,thetaInit, title=''):
    """Plot the J(theta) curve over a range around the initial theta, with
    the gradient-descent trajectory (red dots joined by black segments)
    overlaid on axes `ax`."""
    # sweep theta around |thetaInit| (offset 0.07 centers it near the minimum)
    thetaRange = np.arange(-abs(thetaInit)+0.07,abs(thetaInit)+0.07,0.01)
    costFctEvol = pd.DataFrame({'theta':thetaRange,
                                'cost':[costFunction(data.y,hypothesis(data.x1,genTheta))
                                        for genTheta in thetaRange]})
    ax.plot(costFctEvol.theta,costFctEvol.cost,label='J(theta)')
    # descent trajectory: one red dot per step, segments between steps
    for i in range(gradDescentEvol.shape[0]):
        ax.plot(gradDescentEvol.theta[i],gradDescentEvol.J[i],'ro')
    for i in range(gradDescentEvol.shape[0]-1):
        ax.plot((gradDescentEvol.theta[i],gradDescentEvol.theta[i+1]),
                (gradDescentEvol.J[i],gradDescentEvol.J[i+1]),'k-',lw=1)
    ax.set_xlabel('iteration step')
    if title != "":
        ax.set_title(title)
    ax.legend()
def plotCostFunctionEvol(ax,gradDescentEvol,title=""):
    """Debug view: plot the cost J(theta) against the iteration number."""
    steps = np.arange(gradDescentEvol.shape[0])
    ax.plot(steps, gradDescentEvol.J, label='J(theta)')
    ax.set_xlabel('iteration step')
    if title != "":
        ax.set_title(title)
    ax.legend()
# Starting point: theta1 is drawn uniformly at random in [0, 1).
# epsilon is the relative-improvement threshold used to stop the descent.
thetaInit = np.random.rand()
yhat = hypothesis(data.x1,thetaInit)
alpha = 0.003  # learning rate
epsilon = 0.001
# DataFrame recording (theta, J(theta)) at every iteration, for plotting.
gradDescentEvol = pd.DataFrame({'theta':thetaInit,
                                'J':costFunction(data.y,yhat)},index = np.arange(1))
# Plotting setup.
plt.rcParams['figure.figsize'] = [16, 5]
costFct = 0
count = 0
theta = thetaInit
# Gradient-descent loop: stop once the relative change of the cost between
# two iterations drops below epsilon.
# NOTE(review): on the first pass costFct == 0, so the condition divides by
# zero — this presumably only works because costFunction returns a NumPy
# float (giving inf plus a RuntimeWarning rather than raising). Consider
# initialising costFct differently; confirm costFunction's return type.
while np.abs(costFunction(data.y,yhat) - costFct)/costFct >= epsilon:
    count += 1
    costFct = costFunction(data.y,yhat)
    theta += gradDescent(data.x1,data.y,yhat,alpha)
    yhat = hypothesis(data.x1,theta)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — migrate to
    # pd.concat([...], ignore_index=True) when upgrading pandas.
    gradDescentEvol = gradDescentEvol.append(pd.DataFrame({'theta':theta,
                                                           'J':costFunction(data.y,yhat)},
                                                          index = np.arange(1)),
                                             ignore_index=True)
    # Redraw the three diagnostic panels in place (animated notebook output).
    fig, ax = plt.subplots(ncols=3)
    plotData(ax[0],data,theta,yhat,gradDescentEvol)
    plotCostFunction(ax[1],data,theta,gradDescentEvol,thetaInit)
    plotCostFunctionEvol(ax[2],gradDescentEvol)
    display.clear_output(wait=True)
    display.display(plt.gcf())
time.sleep(1) | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
6) Conclusion | # Afficher les résultat:
# Report the final state of the descent.
print('La descente de gradient a été réalisé en %i étapes.' % count)
print('theta = %f' % theta)
print('J(theta) = %f' % costFunction(data.y,yhat))
# Use the fitted model to price two hypothetical cards
# (inputs presumably in MB of GPU memory, like x1 — see axis label below).
newGPUs = [3072*1.5,11264*1.2]
for newGPU in newGPUs:
    print("Notre nouvelle carte de %i Mo de GPU pourra se vendre autour de %.2f €" %
          (newGPU,newGPU*theta))
# Final plot: data, fitted line, residual segments, and the two predictions.
plt.rcParams['figure.figsize'] = [14, 8]
plt.plot(data.x1,data.y,'o',label='data')
plt.plot(getHypothesisForPLot(theta).x,getHypothesisForPLot(theta).y,'r',label='hypothèse')
for i in range(data.shape[0]):
    plt.plot((data.x1[i],data.x1[i]), (min(data.y[i],yhat[i]),max(data.y[i],yhat[i])), 'k-')
plt.plot(newGPUs,[newGPU*theta for newGPU in newGPUs], 'or', label='predictions')
plt.xlabel('GPU (Mo)')
plt.ylabel('prix (€)')
plt.legend()
plt.show(); | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
7) Choix du taux d'apprentissage alpha | # On utilise donc une valeur de départ pour theta généré aléatoirement entre 0 et 1,
# Compare three learning rates side by side, starting from the same random
# theta in [0, 1). (The original comment claimed a single rate of
# 0.00000003; the code actually uses the three alphas below.)
# epsilon is the relative-improvement stopping threshold.
thetaInit = np.random.rand()
yhat1 = hypothesis(data.x1,thetaInit)
yhat2 = hypothesis(data.x1,thetaInit)
yhat3 = hypothesis(data.x1,thetaInit)
# Three learning rates: too small, about right, and too large.
alpha1 = 0.000000001
alpha2 = 0.00000001
alpha3 = 0.00000006
epsilon = 0.001
# One (theta, J) history per run, for the three diagnostic plots.
gradDescentEvol1 = pd.DataFrame({'theta':thetaInit,
                                 'J':costFunction(data.y,yhat1)},index = np.arange(1))
gradDescentEvol2 = pd.DataFrame({'theta':thetaInit,
                                 'J':costFunction(data.y,yhat2)},index = np.arange(1))
gradDescentEvol3 = pd.DataFrame({'theta':thetaInit,
                                 'J':costFunction(data.y,yhat3)},index = np.arange(1))
# Plotting setup.
plt.rcParams['figure.figsize'] = [16, 5]
count = 0
costFct1 = 0
theta1 = thetaInit
costFct2 = 0
theta2 = thetaInit
costFct3 = 0
theta3 = thetaInit
# The stopping rule only watches run 2 (the "correct" alpha); runs 1 and 3
# are advanced in lockstep for the comparison.
# NOTE(review): same first-iteration division by costFct2 == 0 as in the
# single-run cell above — it presumably relies on NumPy returning inf
# instead of raising; confirm costFunction's return type.
while np.abs(costFunction(data.y,yhat2) - costFct2)/costFct2 >= epsilon:
    count += 1
    costFct1 = costFunction(data.y,yhat1)
    theta1 += gradDescent(data.x1,data.y,yhat1,alpha1)
    yhat1 = hypothesis(data.x1,theta1)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — migrate to
    # pd.concat when upgrading pandas (applies to all three appends below).
    gradDescentEvol1 = gradDescentEvol1.append(pd.DataFrame({'theta':theta1,
                                                             'J':costFunction(data.y,yhat1)},
                                                            index = np.arange(1)),
                                               ignore_index=True)
    costFct2 = costFunction(data.y,yhat2)
    theta2 += gradDescent(data.x1,data.y,yhat2,alpha2)
    yhat2 = hypothesis(data.x1,theta2)
    gradDescentEvol2 = gradDescentEvol2.append(pd.DataFrame({'theta':theta2,
                                                             'J':costFunction(data.y,yhat2)},
                                                            index = np.arange(1)),
                                               ignore_index=True)
    costFct3 = costFunction(data.y,yhat3)
    theta3 += gradDescent(data.x1,data.y,yhat3,alpha3)
    yhat3 = hypothesis(data.x1,theta3)
    gradDescentEvol3 = gradDescentEvol3.append(pd.DataFrame({'theta':theta3,
                                                             'J':costFunction(data.y,yhat3)},
                                                            index = np.arange(1)),
                                               ignore_index=True)
    # Redraw the three panels in place (animated notebook output).
    fig, ax = plt.subplots(ncols=3)
    plotCostFunctionEvol(ax[0],gradDescentEvol1,'small alpha')
    plotCostFunctionEvol(ax[1],gradDescentEvol2,'correct alpha')
    plotCostFunctionEvol(ax[2],gradDescentEvol3,'huge alpha')
    display.clear_output(wait=True)
    display.display(plt.gcf())
time.sleep(1) | _____no_output_____ | MIT | notebook/linearRegression/GPUprice.ipynb | lbeaucourt/SIGMA-machine-learning |
Vertex client library: Custom training tabular regression model with pipeline for online prediction with training pipeline Run in Colab View on GitHub OverviewThis tutorial demonstrates how to use the Vertex client library for Python to train and deploy a custom tabular regression model for online prediction, using a training pipeline. DatasetThe dataset used for this tutorial is the [Boston Housing Prices dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html). The version of the dataset you will use in this tutorial is built into TensorFlow. The trained model predicts the median price of a house in units of 1K USD. ObjectiveIn this tutorial, you create a custom model from a Python script in a Google prebuilt Docker container using the Vertex client library, and then do a prediction on the deployed model by sending data. You can alternatively create custom models using `gcloud` command-line tool or online using Google Cloud Console.The steps performed include:- Create a Vertex custom job for training a model.- Create a `TrainingPipeline` resource.- Train a TensorFlow model with the `TrainingPipeline` resource.- Retrieve and load the model artifacts.- View the model evaluation.- Upload the model as a Vertex `Model` resource.- Deploy the `Model` resource to a serving `Endpoint` resource.- Make a prediction.- Undeploy the `Model` resource. CostsThis tutorial uses billable components of Google Cloud (GCP):* Vertex AI* Cloud StorageLearn about [Vertex AIpricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. InstallationInstall the latest version of Vertex client library. | import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Install the latest GA version of *google-cloud-storage* library as well. | ! pip3 install -U google-cloud-storage $USER_FLAG | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Restart the kernelOnce you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. | if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Before you begin GPU runtime*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.5. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. | PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations) | REGION = "us-central1" # @param {type: "string"} | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. | from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Authenticate your Google Cloud account**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.**Click Create service account**.In the **Service account name** field, enter a name, and click **Create**.In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.Click Create. A JSON file that contains your key downloads to your local environment.Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. | # If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS '' | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**When you submit a custom training job using the Vertex client library, you upload a Python packagecontaining your training code to a Cloud Storage bucket. Vertex runsthe code from this package. In this tutorial, Vertex also saves thetrained model that results from your job in the same bucket. You can thencreate an `Endpoint` resource based on this output in order to serveonline predictions.Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. | BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. | ! gsutil mb -l $REGION $BUCKET_NAME | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Finally, validate access to your Cloud Storage bucket by examining its contents: | ! gsutil ls -al $BUCKET_NAME | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Set up variablesNext, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex client libraryImport the Vertex client library into our Python environment. | import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Vertex constantsSetup up the following constants for Vertex:- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources. | # API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
CustomJob constantsSet constants unique to CustomJob training:- Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for. | CUSTOM_TASK_GCS_PATH = (
"gs://google-cloud-aiplatform/schema/trainingjob/definition/custom_task_1.0.0.yaml"
) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Hardware AcceleratorsSet the hardware accelerators (e.g., GPU), if any, for training and prediction.Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)For GPU, available accelerators include: - aip.AcceleratorType.NVIDIA_TESLA_K80 - aip.AcceleratorType.NVIDIA_TESLA_P100 - aip.AcceleratorType.NVIDIA_TESLA_P4 - aip.AcceleratorType.NVIDIA_TESLA_T4 - aip.AcceleratorType.NVIDIA_TESLA_V100Otherwise specify `(None, None)` to use a container image to run on a CPU.*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3 -- which is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support. | if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Container (Docker) imageNext, we will set the Docker container images for training and prediction - TensorFlow 1.15 - `gcr.io/cloud-aiplatform/training/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-3:latest` - TensorFlow 2.4 - `gcr.io/cloud-aiplatform/training/tf-cpu.2-4:latest` - `gcr.io/cloud-aiplatform/training/tf-gpu.2-4:latest` - XGBoost - `gcr.io/cloud-aiplatform/training/xgboost-cpu.1-1` - Scikit-learn - `gcr.io/cloud-aiplatform/training/scikit-learn-cpu.0-23:latest` - Pytorch - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-4:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-5:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-6:latest` - `gcr.io/cloud-aiplatform/training/pytorch-cpu.1-7:latest`For the latest list, see [Pre-built containers for training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). 
- TensorFlow 1.15 - `gcr.io/cloud-aiplatform/prediction/tf-cpu.1-15:latest` - `gcr.io/cloud-aiplatform/prediction/tf-gpu.1-15:latest` - TensorFlow 2.1 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-1:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-1:latest` - TensorFlow 2.2 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-2:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-2:latest` - TensorFlow 2.3 - `gcr.io/cloud-aiplatform/prediction/tf2-cpu.2-3:latest` - `gcr.io/cloud-aiplatform/prediction/tf2-gpu.2-3:latest` - XGBoost - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-2:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.1-1:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-90:latest` - `gcr.io/cloud-aiplatform/prediction/xgboost-cpu.0-82:latest` - Scikit-learn - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-23:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-22:latest` - `gcr.io/cloud-aiplatform/prediction/sklearn-cpu.0-20:latest`For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) | if os.getenv("IS_TESTING_TF"):
    TF = os.getenv("IS_TESTING_TF")
else:
    TF = "2-1"  # default TensorFlow version, "major-minor" form
# Build prebuilt-container image names from the TF version and GPU flags.
# NOTE(review): the TRAIN_VERSION assignments are identical in both branches
# of this if/else — only the deployment prefix differs ("tf2-" for TF 2.x
# vs "tf-" for TF 1.x) — so the block could be simplified.
if TF[0] == "2":
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
    if TRAIN_GPU:
        TRAIN_VERSION = "tf-gpu.{}".format(TF)
    else:
        TRAIN_VERSION = "tf-cpu.{}".format(TF)
    if DEPLOY_GPU:
        DEPLOY_VERSION = "tf-gpu.{}".format(TF)
    else:
        DEPLOY_VERSION = "tf-cpu.{}".format(TF)
# Fully-qualified Container Registry image URIs for training and serving.
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Machine TypeNext, set the machine type to use for training and prediction.- Set the variables `TRAIN_COMPUTE` and `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for training and prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: The following is not supported for training:* - `standard`: 2 vCPUs - `highcpu`: 2, 4 and 8 vCPUs*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. | if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
TutorialNow you are ready to start creating your own custom model and training for Boston Housing. Set up clientsThe Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.- Model Service for `Model` resources.- Pipeline Service for training.- Endpoint Service for deployment.- Job Service for batch jobs and custom training.- Prediction Service for serving. | # client options same for all services
# All Vertex service clients talk to the same regional API endpoint.
client_options = {"api_endpoint": API_ENDPOINT}

# One small factory per service, so each client is built the same way.
def create_model_client():
    return aip.ModelServiceClient(client_options=client_options)

def create_pipeline_client():
    return aip.PipelineServiceClient(client_options=client_options)

def create_endpoint_client():
    return aip.EndpointServiceClient(client_options=client_options)

def create_prediction_client():
    return aip.PredictionServiceClient(client_options=client_options)

# Instantiate every client up front, keyed by service name.
clients = {
    "model": create_model_client(),
    "pipeline": create_pipeline_client(),
    "endpoint": create_endpoint_client(),
    "prediction": create_prediction_client(),
}

for client in clients.items():
    print(client)
print(client) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Train a modelThere are two ways you can train a custom model using a container image:- **Use a Google Cloud prebuilt container**. If you use a prebuilt container, you will additionally specify a Python package to install into the container image. This Python package contains your code for training a custom model.- **Use your own custom container image**. If you use your own container, the container needs to contain your code for training a custom model. Prepare your custom job specificationNow that your clients are ready, your first step is to create a Job Specification for your custom training job. The job specification will consist of the following:- `worker_pool_spec` : The specification of the type of machine(s) you will use for training and how many (single or distributed)- `python_package_spec` : The specification of the Python package to be installed with the pre-built container. Prepare your machine specificationNow define the machine specification for your custom training job. This tells Vertex what type of machine instance to provision for the training. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators. | if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Prepare your disk specification(optional) Now define the disk specification for your custom training job. This tells Vertex what type and size of disk to provision in each machine instance for the training. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB. | DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Define the worker pool specificationNext, you define the worker pool specification for your custom training job. The worker pool specification will consist of the following:- `replica_count`: The number of instances to provision of this machine type.- `machine_spec`: The hardware specification.- `disk_spec` : (optional) The disk storage specification.- `python_package`: The Python training package to install on the VM instance(s) and which Python module to invoke, along with command line arguments for the Python module.Let's dive deeper now into the python package specification:-`executor_image_spec`: This is the docker image which is configured for your custom training job.-`package_uris`: This is a list of the locations (URIs) of your python training packages to install on the provisioned instance. The locations need to be in a Cloud Storage bucket. These can be either individual python files or a zip (archive) of an entire package. In the later case, the job service will unzip (unarchive) the contents into the docker image.-`python_module`: The Python module (script) to invoke for running the custom training job. In this example, you will be invoking `trainer.task.py` -- note that it was not neccessary to append the `.py` suffix.-`args`: The command line arguments to pass to the corresponding Pythom module. In this example, you will be setting: - `"--model-dir=" + MODEL_DIR` : The Cloud Storage location where to store the model artifacts. There are two ways to tell the training script where to save the model artifacts: - direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or - indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification. - `"--epochs=" + EPOCHS`: The number of epochs for training. 
- `"--steps=" + STEPS`: The number of steps (batches) per epoch. - `"--distribute=" + TRAIN_STRATEGY"` : The training distribution strategy to use for single or distributed training. - `"single"`: single device. - `"mirror"`: all GPU devices on a single compute instance. - `"multi"`: all GPU devices on all compute instances. - `"--param-file=" + PARAM_FILE`: The Cloud Storage location for storing feature normalization values. | JOB_NAME = "custom_job_" + TIMESTAMP
# Cloud Storage location where the training job writes its model artifacts.
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
# Distribution strategy: single device unless more than one GPU is attached.
if not TRAIN_NGPU or TRAIN_NGPU < 2:
    TRAIN_STRATEGY = "single"
else:
    TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100  # steps (batches) per epoch
PARAM_FILE = BUCKET_NAME + "/params.txt"  # feature-normalization values
# DIRECT: pass the model dir explicitly on the command line; otherwise the
# training script reads it from the AIP_MODEL_DIR environment variable.
DIRECT = True
if DIRECT:
    CMDARGS = [
        "--model-dir=" + MODEL_DIR,
        "--epochs=" + str(EPOCHS),
        "--steps=" + str(STEPS),
        "--distribute=" + TRAIN_STRATEGY,
        "--param-file=" + PARAM_FILE,
    ]
else:
    CMDARGS = [
        "--epochs=" + str(EPOCHS),
        "--steps=" + str(STEPS),
        "--distribute=" + TRAIN_STRATEGY,
        "--param-file=" + PARAM_FILE,
    ]
# One worker pool: a single replica running the Python training package
# (trainer.task) inside the prebuilt training container.
worker_pool_spec = [
    {
        "replica_count": 1,
        "machine_spec": machine_spec,
        "disk_spec": disk_spec,
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [BUCKET_NAME + "/trainer_boston.tar.gz"],
            "python_module": "trainer.task",
            "args": CMDARGS,
        },
    }
] | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Examine the training package Package layoutBefore you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.- PKG-INFO- README.md- setup.cfg- setup.py- trainer - \_\_init\_\_.py - task.pyThe files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.The file `trainer/task.py` is the Python script for executing the custom training job. *Note*, when we referred to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and dropped the file suffix (`.py`). Package AssemblyIn the following cells, you will assemble the training package. | # Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Task.py contentsIn the next cell, you write the contents of the training script task.py. I won't go into detail, it's just there for you to browse. In summary:- Get the directory where to save the model artifacts from the command line (`--model_dir`), and if not specified, then from the environment variable `AIP_MODEL_DIR`.- Loads Boston Housing dataset from TF.Keras builtin datasets- Builds a simple deep neural network model using TF.Keras model API.- Compiles the model (`compile()`).- Sets a training distribution strategy according to the argument `args.distribute`.- Trains the model (`fit()`) with epochs specified by `args.epochs`.- Saves the trained model (`save(args.model_dir)`) to the specified model directory.- Saves the maximum value for each feature `f.write(str(params))` to the specified parameters file. | %%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
# Keep training logs clean: suppress the tfds download progress bar.
tfds.disable_progress_bar()
# Command-line flags. Vertex injects AIP_MODEL_DIR as the default artifact dir.
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
                    default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
                    default=0.001, type=float,
                    help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
                    default=20, type=int,
                    help='Number of epochs.')
# NOTE(review): --steps is parsed but never used by this script -- confirm
# whether steps-per-epoch was meant to be passed to model.fit().
parser.add_argument('--steps', dest='steps',
                    default=100, type=int,
                    help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
                    help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
                    default='/tmp/param.txt', type=str,
                    help='Output file for parameters')
args = parser.parse_args()
# Log the runtime environment; TF_CONFIG is set by Vertex for multi-worker jobs.
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Select a tf.distribute strategy from the --distribute flag.
# Single Machine, single compute device
if args.distribute == 'single':
    # tf.test.is_gpu_available() is deprecated in TF 2.x; query the physical
    # device list instead to decide between the GPU and CPU device.
    if tf.config.list_physical_devices('GPU'):
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
    strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
    """Load and normalize the Boston Housing data.

    Returns:
        ((x_train, y_train), (x_test, y_test)) with each of the 13 feature
        columns scaled to [0, 1] by its own maximum value.

    Side effects:
        Writes the per-feature training max values to ``args.param_file`` so
        the same normalization can be reproduced at serving time.
    """
    # Scaling Boston Housing data features
    def scale(feature):
        # Divide a single feature column by its max; np.float was removed in
        # NumPy 1.24, so use the explicit np.float32 dtype.
        feature_max = np.max(feature)
        feature = (feature / feature_max).astype(np.float32)
        return feature, feature_max

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
        path="boston_housing.npz", test_split=0.2, seed=113
    )
    params = []
    # Bug fix: scale each of the 13 feature *columns*. The original indexed
    # x_train[_], which scaled the first 13 rows instead of the features.
    for col in range(13):
        x_train[:, col], col_max = scale(x_train[:, col])
        x_test[:, col], _ = scale(x_test[:, col])
        params.append(col_max)
    # store the normalization (max) value for each feature
    with tf.io.gfile.GFile(args.param_file, 'w') as f:
        f.write(str(params))
    return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
    """Construct and compile the regression DNN (13 features -> 1 value)."""
    layers = [
        tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(1, activation='linear'),
    ]
    dnn = tf.keras.Sequential(layers)
    # Mean-squared-error regression loss with RMSprop at the CLI learning rate.
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=args.lr)
    dnn.compile(loss='mse', optimizer=optimizer)
    return dnn
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
    # Creation of dataset, and model building/compiling need to be within
    # `strategy.scope()`.
    model = build_and_compile_dnn_model()
# Train the model
# NOTE(review): x_test/y_test are loaded here but not passed to fit() for
# validation -- confirm whether in-script holdout evaluation was intended.
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Store training script on your Cloud Storage bucketNext, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket. | ! rm -f custom.tar custom.tar.gz
# Archive the package directory, then compress it to custom.tar.gz for upload.
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Train the model using a `TrainingPipeline` resourceNow start training of your custom training job using a training pipeline on Vertex. To train the your custom model, do the following steps:1. Create a Vertex `TrainingPipeline` resource for the `Dataset` resource.2. Execute the pipeline to start the training. Create a `TrainingPipeline` resourceYou may ask, what do we use a pipeline for? We typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:1. Being reusable for subsequent training jobs.2. Can be containerized and ran as a batch job.3. Can be distributed.4. All the steps are associated with the same pipeline job for tracking progress. The `training_pipeline` specificationFirst, you need to describe a pipeline specification. Let's look into the *minimal* requirements for constructing a `training_pipeline` specification for a custom job:- `display_name`: A human readable name for the pipeline job.- `training_task_definition`: The training task schema.- `training_task_inputs`: A dictionary describing the requirements for the training job.- `model_to_upload`: A dictionary describing the specification for the (uploaded) Vertex custom `Model` resource. - `display_name`: A human readable name for the `Model` resource. - `artificat_uri`: The Cloud Storage path where the model artifacts are stored in SavedModel format. - `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the custom model will serve predictions. | from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value

MODEL_NAME = "custom_pipeline-" + TIMESTAMP
PIPELINE_DISPLAY_NAME = "custom-training-pipeline" + TIMESTAMP
# Wrap the worker pool spec in a protobuf Value, as required by the
# TrainingPipeline API's training_task_inputs field.
training_task_inputs = json_format.ParseDict(
    {"workerPoolSpecs": worker_pool_spec}, Value()
)
# Full TrainingPipeline spec: run the custom task, then upload the resulting
# SavedModel (at MODEL_DIR) as a Vertex Model served by DEPLOY_IMAGE.
pipeline = {
    "display_name": PIPELINE_DISPLAY_NAME,
    "training_task_definition": CUSTOM_TASK_GCS_PATH,
    "training_task_inputs": training_task_inputs,
    "model_to_upload": {
        "display_name": PIPELINE_DISPLAY_NAME + "-model",
        "artifact_uri": MODEL_DIR,
        "container_spec": {"image_uri": DEPLOY_IMAGE},
    },
}
print(pipeline) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Create the training pipelineUse this helper function `create_pipeline`, which takes the following parameter:- `training_pipeline`: the full specification for the pipeline training job.The helper function calls the pipeline client service's `create_pipeline` method, which takes the following parameters:- `parent`: The Vertex location root path for your `Dataset`, `Model` and `Endpoint` resources.- `training_pipeline`: The full specification for the pipeline training job.The helper function will return the Vertex fully qualified identifier assigned to the training pipeline, which is saved as `pipeline.name`. | def create_pipeline(training_pipeline):
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
response = create_pipeline(pipeline) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Now save the unique identifier of the training pipeline you created. | # The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
# (the last path segment of projects/.../trainingPipelines/<numeric id>)
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Get information on a training pipelineNow get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter:- `name`: The Vertex fully qualified pipeline identifier.When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`. | def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
DeploymentTraining the above model may take upwards of 20 minutes time.Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`. | while True:
    # Poll the pipeline (silently) once per minute until it finishes.
    response = get_training_pipeline(pipeline_id, True)
    if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
        print("Training job has not completed:", response.state)
        model_to_deploy_id = None
        # Abort immediately on a terminal failure state.
        if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
            raise Exception("Training Job Failed")
    else:
        # Success: the pipeline uploaded a Model resource; capture its name.
        model_to_deploy = response.model_to_upload
        model_to_deploy_id = model_to_deploy.name
        print("Training Time:", response.end_time - response.start_time)
        break
    time.sleep(60)
print("model to deploy:", model_to_deploy_id)
# When training did not save directly to MODEL_DIR, artifacts live in /model.
if not DIRECT:
    MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
model_path_to_deploy = MODEL_DIR | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Load the saved modelYour model is stored in a TensorFlow SavedModel format in a Cloud Storage bucket. Now load it from the Cloud Storage bucket, and then you can do some things, like evaluate the model, and do a prediction.To load, you use the TF.Keras `model.load_model()` method passing it the Cloud Storage path where the model is saved -- specified by `MODEL_DIR`. | import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Evaluate the modelNow let's find out how good the model is. Load evaluation dataYou will load the Boston Housing test (holdout) data from `tf.keras.datasets`, using the method `load_data()`. This will return the dataset as a tuple of two elements. The first element is the training data and the second is the test data. Each element is also a tuple of two elements: the feature data, and the corresponding labels (median value of owner-occupied home).You don't need the training data, and hence why we loaded it as `(_, _)`.Before you can run the data through evaluation, you need to preprocess it:x_test:1. Normalize (rescaling) the data in each column by dividing each value by the maximum value of that column. This will replace each single value with a 32-bit floating point number between 0 and 1. | import numpy as np
from tensorflow.keras.datasets import boston_housing

(_, _), (x_test, y_test) = boston_housing.load_data(
    path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
    """Normalize one feature column to [0, 1] by its maximum value."""
    feature_max = np.max(feature)
    feature = (feature / feature_max).astype(np.float32)
    return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
# Bug fix: scale each of the 13 feature *columns*; the original indexed
# x_test[_], which scaled the first 13 rows instead of the features.
# NOTE(review): this rescales by the test set's own max values -- ideally the
# training normalization constants saved in param_file would be reused here.
for col in range(13):
    x_test[:, col] = scale(x_test[:, col])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Perform the model evaluationNow evaluate how well the model in the custom job did. | model.evaluate(x_test, y_test) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Upload the model for servingNext, you will upload your TF.Keras model from the custom job to Vertex `Model` service, which will create a Vertex `Model` resource for your custom model. During upload, you need to define a serving function to convert data to the format your model expects. If you send encoded data to Vertex, your serving function ensures that the data is decoded on the model server before it is passed as input to your model. How does the serving function workWhen you send a request to an online prediction server, the request is received by a HTTP server. The HTTP server extracts the prediction request from the HTTP request content body. The extracted prediction request is forwarded to the serving function. For Google pre-built prediction containers, the request content is passed to the serving function as a `tf.string`.The serving function consists of two parts:- `preprocessing function`: - Converts the input (`tf.string`) to the input shape and data type of the underlying model (dynamic graph). - Performs the same preprocessing of the data that was done during training the underlying model -- e.g., normalizing, scaling, etc.- `post-processing function`: - Converts the model output to format expected by the receiving application -- e.q., compresses the output. - Packages the output for the the receiving application -- e.g., add headings, make JSON object, etc.Both the preprocessing and post-processing functions are converted to static graphs which are fused to the model. The output from the underlying model is passed to the post-processing function. The post-processing function passes the converted/packaged output back to the HTTP server. The HTTP server returns the output as the HTTP response content.One consideration you need to consider when building serving functions for TF.Keras models is that they run as static graphs. That means, you cannot use TF graph operations that require a dynamic graph. 
If you do, you will get an error during the compile of the serving function which will indicate that you are using an EagerTensor which is not supported. Get the serving function signatureYou can get the signatures of your model's input and output layers by reloading the model into memory, and querying it for the signatures corresponding to each layer.When making a prediction request, you need to route the request to the serving function instead of the model, so you need to know the input layer name of the serving function -- which you will use later when you make a prediction request. | loaded = tf.saved_model.load(model_path_to_deploy)
# Name of the first input tensor of the default serving signature; prediction
# requests must be keyed on this name.
_signature_inputs = loaded.signatures["serving_default"].structured_input_signature[1]
serving_input = next(iter(_signature_inputs.keys()))
print("Serving function input:", serving_input) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Upload the modelUse this helper function `upload_model` to upload your model, stored in SavedModel format, up to the `Model` service, which will instantiate a Vertex `Model` resource instance for your model. Once you've done that, you can use the `Model` resource instance in the same way as any other Vertex `Model` resource instance, such as deploying to an `Endpoint` resource for serving predictions.The helper function takes the following parameters:- `display_name`: A human readable name for the `Endpoint` service.- `image_uri`: The container image for the model deployment.- `model_uri`: The Cloud Storage path to our SavedModel artificat. For this tutorial, this is the Cloud Storage location where the `trainer/task.py` saved the model artifacts, which we specified in the variable `MODEL_DIR`.The helper function calls the `Model` client service's method `upload_model`, which takes the following parameters:- `parent`: The Vertex location root path for `Dataset`, `Model` and `Endpoint` resources.- `model`: The specification for the Vertex `Model` resource instance.Let's now dive deeper into the Vertex model specification `model`. This is a dictionary object that consists of the following fields:- `display_name`: A human readable name for the `Model` resource.- `metadata_schema_uri`: Since your model was built without an Vertex `Dataset` resource, you will leave this blank (`''`).- `artificat_uri`: The Cloud Storage path where the model is stored in SavedModel format.- `container_spec`: This is the specification for the Docker container that will be installed on the `Endpoint` resource, from which the `Model` resource will serve predictions. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.Uploading a model into a Vertex Model resource returns a long running operation, since it may take a few moments. 
You call response.result(), which is a synchronous call and will return when the Vertex Model resource is ready.The helper function returns the Vertex fully qualified identifier for the corresponding Vertex Model instance upload_model_response.model. You will save the identifier for subsequent steps in the variable model_to_deploy_id. | IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload a SavedModel to Vertex and return the new Model resource name.

    Args:
        display_name: Human-readable name for the Model resource.
        image_uri: Serving container image for deployment.
        model_uri: Cloud Storage path of the SavedModel artifacts.
    """
    container_spec = {
        "image_uri": image_uri,
        "command": [],
        "args": [],
        "env": [{"name": "env_name", "value": "env_value"}],
        "ports": [{"container_port": 8080}],
        "predict_route": "",
        "health_route": "",
    }
    model_spec = {
        "display_name": display_name,
        "metadata_schema_uri": "",
        "artifact_uri": model_uri,
        "container_spec": container_spec,
    }
    # upload_model returns a long-running operation; block until it completes.
    lro = clients["model"].upload_model(parent=PARENT, model=model_spec)
    print("Long running operation:", lro.operation.name)
    uploaded = lro.result(timeout=180)
    print("upload_model_response")
    print(" model:", uploaded.model)
    return uploaded.model
model_to_deploy_id = upload_model(
"boston-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy
) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Get `Model` resource informationNow let's get the model information for just your model. Use this helper function `get_model`, with the following parameter:- `name`: The Vertex unique identifier for the `Model` resource.This helper function calls the Vertex `Model` client service's method `get_model`, with the following parameter:- `name`: The Vertex unique identifier for the `Model` resource. | def get_model(name):
response = clients["model"].get_model(name=name)
print(response)
get_model(model_to_deploy_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Deploy the `Model` resourceNow deploy the trained Vertex custom `Model` resource. This requires two steps:1. Create an `Endpoint` resource for deploying the `Model` resource to.2. Deploy the `Model` resource to the `Endpoint` resource. Create an `Endpoint` resourceUse this helper function `create_endpoint` to create an endpoint to deploy the model to for serving predictions, with the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.The helper function uses the endpoint client service's `create_endpoint` method, which takes the following parameter:- `display_name`: A human readable name for the `Endpoint` resource.Creating an `Endpoint` resource returns a long running operation, since it may take a few moments to provision the `Endpoint` resource for serving. You call `response.result()`, which is a synchronous call and will return when the Endpoint resource is ready. The helper function returns the Vertex fully qualified identifier for the `Endpoint` resource: `response.name`. | ENDPOINT_NAME = "boston_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
    """Create a Vertex Endpoint and return the provisioned Endpoint resource."""
    # create_endpoint returns a long-running operation; block until ready.
    lro = clients["endpoint"].create_endpoint(
        parent=PARENT, endpoint={"display_name": display_name}
    )
    print("Long running operation:", lro.operation.name)
    endpoint = lro.result(timeout=300)
    print("result")
    for field in ("name", "display_name", "description", "labels",
                  "create_time", "update_time"):
        print(" " + field + ":", getattr(endpoint, field))
    return endpoint
result = create_endpoint(ENDPOINT_NAME) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Now get the unique identifier for the `Endpoint` resource you created. | # The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
# (the last path segment of projects/.../endpoints/<numeric id>)
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Compute instance scalingYou have several choices on scaling the compute instances for handling your online prediction requests:- Single Instance: The online prediction requests are processed on a single compute instance. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.- Manual Scaling: The online prediction requests are split across a fixed number of compute instances that you manually specified. - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and online prediction requests are evenly distributed across them.- Auto Scaling: The online prediction requests are split across a scaleable number of compute instances. - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions.The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request. | MIN_NODES = 1
MAX_NODES = 1 | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Deploy `Model` resource to the `Endpoint` resourceUse this helper function `deploy_model` to deploy the `Model` resource to the `Endpoint` resource you created for serving predictions, with the following parameters:- `model`: The Vertex fully qualified model identifier of the model to upload (deploy) from the training pipeline.- `deploy_model_display_name`: A human readable name for the deployed model.- `endpoint`: The Vertex fully qualified endpoint identifier to deploy the model to.The helper function calls the `Endpoint` client service's method `deploy_model`, which takes the following parameters:- `endpoint`: The Vertex fully qualified `Endpoint` resource identifier to deploy the `Model` resource to.- `deployed_model`: The requirements specification for deploying the model.- `traffic_split`: Percent of traffic at the endpoint that goes to this model, which is specified as a dictionary of one or more key/value pairs. - If only one model, then specify as **{ "0": 100 }**, where "0" refers to this model being uploaded and 100 means 100% of the traffic. - If there are existing models on the endpoint, for which the traffic will be split, then use `model_id` to specify as **{ "0": percent, model_id: percent, ... }**, where `model_id` is the model id of an existing model to the deployed endpoint. The percents must add up to 100.Let's now dive deeper into the `deployed_model` parameter. This parameter is specified as a Python dictionary with the minimum required fields:- `model`: The Vertex fully qualified model identifier of the (upload) model to deploy.- `display_name`: A human readable name for the deployed model.- `disable_container_logging`: This disables logging of container events, such as execution failures (default is container logging is enabled). 
Container logging is typically enabled when debugging the deployment and then disabled when deployed for production.- `dedicated_resources`: This refers to how many compute instances (replicas) that are scaled for serving prediction requests. - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated. - `min_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`. - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`. Traffic SplitLet's now dive deeper into the `traffic_split` parameter. This parameter is specified as a Python dictionary. This might at first be a tad bit confusing. Let me explain, you can deploy more than one instance of your model to an endpoint, and then set how much (percent) goes to each instance.Why would you do that? Perhaps you already have a previous version deployed in production -- let's call that v1. You got better model evaluation on v2, but you don't know for certain that it is really better until you deploy to production. So in the case of traffic split, you might want to deploy v2 to the same endpoint as v1, but it only get's say 10% of the traffic. That way, you can monitor how well it does without disrupting the majority of users -- until you make a final decision. ResponseThe method returns a long running operation `response`. We will wait sychronously for the operation to complete by calling the `response.result()`, which will block until the model is deployed. If this is the first time a model is deployed to the endpoint, it may take a few additional minutes to complete provisioning of resources. | DEPLOYED_NAME = "boston_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
    """Deploy an uploaded Model resource to an Endpoint resource.

    Parameters
    ----------
    model : str
        Fully qualified Vertex AI model identifier to deploy.
    deployed_model_display_name : str
        Human-readable name for the deployed model.
    endpoint : str
        Fully qualified Vertex AI endpoint identifier to deploy to.
    traffic_split : dict, optional
        Percent of endpoint traffic routed to each model id; "0" refers
        to the model being deployed here.  NOTE(review): a mutable default
        argument is normally an anti-pattern; it is harmless here only
        because the dict is never mutated.

    Returns
    -------
    The id of the deployed model (needed later to undeploy it).

    Relies on notebook globals: DEPLOY_GPU, DEPLOY_COMPUTE, DEPLOY_NGPU,
    MIN_NODES, MAX_NODES, clients.
    """
    # Provision accelerators only when DEPLOY_GPU is set; otherwise CPU-only.
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    # Deployment requirements; MIN_NODES/MAX_NODES bound replica autoscaling.
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        # Keep container logging enabled while debugging the deployment.
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    # Block until the long-running deploy operation completes.
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Make a online prediction requestNow do a online prediction to your deployed model. Get test itemYou will use an example out of the test (holdout) portion of the dataset as a test item. | test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Send the prediction requestOk, now you have a test data item. Use this helper function `predict_data`, which takes the parameters:- `data`: The test data item as a numpy 1D array of floating point values.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `parameters_dict`: Additional parameters for serving.This function uses the prediction client service and calls the `predict` method with the parameters:- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource was deployed.- `instances`: A list of instances (data items) to predict.- `parameters`: Additional parameters for serving.To pass the test data to the prediction service, you package it for transmission to the serving binary as follows: 1. Convert the data item from a 1D numpy array to a 1D Python list. 2. Convert the prediction request to a serialized Google protobuf (`json_format.ParseDict()`)Each instance in the prediction request is a dictionary entry of the form: {input_name: content}- `input_name`: the name of the input layer of the underlying model.- `content`: The data item as a 1D Python list.Since the `predict()` service can take multiple data items (instances), you will send your single data item as a list of one data item. As a final step, you package the instances list into Google's protobuf format -- which is what we pass to the `predict()` service.The `response` object returns a list, where each element in the list corresponds to the corresponding image in the request. You will see in the output for each prediction:- `predictions` -- the predicated median value of a house in units of 1K USD. | def predict_data(data, endpoint, parameters_dict):
    """Send one online prediction request to the deployed Vertex AI model.

    `data` is a 1D numpy array (converted to a plain list for transport),
    `endpoint` the fully qualified Endpoint id, and `parameters_dict`
    optional serving parameters (may be None).  Relies on the notebook
    globals `clients` and `serving_input` (the model's input-layer name).
    """
    parameters = json_format.ParseDict(parameters_dict, Value())
    # The format of each instance should conform to the deployed model's prediction input schema.
    instances_list = [{serving_input: data.tolist()}]
    # Package the single instance as a list of protobuf Values.
    instances = [json_format.ParseDict(s, Value()) for s in instances_list]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    # One prediction per request instance; here the predicted median house
    # value (units of $1K) for the single test item.
    predictions = response.predictions
    print("predictions")
    for prediction in predictions:
        print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Undeploy the `Model` resourceNow undeploy your `Model` resource from the serving `Endpoint` resoure. Use this helper function `undeploy_model`, which takes the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed to.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` is deployed to.This function calls the endpoint client service's method `undeploy_model`, with the following parameters:- `deployed_model_id`: The model deployment identifier returned by the endpoint service when the `Model` resource was deployed.- `endpoint`: The Vertex fully qualified identifier for the `Endpoint` resource where the `Model` resource is deployed.- `traffic_split`: How to split traffic among the remaining deployed models on the `Endpoint` resource.Since this is the only deployed model on the `Endpoint` resource, you simply can leave `traffic_split` empty by setting it to {}. | def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id) | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
Cleaning upTo clean up all GCP resources used in this project, you can [delete the GCPproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial:- Dataset- Pipeline- Model- Endpoint- Batch Job- Custom Job- Hyperparameter Tuning Job- Cloud Storage Bucket | delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME | _____no_output_____ | Apache-2.0 | notebooks/community/gapic/custom/showcase_custom_tabular_regression_online_pipeline.ipynb | nayaknishant/vertex-ai-samples |
GRU 236* Operate on 16000 GenCode 34 seqs.* 5-way cross validation. Save best model per CV.* Report mean accuracy from final re-validation with best 5.* Use Adam with a learn rate decay schdule. | NC_FILENAME='ncRNA.gc34.processed.fasta'
PC_FILENAME='pcRNA.gc34.processed.fasta'
DATAPATH=""
try:
from google.colab import drive
IN_COLAB = True
PATH='/content/drive/'
drive.mount(PATH)
DATAPATH=PATH+'My Drive/data/' # must end in "/"
NC_FILENAME = DATAPATH+NC_FILENAME
PC_FILENAME = DATAPATH+PC_FILENAME
except:
IN_COLAB = False
DATAPATH=""
EPOCHS=200
SPLITS=5
K=3
VOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'
EMBED_DIMEN=16
FILENAME='GRU236'
NEURONS=64
ACT="tanh"
DROP=0.5
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow import keras
from keras.wrappers.scikit_learn import KerasRegressor
from keras.models import Sequential
from keras.layers import Bidirectional
from keras.layers import GRU
from keras.layers import Dense
from keras.layers import LayerNormalization
import time
dt='float32'
tf.keras.backend.set_floatx(dt) | _____no_output_____ | MIT | Workshop/GRU_236.ipynb | ShepherdCode/ShepherdML |
Build model | def compile_model(model):
    """Compile the given Keras model for binary classification.

    NOTE(review): the exponential-decay schedule and the custom Adam
    optimizer `alrd` are built but NOT used -- the model is compiled with
    the default "adam" string (the alternate compile line is commented
    out).  Confirm whether the schedule is intentionally dead code.
    """
    adam_default_learn_rate = 0.001
    schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate = adam_default_learn_rate*10,
        #decay_steps=100000, decay_rate=0.96, staircase=True)
        decay_steps=10000, decay_rate=0.99, staircase=True)
    # learn rate = initial_learning_rate * decay_rate ^ (step / decay_steps)
    alrd = tf.keras.optimizers.Adam(learning_rate=schedule)
    # from_logits=False because the network ends in a sigmoid activation.
    bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
    print("COMPILE...")
    #model.compile(loss=bc, optimizer=alrd, metrics=["accuracy"])
    model.compile(loss=bc, optimizer="adam", metrics=["accuracy"])
    print("...COMPILED")
    return model
def build_model():
    """Assemble the GRU classifier: Embedding -> GRU -> GRU -> Dense ->
    Dense -> sigmoid output, then compile it via compile_model().

    Uses notebook globals VOCABULARY_SIZE, EMBED_DIMEN, NEURONS, ACT,
    DROP and dt.
    """
    # mask_zero=True makes downstream layers ignore the 0 padding token.
    embed_layer = keras.layers.Embedding(
        #VOCABULARY_SIZE, EMBED_DIMEN, input_length=1000, input_length=1000, mask_zero=True)
        #input_dim=[None,VOCABULARY_SIZE], output_dim=EMBED_DIMEN, mask_zero=True)
        input_dim=VOCABULARY_SIZE, output_dim=EMBED_DIMEN, mask_zero=True)
    # First GRU returns the full sequence so the second GRU can consume it.
    #rnn1_layer = keras.layers.Bidirectional(
    rnn1_layer = keras.layers.GRU(NEURONS, return_sequences=True,
        input_shape=[1000,EMBED_DIMEN], activation=ACT, dropout=DROP)#)#bi
    #rnn2_layer = keras.layers.Bidirectional(
    rnn2_layer = keras.layers.GRU(NEURONS, return_sequences=False,
        activation=ACT, dropout=DROP)#)#bi
    dense1_layer = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)
    #drop1_layer = keras.layers.Dropout(DROP)
    dense2_layer = keras.layers.Dense(NEURONS, activation=ACT,dtype=dt)
    #drop2_layer = keras.layers.Dropout(DROP)
    # Single sigmoid unit for the binary (coding vs non-coding) decision.
    output_layer = keras.layers.Dense(1, activation="sigmoid", dtype=dt)
    mlp = keras.models.Sequential()
    mlp.add(embed_layer)
    mlp.add(rnn1_layer)
    mlp.add(rnn2_layer)
    mlp.add(dense1_layer)
    #mlp.add(drop1_layer)
    mlp.add(dense2_layer)
    #mlp.add(drop2_layer)
    mlp.add(output_layer)
    mlpc = compile_model(mlp)
    return mlpc
Load and partition sequences | # Assume file was preprocessed to contain one line per seq.
# Prefer Pandas dataframe but df does not support append.
# For conversion to tensor, must avoid python lists.
def load_fasta(filename, label):
    """Read a preprocessed FASTA file (one line per sequence) into a DataFrame.

    Deflines (lines starting with '>') are skipped.  Returns a DataFrame
    with columns seqnum (1-based), class (the given label), sequence and
    seqlen.
    """
    DEFLINE = '>'
    records = []
    seqnum = 0
    with open(filename, 'r') as handle:
        for raw_line in handle:
            if raw_line[0] != DEFLINE:
                sequence = raw_line.rstrip()
                seqnum += 1  # first seqnum is 1
                records.append((seqnum, label, sequence, len(sequence)))
    return pd.DataFrame(records, columns=['seqnum', 'class', 'sequence', 'seqlen'])
def separate_X_and_y(data):
    """Split the sequence table into features X (sequence only) and the
    single-column label frame y ('class')."""
    label_frame = data[['class']].copy()
    feature_frame = data.drop(columns=['class', 'seqnum', 'seqlen'])
    return (feature_frame, label_frame)
| _____no_output_____ | MIT | Workshop/GRU_236.ipynb | ShepherdCode/ShepherdML |
def make_kmer_table(K):
    """Map every DNA K-mer to a unique integer token.

    The all-N pad word ('N'*K) maps to 0; the 4**K kmers over ACGT map to
    1..4**K in lexicographic order of the alphabet 'ACGT'.
    """
    kmers = ['']
    for _ in range(K):
        kmers = [prefix + base for prefix in kmers for base in 'ACGT']
    kmer_dict = {'N' * K: 0}
    for value, mer in enumerate(kmers, start=1):
        kmer_dict[mer] = value
    return kmer_dict
KMER_TABLE=make_kmer_table(K)
def strings_to_vectors(data,uniform_len):
    """Encode each string in data['sequence'] as a fixed-length row of
    k-mer token ids, zero-padded out to uniform_len.

    Relies on the notebook globals K (k-mer size) and KMER_TABLE
    (k-mer -> integer id; 0 is reserved for padding).
    """
    all_seqs=[]
    for seq in data['sequence']:
        i=0
        seqlen=len(seq)
        kmers=[]
        # NOTE(review): the extra "-1" stops one k-mer short of the
        # sequence end; it looks like a leftover from the spaced-seed
        # experiment below -- confirm the last k-mer should be dropped.
        while i < seqlen-K+1 -1: # stop at minus one for spaced seed
            #kmer=seq[i:i+2]+seq[i+3:i+5] # SPACED SEED 2/1/2 for K=4
            kmer=seq[i:i+K]
            i += 1
            value=KMER_TABLE[kmer]
            kmers.append(value)
        # Right-pad with the reserved 0 token up to the uniform length.
        pad_val=0
        while i < uniform_len:
            kmers.append(pad_val)
            i += 1
        all_seqs.append(kmers)
    pd2d=pd.DataFrame(all_seqs)
    return pd2d # return 2D dataframe, uniform dimensions
def make_kmers(MAXLEN,train_set):
    """Convert a slice of the sequence table into numpy arrays:
    X of shape (n_seqs, MAXLEN) k-mer token ids, and y of 0/1 labels."""
    (X_train_all,y_train_all)=separate_X_and_y(train_set)
    X_train_kmers=strings_to_vectors(X_train_all,MAXLEN)
    # From pandas dataframe to numpy to list to numpy
    num_seqs=len(X_train_kmers)
    tmp_seqs=[]
    for i in range(num_seqs):
        kmer_sequence=X_train_kmers.iloc[i]
        tmp_seqs.append(kmer_sequence)
    X_train_kmers=np.array(tmp_seqs)
    tmp_seqs=None # release the intermediate list
    labels=y_train_all.to_numpy()
    return (X_train_kmers,labels)
def make_frequencies(Xin):
    """Turn each row of k-mer token ids into a normalized frequency vector
    over the vocabulary (4**K kmers plus the reserved pad token).

    Relies on the notebook global K (k-mer size).
    """
    vocab_size = 4**K + 1  # plus one for 'NNN'
    rows = []
    for seq in Xin:
        counts = [0] * vocab_size
        for token in seq:
            counts[token] += 1
        total = len(seq)
        rows.append([c / total for c in counts])
    return np.asarray(rows)
def make_slice(data_set, min_len, max_len):
    """Return the rows of data_set whose seqlen lies within [min_len, max_len]."""
    in_range = (data_set['seqlen'] >= min_len) & (data_set['seqlen'] <= max_len)
    return data_set[in_range]
Cross validation | def do_cross_validation(X,y,given_model):
    """Run SPLITS-fold shuffle-split cross validation.

    Each fold clones and re-compiles `given_model` (so folds do not keep
    improving the same weights), checkpoints the epoch with the best
    validation accuracy, then re-evaluates that best checkpoint on the
    fold's validation split.  Finally reports mean/std accuracy over all
    folds.  Relies on notebook globals SPLITS, EPOCHS, DATAPATH, FILENAME.
    """
    cv_scores = []
    fold=0
    # 90/10 shuffle split per fold (no fixed random_state -> new split each run).
    splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1) #, random_state=37863)
    for train_index,valid_index in splitter.split(X):
        fold += 1
        X_train=X[train_index] # use iloc[] for dataframe
        y_train=y[train_index]
        X_valid=X[valid_index]
        y_valid=y[valid_index]
        # Avoid continually improving the same model.
        model = compile_model(keras.models.clone_model(given_model))
        bestname=DATAPATH+FILENAME+".cv."+str(fold)+".best"
        # Save a checkpoint only when validation accuracy improves.
        mycallbacks = [keras.callbacks.ModelCheckpoint(
            filepath=bestname, save_best_only=True,
            monitor='val_accuracy', mode='max')]
        print("FIT")
        start_time=time.time()
        history=model.fit(X_train, y_train, # batch_size=10, default=32 works nicely
            epochs=EPOCHS, verbose=1, # verbose=1 for ascii art, verbose=0 for none
            callbacks=mycallbacks,
            validation_data=(X_valid,y_valid) )
        end_time=time.time()
        elapsed_time=(end_time-start_time)
        print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
        # Plot the per-epoch loss/accuracy curves for this fold.
        pd.DataFrame(history.history).plot(figsize=(8,5))
        plt.grid(True)
        plt.gca().set_ylim(0,1)
        plt.show()
        # Evaluate the best checkpoint (not necessarily the final epoch).
        best_model=keras.models.load_model(bestname)
        scores = best_model.evaluate(X_valid, y_valid, verbose=0)
        print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
        cv_scores.append(scores[1] * 100)
    print()
    print("%d-way Cross Validation mean %.2f%% (+/- %.2f%%)" % (fold, np.mean(cv_scores), np.std(cv_scores)))
Train on RNA lengths 200-1Kb | MINLEN=200
MAXLEN = 1000
print("Load data from files.")
nc_seq = load_fasta(NC_FILENAME, 0)  # non-coding RNA, label 0
pc_seq = load_fasta(PC_FILENAME, 1)  # protein-coding RNA, label 1
train_set = pd.concat((nc_seq, pc_seq), axis=0)
nc_seq = None  # release memory before the heavy reshape step
pc_seq = None
print("Ready: train_set")
#train_set
# Keep only sequences whose length is within [MINLEN, MAXLEN].
subset = make_slice(train_set, MINLEN, MAXLEN)
print("Data reshape")
# One array to two: X and y, with X encoded as padded k-mer token ids.
(X_train, y_train) = make_kmers(MAXLEN, subset)
#print ("Data prep")
#X_train=make_frequencies(X_train)
print("Compile the model")
model = build_model()
print("Summarize the model")
print(model.summary())  # Print this only once
model.save(DATAPATH + FILENAME + '.model')
print("Cross validation")  # typo fix: was "Cross valiation"
do_cross_validation(X_train, y_train, model)
print("Done")
| _____no_output_____ | MIT | Workshop/GRU_236.ipynb | ShepherdCode/ShepherdML |
print daily message | day_info = {"day":u"1", "title":u"영광과 흑암", "song":u"하나님은 우리 아버지", "prayer":u"", "summary":u"", \
"verses": [u"창1:3", u"창1:14", u"창1:26,28,29", u"창2:19", u"창2:9", u"창2:17", u"창3:6-7", \
u"유1:6", u"벧후2:4", u"창3:17-18", u"왕하6:17", u"행26:18", u"롬1:19-20", u"요12:46", \
u"골1:13", u"단2:22", u"요일5:19"]}
day_info["day"]
def make_mdpage(bible, day_info, save=False):
    """Build (and optionally save) a markdown page of daily Bible verses.

    Parameters
    ----------
    bible : str or loaded bible object
        Bible version name (loaded via kbible.read_full_bible) or an
        already-loaded bible.
    day_info : str or dict
        Path to a YAML day-description file, or the parsed dict itself.
    save : bool
        When True, also write the page to
        mpages/day<day>-<title><version>.md.

    Returns
    -------
    The markdown text (str), or 0 when loading the YAML file or the bible
    version fails.
    """
    # check day_info.yml file
    if isinstance(day_info, str):
        try:
            with open(day_info, "r") as f:
                day_info = yaml.load(f, yaml.BaseLoader)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the error-code return is kept for callers.
        except Exception:
            print("... file: {} parser error!".format(day_info))
            return 0
    bible_version = ""
    # check bible version
    if isinstance(bible, str):
        try:
            bible_version = "-" + bible
            bible = kbible.read_full_bible(bible)
        except Exception:
            print("... read error: {}".format(bible_version))
            return 0
    # Header: day number/title, song, prayer, summary, bible version.
    msg = "# {}일차 - {}\n\n".format(day_info["day"],day_info["title"])
    msg = msg + "찬양 : {}\n\n".format(day_info["song"])
    msg = msg + "기도 : {}\n\n".format(day_info["prayer"])
    msg = msg + "요약 : {}\n\n".format(day_info["summary"])
    msg = msg + "성경 버전 : {}\n\n".format(bible_version[1:])
    # One bullet per verse, rendered as markdown by kbible.
    for v in day_info["verses"]:
        msg = msg + '- {}\n\n'.format(kbible.extract_bystr(bible, v, form="md"))
    msg = msg + "### info\n\n"
    msg = msg + "- 성경 구절 갯수 : {}".format(len(day_info["verses"]))
    if save:
        filename = 'mpages/day{}-{}{}.md'.format(day_info["day"], day_info["title"].replace(" ", ""), bible_version)
        with open(filename, "w") as f:
            f.write(msg)
        print('... save to {}'.format(filename))
    return msg
print(make_mdpage("현대인의성경", "day1.yaml", save=True))
#kbible.find_id(bible, book="롬", chapter=1) | _____no_output_____ | Apache-2.0 | dev/make_markdown.ipynb | sungcheolkim78/py_kbible |
daily info file | import yaml
print(yaml.dump(day_info, allow_unicode=True))
with open('day1.yaml', "w") as f:
yaml.dump(day_info, f, allow_unicode=True)
yaml.load? | _____no_output_____ | Apache-2.0 | dev/make_markdown.ipynb | sungcheolkim78/py_kbible |
Visualizing and working with network graphs is a common problem in many different disciplines. HoloViews provides the ability to represent and visualize graphs very simply and easily with facilities for interactively exploring the nodes and edges of the graph, especially using the bokeh plotting interface.The ``Graph`` ``Element`` differs from other elements in HoloViews in that it consists of multiple sub-elements. The data of the ``Graph`` element itself are the abstract edges between the nodes. By default the element will automatically compute concrete ``x`` and ``y`` positions for the nodes and represent them using a ``Nodes`` element, which is stored on the Graph. The abstract edges and concrete node positions are sufficient to render the ``Graph`` by drawing straight-line edges between the nodes. In order to supply explicit edge paths we can also declare ``EdgePaths``, providing explicit coordinates for each edge to follow.To summarize a ``Graph`` consists of three different components:* The ``Graph`` itself holds the abstract edges stored as a table of node indices.* The ``Nodes`` hold the concrete ``x`` and ``y`` positions of each node along with a node ``index``. The ``Nodes`` may also define any number of value dimensions, which can be revealed when hovering over the nodes or to color the nodes by.* The ``EdgePaths`` can optionally be supplied to declare explicit node paths. A simple GraphLet's start by declaring a very simple graph connecting one node to all others. If we simply supply the abstract connectivity of the ``Graph``, it will automatically compute a layout for the nodes using the ``layout_nodes`` operation, which defaults to a circular layout: | # Declare abstract edges
N = 8
node_indices = np.arange(N, dtype=np.int32)
source = np.zeros(N, dtype=np.int32)
target = node_indices
padding = dict(x=(-1.2, 1.2), y=(-1.2, 1.2))
simple_graph = hv.Graph(((source, target),)).redim.range(**padding)
simple_graph | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Accessing the nodes and edgesWe can easily access the ``Nodes`` and ``EdgePaths`` on the ``Graph`` element using the corresponding properties: | simple_graph.nodes + simple_graph.edgepaths | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Supplying explicit pathsNext we will extend this example by supplying explicit edges: | def bezier(start, end, control, steps=np.linspace(0, 1, 100)):
    # Quadratic Bezier curve: (1-t)^2*start + 2(1-t)t*control + t^2*end,
    # evaluated element-wise at each t in `steps` (default: 100 points in [0,1]).
    return (1-steps)**2*start + 2*(1-steps)*steps*control+steps**2*end
x, y = simple_graph.nodes.array([0, 1]).T
paths = []
for node_index in node_indices:
ex, ey = x[node_index], y[node_index]
paths.append(np.column_stack([bezier(x[0], ex, 0), bezier(y[0], ey, 0)]))
bezier_graph = hv.Graph(((source, target), (x, y, node_indices), paths)).redim.range(**padding)
bezier_graph | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Interactive features Hover and selection policiesThanks to Bokeh we can reveal more about the graph by hovering over the nodes and edges. The ``Graph`` element provides an ``inspection_policy`` and a ``selection_policy``, which define whether hovering and selection highlight edges associated with the selected node or nodes associated with the selected edge, these policies can be toggled by setting the policy to ``'nodes'`` (the default) and ``'edges'``. | bezier_graph.options(inspection_policy='edges') | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
In addition to changing the policy we can also change the colors used when hovering and selecting nodes: | %%opts Graph [tools=['hover', 'box_select']] (edge_hover_line_color='green' node_hover_fill_color='red')
bezier_graph.options(inspection_policy='nodes') | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Additional informationWe can also associate additional information with the nodes and edges of a graph. By constructing the ``Nodes`` explicitly we can declare additional value dimensions, which are revealed when hovering and/or can be mapped to the color by specifying the ``color_index``. We can also associate additional information with each edge by supplying a value dimension to the ``Graph`` itself, which we can map to a color using the ``edge_color_index``. | %%opts Graph [color_index='Type' edge_color_index='Weight'] (cmap='Set1' edge_cmap='viridis')
node_labels = ['Output']+['Input']*(N-1)
np.random.seed(7)
edge_labels = np.random.rand(8)
nodes = hv.Nodes((x, y, node_indices, node_labels), vdims='Type')
graph = hv.Graph(((source, target, edge_labels), nodes, paths), vdims='Weight').redim.range(**padding)
graph + graph.options(inspection_policy='edges') | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
If you want to supply additional node information without speciying explicit node positions you may pass in a ``Dataset`` object consisting of various value dimensions. | %%opts Graph [color_index='Label'] (cmap='Set1')
node_info = hv.Dataset(node_labels, vdims='Label')
hv.Graph(((source, target), node_info)).redim.range(**padding) | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Working with NetworkX NetworkX is a very useful library when working with network graphs and the Graph Element provides ways of importing a NetworkX Graph directly. Here we will load the Karate Club graph and use the ``circular_layout`` function provided by NetworkX to lay it out: | %%opts Graph [tools=['hover']]
G = nx.karate_club_graph()
hv.Graph.from_networkx(G, nx.layout.circular_layout).redim.range(**padding) | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Animating graphs Like all other elements ``Graph`` can be updated in a ``HoloMap`` or ``DynamicMap``. Here we animate how the Fruchterman-Reingold force-directed algorithm lays out the nodes in real time. | %%opts Graph
G = nx.karate_club_graph()
def get_graph(iteration):
    """Lay out the karate-club graph G with `iteration` spring-layout steps.

    The RNG seed is fixed so successive HoloMap frames differ only in the
    number of force-directed iterations, not in random initialization.
    """
    np.random.seed(10)
    return hv.Graph.from_networkx(G, nx.spring_layout, iterations=iteration)
hv.HoloMap({i: get_graph(i) for i in range(5, 30, 5)},
kdims='Iterations').redim.range(x=(-1.2, 1.2), y=(-1.2, 1.2)) | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Real world graphs As a final example let's look at a slightly larger graph. We will load a dataset of a Facebook network consisting a number of friendship groups identified by their ``'circle'``. We will load the edge and node data using pandas and then color each node by their friendship group using many of the things we learned above. | %opts Nodes Graph [width=800 height=800 xaxis=None yaxis=None]
%%opts Graph [color_index='circle']
%%opts Graph (node_size=10 edge_line_width=1)
colors = ['#000000']+hv.Cycle('Category20').values
edges_df = pd.read_csv('../assets/fb_edges.csv')
fb_nodes = hv.Nodes(pd.read_csv('../assets/fb_nodes.csv')).sort()
fb_graph = hv.Graph((edges_df, fb_nodes), label='Facebook Circles')
fb_graph = fb_graph.redim.range(x=(-0.05, 1.05), y=(-0.05, 1.05)).options(cmap=colors)
fb_graph | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Bundling graphs The datashader library provides algorithms for bundling the edges of a graph and HoloViews provides convenient wrappers around the libraries. Note that these operations need ``scikit-image`` which you can install using:```conda install scikit-image```or```pip install scikit-image``` | from holoviews.operation.datashader import datashade, bundle_graph
bundled = bundle_graph(fb_graph)
bundled | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Datashading graphs For graphs with a large number of edges we can datashade the paths and display the nodes separately. This loses some of the interactive features but will let you visualize quite large graphs: | %%opts Nodes [color_index='circle'] (size=10 cmap=colors) Overlay [show_legend=False]
datashade(bundled, normalization='linear', width=800, height=800) * bundled.nodes | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
Applying selections Alternatively we can select the nodes and edges by an attribute that resides on either. In this case we will select the nodes and edges for a particular circle and then overlay just the selected part of the graph on the datashaded plot. Note that selections on the ``Graph`` itself will select all nodes that connect to one of the selected nodes. In this way a smaller subgraph can be highlighted and the larger graph can be datashaded. | %%opts Graph (node_fill_color='white')
datashade(bundle_graph(fb_graph), normalization='linear', width=800, height=800) *\
bundled.select(circle='circle15') | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
To select just nodes that are in 'circle15' set the ``selection_mode='nodes'`` overriding the default of 'edges': | bundled.select(circle='circle15', selection_mode='nodes') | _____no_output_____ | BSD-3-Clause | examples/user_guide/Network_Graphs.ipynb | jsignell/holoviews |
How have Airbnb prices changed due to COVID-19? Business Understanding This is the most recent data (Oct, 2020) taken from the official website Airbnb http://insideairbnb.com/get-the-data.htmlIn this Notebook, we'll look at this data, clean up, analyze, visualize, and model.And we will answer the following questions for Business Understanding:1. What correlates best with the price?2. How has price and busyness changed over the course of COVID-19?4. Can we predict the price based on its features? Let's begin! | #import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
#ml libraries
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
import xgboost as xgb
from xgboost import plot_importance
from keras import backend as K
import tensorflow as tf
import time
from tensorflow import keras
from keras import models, layers, optimizers, regularizers
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import learning_curve
from sklearn.preprocessing import StandardScaler, MinMaxScaler
#metrics
from sklearn.metrics import r2_score, mean_squared_error
%matplotlib inline
def printColunmsInfo(df):
    '''takes dataframe, prints columns info'''
    # NOTE(review): name misspells "Columns"; kept unchanged because callers use it.
    df.info()
    print("\n")
    # Delegate the row/column count summary, then print a separator rule.
    printTotalRowsAndColumns(df)
    print("---------------------------------------")
def printTotalRowsAndColumns(df):
    '''print number of columns and rows'''
    n_rows, n_cols = df.shape
    print("Total columns: ", n_cols)
    print("Total rows: ", n_rows)
def stringToNumConverter(string):
    '''deletes not numbers symbols from string'''
    # NaN/None inputs yield the empty string, matching the original's
    # pd.notna guard.
    if pd.isna(string):
        return ""
    return "".join(ch for ch in string if ch.isdigit() or ch == ".")
def create_dummy_df(df, cat_cols, dummy_na):
    '''One-hot encode the given categorical columns of df.

    Each column in cat_cols is replaced by indicator columns named
    <col>_<value> (first level dropped to avoid collinearity).  Columns
    that cannot be encoded (e.g. missing from df) are skipped.

    Parameters
    ----------
    df : pandas.DataFrame
    cat_cols : iterable of str, columns to encode
    dummy_na : bool, whether to add a <col>_nan indicator column

    Returns
    -------
    pandas.DataFrame with the categorical columns replaced by dummies.
    '''
    for col in cat_cols:
        try:
            # for each cat add dummy var, drop original column
            df = pd.concat(
                [df.drop(col, axis=1),
                 pd.get_dummies(df[col], prefix=col, prefix_sep='_',
                                drop_first=True, dummy_na=dummy_na)],
                axis=1)
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
        # propagate; the best-effort skip behavior is preserved.
        except Exception:
            continue
    return df
def dateToCategorical(row):
    '''changes column from date type to categorical'''
    # Buckets are relative to the Oct-2020 data snapshot; NaT dates fail
    # every comparison and fall through to "no reviews".
    year = row.year
    if year <= 2016:
        bucket = "4+ years"
    elif year <= 2018:
        bucket = "2-3 years"
    elif year <= 2019:
        bucket = "1-2 years"
    elif year == 2020:
        month = row.month
        if month > 8:
            bucket = "0-1 month"
        elif month > 2:
            bucket = "2-6 months"
        else:
            bucket = "this year"
    else:
        bucket = "no reviews"
    return bucket
def appendToMetricsdf(df, model_name, train_r2, test_r2, train_mse, test_mse):
    '''Append one row of model metrics and return the extended DataFrame.

    Uses pd.concat because DataFrame.append was deprecated in pandas 1.4
    and removed in pandas 2.0.
    '''
    new_row = {"Model Name": model_name,
               "r-squared train": train_r2,
               "r-squared train test": test_r2,
               "MSE train": train_mse,
               "MSE test": test_mse}
    return pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)
def r2_keras(y_true, y_pred):
    '''calculates r2_score

    Keras-backend implementation of the coefficient of determination:
    R^2 = 1 - SS_res / SS_tot.  K.epsilon() guards against division by
    zero when y_true is constant.  Suitable for use as a Keras metric.
    '''
    SS_res = K.sum(K.square(y_true - y_pred))
    SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return (1 - SS_res/(SS_tot + K.epsilon()) )
#load data
sf_cal = pd.read_csv("datasets/calendar.csv", low_memory=False, index_col=0)
sf_list = pd.read_csv("datasets/listings.csv") | N:\Anaconda\lib\site-packages\numpy\lib\arraysetops.py:569: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison
mask |= (ar1 == a)
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Cleaning the Data Listing Data Frame First, let's look on Listing Data Frame. It is the biggest table. We won't need some columns because they don't make much sense for our purposes. So we will drop them. | sf_list = sf_list[['id', 'host_since', 'host_is_superhost', 'host_listings_count', 'host_response_time',
'host_response_rate', 'host_acceptance_rate','neighbourhood_cleansed', 'latitude', 'longitude',
'property_type', 'room_type', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'amenities',
'minimum_nights', 'maximum_nights', 'review_scores_rating', 'review_scores_accuracy',
'review_scores_cleanliness', 'review_scores_checkin', 'review_scores_communication',
'review_scores_location', 'review_scores_value', 'availability_30', 'number_of_reviews',
'last_review', 'reviews_per_month', 'price']]
sf_list.head() | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
We have left the following columns:* __'id'__ — we'll use to join tables* __host_since__ and __last_review__ — datatype data, we transform to categorical* __'host_response_time'__ — categorical data* __host_is_superhost__ — boolean data* __'host_response_rate'__ and __'host_acceptance_rate'__ — as a percentage, we will change to integer* __neighbourhood_cleansed'__ — neighbourhood name* __'latitude', 'longitude'__ — сoordinates, we use them for visualisation* __'room_type'__ and __property_type__ — categorical data* __'accommodates', 'bathrooms', 'bedrooms', 'beds'__ — numerical values describing property* __'amenities'__ — can be used to identify words associated with amenities* __'minimum_nights', 'maximum_nights'__ — numerical values* __'review_scores_rating'__ — numbers between 20 and 100* __'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', review_scores_communication', 'review_scores_location', 'review_scores_value'__ — numbers between 2 and 10* __availability_30__, __number_of_reviews__, __reviews_per_month__ — numerical* __'price'__ — target value Let's convert string data to numeric. | #converting datatype of price column to integer
sf_list["price"] = sf_list["price"].apply(lambda string: ''.join(i for i in string if i.isdigit())[:-2])
sf_list["price"] = pd.to_numeric(sf_list["price"], downcast="integer")
#host_response_rate and host_acceptance_rate types to float
sf_list["host_response_rate"] = sf_list["host_acceptance_rate"].apply(lambda string: stringToNumConverter(string))
sf_list["host_response_rate"] = pd.to_numeric(sf_list["host_response_rate"], downcast="float")
sf_list["host_acceptance_rate"] = sf_list["host_acceptance_rate"].apply(lambda string: stringToNumConverter(string))
sf_list["host_acceptance_rate"] = pd.to_numeric(sf_list["host_acceptance_rate"], downcast="float")
#converting t, f value to 1 or 0
sf_list["host_is_superhost"] = sf_list["host_is_superhost"].apply((lambda string: 1 if string == "t" else 0))
#converting datatype of date columns to datetime
sf_list["last_review"] = pd.to_datetime(arg=sf_list["last_review"], errors="coerce")
sf_list["host_since"] = pd.to_datetime(arg=sf_list["host_since"], errors="coerce")
print("Listing Data Frame")
printColunmsInfo(sf_list) | Listing Data Frame
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 7274 entries, 0 to 7273
Data columns (total 31 columns):
id 7274 non-null int64
host_since 7274 non-null datetime64[ns]
host_is_superhost 7274 non-null int64
host_listings_count 7274 non-null int64
host_response_time 5321 non-null object
host_response_rate 6393 non-null float32
host_acceptance_rate 6393 non-null float32
neighbourhood_cleansed 7274 non-null object
latitude 7274 non-null float64
longitude 7274 non-null float64
property_type 7274 non-null object
room_type 7274 non-null object
accommodates 7274 non-null int64
bathrooms 7185 non-null float64
bedrooms 6430 non-null float64
beds 7234 non-null float64
amenities 7274 non-null object
minimum_nights 7274 non-null int64
maximum_nights 7274 non-null int64
review_scores_rating 5508 non-null float64
review_scores_accuracy 5495 non-null float64
review_scores_cleanliness 5495 non-null float64
review_scores_checkin 5494 non-null float64
review_scores_communication 5496 non-null float64
review_scores_location 5494 non-null float64
review_scores_value 5494 non-null float64
availability_30 7274 non-null int64
number_of_reviews 7274 non-null int64
last_review 5554 non-null datetime64[ns]
reviews_per_month 5554 non-null float64
price 7274 non-null int16
dtypes: datetime64[ns](2), float32(2), float64(13), int16(1), int64(8), object(5)
memory usage: 1.6+ MB
Total columns: 31
Total rows: 7274
---------------------------------------
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Amenities Data Frame Consider the data about the amenities. This column is a set of lists enclosed in strings. So I had to use the *eval*. If you know a more elegant method, please let me know.Then we'll add columns for each amenitie, remove the common and very rare amenities. | amenitiesList = []
# Collect the distinct amenity names across all listings, preserving first-seen
# order (this order becomes the indicator-column order below).
for item in sf_list["amenities"].value_counts().reset_index()["index"]:
    # SECURITY NOTE: eval() executes arbitrary code and the amenities strings
    # come from an external dataset; ast.literal_eval would be the safe parser.
    item = eval(item)
    for i in item:
        if i not in amenitiesList:
            amenitiesList.append(i)
print("Total amenities: ", len(amenitiesList))
print(amenitiesList)
# .copy() detaches the new frame from sf_list so the column insertions below
# operate on an independent DataFrame (avoids SettingWithCopyWarning).
amenities_df = sf_list[["id", "amenities"]].copy()
#we don't need "amenities" in the original data frame anymore
sf_list.drop(["amenities"], axis=1, inplace=True)
# Map listing id -> set of its amenities, for O(1) membership tests below.
amenitiesDict = {}
for item in range(amenities_df.shape[0]):
    i_id, amenitiesSet = amenities_df.loc[item, "id"], set(eval(amenities_df.loc[item, "amenities"]))
    amenitiesDict[i_id] = amenitiesSet
# One 0/1 indicator column per amenity.
for amenitie in amenitiesList:
    bilist = [1 if amenitie in amenitiesDict[amId] else 0 for amId in amenities_df["id"]]
    amenities_df.insert(loc=len(amenities_df.columns), column=amenitie, value=bilist, allow_duplicates=True)
print(amenities_df.shape) | (7274, 133)
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Calendar Data Frame | sf_cal.head() | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
This Data Frame has folowing columns:* __listing_id__ — id values, we'll use to join tables* __date__ — we need to change datatype to datetime* __available__ — it has to be boolean, so we need to change it* __minimum_nights, maximum_nights__ — we have same columns in Listing Data Frame, drop they later* __adjusted_price__, __price__ — target values | #converting datatype of price and adjusted_price columns to integer
# Parse both price columns from strings to numbers, downcasting where possible.
for price_col in ["price", "adjusted_price"]:
    sf_cal[price_col] = sf_cal[price_col].apply(stringToNumConverter)
    sf_cal[price_col] = pd.to_numeric(sf_cal[price_col], downcast="integer")
# Parse calendar dates; unparseable entries become NaT.
sf_cal["date"] = pd.to_datetime(arg=sf_cal["date"], errors="coerce")
# Map Airbnb's "t"/"f" flags onto real booleans.
sf_cal["available"] = sf_cal["available"].apply(lambda flag: flag == "t")
print("Calendar Data Frame")
printColunmsInfo(sf_cal) | Calendar Data Frame
<class 'pandas.core.frame.DataFrame'>
Int64Index: 2390652 entries, 0 to 2390651
Data columns (total 7 columns):
listing_id int64
date datetime64[ns]
available bool
price float64
adjusted_price float64
minimum_nights float64
maximum_nights float64
dtypes: bool(1), datetime64[ns](1), float64(4), int64(1)
memory usage: 130.0 MB
Total columns: 7
Total rows: 2390652
---------------------------------------
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Data Understanding Let's analyze the data to answer the questions given at the beginning: 1. What correlates best with price? Does Amenities correlate with price? | amen_price_corr_neg = amenities_df.merge(sf_list[["id", "price"]], on="id").corr()[["id", "price"]].sort_values(by="price").head(10)
#ten amenities most positively correlated with price; the "price" row itself
#(self-correlation = 1.0) is dropped before taking the tail
amen_price_corr_pos = amenities_df.merge(sf_list[["id", "price"]], on="id").corr()[["id", "price"]].sort_values(by="price").drop("price", axis=0).tail(10)
#negative correlation: bar-styled table of the ten most negatively correlated amenities
amen_price_corr_neg.drop("id", axis=1).style.bar(color="#00677e", align="mid")
#positive correlation
amen_price_corr_pos.drop("id", axis=1).style.bar(color="#cd4a4c") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
As you can see, air conditioning, gym, and building staff are highly correlated with price. The rest of the amenities correlate either weakly or not at all. Does Review Scores correlate with price? | plt.subplots(figsize=(9, 6))
sns.heatmap(sf_list[['review_scores_rating', 'review_scores_accuracy',
'review_scores_cleanliness', 'review_scores_checkin',
'review_scores_communication', 'review_scores_location',
'review_scores_value', "number_of_reviews", 'price']].corr(),
annot=True, fmt=".2f") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Review Scores correlate weakly with price, but they correlate well with each other. Does Housing Characteristics correlate with price? | plt.subplots(figsize=(9, 6))
sns.heatmap(sf_list[['accommodates', 'bathrooms', 'bedrooms', 'beds', 'price']].corr(),
annot=True, fmt=".2f") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
There is an obvious correlation. The more people you can accommodate, the more expensive it is to rent a room. Same about bedrooms and beds. But the number of bathrooms does not have a strong impact. Some more dependencies on the price, which we will use in modeling: | sf_list.groupby(["room_type"]).mean().reset_index()[["room_type","price"]].style.bar(color="#cd4a4c")
#mean price by property type, most expensive first
sf_list.groupby(["property_type"]).mean().reset_index()[["property_type","price"]].sort_values(by="price", ascending=False).style.bar(color="#cd4a4c")
#mean price by host response-time category
sf_list.groupby(["host_response_time"]).mean().reset_index()[["host_response_time","price"]].style.bar(color="#cd4a4c")
#mean price for superhosts (1) vs regular hosts (0)
sf_list.groupby(["host_is_superhost"]).mean().reset_index()[["host_is_superhost","price"]]
#correlation between review count and price
sf_list[["number_of_reviews","price"]].corr()
#figure for the heatmap drawn in the next statement
plt.subplots(figsize=(9, 6))
sns.heatmap(sf_list[["host_response_rate", "host_acceptance_rate", "minimum_nights",
"maximum_nights", "number_of_reviews", "price"]].corr(),
annot=True, fmt=".2f") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
How about Neighbourhoods? Let's find the most expensive neighbourhood. | #coordinates of San Francisco
#coordinates of San Francisco
#NOTE(review): sf_latitude/sf_longitude are defined here but not referenced in
#this plot — presumably kept for other map cells; confirm before removing.
sf_latitude, sf_longitude = 37.7647993, -122.4629897
#the necessary data for the map: neighborhood polygons (shapefile) joined with
#per-neighborhood mean listing values
sf_map = gpd.read_file("planning_neighborhoods/planning_neighborhoods.shp")
sf_neig_mean = sf_list.groupby(["neighbourhood_cleansed"]).mean().reset_index()
sf_map = sf_map.merge(sf_neig_mean, left_on="neighborho", right_on="neighbourhood_cleansed")
#fixed value range for the price colorbar
vmin, vmax = 100, 1300
fig, ax = plt.subplots(figsize = (20, 20))
ax.set_title("Average price in each neighborhood of San Francisco", fontdict={"fontsize": "25", "fontweight" : "3"})
#choropleth: fill each neighborhood polygon by its mean price
sf_map.plot(column="price", cmap="OrRd", linewidth=0.8, ax=ax, edgecolor="0.8")
#label each polygon at its centroid with the neighborhood name
texts = []
for x, y, label in zip(sf_map.centroid.geometry.x, sf_map.centroid.geometry.y, sf_map["neighbourhood_cleansed"]):
    texts.append(plt.text(x, y, label, fontsize = 8))
#manual colorbar: the polygon plot does not create one by itself
sm = plt.cm.ScalarMappable(cmap="OrRd", norm=plt.Normalize(vmin=vmin, vmax=vmax))
# empty array for the data range (matplotlib ScalarMappable workaround)
sm._A = []
# add the colorbar to the figure
cbar = fig.colorbar(sm)
ax.axis("off")
plt.show()
sf_list.groupby(["neighbourhood_cleansed"]).mean().reset_index()[["neighbourhood_cleansed","price"]].sort_values(by="price", ascending=False).style.bar(color="#cd4a4c") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
As you can see from the map, the high price is more related to the location. The most expensive areas are Golden Gate Park and Financial District. If you look at my previous research, you understand that Golden Gate Park is quite safe, unlike the Financial District which pretty criminal. All this data can be used to predict prices. But before that, let's answer the second question. 2. How has price and busyness changed over the course of COVID-19? Let's start by looking at price changes over the past year. | per = sf_cal.date.dt.to_period("M")
#group the calendar by month (`per` holds the month period of each date,
#computed in the previous statement)
g = sf_cal.groupby(per)
#NOTE(review): sns.set_palette configures the palette globally and appears to
#return None, so `ax` is likely unused here — confirm.
ax = sns.set_palette("viridis")
plt.figure(figsize=(16,6))
#bar per month: mean nightly price
sns.barplot(x=g.mean().reset_index()["date"],
            y=g.mean().reset_index()["price"])
plt.xlabel("Month", fontsize=20)
plt.ylabel("Price per night", fontsize=20)
plt.title("Average Price per night in San Francisco", fontsize=25)
plt.show() | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
During the covid period, the average price per night rose by about $33. And it does not stop growing linearly. Next one is busyness. | ax = sns.set_palette("viridis")
plt.figure(figsize=(16,6))
#bar per month: mean of the boolean `available` flag, i.e. the share of
#listing-nights that are vacant (uses the monthly groupby `g` from above)
sns.barplot(x=g.mean().reset_index()["date"],
            y=g.mean().reset_index()["available"])
plt.xlabel("Month", fontsize=20)
plt.ylabel("Availability, proportion", fontsize=20)
plt.title("Average Availability in San Francisco", fontsize=25)
plt.show() | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
September last year was quite popular (wonderful weather). Then the decline began. But with the onset of covid, the decline intensified and reached its peak (half of the housing is vacant) by May. As expected, the covid did not affect the Airbnb business in the best way. Prices have gone up and there are fewer customers. The indicators have not yet returned to their previous values. To answer the last question, we have to prepare the data for modeling. Can we predict the price based on its features? Prepare Data Working with NaNs and categorical variables Let's turn "last_review" and "host_since" from date type to categorical values. For that, we create new columns and fill them in. | sf_list["since_last_review"] = sf_list["last_review"].apply(lambda row : dateToCategorical(row))
sf_list["host_since_cat"] = sf_list["host_since"].apply(dateToCategorical)
#drop all rows with a missing target ("price") — they cannot be used for modeling
drop_sf_list = sf_list.dropna(subset=["price"], axis=0)
#categorical features (one-hot encoded below via create_dummy_df);
#.copy() detaches the slice so later assignments cannot trigger
#SettingWithCopyWarning
cat_sf_list = drop_sf_list[["id", "neighbourhood_cleansed", "room_type", "property_type", "since_last_review", "host_since_cat"]].copy()
#numerical features (log-transformed and mean-imputed below); the single list
#also drives the column selection, removing the previous duplication
num_cols = ["accommodates", "review_scores_rating", "bathrooms", "bedrooms", "beds",
            "review_scores_accuracy", "review_scores_cleanliness", "availability_30",
            "number_of_reviews", "reviews_per_month", "review_scores_communication",
            "review_scores_location", "review_scores_value", "host_is_superhost",
            "host_listings_count", "price"]
mean_sf_list = drop_sf_list[["id"] + num_cols].copy()
for col in num_cols:
    #replace exact zeros with 0.01 so the log transform is defined everywhere
    mean_sf_list[col] = mean_sf_list[col].astype('float64').replace(0.0, 0.01)
    mean_sf_list[col] = np.log(mean_sf_list[col])
#impute remaining NaNs with the column mean (computed after the log transform)
fill_mean = lambda col: col.fillna(col.mean())
mean_sf_list = mean_sf_list.apply(fill_mean, axis=0)
#columns to one-hot encode
cat_cols_lst = ["neighbourhood_cleansed", "room_type", "property_type", "since_last_review", "host_since_cat"]
dummy_sf_list = create_dummy_df(cat_sf_list, cat_cols_lst, dummy_na=False) | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
After all, we'll merge tree Data Frames: mean_sf_list, dummy_sf_list and amenities_df. | full_sf_list = dummy_sf_list.merge(amenities_df.drop(["amenities"], axis=1), on="id").merge(mean_sf_list, on="id") | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Data Modeling Let's start modeling. We will try several models and compare the results. | #preparation train and test data
#feature matrix X (everything but the log-price target) and target y
X = full_sf_list.drop(["price"], axis=1)
y = full_sf_list["price"]
#scaling: standardize every feature to zero mean / unit variance
#NOTE(review): the scaler is fit on the FULL data before the split, so test-set
#statistics leak into training; fitting on X_train only would be cleaner —
#confirm no later cell depends on the scaled full X before changing.
scaler = StandardScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=list(X.columns))
#split into train and test (70/30, fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=42)
#writing the metrics for every model in DataFrame
#NOTE(review): "r-squared train test" looks like a typo for "r-squared test";
#confirm against appendToMetricsdf before renaming the column.
metrics_columns = ["Model Name", "r-squared train", "r-squared train test", "MSE train", "MSE test"]
metrics_df = pd.DataFrame(columns=metrics_columns) | _____no_output_____ | Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Predicting Price AdaBoost regressor | adaboost_model = AdaBoostRegressor(n_estimators=20)
# Train the AdaBoost ensemble on the training split.
adaboost_model.fit(X_train, y_train)
# Predict on both splits so train/test quality can be compared.
train_predictions = adaboost_model.predict(X_train)
test_predictions = adaboost_model.predict(X_test)
# Evaluate with r-squared and MSE, rounded to 4 decimal places; these names
# are reused by the metrics-table append in the next statement.
train_r2 = round(r2_score(y_train, train_predictions), 4)
test_r2 = round(r2_score(y_test, test_predictions), 4)
train_mse = round(mean_squared_error(y_train, train_predictions), 4)
test_mse = round(mean_squared_error(y_test, test_predictions), 4)
print('r-squared score for training set was {}. r-squared score for test set was {}.'.format(train_r2, test_r2))
print('MSE score for training set was {}. MSE score for test set was {}.'.format(train_mse, test_mse))
# Record this model's metrics in the comparison table.
metrics_df = appendToMetricsdf(metrics_df, "AdaBoost regressor", train_r2, test_r2, train_mse, test_mse) | r-squared score for training set was 0.4804. r-squared score for test set was 0.4407.
MSE score for training set was 0.3275. MSE score for test set was 0.3551.
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Gradient Boosting for regression | gradboost_model = GradientBoostingRegressor(n_estimators=300)
# Train the gradient-boosting ensemble on the training split.
gradboost_model.fit(X_train, y_train)
# Predict on both splits so train/test quality can be compared.
train_predictions = gradboost_model.predict(X_train)
test_predictions = gradboost_model.predict(X_test)
# Evaluate with r-squared and MSE, rounded to 4 decimal places; these names
# are reused by the metrics-table append in the next statement.
train_r2 = round(r2_score(y_train, train_predictions), 4)
test_r2 = round(r2_score(y_test, test_predictions), 4)
train_mse = round(mean_squared_error(y_train, train_predictions), 4)
test_mse = round(mean_squared_error(y_test, test_predictions), 4)
print('r-squared score for training set was {}. r-squared score for test set was {}.'.format(train_r2, test_r2))
print('MSE score for training set was {}. MSE score for test set was {}.'.format(train_mse, test_mse))
metrics_df = appendToMetricsdf(metrics_df, "Gradient Boosting", train_r2, test_r2, train_mse, test_mse) | r-squared score for training set was 0.8149. r-squared score for test set was 0.7033.
MSE score for training set was 0.1167. MSE score for test set was 0.1884.
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
# Extreme Gradient Boosting
# Passing the objective explicitly silences the runtime deprecation warning
# seen in the cell output: "reg:linear" (the old implicit default) was renamed
# "reg:squarederror"; both names select the same squared-error loss, so the
# fitted model and its scores are unchanged.
xgb_reg = xgb.XGBRegressor(objective="reg:squarederror")
xgb_reg.fit(X_train, y_train)
#predict on both splits
y_train_preds = xgb_reg.predict(X_train)
y_test_preds = xgb_reg.predict(X_test)
#scoring model: r-squared and MSE, rounded to 4 decimal places
test_r2 = round(r2_score(y_test, y_test_preds), 4)
train_r2 = round(r2_score(y_train, y_train_preds), 4)
test_mse = round(mean_squared_error(y_test, y_test_preds), 4)
train_mse = round(mean_squared_error(y_train, y_train_preds), 4)
print('r-squared score for training set was {}. r-squared score for test set was {}.'.format(train_r2, test_r2))
print('MSE score for training set was {}. MSE score for test set was {}.'.format(train_mse, test_mse))
metrics_df = appendToMetricsdf(metrics_df, "Extreme Gradient Boosting", train_r2, test_r2, train_mse, test_mse) | [16:43:04] WARNING: src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.
| Apache-2.0 | AirBNB_Analysis_San_Francisco.ipynb | zaveta/AirBNB-Analysis-San-Francisco |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.