code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # (VAD) Velocity Azimuth Display # # Argonne National Laboratory # # Original code by <NAME>: # # https://github.com/scollis/notebooks/blob/master/DYNAMO%20AIME%20workshop%20workbook.ipynb # + deletable=true editable=true from __future__ import print_function import matplotlib.pyplot as plt import numpy.ma as ma import numpy as np import pyart import warnings warnings.filterwarnings("ignore") # %matplotlib inline # + deletable=true editable=true def pyart_velocity_correct(norm_coh_power=None, norm_coh_power_value=0.5, gatefilter=None, nyquist_velocity=None): if norm_coh_power is None: # Copying a field and its shape to use as a # norm_coherent_power array with matching shape. radar.add_field_like('reflectivity', 'norm_coh_power', radar.fields['reflectivity']['data'].copy(), replace_existing=True) # Setting all values to norm_coh_power_value, # in the field norm_coh_power. 
(radar.fields['norm_coh_power']['data'])[ radar.fields['norm_coh_power']['data'] != norm_coh_power_value] = norm_coh_power_value norm_coh_power_used = 'norm_coh_power' else: norm_coh_power_used = norm_coh_power if gatefilter is none: gatefilter = pyart.correct.GateFilter(radar) gatefilter.exclude_below(norm_coh_power_used, norm_coh_power_value) else: gatefilter = gatefilter if nyquist_velocity is None: nyq = None corr_vel = pyart.correct.dealias_region_based( radar, vel_field='velocity', keep_original=False, gatefilter=gatefilter, nyquist_vel=nyq, centered=False) radar.add_field('corrected_velocity', corr_vel, replace_existing=True) else: nyq = nyquist_velocity corr_vel = pyart.correct.dealias_region_based( radar, vel_field='velocity', keep_original=False, gatefilter=gatefilter, nyquist_vel=nyq, centered=False) radar.add_field('corrected_velocity', corr_vel, replace_existing=True) # + deletable=true editable=true """ pyart.retrieve.velocity_azimuth_display ======================================= Retrieval of VADs from a radar object. .. autosummary:: :toctreeL generated/ :template: dev_template.rst velocity_azimuth_display _interval_mean _sd_to_uv _vad_calculation """ import numpy as np from pyart.core import HorizontalWindProfile def velocity_azimuth_display(radar, velocity, z_want=None, gatefilter=None): """ Velocity azimuth display. Parameters ---------- radar : Radar Radar object used. velocity : string Velocity field to use for VAD calculation. Other Parameters ---------------- z_want : array Heights for where to sample vads from. None will result in np.linespace(0, 10000, 100). gatefilter : GateFilter A GateFilter indicating radar gates that should be excluded when from the vad calculation. Returns ------- height : array Heights in meters above sea level at which horizontal winds were sampled. speed : array Horizontal wind speed in meters per second at each height. direction : array Horizontal wind direction in degrees at each height. 
u_wind : array U-wind mean in meters per second. v_wind : array V-wind mean in meters per second. Reference ---------- <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2000) BALTEX Radar Data Centre Products and their Methodologies. In SMHI Reports. Meteorology and Climatology. Swedish Meteorological and Hydrological Institute, Norrkoping. """ speed = [] angle = [] heights = [] z_gate_data = radar.gate_z['data'] if z_want is None: z_want = np.linspace(0, 1000, 100) else: z_want for i in range(len(radar.sweep_start_ray_index['data'])): index_start = radar.sweep_start_ray_index['data'][i] index_end = radar.sweep_end_ray_index['data'][i] if (index_end - index_start) % 2 == 0: print("even, all good") else: index_end = index_end - 1 velocities = radar.fields[ velocity]['data'][index_start:index_end] if gatefilter is not None: velocities = np.ma.masked_where( gatefilter.gate_excluded, velocities) #mask=velocities.mask #velocities[np.where(mask)]=np.nan azimuth = radar.azimuth['data'][index_start:index_end] elevation = radar.fixed_angle['data'][i] one_level = _vad_calculation(velocities, azimuth, elevation) bad = (np.isnan(one_level['speed'])) print('max height', z_gate_data[index_start, :][~bad].max(), ' meters') speed.append(one_level['speed'][~bad]) angle.append(one_level['angle'][~bad]) heights.append(z_gate_data[index_start, :][~bad]) speed_array = np.concatenate(speed) angle_array = np.concatenate(angle) height_array = np.concatenate(heights) arg_order = height_array.argsort() speed_ordered = speed_array[arg_order] height_ordered = height_array[arg_order] angle_ordered = angle_array[arg_order] print(height_ordered.max()) u_ordered, v_ordered = _sd_to_uv(speed_ordered, angle_ordered) u_mean = _interval_mean(u_ordered, height_ordered, z_want) v_mean = _interval_mean(v_ordered, height_ordered, z_want) u_mean = np.ma.masked_invalid(u_mean) v_mean = np.ma.masked_invalid(v_mean) vad = HorizontalWindProfile.from_u_and_v(z_want, u_mean, 
v_mean) return vad def _interval_mean(data, current_z, wanted_z): """ Find the mean of data indexed by current_z at wanted_z on intervals wanted_z+/- delta wanted_z. """ delta = wanted_z[1] - wanted_z[0] pos_lower = [np.argsort((current_z - ( wanted_z[i] - delta / 2.0))**2)[0] for i in range(len(wanted_z))] pos_upper = [np.argsort((current_z - ( wanted_z[i] + delta / 2.0))**2)[0] for i in range(len(wanted_z))] mean_values = np.array([data[pos_lower[i]:pos_upper[i]].mean() for i in range(len(pos_upper))]) return mean_values def _sd_to_uv(speed, direction): """ Takes speed and direction to create u_mean and v_mean. """ return (np.sin(direction) * speed), (np.cos(direction) * speed) def _vad_calculation(velocity_field, azimuth, elevation): """ Calculates VAD for a scan, returns speed and angle outdic = vad_algorithm(velocity_field, azimuth, elevation) velocity_field is a 2D array, azimuth is a 1D array, elevation is a number. All in degrees, m outdic contains speed, angle, variance. """ nrays, nbins = velocity_field.shape nrays2 = nrays // 2 velocity_count = np.empty((nrays2, nbins, 2)) velocity_count[:, :, 0] = velocity_field[0:nrays2, :] velocity_count[:, :, 1] = velocity_field[nrays2:, :] sinaz = np.sin(np.deg2rad(azimuth)) cosaz = np.cos(np.deg2rad(azimuth)) sumv = np.ma.sum(velocity_count, 2) vals = np.isnan(sumv) vals2 = np.vstack((vals, vals)) # Line below needs to be changed to 'is not' expression. 
count = np.sum(np.isnan(sumv) == False, 0) aa = count < 8 vals[:, aa] = 0 vals2[:, aa] = 0 count = np.float64(count) count[aa] = np.nan u_m = np.array([np.nansum(sumv, 0) // (2 * count)]) count[aa] = 0 cminusu_mcos = np.zeros((nrays, nbins)) cminusu_msin = np.zeros((nrays, nbins)) sincos = np.zeros((nrays, nbins)) sin2 = np.zeros((nrays, nbins)) cos2 = np.zeros((nrays, nbins)) for i in range(nbins): cminusu_mcos[:, i] = cosaz * (velocity_field[:, i] - u_m[:, i]) cminusu_msin[:, i] = sinaz * (velocity_field[:, i] - u_m[:, i]) sincos[:, i] = sinaz * cosaz sin2[:, i] = sinaz**2 cos2[:, i] = cosaz**2 cminusu_mcos[vals2] = np.nan cminusu_msin[vals2] = np.nan sincos[vals2] = np.nan sin2[vals2] = np.nan cos2[vals2] = np.nan sumcminu_mcos = np.nansum(cminusu_mcos, 0) sumcminu_msin = np.nansum(cminusu_msin, 0) sumsincos = np.nansum(sincos, 0) sumsin2 = np.nansum(sin2, 0) sumcos2 = np.nansum(cos2, 0) b_value = (sumcminu_mcos - (sumsincos*sumcminu_msin / sumsin2)) / ( sumcos2 - (sumsincos**2) / sumsin2) a_value = (sumcminu_msin - b_value*sumsincos) / sumsin2 speed = np.sqrt(a_value**2 + b_value**2) / np.cos( np.deg2rad(elevation)) angle = np.arctan2(a_value, b_value) return {'speed': speed, 'angle': angle} # + deletable=true editable=true # create a profile of 100 heights up to 500 m with 5 m/s winds at 25 degrees height = np.linspace(0, 1000, 100) speed = np.ones_like(height) * 5 direction = np.ones_like(height) * 25 profile = pyart.core.HorizontalWindProfile(height, speed, direction) # simulate a single sweep radar with a velocity field from the profile test_radar = pyart.testing.make_target_radar() test_radar.elevation['data'][:] = 45.0 test_radar.fixed_angle['data'][:] = 45.0 print("max height:", test_radar.gate_z['data'].max()) sim_vel = pyart.util.simulated_vel_from_profile(test_radar, profile) test_radar.add_field('velocity', sim_vel, replace_existing=True) # perform a VAD retrieval vad = velocity_azimuth_display( test_radar, 'velocity', z_want=height, gatefilter=None) 
# plot the original winds and the retrieved winds fig = plt.figure() ax = fig.add_subplot(111) ax.plot(profile.u_wind, profile.height, 'r-') ax.plot(profile.v_wind, profile.height, 'b-') ax.plot(vad.u_wind, vad.height, 'g--') ax.plot(vad.v_wind, vad.height, 'k--') ax.set_xlim(-5, 0) plt.show() # print out the results print("Height") print(profile.height[:5]) print(vad.height[:5]) print(profile.height[-5:]) print(vad.height[-5:]) print("u_wind:") print(profile.u_wind[:5]) print(vad.u_wind[:5]) print(profile.u_wind[-5:]) print(vad.u_wind[-5:]) print("v_wind:") print(profile.v_wind) print(vad.v_wind) #print(profile.v_wind[-5:]) #print(vad.v_wind[-5:]) print("speed:") print(profile.speed[:5]) print(vad.speed[:5]) print("direction:") print(profile.direction[:5]) print(vad.direction[:5]) # + deletable=true editable=true test_radar = pyart.io.read(pyart.testing.NEXRAD_ARCHIVE_MSG1_FILE) new_radar = test_radar.extract_sweeps([4]) radar_sweep = myradar.extract_sweeps([5]) height = np.linspace(50, 10766, 101) print("max height:", new_radar.gate_z['data'][0].max()) # perform a VAD retrieval vad_test = velocity_azimuth_display( radar_sweep, 'velocity', z_want=height, gatefilter=None) # print out the results print("Height") print(vad_test.height) print("u_wind:") print(vad_test.u_wind) print("v_wind:") print(vad_test.v_wind) print("speed:") print(vad_test.speed) print("direction:") print(vad_test.direction) # + deletable=true editable=true # Without in velocity_azimuth_display function: # mask=velocities.mask # velocities[np.where(mask)]=np.nan fig = plt.figure() plt.plot(vad_test.u_wind, vad_test.height, 'b-', label='U Wind') plt.plot(vad_test.v_wind, vad_test.height, 'r-', label='V Wind') plt.xlim(-25, 35) plt.ylim(-500, 11000) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
plt.show() # + deletable=true editable=true # Within velocity_azimuth_display function: # mask=velocities.mask # velocities[np.where(mask)]=np.nan fig = plt.figure() plt.plot(vad_test.u_wind, vad_test.height, 'b-', label='U Wind') plt.plot(vad_test.v_wind, vad_test.height, 'r-', label='V Wind') plt.xlim(-25, 35) plt.ylim(-500, 11000) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.show() # - for i in range(len(test_radar.sweep_start_ray_index['data'])): index_start = test_radar.sweep_start_ray_index['data'][i] index_end = test_radar.sweep_end_ray_index['data'][i] if (index_end - index_start) % 2 == 0: print("even, all good") else: index_end = index_end - 1 velocity_field = test_radar.fields[ 'velocity']['data'][index_start:index_end] azimuth = test_radar.azimuth['data'][index_start:index_end] elevation = test_radar.fixed_angle['data'][i] nrays, nbins = velocity_field.shape nrays2 = nrays // 2 velocity_count = np.empty((nrays2, nbins, 2)) velocity_count[:, :, 0] = velocity_field[0:nrays2, :] velocity_count[:, :, 1] = velocity_field[nrays2:, :] sinaz = np.sin(np.deg2rad(azimuth)) cosaz = np.cos(np.deg2rad(azimuth)) sumv = np.ma.sum(velocity_count, 2) vals = np.isnan(sumv) vals2 = np.vstack((vals, vals)) # Jonathan, still can't get == 0 switched to is not the expression. # Have errors, I apologize I might be overthinking how to accomplish that. 
count = np.sum(np.isnan(sumv) == False, 0) aa = count < 8 vals[:, aa] = 0 vals2[:, aa] = 0 count = np.float64(count) count[aa] = np.nan u_m = np.array([np.nansum(sumv, 0) / (2 * count)]) count[aa] = 0 print(u_m) # + deletable=true editable=true filename = '/home/zsherman/training_exercises/data/KLOT20130417_235520_V06.gz' myradar = pyart.io.read(filename) # + deletable=true editable=true myradar.sweep_start_ray_index['data'] # + deletable=true editable=true ray_start = [] ray_end = [] for i in range(len(myradar.sweep_start_ray_index['data'])): index_start = myradar.sweep_start_ray_index['data'][i] index_end = myradar.sweep_end_ray_index['data'][i] ray_start.append(index_start) ray_end.append(index_end) ray_start = np.array(ray_start) ray_start ray_end # + deletable=true editable=true myradar.azimuth # + deletable=true editable=true myradar.fixed_angle['data'] # + deletable=true editable=true vad = velocity_azimuth_display(myradar, 'velocity') # + deletable=true editable=true corr_vel = pyart.correct.dealias_region_based( myradar, vel_field='velocity', keep_original=False, gatefilter=None, nyquist_vel=None, centered=True) myradar.add_field('corrected_velocity', corr_vel, replace_existing = True) vad_corrected = velocity_azimuth_display(myradar, 'corrected_velocity', z_count=101) # + deletable=true editable=true vad.speed # + deletable=true editable=true vad.u_wind # + deletable=true editable=true vad.v_wind # + deletable=true editable=true vad.direction
notebooks/vad_function_scotts_code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Select all sub corpus images # # We filtered the pma corpus to keep only tweets containing some keywords, and we referenced all the images associated in a csv file sha1_souscorpus_pma.csv. # Based on that we just create a new folder containing the images. import os import pandas as pd root = '/home/tyra/Documents/CERES/PMA' input_ = 'images' output = 'sous_corpus_pma' file = 'sha1_souscorpus_pma.csv' df = pd.read_csv(os.path.join(root, file), header=0) df.head() for sha1, ext in zip(list(df['sha1']), list(df['extension'])): name = sha1 + '.' + ext # print(name) os.symlink(os.path.join(root, input_, name), os.path.join(root, output, name)) # ### Check which images of pma_clean are in the new corpus to_check = '/home/tyra/Documents/pma_clean' liste = os.listdir(to_check) for image in liste: if image not in os.listdir(os.path.join(root, output)): os.remove(os.path.join(to_check, image))
Create new Corpus PMA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-nff] # language: python # name: conda-env-.conda-nff-py # --- # # LitNFFs # * This notebook shows how to speed up [Neural Force Field](https://github.com/learningmatter-mit/NeuralForceField) model training on [MD17](https://www.science.org/doi/10.1126/sciadv.1603015) datasets using the LitMatter template. # * The training workflow shown here can be scaled to hundreds of GPUs by changing a single keyword argument! # %load_ext autoreload # %autoreload 2 # + import nff import torch import numpy as np import pandas as pd import matplotlib.pyplot as plt import pytorch_lightning as pl # - # %matplotlib inline # + from lit_data.nff_data import NFFDataModule # change to point to the NeuralForceField repo dm = NFFDataModule(path='../../NeuralForceField/tutorials/data/dataset.pth.tar', batch_size=8) dm.prepare_data() dm.setup() # - mean_energy = dm.train_dataset.props['energy'].mean().item() std_energy = dm.train_dataset.props['energy'].std().item() # + from lit_models.lit_nffs import LitNFF model_params = { 'model_type': 'SchNet', 'n_atom_basis': 128, 'n_filters': 128, 'n_gaussians': 32, 'n_convolutions': 4, 'cutoff': 5.0, 'trainable_gauss': True, 'dropout_rate': 0.2 } loss_params = {'energy': 0.05, 'energy_grad': 0.95 } model = LitNFF(model_params, loss_params, lr=3e-4) # - trainer = pl.Trainer(gpus=-1, # use all available GPUs on each node # num_nodes=1, # change to number of available nodes # accelerator='ddp', max_epochs=100, ) trainer.fit(model, datamodule=dm) metrics = trainer.validate(model, datamodule=dm) results = trainer.predict(model, dataloaders=[dm.val_dataloader()]) y_energy = [r['energy'] for r in results] y_force = [r['energy_grad'] for r in results] results = {'energy': torch.cat(y_energy).view(-1), 'energy_grad': torch.cat(y_force).view(-1)} yhat_energy, 
yhat_force = dm.val_dataset[:]['energy'], dm.val_dataset[:]['energy_grad'] targets = {'energy_grad': torch.cat(yhat_force).view(-1), 'energy': yhat_energy} # + units = { 'energy_grad': r'kcal/mol/$\AA$', 'energy': 'kcal/mol' } fig, ax_fig = plt.subplots(1, 2, figsize=(12, 6)) for ax, key in zip(ax_fig, units.keys()): # pred = torch.stack(results[key], dim=0).view(-1).detach().cpu().numpy() # targ = torch.stack(targets[key], dim=0).view(-1).detach().cpu().numpy() pred = results[key].detach().numpy() targ = targets[key].numpy() mae = abs(pred-targ).mean() ax.scatter(pred, targ, color='#ff7f0e', alpha=0.3) lim_min = min(np.min(pred), np.min(targ)) * 1.1 lim_max = max(np.max(pred), np.max(targ)) * 1.1 ax.set_xlim(lim_min, lim_max) ax.set_ylim(lim_min, lim_max) ax.set_aspect('equal') ax.plot((lim_min, lim_max), (lim_min, lim_max), color='#000000', zorder=-1, linewidth=0.5) ax.set_title(key.upper(), fontsize=14) ax.set_xlabel('predicted %s (%s)' % (key, units[key]), fontsize=12) ax.set_ylabel('target %s (%s)' % (key, units[key]), fontsize=12) ax.text(0.1, 0.9, 'MAE: %.2f %s' % (mae, units[key]), transform=ax.transAxes, fontsize=14) plt.show()
LitNFFs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd import numpy as np from glob import glob import re from datetime import datetime import json import matplotlib.pyplot as plt # # Load Data df = pd.read_json("../test-data/nv-routes.jsonlines", lines=True) # # Basic Info df.head(3) df.head(3).T df.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T df.info() # # Basic Info on some columns # ## route_name df.route_name.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T df.route_name.value_counts() df.route_name.str.len().hist() # ### Short values df[ df.route_name.str.len()<10 ].route_name.value_counts() # ### Long values df[ df.route_name.str.len()>25 ].route_name.value_counts() # ### Alphabetically df.route_name.sort_values() df.route_name.sort_values().sample(10) # # Expand dict columns df.dtypes # ## Grades dft = pd.DataFrame(df.grade.to_dict().values()).rename(columns=lambda c: 'grades-'+c) dft df = pd.merge( df, dft, left_index=True, right_index=True, how='inner' ) df.drop(columns='grade', inplace=True) # ## type dft = pd.DataFrame(df.type.to_dict().values()).rename(columns=lambda c: 'type-'+c) dft dft.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T dft.mode() dft.describe() # ##### The only unique value in all 'type' columns is 'True' df = pd.merge( df, dft, left_index=True, right_index=True, how='inner' ) df.drop(columns='type', inplace=True) # ## metadata dft = pd.DataFrame(df.metadata.to_dict().values()).rename(columns=lambda c: 'metadata-'+c) dft dft.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T dft.describe() df = pd.merge( df, dft, left_index=True, right_index=True, how='inner' ) df.drop(columns='metadata', inplace=True) # # Look at list columns # ## description df.description.str.len().value_counts() df[ df.description.str.len() > 
2 ].description.head(2).values # ## Location df.location.str.len().value_counts() df[ df.location.str.len() > 2 ].location.head(2).values # ## protection df.protection.str.len().value_counts() df[ df.protection.str.len() > 2 ].protection.head(2).values # ##### Do nothing with list columns # # FA column df.fa.head(4) df.fa.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T df.fa.agg(['count', 'nunique', lambda x: x.isnull().sum()]).T # # Feature Extraction # ## Extract Year df['year'] = df['fa'].str.extract(r"(\d+)\s*$", expand=False) df[['fa', 'year']] df['year'] = df['year'].fillna('') # + df['fa_text'] = df.apply(lambda x: x['fa'][::-1].replace(x['year'][::-1], "", 1)[::-1], axis=1) \ .str.strip(", '") df[['fa', 'fa_text']] # - # ## Split Climbers df = df.reset_index().rename(columns={'index': 'ID'}) # !mkdir -p data/ df['fa_text'] \ .str.split(" and |&|,") \ .explode() \ .str.strip() \ .to_csv("data/tmp-nv-routes-climbers-expanded.csv", index=True) # ##### Look at the save data to take decisions on improving the extraction # # TODO from here df['fa_text'] \ .str.split(" and |&|,") \ .explode() \ .str.strip() \ .reset_index().rename(columns={'index': 'ID'}) \ .groupby('ID')['fa_text'] \ .apply(lambda x: ';'.join(x.astype(str))) # split climbers by all possible separators # expand them in rows # clean them # join them all back by ";" df['climbers'] = df['fa_text'] \ .str.split(" and |&|,") \ .explode() \ .str.strip() \ .groupby(['ID']).apply(lambda x: ';'.join(x.astype(str))) df['climbers'] df # ## Normalize climber names # # Prepare a manual decision dict to rename climbers names as much as possible df['climbers'].str.split(";", expand=False)\ .explode() \ .sort_values() renamings = {k: k for k in df['climbers'].str.split(";", expand=False)\ .explode() \ .sort_values().unique()} renamings # Copy and paste above and manual edit as desired. # This is probably most suitable step to take in Prophecies application if it becomes pretty big task. 
my_manual_renamings = { #'<NAME>': '<NAME>', #'<NAME>': '<NAME>', #'<NAME>': '<NAME>', #'<NAME>': '<NAME>', '<NAME>': '<NAME>', #'<NAME>': '<NAME>' } def run_all_renamings(x): for k, v in my_manual_renamings.items(): x = x.replace(k, v) return x df['climbers'] = df['climbers'].map(run_all_renamings) # ## Expand Climbers in columns dft = df['climbers'].str.split(";", expand=True) dft.columns = ['climber_' + str(i+1) for i in dft.columns] df = pd.concat([df, dft], axis=1) df # # Normalize year def normalize_year(y): y = int(y) if y < 22: # it is only 2 digits and passed 2000s y += 2000 elif y < 100: # it is only 2 digits and before 2000s y += 1900 return y df.insert( df.columns.tolist().index('year')+1, 'year_normalized', df['year'].map(normalize_year) ) df
snippets/processing-nv-routes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Efectos de la localidad # ## Efectos de no saber usar los bucles for # %%file f1.c void f1(int *arr1, int *arr2, int tam) { for (int i = 0; i < tam; i++) { arr1[i] *= 3; } for (int i = 0; i < tam; i++) { arr2[i] *= 3; } } # + # para generar el object file # ! gcc -c -fpic f1.c # para crear la shared library # ! gcc -shared f1.o -o f1.so # - # %%file f2.c void f2(int *arr1, int *arr2, int tam) { for (int i = 0; i < tam; i++) { arr1[i] *= 3; arr2[i] *= 3; } } # + # para generar el object file # ! gcc -c -fpic f2.c # para crear la shared library # ! gcc -shared f2.o -o f2.so # - import ctypes import numpy def ctypes_f1(): # ruta de la shared library lib = ctypes.CDLL('./f1.so') # tipo de dato de los argumentos lib.f1.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.int32), numpy.ctypeslib.ndpointer(dtype=numpy.int32), ctypes.c_int ] # se devuelve la función configurada return lib.f1 f1 = ctypes_f1() def ctypes_f2(): # ruta de la shared library lib = ctypes.CDLL('./f2.so') # tipo de dato de los argumentos lib.f2.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.int32), numpy.ctypeslib.ndpointer(dtype=numpy.int32), ctypes.c_int ] # se devuelve la función configurada return lib.f2 f2 = ctypes_f2() low = 1 top = 1000000 size = top arr1f1 = numpy.random.randint(low,top,size,dtype=numpy.int32) arr1f2 = arr1f1.copy() arr2f1 = numpy.random.randint(low,top,size,dtype=numpy.int32) arr2f2 = arr2f1.copy() f1(arr1f1,arr2f1,size) f2(arr1f2,arr2f2,size) numpy.array_equal(arr1f1, arr1f2) numpy.array_equal(arr2f1, arr2f2) import time lf1 = [] lf2 = [] for i in range(100): arr1f1 = numpy.random.randint(low,top,size,dtype=numpy.int32) arr1f2 = arr1f1.copy() arr2f1 = numpy.random.randint(low,top,size,dtype=numpy.int32) arr2f2 = arr2f1.copy() tic = 
time.time() f1(arr1f1,arr2f1,size) toc = time.time() lf1.append(toc-tic) tic = time.time() f2(arr1f1,arr2f1,size) toc = time.time() lf2.append(toc-tic) import matplotlib.pyplot as plt from utilities import filtro_mediana plt.plot(range(100),lf1,label='f1') plt.plot(range(100),filtro_mediana(lf1,13),label='f1') plt.legend() plt.plot(range(100),lf2,label='f2') plt.plot(range(100),filtro_mediana(lf2,13),label='f2') plt.legend() from statistics import mean noms = ['f1','f1 sin ruido','f2','f2 sin ruido'] proms = [ mean(lf1), mean(filtro_mediana(lf1,7)), mean(lf2), mean(filtro_mediana(lf2,7)) ] plt.bar(noms,proms,color='r') plt.xlabel('lenguaje') plt.ylabel('tiempo promedio') plt.tight_layout() mean(filtro_mediana(lf1,21))/mean(filtro_mediana(lf2,21)) # En este caso, se observa que las operaciones de lectura escritura tienen más influencia en el tiempo de ejecución que el ruido. # ## Como pasar un puntero a puntero en ctypes # %%file negmat.c void negmat(double **A, double **B, int N) { for(int i = 0; i < N; i++) { for(int j = 0; j < N; j++) { B[i][j] = 0.0 - A[i][j]; } } } # + # para generar el object file # ! gcc -c -fpic negmat.c # para crear la shared library # ! gcc -shared negmat.o -o negmat.so # - def ctypes_negmat(): # ruta de la shared library lib = ctypes.CDLL('./negmat.so') # tipo de dato de los argumentos lib.negmat.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), ctypes.c_int ] # se devuelve la función configurada return lib.negmat negmat = ctypes_negmat() # Los arreglos de numpy brindan información sobre la cantidad de bytes que hay entre elementos continuos y elementos de una fila y otra. 
def ptr2ptr(x): return (x.__array_interface__['data'][0] + numpy.arange(x.shape[0])*x.strides[0]).astype(numpy.uintp) n = 2048 A = numpy.random.rand(n,n) B = 0-A Bc = numpy.zeros_like(B) App = ptr2ptr(A) Bcpp = ptr2ptr(Bc) negmat(App,Bcpp,n) numpy.linalg.norm(B-Bc)/numpy.linalg.norm(B) # ## Efectos de la localidad en la operación Matriz x Matrix n = 512 A = numpy.random.rand(n,n) B = numpy.random.rand(n,n) C = numpy.dot(A,B) App = ptr2ptr(A) Bpp = ptr2ptr(B) # ### Caso IJK # %%file mmIJK.c void mmIJK(double **A, double **B, double **C, int N) { for(int i = 0; i < N; i++) { for(int j = 0; j < N; j++) { for(int k = 0; k < N; k++) { C[i][j] += A[i][k] * B[k][j]; } } } } # + # para generar el object file # ! gcc -c -fpic mmIJK.c # para crear la shared library # ! gcc -shared mmIJK.o -o mmIJK.so # - def ctypes_mmIJK(): # ruta de la shared library lib = ctypes.CDLL('./mmIJK.so') # tipo de dato de los argumentos lib.mmIJK.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), ctypes.c_int ] # se devuelve la función configurada return lib.mmIJK mmIJK = ctypes_mmIJK() Cijk = numpy.zeros_like(C) Cijkpp = ptr2ptr(Cijk) mmIJK(App,Bpp,Cijkpp,n) numpy.linalg.norm(C-Cijk)/numpy.linalg.norm(C) # ### Caso IKJ # %%file mmIKJ.c void mmIKJ(double **A, double **B, double **C, int N) { for(int i = 0; i < N; i++) { for(int k = 0; k < N; k++) { for(int j = 0; j < N; j++) { C[i][j] += A[i][k] * B[k][j]; } } } } # + # para generar el object file # ! gcc -c -fpic mmIKJ.c # para crear la shared library # ! 
gcc -shared mmIKJ.o -o mmIKJ.so # - def ctypes_mmIKJ(): # ruta de la shared library lib = ctypes.CDLL('./mmIKJ.so') # tipo de dato de los argumentos lib.mmIKJ.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), ctypes.c_int ] # se devuelve la función configurada return lib.mmIKJ mmIKJ = ctypes_mmIKJ() Cikj = numpy.zeros_like(C) Cikjpp = ptr2ptr(Cikj) mmIKJ(App,Bpp,Cikjpp,n) numpy.linalg.norm(C-Cikj)/numpy.linalg.norm(C) # ### Caso JKI # %%file mmJKI.c void mmJKI(double **A, double **B, double **C, int N) { for(int j = 0; j < N; j++) { for(int k = 0; k < N; k++) { for(int i = 0; i < N; i++) { C[i][j] += A[i][k] * B[k][j]; } } } } # + # para generar el object file # ! gcc -c -fpic mmJKI.c # para crear la shared library # ! gcc -shared mmJKI.o -o mmJKI.so # - def ctypes_mmJKI(): # ruta de la shared library lib = ctypes.CDLL('./mmJKI.so') # tipo de dato de los argumentos lib.mmJKI.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), ctypes.c_int ] # se devuelve la función configurada return lib.mmJKI mmJKI = ctypes_mmJKI() Cjki = numpy.zeros_like(C) Cjkipp = ptr2ptr(Cjki) mmJKI(App,Bpp,Cjkipp,n) numpy.linalg.norm(C-Cjki)/numpy.linalg.norm(C) # ### Caso JIK # %%file mmJIK.c void mmJIK(double **A, double **B, double **C, int N) { for(int j = 0; j < N; j++) { for(int i = 0; i < N; i++) { for(int k = 0; k < N; k++) { C[i][j] += A[i][k] * B[k][j]; } } } } # + # para generar el object file # ! gcc -c -fpic mmJIK.c # para crear la shared library # ! 
gcc -shared mmJIK.o -o mmJIK.so # - def ctypes_mmJIK(): # ruta de la shared library lib = ctypes.CDLL('./mmJIK.so') # tipo de dato de los argumentos lib.mmJIK.argtypes = [ numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), numpy.ctypeslib.ndpointer(dtype=numpy.uintp, ndim=1, flags='C'), ctypes.c_int ] # se devuelve la función configurada return lib.mmJIK mmJIK = ctypes_mmJIK() Cjik = numpy.zeros_like(C) Cjikpp = ptr2ptr(Cjik) mmJIK(App,Bpp,Cjikpp,n) numpy.linalg.norm(C-Cjik)/numpy.linalg.norm(C) # ## Mediciones de tiempo ns = 2**numpy.arange(1,10) veces = range(40) lijk = [] likj = [] ljki = [] ljik = [] for n in ns: lijki = [] likji = [] ljkii = [] ljiki = [] for _ in veces: # entradas A = numpy.random.rand(n,n) B = numpy.random.rand(n,n) # referencia C = numpy.dot(A,B) # para la forma IJK Cijk = numpy.zeros_like(C) # para la forma IKJ Cikj = numpy.zeros_like(C) # para la forma JKI Cjki = numpy.zeros_like(C) # para la forma JIK Cjik = numpy.zeros_like(C) # ptr 2 ptr App = ptr2ptr(A) Bpp = ptr2ptr(B) Cijkpp = ptr2ptr(Cijk) Cikjpp = ptr2ptr(Cikj) Cjkipp = ptr2ptr(Cjki) Cjikpp = ptr2ptr(Cjik) # IJK tic = time.time() mmIJK(App,Bpp,Cijkpp,n) toc = time.time() lijki.append(toc-tic) # IKJ tic = time.time() mmIKJ(App,Bpp,Cikjpp,n) toc = time.time() likji.append(toc-tic) # JKI tic = time.time() mmJKI(App,Bpp,Cjkipp,n) toc = time.time() ljkii.append(toc-tic) # JIK tic = time.time() mmJIK(App,Bpp,Cjikpp,n) toc = time.time() ljiki.append(toc-tic) lijk.append(mean(filtro_mediana(lijki,11))) likj.append(mean(filtro_mediana(likji,11))) ljki.append(mean(filtro_mediana(ljkii,11))) ljik.append(mean(filtro_mediana(ljiki,11))) plt.plot(ns,lijk,'r-o',label='IJK') plt.plot(ns,likj,'g-o',label='IKJ') plt.plot(ns,ljki,'b-o',label='JKI') plt.plot(ns,ljik,'c-o',label='JIK') plt.legend() plt.show() # Acercamiento a los dos últimos tamaños plt.plot(ns,lijk,'r-o',label='IJK') plt.plot(ns,likj,'g-o',label='IKJ') 
plt.plot(ns,ljki,'b-o',label='JKI') plt.plot(ns,ljik,'c-o',label='JIK') plt.xlim([ns[-2],ns[-1]]) plt.legend() plt.show() # Una explicación detallada sobre el producto matriz-matriz se puede encontrar en el primer capítulo del libro "Matrix Computations" de Golub. # ! rm *.o # ! rm *.so # ! rm *.c
efectos-localidad-matriz-matriz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import numpy as np
import pandas as pd
import gc
import time
from contextlib import contextmanager
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE(review): sklearn.externals.joblib is deprecated in modern scikit-learn;
# kept here because this is a Python 2-era notebook pinned to old versions.
from sklearn.externals import joblib
# %matplotlib inline
import seaborn as sn
from tqdm import tqdm_notebook as tqdm
from scipy.stats import skew, kurtosis, iqr

# +
#get top 700 features
# -

# Feature-importance table produced by an earlier LightGBM run
# (one row per feature per fold).
pruning = pd.read_csv('feature_importance_lgb1000features_0.01.csv')

# Total importance per feature, summed across folds.
pruning_object = pruning.groupby(by='feature').agg({'importance': sum}).reset_index()

# Rank features by total importance, most important first.
pruning_rank = pruning_object.sort_values('importance', ascending=False).reset_index()

# Keep exactly the top 700 features.
# FIX: the original used `pruning_rank.loc[0:700, ]`; `.loc` slicing is
# inclusive of BOTH endpoints, so it actually selected 701 rows.
# `.iloc[:700]` selects positions 0..699 — exactly 700, matching the intent
# (and the `df_700.csv` output name).
pruning_rank_700 = pruning_rank.iloc[:700]

pruning_rank_700_name = np.array(pruning_rank_700['feature'])

# The target, the row id and the helper index column are not model features,
# but they must survive the pruning so downstream scripts can still use them.
pruning_rank_700_name = np.append(pruning_rank_700_name, ('TARGET', 'SK_ID_CURR', 'index'))

# +
#read df
# -

# Full frame with the original ~1000 engineered features.
df = pd.read_csv('df_1000.csv')

# NOTE(review): `ce` is not used below in this script; import kept as-is.
import category_encoders as ce

categorical_columns = ['CODE_GENDER', 'EMERGENCYSTATE_MODE', 'FLAG_CONT_MOBILE',
                       'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5',
                       'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8',
                       'FLAG_DOCUMENT_9', 'FLAG_DOCUMENT_11', 'FLAG_DOCUMENT_18',
                       'FLAG_EMAIL', 'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_OWN_CAR',
                       'FLAG_OWN_REALTY', 'FLAG_PHONE', 'FLAG_WORK_PHONE',
                       'FONDKAPREMONT_MODE', 'HOUR_APPR_PROCESS_START', 'HOUSETYPE_MODE',
                       'LIVE_CITY_NOT_WORK_CITY', 'LIVE_REGION_NOT_WORK_REGION',
                       'NAME_CONTRACT_TYPE', 'NAME_TYPE_SUITE', 'NAME_INCOME_TYPE',
                       'NAME_EDUCATION_TYPE', 'NAME_FAMILY_STATUS', 'NAME_HOUSING_TYPE',
                       'OCCUPATION_TYPE', 'ORGANIZATION_TYPE', 'REG_CITY_NOT_LIVE_CITY',
                       'REG_CITY_NOT_WORK_CITY', 'REG_REGION_NOT_LIVE_REGION',
                       'REG_REGION_NOT_WORK_REGION', 'WALLSMATERIAL_MODE',
                       'WEEKDAY_APPR_PROCESS_START']

# Label-encode the categorical columns.  `cat.codes` assigns -1 to NaN, so the
# `+1` maps missing values to 0 and real categories to 1..n before the final
# cast to int.
for col in categorical_columns:
    df[col] = df[col].astype('category')
for col in categorical_columns:
    df[col] = pd.Categorical(df[col].cat.codes + 1)
for col in categorical_columns:
    df[col] = df[col].astype(int)

# +
#get new df
# -

# Restrict the frame to the selected 700 features (+ TARGET / SK_ID_CURR /
# index) and persist it for the next modelling step.
df = df[pruning_rank_700_name]
df.to_csv('df_700.csv', index=False)
homecredit/0-produce_features-700.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ### 1. Data pre-processing

# +
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras import backend as K

num_classes = 10
img_rows, img_cols = 28, 28

# Load MNIST through Keras' bundled loader: trainX is a 60000 x 28 x 28
# array of images and trainY holds the digit label for each image.
(trainX, trainY), (testX, testY) = mnist.load_data()

# Shape the input to match the image-data layout the active backend expects
# (channels-first vs channels-last).
if K.image_data_format() == 'channels_first':
    trainX = trainX.reshape(trainX.shape[0], 1, img_rows, img_cols)
    testX = testX.reshape(testX.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    trainX = trainX.reshape(trainX.shape[0], img_rows, img_cols, 1)
    testX = testX.reshape(testX.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel intensities from 0-255 into [0, 1].
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.0
testX /= 255.0

# Convert the integer labels to the required one-hot encoding.
trainY = keras.utils.to_categorical(trainY, num_classes)
testY = keras.utils.to_categorical(testY, num_classes)
# -

# ### 2. Define the convolutional neural network with the Keras API.

# +
# Define the model with the Keras sequential API:
# two conv+pool stages followed by a dense head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu',
                 input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

# Specify the loss function, the optimizer and the evaluation metric.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=['accuracy'])
# -

# ### 3. Train the model with the Keras API and compute accuracy on the test data.

# +
model.fit(trainX, trainY,
          batch_size=128,
          epochs=10,
          validation_data=(testX, testY))

# Compute accuracy on the held-out test data.
score = model.evaluate(testX, testY)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
TensorFlow Practical Google Deep Learning Framework/1.4.0/Chapter10/3. Keras-CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.2 64-bit (''.venv'': venv)' # name: python3 # --- # + import pandas as pd df = pd.read_csv('koala-survey-sightings-data.csv', encoding='utf-8', parse_dates=[['Date', 'Time']]) df.dropna(subset=['HeightOfKoalaInTree_m', 'HeightOfTree_m'], inplace=True) # Drop entries recorded at the exact same time df.drop_duplicates(subset = ["Date_Time"], inplace=True) # Calculate the percentage of the tree the Koala has climbed df['PercentageHeightClimbed'] = df['HeightOfKoalaInTree_m'] / df['HeightOfTree_m'] df # - df[['Date_Time', 'PercentageHeightClimbed']].sort_values(by=['Date_Time']) import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (15,3) plt.scatter(df['Date_Time'],df['PercentageHeightClimbed']) plt.ylabel("Percentage of tree climbed") plt.xlabel("Date") # + for n in df['SurveyArea'].unique(): print(f"<option value='{n}'>{n}</option>") sorted(df['TreeSpecies'].dropna().unique())
api/data/koala.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Demos: Lecture 19 # + import pennylane as qml from pennylane import numpy as np from itertools import chain from lecture19_helpers import * # - # ## Demo 1: QAOA from scratch # ## Demo 2: using `qml.qaoa`
demos/Lecture19-Demos-Blank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### 0. Make Imports and Set Up Device # All imports are put into this first cell to make managing them easier. Next we detect if a compatible GPU is available in the local machine. If so, it will be used to train our network, which is much faster than using a CPU. # + from shutil import copy from os import listdir, makedirs from os.path import isdir, join, splitext from time import time import torch from torch import nn from torch import optim from torch.utils.data import DataLoader import torchvision from torchvision.datasets import ImageFolder from torchvision import transforms as tr # + def setup_device() -> torch.device: if (not torch.cuda.is_available()): print("No CUDA GPUs found. CPU selected as training device.") return torch.device("cpu") device_id = 0 device = torch.device(f"cuda:{device_id}") count = torch.cuda.device_count() name = torch.cuda.get_device_name(device_id) capability = torch.cuda.get_device_capability(device_id) print(f"{count} CUDA GPUs available. Using {name} with CUDA {capability[0]}.{capability[1]} capability.") return device device = setup_device() # - # ### 1. Divide the Dataset into Categories # The used dataset doesn't contain any explicit category labels. However, the source images are sorted by category, with each of the 17 categories having exactly 80 images. Therefore we can label the images by simply counting them. The code assumes the raw images are placed into _data/jpg_ directory. It splits the data into training, validation and testing subsets, then creates a subdirectory for each subset in _data_ dir. In each of those, another set of subdirs is created - one for each category, named after the category index (0 through 16) and flower images from that category are then copied inside. 
# The number of copied files for each subset is defined in the _subset\_splits_ dict.

def is_file_jpg(file_path: str) -> bool:
    """Return True when `file_path` has a .jpg extension (case-insensitive)."""
    path_root, extension = splitext(file_path)
    return extension.lower() == ".jpg"


# +
raw_data_path = "data/jpg"
category_count = 17
images_per_category = 80

# How many images in each category should fall into a data subset
subset_splits = {"training": 56, "validation": 16, "testing": 8}
assert sum(subset_splits.values()) == images_per_category

# FIX: os.listdir returns entries in arbitrary, filesystem-dependent order,
# but the counting-based category assignment below relies on the images
# appearing in their original (filename) order.  Sort explicitly so the
# labelling is deterministic and correct on every platform.
image_list = sorted(file for file in listdir(raw_data_path) if is_file_jpg(file))
assert len(image_list) == category_count * images_per_category
image_list[:10]


# +
def assign_images_to_categories(category_count: int, images_per_category: int, images: list) -> dict:
    """Map category index -> list of its image filenames.

    Assumes `images` is ordered so that each consecutive run of
    `images_per_category` files belongs to one category.
    """
    categories = {}
    for category_index in range(category_count):
        first_image_in_category = category_index * images_per_category
        last_image_in_category = first_image_in_category + images_per_category
        categories[category_index] = images[first_image_in_category:last_image_in_category]
    return categories


category_dict = assign_images_to_categories(category_count, images_per_category, image_list)
assert len(category_dict.keys()) == category_count
assert len(category_dict[category_count - 1]) == images_per_category


# +
def split_data_into_subsets(category_dict: dict, subset_splits: dict) -> (dict, dict, dict):
    """Split each category's image list into training / validation / testing dicts."""
    train, valid, test = {}, {}, {}
    # Boundaries inside each per-category list, derived from the split sizes.
    first_validation_image = subset_splits["training"]
    first_testing_image = first_validation_image + subset_splits["validation"]
    for cat_index, cat_images in category_dict.items():
        train[cat_index] = cat_images[:first_validation_image]
        valid[cat_index] = cat_images[first_validation_image:first_testing_image]
        test[cat_index] = cat_images[first_testing_image:]
    return train, valid, test


training_images, validation_images, testing_images = split_data_into_subsets(category_dict, subset_splits)
assert len(training_images.keys()) == len(validation_images.keys()) == len(testing_images.keys()) == category_count
assert len(training_images[10]) == subset_splits["training"]
assert len(validation_images[4]) == subset_splits["validation"]
assert len(testing_images[16]) == subset_splits["testing"]


# +
def create_directory(path: str) -> None:
    # makedirs raises when the directory already exists, hence the guard.
    if not isdir(path):
        makedirs(path)


def create_subset_data_directories(subset_dict: dict, subset_name: str, raw_data_path: str) -> None:
    """Copy each category's images into data/<subset_name>/<category_index>/."""
    subset_dir_path = join("data", subset_name)
    create_directory(subset_dir_path)
    for category_index, category_images in subset_dict.items():
        category_path = join(subset_dir_path, str(category_index))
        create_directory(category_path)
        for image in category_images:
            source_path = join(raw_data_path, image)
            destination = join(category_path, image)
            copy(source_path, destination)


create_subset_data_directories(training_images, "training", raw_data_path)
create_subset_data_directories(validation_images, "validation", raw_data_path)
create_subset_data_directories(testing_images, "testing", raw_data_path)
# -

# ### 2. Pre-process Data and Load into Data Loaders
# Before it can be used for training, the data needs to be pre-processed and assigned to DataLoaders, which will be used to provide image batches to the network.

''' The network that will be used in transfer learning has been pre-trained using
normalized data. Therefore the same transformation must be performed for the new data,
for the training to be effective. Below are the values used for original normalization,
taken from Torchvision documentation. '''
normalization_means = [0.485, 0.456, 0.406]
normalization_stds = [0.229, 0.224, 0.225]
final_image_size = 224

# +
''' These transformations should help the network to learn translation, rotation and
size invariance. In turn, this should improve its ability to generalize and reduce
over-training. Additionally, we normalize the input data to make it more statistically
similar to the data that the base network was pre-trained on. '''
training_transforms = [tr.RandomRotation(degrees=10, expand=True),
                       tr.RandomResizedCrop(size=final_image_size, scale=[0.75, 1.0]),
                       tr.ToTensor(),
                       tr.Normalize(mean=normalization_means, std=normalization_stds)]

# Deterministic transforms for evaluation: fixed resize + center crop, no
# random augmentation.
testing_transforms = [tr.Resize(size=final_image_size + 8),
                      tr.CenterCrop(size=final_image_size),
                      tr.ToTensor(),
                      tr.Normalize(mean=normalization_means, std=normalization_stds)]

# +
batch_size = 64

def make_data_loader(data_path: str, transforms: list, batch_size: int) -> DataLoader:
    """Build a shuffling DataLoader over the ImageFolder rooted at `data_path`."""
    image_transformation = tr.Compose(transforms)
    data_set = ImageFolder(root=data_path, transform=image_transformation)
    # Pinned host memory speeds up host->GPU transfers; only useful with CUDA.
    should_pin_memory = torch.cuda.is_available()
    loader = DataLoader(dataset=data_set, shuffle=True, pin_memory=should_pin_memory,
                        batch_size=batch_size)
    return loader


training_loader = make_data_loader("data/training", training_transforms, batch_size)
# FIX: the validation loader previously used `training_transforms`, i.e.
# random rotations and random crops.  Validation should measure performance
# on deterministic inputs, exactly like the test set, so it now uses
# `testing_transforms`.
validation_loader = make_data_loader("data/validation", testing_transforms, batch_size)
testing_loader = make_data_loader("data/testing", testing_transforms, batch_size)
# -

# ### 3. Build the Network
# According to comparisons (such as https://learnopencv.com/wp-content/uploads/2019/06/Pre-Trained-Model-Comparison.png),
# ResNet50 is a powerful image classification model that offers good balance between training time and accuracy. Torchvision offers models like ResNet50 already pre-trained on the ImageNet dataset. This dataset contains flowers among its many categories. This means that the pre-trained ResNet already knows how to recognize flower features. To use it in our problem we just need to replace its classification layers (the _fc_ module), so that they classify images as the categories we are interested in. After a short fine-tune training, this new model should be able to correctly classify our 17 flower categories. This technique is called transfer learning, as it "transfers" the knowledge acquired in one problem domain into another.
# ResNet50 pre-trained on ImageNet; its convolutional feature extractor is
# reused and only the final fully-connected classifier gets replaced below.
pre_trained_model = torchvision.models.resnet50(pretrained=True)
pre_trained_model

# Width of the feature vector entering ResNet's original `fc` layer; the
# replacement classifier must accept the same number of input features.
pre_trained_feature_count = pre_trained_model.fc.in_features
pre_trained_feature_count

''' This custom classifier will be used to replace the original ResNet's classifier
layers. This way, we can adapt the model to classify our 17 flower categories, while
still using its pre-trained feature extraction layers. The classifier will have three
fully-connected layers, as this number is often sufficient for good accuracy. '''
class CustomClassifier(nn.Module):
    """Three-layer fully-connected head: input -> hidden1 -> hidden2 -> categories."""

    def __init__(self, input_features: int, hidden1_size: int, hidden2_size: int,
                 output_categories: int, dropout: float) -> None:
        super().__init__()
        self.hidden1 = nn.Linear(input_features, hidden1_size)
        self.hidden2 = nn.Linear(hidden1_size, hidden2_size)
        self.output = nn.Linear(hidden2_size, output_categories)
        # The same dropout module is applied after both hidden layers.
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        # Returns raw (un-normalized) category scores; normalization is done
        # by the loss, or by softmax in calculate_accuracy, when needed.
        x = self.dropout(nn.functional.relu(self.hidden1(input_tensor)))
        x = self.dropout(nn.functional.relu(self.hidden2(x)))
        raw_category_scores = self.output(x)
        return raw_category_scores


# +
''' For this particular problem we disable training of ResNet's feature detection layers
altogether. Since they already should be able to handle flower images, there is little
point in training them further, and we can save a lot of compute by freezing these
layers. We will only train the new fully-connected classification layers. Layer sizes
for the classifier are chosen more or less arbitrarily, based on what worked in
previous similar tasks. '''
def disable_feature_detector_training(network: nn.Module) -> None:
    # Freeze every existing parameter; the classifier installed afterwards is
    # created fresh (requires_grad=True), so only it will be trained.
    for parameter in network.parameters():
        parameter.requires_grad = False


def prepare_network_for_transfer_learning(network: nn.Module) -> nn.Module:
    """Freeze the feature extractor, swap in the custom head, move to `device`."""
    disable_feature_detector_training(network)
    custom_classifier = CustomClassifier(pre_trained_feature_count, 1024, 512,
                                         category_count, dropout=0.05)
    network.fc = custom_classifier
    network = network.to(device)
    return network


network = prepare_network_for_transfer_learning(pre_trained_model)
# -

# ### 4. Train the Network
# The network is now ready to be trained. Since this is just a simple proof of concept, we will not use much logging of the process, nor perform a hyperparameter search. Hyperparameter values are based on solutions for similar tasks. For simplicity, we will also not save the model or make checkpoints.

def run_training_step(network: nn.Module, training_loader: DataLoader,
                      device: torch.device, optimizer, criterion) -> float:
    """Run one training epoch and return the average per-batch loss."""
    network.train()
    avg_loss = 0
    for images, labels in training_loader:
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        raw_output = network.forward(images)
        loss = criterion(input=raw_output, target=labels)
        loss.backward()
        optimizer.step()
        avg_loss += loss.item()
    avg_loss /= len(training_loader)
    return avg_loss


# +
def calculate_accuracy(raw_output: torch.Tensor, labels: torch.Tensor) -> float:
    """Return the batch-average fraction of correct top-1 predictions."""
    class_probabilities = nn.functional.softmax(raw_output, dim=1)
    # topk(..., k=1)[1] yields the index of the highest-probability class.
    predicted_classes = torch.topk(input=class_probabilities, k=1, dim=1)[1]
    resized_labels = labels.view(predicted_classes.shape[0], -1)
    prediction_matches = predicted_classes == resized_labels
    # NOTE(review): despite the `-> float` annotation this returns a 0-dim
    # tensor; callers only accumulate and print it, so that is harmless here.
    batch_average_accuracy = torch.mean(prediction_matches.type(torch.FloatTensor))
    return batch_average_accuracy


def run_evaluation_step(network: nn.Module, loader: DataLoader,
                        device: torch.device, criterion) -> (float, float):
    """Evaluate `network` on `loader`; return (average loss, average top-1 accuracy)."""
    network.eval()
    avg_loss = 0
    avg_accuracy = 0
    with torch.no_grad():  # inference only, no gradients needed
        for images, labels in loader:
            images = images.to(device)
            labels = labels.to(device)
            raw_output = network.forward(images)
            loss = criterion(input=raw_output, target=labels)
            accuracy = calculate_accuracy(raw_output, labels)
            avg_loss += loss
            avg_accuracy += accuracy
    avg_loss /= len(loader)
    avg_accuracy /= len(loader)
    return avg_loss, avg_accuracy
# -

def train_network(network: nn.Module, training_loader: DataLoader,
                  validation_loader: DataLoader, device: torch.device) -> None:
    """Fine-tune the classifier head, then run a final pass on the test set.

    Prints per-epoch training/validation metrics and, at the end, the test
    metrics.  NOTE(review): the final evaluation reads the module-level
    `testing_loader` rather than taking it as a parameter.
    """
    # Hyperparameters based on solutions for similar tasks; Adam is quite fast, good for prototyping; often needs low learning rate.
    # Criterion based on problem type (multiclass classification).
    learn_rate = 0.0003
    # Only the new head's parameters are optimized; the backbone is frozen.
    optimizer = optim.Adam(params=network.fc.parameters(), lr=learn_rate)
    criterion = nn.CrossEntropyLoss()
    epoch_count = 10
    start_time = time()
    for epoch in range(epoch_count):
        print(f"Epoch {epoch+1} / {epoch_count}")
        train_loss = run_training_step(network, training_loader, device, optimizer, criterion)
        print(f"Training loss: {train_loss}")
        valid_loss, valid_accuracy = run_evaluation_step(network, validation_loader, device, criterion)
        print(f"Validation loss: {valid_loss}")
        print(f"Validation accuracy: {valid_accuracy * 100:.2f}%\n")
    print(f"Total training duration: {(time() - start_time)/60:.2f} minutes")
    start_time = time()
    test_loss, test_accuracy = run_evaluation_step(network, testing_loader, device, criterion)
    print("\nTest Results")
    print(f"Test loss: {test_loss}")
    print(f"Test accuracy: {test_accuracy * 100:.2f}%")
    print(f"Test duration: {(time() - start_time)/60:.2f} minutes")


train_network(network, training_loader, validation_loader, device)

# ### 5. Performance Assessment
# The performance of the final model is satisfactory. After only around 2 minutes of training on a consumer-grade PC, it reaches validation top-1 accuracy of nearly 90%, which should be considered very good. The inference on the test set (136 images not seen during training) takes only around 1 second and results in ~80% average top-1 accuracy.
The test accuracy is markedly lower than that achieved for the validation set, but is still decent. The inference speed (let's assume ~10ms per image) is excellent, considering this is a completely un-optimized proof-of-concept model and there was no hyperparameter tuning.

# ### 6. Answers to Task Questions
# #### How would you share your findings with the client?
#
# I would prepare a short presentation describing the network’s performance in the context of the proposed application. Key use cases would be identified and analyzed, to see if the performance would be acceptable in these scenarios. I would also like to make a small live demo application using the prototype, such as a simple script running classification on some input images (e.g. manually collected from the Web) showing the model’s speed and accuracy.
#
# #### What would your comments be to a colleague building the app, regarding the model?
#
# I would describe the model’s computational and memory requirements for training and inference, as well as the properties of expected input and output data. We could discuss options for improving model computational performance or accuracy and other possible changes in its implementation.
#
# #### The amount of time you have spent on this task.
#
# I’ve spent roughly an entire day writing the code and documentation. A lot of time was consumed by implementing a clean solution to divide the data set into training, validation and test subsets. Some documentation reading was also necessary. All in all, I’d say it was about 8 hours of work time in total.
flower_exercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # # Working with a real world data-set using SQL and Python # # Estimated time needed: **30** minutes # # ## Objectives # # After completing this lab you will be able to: # # - Understand the dataset for Chicago Public School level performance # - Store the dataset in an Db2 database on IBM Cloud instance # - Retrieve metadata about tables and columns and query data from mixed case columns # - Solve example problems to practice your SQL skills including using built-in database functions # # ## Chicago Public Schools - Progress Report Cards (2011-2012) # # The city of Chicago released a dataset showing all school level performance data used to create School Report Cards for the 2011-2012 school year. The dataset is available from the Chicago Data Portal: [https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t](https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) # # This dataset includes a large number of metrics. 
Start by familiarizing yourself with the types of metrics in the database: [https://data.cityofchicago.org/api/assets/AAD41A13-BE8A-4E67-B1F5-86E711E09D5F?download=true](https://data.cityofchicago.org/api/assets/AAD41A13-BE8A-4E67-B1F5-86E711E09D5F?download=true&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) # # **NOTE**: # # Do not download the dataset directly from City of Chicago portal. Instead download a static copy which is a more database friendly version from this <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_edX/data/Chicago_Public_Schools_-_Progress_Report_Cards__2011-2012-v3.csv">link</a>. # # Now review some of its contents. # # ### Store the dataset in a Table # # In many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. To analyze the data using SQL, it first needs to be stored in the database. # # While it is easier to read the dataset into a Pandas dataframe and then PERSIST it into the database as we saw in the previous lab, it results in mapping to default datatypes which may not be optimal for SQL querying. For example a long textual field may map to a CLOB instead of a VARCHAR. # # Therefore, **it is highly recommended to manually load the table using the database console LOAD tool, as indicated in Week 2 Lab 1 Part II**. The only difference with that lab is that in Step 5 of the instructions you will need to click on create "(+) New Table" and specify the name of the table you want to create and then click "Next". # # ##### Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the CHICAGO PUBLIC SCHOOLS dataset and load the dataset into a new table called **SCHOOLS**. 
# # <a href="https://cognitiveclass.ai"><img src = "https://ibm.box.com/shared/static/uc4xjh1uxcc78ks1i18v668simioz4es.jpg"></a> # # ### Connect to the database # # Let us now load the ipython-sql extension and establish a connection with the database # # %load_ext sql # Enter the connection string for your Db2 on Cloud database instance below # # %sql ibm_db_sa://my-username:my-password@my-hostname:my-port/my-db-name # %sql ibm_db_sa:// # ### Query the database system catalog to retrieve table metadata # # ##### You can verify that the table creation was successful by retrieving the list of all tables in your schema and checking whether the SCHOOLS table was created # # + # type in your query to retrieve list of all tables in the database for your db2 schema (username) #In Db2 the system catalog table called SYSCAT.TABLES contains the table metadata # %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES where TABSCHEMA='YOUR-DB2-USERNAME' ''' or, you can retrieve list of all tables where the schema name is not one of the system created ones: %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES \ where TABSCHEMA not in ('SYSIBM', 'SYSCAT', 'SYSSTAT', 'SYSIBMADM', 'SYSTOOLS', 'SYSPUBLIC') or, just query for a specifc table that you want to verify exists in the database %sql select * from SYSCAT.TABLES where TABNAME = 'SCHOOLS' ''' # - # <details><summary>Click here for the solution</summary> # # ```python # #In Db2 the system catalog table called SYSCAT.TABLES contains the table metadata # # # %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES where TABSCHEMA='YOUR-DB2-USERNAME' # # or, you can retrieve list of all tables where the schema name is not one of the system created ones: # # # %sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES \ # # where TABSCHEMA not in ('SYSIBM', 'SYSCAT', 'SYSSTAT', 'SYSIBMADM', 'SYSTOOLS', 'SYSPUBLIC') # # or, just query for a specifc table that you want to verify exists in the database # # 
%sql select * from SYSCAT.TABLES where TABNAME = 'SCHOOLS'
# ```
#
# </details>
#
# ### Query the database system catalog to retrieve column metadata
#
# ##### The SCHOOLS table contains a large number of columns. How many columns does this table have?
#

# +
# type in your query to retrieve the number of columns in the SCHOOLS table

#In Db2 the system catalog table called SYSCAT.COLUMNS contains the column metadata
# %sql select count(*) from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS'
# -

# <details><summary>Click here for the solution</summary>
#
# ```python
# #In Db2 the system catalog table called SYSCAT.COLUMNS contains the column metadata
#
# # %sql select count(*) from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS'
#
# ```
#
# </details>
#
# Now retrieve the list of columns in SCHOOLS table and their column type (datatype) and length.
#

# +
# type in your query to retrieve all column names in the SCHOOLS table along with their datatypes and length

# %sql select COLNAME, TYPENAME, LENGTH from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS'

'''
or
%sql select distinct(NAME), COLTYPE, LENGTH from SYSIBM.SYSCOLUMNS where TBNAME = 'SCHOOLS'
'''
# -

# <details><summary>Click here for the solution</summary>
#
# ```python
# # %sql select COLNAME, TYPENAME, LENGTH from SYSCAT.COLUMNS where TABNAME = 'SCHOOLS'
#
# or
#
# # %sql select distinct(NAME), COLTYPE, LENGTH from SYSIBM.SYSCOLUMNS where TBNAME = 'SCHOOLS'
#
# ```
#
# </details>
#
# ### Questions
#
# 1. Is the column name for the "SCHOOL ID" attribute in upper or mixed case?
# 2. What is the name of "Community Area Name" column in your table? Does it have spaces?
# 3. Are there any columns in whose names the spaces and parentheses (round brackets) have been replaced by the underscore character "_"?
#
# ## Problems
#
# ### Problem 1
#
# ##### How many Elementary Schools are in the dataset?
#
# # + # %sql select count(*) from SCHOOLS where "Elementary, Middle, or High School" = 'ES' #Correct answer: 462 # - # <details><summary>Click here for the hint</summary> # # ```python # Which column specifies the school type e.g. 'ES', 'MS', 'HS'? # # Does the column name have mixed case, spaces or other special characters? # If so, ensure you use double quotes around the "Name of the Column" # # ``` # # </details> # # <details><summary>Click here for the solution</summary> # # ```python # # %sql select count(*) from SCHOOLS where "Elementary, Middle, or High School" = 'ES' # # Correct answer: 462 # # ``` # # </details> # # ### Problem 2 # # ##### What is the highest Safety Score? # # + jupyter={"outputs_hidden": true} # Use the MAX() function # %sql select MAX(Safety_Score) AS MAX_SAFETY_SCORE from SCHOOLS #Correct answer: 99 # - # <details><summary>Click here for the solution</summary> # # ```python # # Use the MAX() function # # # %sql select MAX(Safety_Score) AS MAX_SAFETY_SCORE from SCHOOLS # # Correct answer: 99 # # ``` # # </details> # # ### Problem 3 # # ##### Which schools have highest Safety Score? # # + jupyter={"outputs_hidden": true} # In the previous problem we found out that the highest Safety Score is 99, so we can use that as an input in the where clause: # %sql select Name_of_School, Safety_Score from SCHOOLS where Safety_Score = 99 ''' or, a better way: %sql select Name_of_School, Safety_Score from SCHOOLS where \ Safety_Score= (select MAX(Safety_Score) from SCHOOLS) ''' #Correct answer: several schools with with Safety Score of 99. 
# - # <details><summary>Click here for the solution</summary> # # ```python # # In the previous problem we found out that the highest Safety Score is 99, so we can use that as an input in the where clause: # # # %sql select Name_of_School, Safety_Score from SCHOOLS where Safety_Score = 99 # # or, a better way: # # # %sql select Name_of_School, Safety_Score from SCHOOLS where \ # # Safety_Score= (select MAX(Safety_Score) from SCHOOLS) # # # Correct answer: several schools with with Safety Score of 99. # # ``` # # </details> # # ### Problem 4 # # ##### What are the top 10 schools with the highest "Average Student Attendance"? # # + jupyter={"outputs_hidden": true} # %sql select Name_of_School, Average_Student_Attendance from SCHOOLS \ # order by Average_Student_Attendance desc nulls last limit 10 # - # <details><summary>Click here for the solution</summary> # # ```python # # %sql select Name_of_School, Average_Student_Attendance from SCHOOLS \ # # order by Average_Student_Attendance desc nulls last limit 10 # # ``` # # </details> # # ### Problem 5 # # ##### Retrieve the list of 5 Schools with the lowest Average Student Attendance sorted in ascending order based on attendance # # + jupyter={"outputs_hidden": true} # %sql SELECT Name_of_School, Average_Student_Attendance \ # from SCHOOLS \ # order by Average_Student_Attendance \ # fetch first 5 rows only # - # <details><summary>Click here for the solution</summary> # # ```python # # %sql SELECT Name_of_School, Average_Student_Attendance \ # # from SCHOOLS \ # # order by Average_Student_Attendance \ # # fetch first 5 rows only # # ``` # # </details> # # ### Problem 6 # # ##### Now remove the '%' sign from the above result set for Average Student Attendance column # # + jupyter={"outputs_hidden": true} #Use the REPLACE() function to replace '%' with '' #See documentation for this function at: https://www.ibm.com/support/knowledgecenter/en/SSEPGG_10.5.0/com.ibm.db2.luw.sql.ref.doc/doc/r0000843.html # %sql SELECT 
Name_of_School, REPLACE(Average_Student_Attendance, '%', '') \ # from SCHOOLS \ # order by Average_Student_Attendance \ # fetch first 5 rows only # - # <details><summary>Click here for the solution</summary> # # ```python # #Use the REPLACE() function to replace '%' with '' # #See documentation for this function at: # https://www.ibm.com/support/knowledgecenter/en/SSEPGG_10.5.0/com.ibm.db2.luw.sql.ref.doc/doc/r0000843.html # # # %sql SELECT Name_of_School, REPLACE(Average_Student_Attendance, '%', '') \ # # from SCHOOLS \ # # order by Average_Student_Attendance \ # # fetch first 5 rows only # # ``` # # </details> # # ### Problem 7 # # ##### Which Schools have Average Student Attendance lower than 70%? # # + jupyter={"outputs_hidden": true} # %sql SELECT Name_of_School, Average_Student_Attendance \ # from SCHOOLS \ # where CAST ( REPLACE(Average_Student_Attendance, '%', '') AS DOUBLE ) < 70 \ # order by Average_Student_Attendance ''' or, %sql SELECT Name_of_School, Average_Student_Attendance \ from SCHOOLS \ where DECIMAL ( REPLACE(Average_Student_Attendance, '%', '') ) < 70 \ order by Average_Student_Attendance ''' # - # <details><summary>Click here for the hint</summary> # # ```python # The datatype of the "Average_Student_Attendance" column is varchar. # So you cannot use it as is in the where clause for a numeric comparison. # First use the CAST() function to cast it as a DECIMAL or DOUBLE # e.g. 
CAST("Column_Name" as DOUBLE) # or simply: DECIMAL("Column_Name") # # Don't forget the '%' age sign needs to be removed before casting # # ``` # # </details> # # <details><summary>Click here for the solution</summary> # # ```python # # %sql SELECT Name_of_School, Average_Student_Attendance \ # # from SCHOOLS \ # # where CAST ( REPLACE(Average_Student_Attendance, '%', '') AS DOUBLE ) < 70 \ # # order by Average_Student_Attendance # # or, # # # %sql SELECT Name_of_School, Average_Student_Attendance \ # # from SCHOOLS \ # # where DECIMAL ( REPLACE(Average_Student_Attendance, '%', '') ) < 70 \ # # order by Average_Student_Attendance # # ``` # # </details> # # ### Problem 8 # # ##### Get the total College Enrollment for each Community Area # # + jupyter={"outputs_hidden": true} # %sql select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ # from SCHOOLS \ # group by Community_Area_Name # - # <details><summary>Click here for the hint</summary> # # ```python # Verify the exact name of the Enrollment column in the database # Use the SUM() function to add up the Enrollments for each Community Area # # Don't forget to group by the Community Area # # ``` # # </details> # # <details><summary>Click here for the solution</summary> # # ```python # # %sql select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ # # from SCHOOLS \ # # group by Community_Area_Name # # ``` # # </details> # # ### Problem 9 # # ##### Get the 5 Community Areas with the least total College Enrollment sorted in ascending order # # + jupyter={"outputs_hidden": true} # Order the previous query and limit the number of rows you fetch # %sql select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ # from SCHOOLS \ # group by Community_Area_Name \ # order by TOTAL_ENROLLMENT asc \ # fetch first 5 rows only # - # <details><summary>Click here for the solution</summary> # # ```python # # Order the previous query and limit the number of rows you fetch # # # %sql 
select Community_Area_Name, sum(College_Enrollment) AS TOTAL_ENROLLMENT \ # # from SCHOOLS \ # # group by Community_Area_Name \ # # order by TOTAL_ENROLLMENT asc \ # # fetch first 5 rows only # # ``` # # </details> # # ### Problem 10 # # ##### Get the hardship index for the community area which has College Enrollment of 4368 # # + # For this solution to work the CHICAGO_SOCIOECONOMIC_DATA table as created in the last lab of Week 3 should already exist # %%sql select hardship_index from chicago_socioeconomic_data CD, schools CPS where CD.ca = CPS.community_area_number and college_enrollment = 4368 # - # <details><summary>Click here for the solution</summary> # # ```python # # For this solution to work the CHICAGO_SOCIOECONOMIC_DATA table as created in the last lab of Week 3 should already exist # # # %%sql # select hardship_index # from chicago_socioeconomic_data CD, schools CPS # where CD.ca = CPS.community_area_number # and college_enrollment = 4368 # # ``` # # </details> # # ### Problem 11 # # ##### Get the hardship index for the community area which has the highest value for College Enrollment # # + # For this solution to work the CHICAGO_SOCIOECONOMIC_DATA table as created in the last lab of Week 3 should already exist # %sql select ca, community_area_name, hardship_index from chicago_socioeconomic_data \ # where ca in \ # ( select community_area_number from schools order by college_enrollment desc limit 1 ) # - # <details><summary>Click here for the solution</summary> # # ```python # # For this solution to work the CHICAGO_SOCIOECONOMIC_DATA table as created in the last lab of Week 3 should already exist # # # %sql select ca, community_area_name, hardship_index from chicago_socioeconomic_data \ # # where ca in \ # # ( select community_area_number from schools order by college_enrollment desc limit 1 ) # # # ``` # # </details> # # ## Summary # # ##### In this lab you learned how to work with a real word dataset using SQL and Python. 
You learned how to query columns with spaces or special characters in their names and with mixed case names. You also used built in database functions and practiced how to sort, limit, and order result sets, as well as used sub-queries and worked with multiple tables. # # ## Author # # <a href="https://www.linkedin.com/in/ravahuja/" target="_blank"><NAME></a> # # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ---------------------------------- | # | 2020-08-28 | 2.0 | Lavanya | Moved lab to course repo in GitLab | # # <hr> # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
IBM Professional Certificates/Databases and SQL for Data Science with Python/4-1-1-RealDataPractice.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Welcome! (Beta)

# Welcome to the Arviz resources repository. This repository is intended to be a collection of reading material, examples, and other educational material related to explaining the functionality and output of the ArviZ python package.

# ## What is Arviz

# <img src="https://raw.githubusercontent.com/arviz-devs/arviz/master/doc/logo/ArviZ.png" alt="Arviz" width="500"/>
#
# ArviZ is a Python package for exploratory analysis of Bayesian models. In particular it takes the various outputs from Bayesian computational libraries, such as stan, PyMC3, and others, and provides a common method to plot and understand the results.
#
# More information about how to use ArviZ can be found in the docs. https://arviz-devs.github.io/arviz/index.html

# ## Intended Audience

# While ArviZ strives to provide an easy way for Bayesian practitioners to create exploratory plots and statistics, the package and docs alone do not explain how to **use** the exploratory plots and statistics. This repository is aimed to fill that gap, and serve as a more in-depth explanation of all the functionality in ArviZ. Ideally it will be a handy reference for Bayesian practitioners that are using probabilistic programming but may be unfamiliar with the various methods of model criticism.
#
# For those interested in how to construct models in either PyMC3 or Pystan, both libraries' documentation includes numerous examples.
# * [PyMC3](https://docs.pymc.io/) # * [PyStan](https://pystan.readthedocs.io/en/latest/) # * [CmdStan](http://mc-stan.org/users/interfaces/cmdstan) # # If interested in Bayesian Statistical methods in general the following resources are recommended # * [Bayesian Analysis With Python](https://www.amazon.com/Bayesian-Analysis-Python-Introduction-probabilistic-ebook/dp/B07HHBCR9G) # * [Bayesian Methods for Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers) # * [Statistical Rethinking](https://www.amazon.com/Statistical-Rethinking-Bayesian-Examples-Chapman/dp/1482253445) #
archive/An_Introduction/Preface.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="copyright" # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="title" # # Vertex AI client library: AutoML text sentiment analysis model for batch prediction # # <table align="left"> # <td> # <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/automl/showcase_automl_text_sentiment_analysis_batch.ipynb"> # <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab # </a> # </td> # <td> # <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/community/gapic/automl/showcase_automl_text_sentiment_analysis_batch.ipynb"> # <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> # View on GitHub # </a> # </td> # </table> # <br/><br/><br/> # + [markdown] id="overview:automl" # ## Overview # # # This tutorial demonstrates how to use the Vertex AI Python client library to create text sentiment analysis models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/ai-platform-unified/docs/start/automl-users). 
# + [markdown] id="dataset:claritin,tst" # ### Dataset # # The dataset used for this tutorial is the [Crowdflower Claritin-Twitter dataset](https://data.world/crowdflower/claritin-twitter) from [data.world Datasets](https://data.world). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. # + [markdown] id="objective:automl,training,batch_prediction" # ### Objective # # In this tutorial, you create an AutoML text sentiment analysis model from a Python script, and then do a batch prediction using the Vertex AI client library. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console. # # The steps performed include: # # - Create a Vertex AI `Dataset` resource. # - Train the model. # - View the model evaluation. # - Make a batch prediction. # # There is one key difference between using batch prediction and using online prediction: # # * Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time. # # * Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready. # + [markdown] id="costs" # ### Costs # # This tutorial uses billable components of Google Cloud (GCP): # # * Vertex AI # * Cloud Storage # # Learn about [Vertex AI # pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage # pricing](https://cloud.google.com/storage/pricing), and use the [Pricing # Calculator](https://cloud.google.com/products/calculator/) # to generate a cost estimate based on your projected usage. # + [markdown] id="install_aip" # ## Installation # # Install the latest version of Vertex AI client library. # + id="install_aip" import sys if "google.colab" in sys.modules: USER_FLAG = "" else: USER_FLAG = "--user" # ! 
pip3 install -U google-cloud-aiplatform $USER_FLAG # + [markdown] id="install_storage" # Install the latest GA version of *google-cloud-storage* library as well. # + id="install_storage" # ! pip3 install -U google-cloud-storage $USER_FLAG # + [markdown] id="restart" # ### Restart the kernel # # Once you've installed the Vertex AI client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages. # + id="restart" import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) # + [markdown] id="before_you_begin" # ## Before you begin # # ### GPU runtime # # *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU** # # ### Set up your Google Cloud project # # **The following steps are required, regardless of your notebook environment.** # # 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs. # # 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project) # # 3. [Enable the Vertex AI APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component) # # 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Vertex AI Notebooks. # # 5. Enter your project ID in the cell below. Then run the cell to make sure the # Cloud SDK uses the right project for all the commands in this notebook. # # **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. 
# + id="set_project_id" PROJECT_ID = "[your-project-id]" # @param {type:"string"} # + id="autoset_project_id" if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) # + id="set_gcloud_project_id" # ! gcloud config set project $PROJECT_ID # + [markdown] id="region" # #### Region # # You can also change the `REGION` variable, which is used for operations # throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. # # - Americas: `us-central1` # - Europe: `europe-west4` # - Asia Pacific: `asia-east1` # # You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services. For the latest support per region, see the [Vertex AI locations documentation](https://cloud.google.com/ai-platform-unified/docs/general/locations) # + id="region" REGION = "us-central1" # @param {type: "string"} # + [markdown] id="timestamp" # #### Timestamp # # If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. # + id="timestamp" from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") # + [markdown] id="gcp_authenticate" # ### Authenticate your Google Cloud account # # **If you are using Vertex AI Notebooks**, your environment is already authenticated. Skip this step. # # **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. 
# # **Otherwise**, follow these steps: # # In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page. # # **Click Create service account**. # # In the **Service account name** field, enter a name, and click **Create**. # # In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**. # # Click Create. A JSON file that contains your key downloads to your local environment. # # Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. # + id="gcp_authenticate" import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on AI Platform, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): # %env GOOGLE_APPLICATION_CREDENTIALS '' # + [markdown] id="bucket:batch_prediction" # ### Create a Cloud Storage bucket # # **The following steps are required, regardless of your notebook environment.** # # This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket. 
# # Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. # + id="bucket" BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} # + id="autoset_bucket" if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP # + [markdown] id="create_bucket" # **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. # + id="create_bucket" # ! gsutil mb -l $REGION $BUCKET_NAME # + [markdown] id="validate_bucket" # Finally, validate access to your Cloud Storage bucket by examining its contents: # + id="validate_bucket" # ! gsutil ls -al $BUCKET_NAME # + [markdown] id="setup_vars" # ### Set up variables # # Next, set up some variables used throughout the tutorial. # ### Import libraries and define constants # + [markdown] id="import_aip" # #### Import Vertex AI client library # # Import the Vertex AI client library into our Python environment. # + id="import_aip" import os import sys import time import google.cloud.aiplatform_v1 as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value # + [markdown] id="aip_constants" # #### Vertex AI constants # # Setup up the following constants for Vertex AI: # # - `API_ENDPOINT`: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services. # - `PARENT`: The Vertex AI location root path for dataset, model, job, pipeline and endpoint resources. 
# + id="aip_constants" # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex AI location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION # + [markdown] id="automl_constants" # #### AutoML constants # # Set constants unique to AutoML datasets and training: # # - Dataset Schemas: Tells the `Dataset` resource service which type of dataset it is. # - Data Labeling (Annotations) Schemas: Tells the `Dataset` resource service how the data is labeled (annotated). # - Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for. # + id="automl_constants:tst" # Text Dataset type DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/text_1.0.0.yaml" # Text Labeling type LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_sentiment_io_format_1.0.0.yaml" # Text Training task TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_text_sentiment_1.0.0.yaml" # + [markdown] id="accelerators:prediction" # #### Hardware Accelerators # # Set the hardware accelerators (e.g., GPU), if any, for prediction. # # Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify: # # (aip.AcceleratorType.NVIDIA_TESLA_K80, 4) # # For GPU, available accelerators include: # - aip.AcceleratorType.NVIDIA_TESLA_K80 # - aip.AcceleratorType.NVIDIA_TESLA_P100 # - aip.AcceleratorType.NVIDIA_TESLA_P4 # - aip.AcceleratorType.NVIDIA_TESLA_T4 # - aip.AcceleratorType.NVIDIA_TESLA_V100 # # Otherwise specify `(None, None)` to use a container image to run on a CPU. 
# + id="accelerators:prediction" if os.getenv("IS_TESTING_DEPOLY_GPU"): DEPLOY_GPU, DEPLOY_NGPU = ( aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv("IS_TESTING_DEPOLY_GPU")), ) else: DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1) # + [markdown] id="container:automl" # #### Container (Docker) image # # For AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex AI prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected. # + [markdown] id="machine:prediction" # #### Machine Type # # Next, set the machine type to use for prediction. # # - Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction. # - `machine type` # - `n1-standard`: 3.75GB of memory per vCPU. # - `n1-highmem`: 6.5GB of memory per vCPU # - `n1-highcpu`: 0.9 GB of memory per vCPU # - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \] # # *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs* # + id="machine:prediction" if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) # + [markdown] id="tutorial_start:automl" # # Tutorial # # Now you are ready to start creating your own AutoML text sentiment analysis model. # + [markdown] id="clients:automl,batch_prediction" # ## Set up clients # # The Vertex AI client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex AI server. # # You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. # # - Dataset Service for `Dataset` resources. 
# - Model Service for `Model` resources.
# - Pipeline Service for training.
# - Job Service for batch prediction and custom training.

# + id="clients:automl,batch_prediction"
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}


def create_dataset_client():
    """Return a client for managing Vertex AI `Dataset` resources."""
    client = aip.DatasetServiceClient(client_options=client_options)
    return client


def create_model_client():
    """Return a client for managing Vertex AI `Model` resources."""
    client = aip.ModelServiceClient(client_options=client_options)
    return client


def create_pipeline_client():
    """Return a client for managing Vertex AI training pipelines."""
    client = aip.PipelineServiceClient(client_options=client_options)
    return client


def create_job_client():
    """Return a client for managing Vertex AI (batch prediction) jobs."""
    client = aip.JobServiceClient(client_options=client_options)
    return client


clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["job"] = create_job_client()

for client in clients.items():
    print(client)

# + [markdown] id="create_aip_dataset"
# ## Dataset
#
# Now that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.
#
# ### Create `Dataset` resource instance
#
# Use the helper function `create_dataset` to create the instance of a `Dataset` resource. This function does the following:
#
# 1. Uses the dataset client service.
# 2. Creates an Vertex AI `Dataset` resource (`aip.Dataset`), with the following parameters:
#  - `display_name`: The human-readable name you choose to give it.
#  - `metadata_schema_uri`: The schema for the dataset type.
# 3. Calls the client dataset service method `create_dataset`, with the following parameters:
#  - `parent`: The Vertex AI location root path for your `Database`, `Model` and `Endpoint` resources.
#  - `dataset`: The Vertex AI dataset object instance you created.
# 4. The method returns an `operation` object.
#
# An `operation` object is how Vertex AI handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning.
#
# You can use the `operation` object to get status on the operation (e.g., create `Dataset` resource) or to cancel the operation, by invoking an operation method:
#
# | Method | Description |
# | ----------- | ----------- |
# | result() | Waits for the operation to complete and returns a result object in JSON format. |
# | running() | Returns True/False on whether the operation is still running. |
# | done() | Returns True/False on whether the operation is completed. |
# | canceled() | Returns True/False on whether the operation was canceled. |
# | cancel() | Cancels the operation (this may take up to 30 seconds). |

# + id="create_aip_dataset"
TIMEOUT = 90


def create_dataset(name, schema, labels=None, timeout=TIMEOUT):
    """Create a Vertex AI `Dataset` resource and wait for the operation.

    Args:
        name: Human-readable display name for the dataset.
        schema: Metadata schema URI identifying the dataset type.
        labels: Optional dict of labels to attach to the dataset.
        timeout: Seconds to wait for the long-running create operation.

    Returns:
        The created `Dataset` resource, or None if the call failed.
    """
    start_time = time.time()
    try:
        dataset = aip.Dataset(
            display_name=name, metadata_schema_uri=schema, labels=labels
        )

        operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
        print("Long running operation:", operation.operation.name)
        # BUG FIX: honor the caller-supplied `timeout` argument; previously the
        # module-level TIMEOUT constant was always used and the parameter was ignored.
        result = operation.result(timeout=timeout)
        print("time:", time.time() - start_time)
        print("response")
        print(" name:", result.name)
        print(" display_name:", result.display_name)
        print(" metadata_schema_uri:", result.metadata_schema_uri)
        print(" metadata:", dict(result.metadata))
        print(" create_time:", result.create_time)
        print(" update_time:", result.update_time)
        print(" etag:", result.etag)
        print(" labels:", dict(result.labels))
        return result
    except Exception as e:
        print("exception:", e)
        return None


result = create_dataset("claritin-" + TIMESTAMP, DATA_SCHEMA)

# + [markdown] id="dataset_id:result"
# Now save the unique dataset identifier for the `Dataset` resource instance you created.
# + id="dataset_id:result" # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split("/")[-1] print(dataset_id) # + [markdown] id="data_preparation:text,u_dataset" # ### Data preparation # # The Vertex AI `Dataset` resource for text has a couple of requirements for your text data. # # - Text examples must be stored in a CSV or JSONL file. # + [markdown] id="data_import_format:tst,u_dataset,csv" # #### CSV # # For text sentiment analysis, the CSV file has a few requirements: # # - No heading. # - First column is the text example or Cloud Storage path to text file. # - Second column the label (i.e., sentiment). # - Third column is the maximum sentiment value. For example, if the range is 0 to 3, then the maximum value is 3. # + [markdown] id="import_file:u_dataset,csv" # #### Location of Cloud Storage training data. # # Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage. # + id="import_file:claritin,csv,tst" IMPORT_FILE = "gs://cloud-samples-data/language/claritin.csv" SENTIMENT_MAX = 4 # + [markdown] id="quick_peek:csv" # #### Quick peek at your data # # You will use a version of the Crowdflower Claritin-Twitter dataset that is stored in a public Cloud Storage bucket, using a CSV index file. # # Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows. # + id="quick_peek:csv" if "IMPORT_FILES" in globals(): FILE = IMPORT_FILES[0] else: FILE = IMPORT_FILE count = ! gsutil cat $FILE | wc -l print("Number of Examples", int(count[0])) print("First 10 rows") # ! gsutil cat $FILE | head # + [markdown] id="import_data" # ### Import data # # Now, import the data into your Vertex AI Dataset resource. Use this helper function `import_data` to import the data. The function does the following: # # - Uses the `Dataset` client. 
# - Calls the client method `import_data`, with the following parameters:
#  - `name`: The human readable name you give to the `Dataset` resource (e.g., claritin).
#  - `import_configs`: The import configuration.
#
# - `import_configs`: A Python list containing a dictionary, with the key/value entries:
#  - `gcs_sources`: A list of URIs to the paths of the one or more index files.
#  - `import_schema_uri`: The schema identifying the labeling type.
#
# The `import_data()` method returns a long running `operation` object. This will take a few minutes to complete. If you are in a live tutorial, this would be a good time to ask questions, or take a personal break.

# + id="import_data"
def import_data(dataset, gcs_sources, schema):
    """Import labeled data into a Vertex AI `Dataset` resource and wait.

    Args:
        dataset: Fully qualified identifier of the target `Dataset` resource.
        gcs_sources: List of Cloud Storage URIs of the import index file(s).
        schema: Import schema URI identifying the labeling type.

    Returns:
        The completed long-running operation, or None if the call failed.
    """
    config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
    # BUG FIX: use the `dataset` parameter instead of the module-level
    # `dataset_id` global, so the helper works for any dataset passed in.
    # (The one existing call passes `dataset_id`, so behavior is unchanged.)
    print("dataset:", dataset)
    start_time = time.time()
    try:
        operation = clients["dataset"].import_data(
            name=dataset, import_configs=config
        )
        print("Long running operation:", operation.operation.name)
        result = operation.result()
        print("result:", result)
        print("time:", int(time.time() - start_time), "secs")
        print("error:", operation.exception())
        print("meta :", operation.metadata)
        print(
            "after: running:",
            operation.running(),
            "done:",
            operation.done(),
            "cancelled:",
            operation.cancelled(),
        )
        return operation
    except Exception as e:
        print("exception:", e)
        return None


import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)

# + [markdown] id="train_automl_model"
# ## Train the model
#
# Now train an AutoML text sentiment analysis model using your Vertex AI `Dataset` resource. To train the model, do the following steps:
#
# 1. Create an Vertex AI training pipeline for the `Dataset` resource.
# 2. Execute the pipeline to start the training.

# + [markdown] id="create_pipeline:automl"
# ### Create a training pipeline
#
# You may ask, what do we use a pipeline for?
You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of: # # 1. Being reusable for subsequent training jobs. # 2. Can be containerized and ran as a batch job. # 3. Can be distributed. # 4. All the steps are associated with the same pipeline job for tracking progress. # # Use this helper function `create_pipeline`, which takes the following parameters: # # - `pipeline_name`: A human readable name for the pipeline job. # - `model_name`: A human readable name for the model. # - `dataset`: The Vertex AI fully qualified dataset identifier. # - `schema`: The dataset labeling (annotation) training schema. # - `task`: A dictionary describing the requirements for the training job. # # The helper function calls the `Pipeline` client service'smethod `create_pipeline`, which takes the following parameters: # # - `parent`: The Vertex AI location root path for your `Dataset`, `Model` and `Endpoint` resources. # - `training_pipeline`: the full specification for the pipeline training job. # # Let's look now deeper into the *minimal* requirements for constructing a `training_pipeline` specification: # # - `display_name`: A human readable name for the pipeline job. # - `training_task_definition`: The dataset labeling (annotation) training schema. # - `training_task_inputs`: A dictionary describing the requirements for the training job. # - `model_to_upload`: A human readable name for the model. # - `input_data_config`: The dataset specification. # - `dataset_id`: The Vertex AI dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier. # - `fraction_split`: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML. 
# + id="create_pipeline:automl"
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
    """Create (and start) a Vertex AI training pipeline.

    Args:
        pipeline_name: Human readable name for the pipeline job.
        model_name: Human readable name for the model to upload.
        dataset: Fully qualified Vertex AI dataset identifier.
        schema: Training task definition (schema URI).
        task: Training task inputs (protobuf Struct/Value).

    Returns:
        The created training pipeline object, or None if the call failed.
    """
    # Fixed 80/10/10 train/validation/test split over the dataset.
    split = {
        "training_fraction": 0.8,
        "validation_fraction": 0.1,
        "test_fraction": 0.1,
    }

    # The spec wants only the short (non-fully-qualified) dataset ID.
    pipeline_spec = {
        "display_name": pipeline_name,
        "training_task_definition": schema,
        "training_task_inputs": task,
        "input_data_config": {
            "dataset_id": dataset.split("/")[-1],
            "fraction_split": split,
        },
        "model_to_upload": {"display_name": model_name},
    }

    try:
        pipeline = clients["pipeline"].create_training_pipeline(
            parent=PARENT, training_pipeline=pipeline_spec
        )
        print(pipeline)
    except Exception as e:
        print("exception:", e)
        return None
    return pipeline


# + [markdown] id="task_requirements:automl,tst"
# ### Construct the task requirements
#
# Next, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the `task` field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the `json_format.ParseDict` method for the conversion.
#
# The minimal fields we need to specify are:
#
# - `sentiment_max`: The maximum value for the sentiment (e.g., 4).
#
# Finally, create the pipeline by calling the helper function `create_pipeline`, which returns an instance of a training pipeline object.

# + id="task_requirements:automl,tst"
PIPE_NAME = "claritin_pipe-" + TIMESTAMP
MODEL_NAME = "claritin_model-" + TIMESTAMP

task = json_format.ParseDict(
    {
        "sentiment_max": SENTIMENT_MAX,
    },
    Value(),
)

response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)

# + [markdown] id="pipeline_id:response"
# Now save the unique identifier of the training pipeline you created.
# + id="pipeline_id:response" # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) # + [markdown] id="get_training_pipeline" # ### Get information on a training pipeline # # Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter: # # - `name`: The Vertex AI fully qualified pipeline identifier. # # When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`. # + id="get_training_pipeline" def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) # + [markdown] id="wait_training_complete" # # Deployment # # Training the above model may take upwards of 180 minutes time. # # Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex AI Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`. 
# + id="wait_training_complete" while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) # + [markdown] id="model_information" # ## Model information # # Now that your model is trained, you can get some information on your model. # + [markdown] id="evaluate_the_model:automl" # ## Evaluate the Model resource # # Now find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model. # + [markdown] id="list_model_evaluations:automl,tst" # ### List evaluations for all slices # # Use this helper function `list_model_evaluations`, which takes the following parameter: # # - `name`: The Vertex AI fully qualified model identifier for the `Model` resource. # # This helper function uses the model client service's `list_model_evaluations` method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric. # # For each evaluation -- you probably only have one, we then print all the key names for each metric in the evaluation, and for a small set (`meanAbsoluteError` and `precision`) you will print the result. 
# + id="list_model_evaluations:automl,tst" def list_model_evaluations(name): response = clients["model"].list_model_evaluations(parent=name) for evaluation in response: print("model_evaluation") print(" name:", evaluation.name) print(" metrics_schema_uri:", evaluation.metrics_schema_uri) metrics = json_format.MessageToDict(evaluation._pb.metrics) for metric in metrics.keys(): print(metric) print("meanAbsoluteError", metrics["meanAbsoluteError"]) print("precision", metrics["precision"]) return evaluation.name last_evaluation = list_model_evaluations(model_to_deploy_id) # + [markdown] id="deploy:batch_prediction" # ## Model deployment for batch prediction # # Now deploy the trained Vertex AI `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for on-demand prediction. # # For online prediction, you: # # 1. Create an `Endpoint` resource for deploying the `Model` resource to. # # 2. Deploy the `Model` resource to the `Endpoint` resource. # # 3. Make online prediction requests to the `Endpoint` resource. # # For batch-prediction, you: # # 1. Create a batch prediction job. # # 2. The job service will provision resources for the batch prediction request. # # 3. The results of the batch prediction request are returned to the caller. # # 4. The job service will unprovision the resoures for the batch prediction request. # + [markdown] id="make_prediction" # ## Make a batch prediction request # # Now do a batch prediction to your deployed model. # + [markdown] id="get_test_items:batch_prediction" # ### Get test item(s) # # Now do a batch prediction to your Vertex AI model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction. # + id="get_test_items:automl,tst,csv" test_items = ! 
gsutil cat $IMPORT_FILE | head -n2 if len(test_items[0]) == 4: _, test_item_1, test_label_1, _ = str(test_items[0]).split(",") _, test_item_2, test_label_2, _ = str(test_items[1]).split(",") else: test_item_1, test_label_1, _ = str(test_items[0]).split(",") test_item_2, test_label_2, _ = str(test_items[1]).split(",") print(test_item_1, test_label_1) print(test_item_2, test_label_2) # + [markdown] id="make_batch_file:automl,text" # ### Make the batch input file # # Now make a batch input file, which you will store in your local Cloud Storage bucket. The batch input file can only be in JSONL format. For JSONL file, you make one dictionary entry per line for each data item (instance). The dictionary contains the key/value pairs: # # - `content`: The Cloud Storage path to the file with the text item. # - `mime_type`: The content type. In our example, it is an `text` file. # # For example: # # {'content': '[your-bucket]/file1.txt', 'mime_type': 'text'} # + id="make_batch_file:automl,text" import json import tensorflow as tf gcs_test_item_1 = BUCKET_NAME + "/test1.txt" with tf.io.gfile.GFile(gcs_test_item_1, "w") as f: f.write(test_item_1 + "\n") gcs_test_item_2 = BUCKET_NAME + "/test2.txt" with tf.io.gfile.GFile(gcs_test_item_2, "w") as f: f.write(test_item_2 + "\n") gcs_input_uri = BUCKET_NAME + "/test.jsonl" with tf.io.gfile.GFile(gcs_input_uri, "w") as f: data = {"content": gcs_test_item_1, "mime_type": "text/plain"} f.write(json.dumps(data) + "\n") data = {"content": gcs_test_item_2, "mime_type": "text/plain"} f.write(json.dumps(data) + "\n") print(gcs_input_uri) # ! gsutil cat $gcs_input_uri # + [markdown] id="instance_scaling" # ### Compute instance scaling # # You have several choices on scaling the compute instances for handling your batch prediction requests: # # - Single Instance: The batch prediction requests are processed on a single compute instance. # - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one. 
#
# - Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified.
# - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them.
#
# - Auto Scaling: The batch prediction requests are split across a scalable number of compute instances.
# - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
#
# The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.

# + id="instance_scaling"
MIN_NODES = 1
MAX_NODES = 1

# + [markdown] id="make_batch_request:automl,tst"
# ### Make batch prediction request
#
# Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:
#
# - `display_name`: The human readable name for the prediction job.
# - `model_name`: The Vertex AI fully qualified identifier for the `Model` resource.
# - `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.
# - `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.
# - `parameters`: Additional filtering parameters for serving prediction results.
#
# The helper function calls the job client service's `create_batch_prediction_job` method, with the following parameters:
#
# - `parent`: The Vertex AI location root path for Dataset, Model and Pipeline resources.
# - `batch_prediction_job`: The specification for the batch prediction job.
#
# Let's now dive into the specification for the `batch_prediction_job`:
#
# - `display_name`: The human readable name for the prediction batch job.
# - `model`: The Vertex AI fully qualified identifier for the `Model` resource.
# - `dedicated_resources`: The compute resources to provision for the batch prediction job.
# - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
# - `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
# - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
# - `model_parameters`: Additional filtering parameters for serving prediction results. *Note*, text models do not support additional parameters.
# - `input_config`: The input source and format type for the instances to predict.
# - `instances_format`: The format of the batch prediction request file: `jsonl` only supported.
# - `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.
# - `output_config`: The output destination and format for the predictions.
# - `prediction_format`: The format of the batch prediction response file: `jsonl` only supported.
# - `gcs_destination`: The output destination for the predictions.
#
# This call is an asynchronous operation. You will print from the response object a few select fields, including:
#
# - `name`: The Vertex AI fully qualified identifier assigned to the batch prediction job.
# - `display_name`: The human readable name for the prediction batch job.
# - `model`: The Vertex AI fully qualified identifier for the Model resource.
# - `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability).
# - `state`: The state of the prediction job (pending, running, etc).
#
# Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`.

# + id="make_batch_request:automl,tst"
BATCH_MODEL = "claritin_batch-" + TIMESTAMP


def create_batch_prediction_job(
    display_name,
    model_name,
    gcs_source_uri,
    gcs_destination_output_uri_prefix,
    parameters=None,
):
    """Create a batch prediction job and return the service response.

    display_name: human readable name for the prediction batch job.
    model_name: fully qualified Vertex AI Model resource identifier.
    gcs_source_uri: Cloud Storage path to the JSONL request file.
    gcs_destination_output_uri_prefix: Cloud Storage prefix for the results.
    parameters: optional model-serving parameters (text models take none).
    Returns the created BatchPredictionJob response object.
    """
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }

    batch_prediction_job = {
        "display_name": display_name,
        # Format: 'projects/{project}/locations/{location}/models/{model_id}'
        "model": model_name,
        "model_parameters": json_format.ParseDict(parameters, Value()),
        "input_config": {
            "instances_format": IN_FORMAT,
            "gcs_source": {"uris": [gcs_source_uri]},
        },
        "output_config": {
            "predictions_format": OUT_FORMAT,
            "gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
        },
        "dedicated_resources": {
            "machine_spec": machine_spec,
            "starting_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
        },
    }
    response = clients["job"].create_batch_prediction_job(
        parent=PARENT, batch_prediction_job=batch_prediction_job
    )
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    # Not all data types support explanations; narrowed from a bare `except:`
    # so SystemExit/KeyboardInterrupt are no longer swallowed.
    try:
        print(" generate_explanation:", response.generate_explanation)
    except Exception:
        pass
    print(" state:", response.state)
    print(" create_time:", response.create_time)
    print(" start_time:", response.start_time)
    print(" end_time:", response.end_time)
    print(" update_time:", response.update_time)
    print(" labels:", response.labels)
    return response


IN_FORMAT = "jsonl"
OUT_FORMAT = "jsonl"  # [jsonl]

response = create_batch_prediction_job(
    BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME, None
)

# + [markdown] id="batch_job_id:response"
# Now get the unique identifier for
the batch prediction job you created. # + id="batch_job_id:response" # The full unique ID for the batch job batch_job_id = response.name # The short numeric ID for the batch job batch_job_short_id = batch_job_id.split("/")[-1] print(batch_job_id) # + [markdown] id="get_batch_prediction_job" # ### Get information on a batch prediction job # # Use this helper function `get_batch_prediction_job`, with the following paramter: # # - `job_name`: The Vertex AI fully qualified identifier for the batch prediction job. # # The helper function calls the job client service's `get_batch_prediction_job` method, with the following paramter: # # - `name`: The Vertex AI fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex AI fully qualified identifier for your batch prediction job -- `batch_job_id` # # The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`. # + id="get_batch_prediction_job" def get_batch_prediction_job(job_name, silent=False): response = clients["job"].get_batch_prediction_job(name=job_name) if silent: return response.output_config.gcs_destination.output_uri_prefix, response.state print("response") print(" name:", response.name) print(" display_name:", response.display_name) print(" model:", response.model) try: # not all data types support explanations print(" generate_explanation:", response.generate_explanation) except: pass print(" state:", response.state) print(" error:", response.error) gcs_destination = response.output_config.gcs_destination print(" gcs_destination") print(" output_uri_prefix:", gcs_destination.output_uri_prefix) return gcs_destination.output_uri_prefix, response.state predictions, state = get_batch_prediction_job(batch_job_id) # + [markdown] id="get_the_predictions:automl,tst" # ### Get the predictions # # When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`. 
# # Finally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a JSONL format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `predictions*.jsonl`. # # Now display (cat) the contents. You will see multiple JSON objects, one for each prediction. # # The first field `text_snippet` is the text file you did the prediction on, and the second field `annotations` is the prediction, which is further broken down into: # # - `sentiment`: The predicted sentiment level. # + id="get_the_predictions:automl,text" def get_latest_predictions(gcs_out_dir): """ Get the latest prediction subfolder using the timestamp in the subfolder name""" # folders = !gsutil ls $gcs_out_dir latest = "" for folder in folders: subfolder = folder.split("/")[-2] if subfolder.startswith("prediction-"): if subfolder > latest: latest = folder[:-1] return latest while True: predictions, state = get_batch_prediction_job(batch_job_id, True) if state != aip.JobState.JOB_STATE_SUCCEEDED: print("The job has not completed:", state) if state == aip.JobState.JOB_STATE_FAILED: raise Exception("Batch Job Failed") else: folder = get_latest_predictions(predictions) # ! gsutil ls $folder/prediction*.jsonl # ! gsutil cat $folder/prediction*.jsonl break time.sleep(60) # + [markdown] id="cleanup" # # Cleaning up # # To clean up all GCP resources used in this project, you can [delete the GCP # project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial. 
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket

# + id="cleanup"
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True

# Each deletion below is best-effort: it is wrapped in try/except and guarded
# by a `"..." in globals()` check so that a resource that was never created
# (or a failed delete) does not stop the remaining cleanup steps.

# Delete the dataset using the Vertex AI fully qualified identifier for the dataset
try:
    if delete_dataset and "dataset_id" in globals():
        clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
    print(e)

# Delete the training pipeline using the Vertex AI fully qualified identifier for the pipeline
try:
    if delete_pipeline and "pipeline_id" in globals():
        clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
    print(e)

# Delete the model using the Vertex AI fully qualified identifier for the model
try:
    if delete_model and "model_to_deploy_id" in globals():
        clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
    print(e)

# Delete the endpoint using the Vertex AI fully qualified identifier for the endpoint
try:
    if delete_endpoint and "endpoint_id" in globals():
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)

# Delete the batch job using the Vertex AI fully qualified identifier for the batch job
try:
    if delete_batchjob and "batch_job_id" in globals():
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)

# Delete the custom job using the Vertex AI fully qualified identifier for the custom job
try:
    if delete_customjob and "job_id" in globals():
        clients["job"].delete_custom_job(name=job_id)
except Exception as e:
    print(e)

# Delete the hyperparameter tuning job using the Vertex AI fully qualified identifier for the hyperparameter tuning job
try:
    if delete_hptjob and "hpt_job_id" in globals():
        clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
    print(e)

if delete_bucket and "BUCKET_NAME" in globals():
    # ! gsutil rm -r $BUCKET_NAME
ai-platform-unified/notebooks/unofficial/gapic/automl/showcase_automl_text_sentiment_analysis_batch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/gndede/python/blob/main/Poisson_Regression_model_223.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="OlIgC_WSNJ-I" import pandas as pd from patsy import dmatrices import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt #Create a pandas DataFrame for the counts data set. df = pd.read_csv('/content/nyc_bb_bicyclist_counts.csv', header=0, infer_datetime_format=True, parse_dates=[0], index_col=[0]) #Add a few derived regression variables. ds = df.index.to_series() df['MONTH'] = ds.dt.month df['DAY_OF_WEEK'] = ds.dt.dayofweek df['DAY'] = ds.dt.day #Create the training and testing data sets. mask = np.random.rand(len(df)) < 0.8 df_train = df[mask] df_test = df[~mask] print('Training data set length='+str(len(df_train))) print('Testing data set length='+str(len(df_test))) #Setup the regression expression in patsy notation. We are telling patsy that BB_COUNT is our dependent variable and # it depends on the regression variables: DAY, DAY_OF_WEEK, MONTH, HIGH_T, LOW_T and PRECIP. expr = """BB_COUNT ~ DAY + DAY_OF_WEEK + MONTH + HIGH_T + LOW_T + PRECIP""" #Set up the X and y matrices y_train, X_train = dmatrices(expr, df_train, return_type='dataframe') y_test, X_test = dmatrices(expr, df_test, return_type='dataframe') #Using the statsmodels GLM class, train the Poisson regression model on the training data set. poisson_training_results = sm.GLM(y_train, X_train, family=sm.families.Poisson()).fit() #Print the training summary. print(poisson_training_results.summary()) #Make some predictions on the test data set. 
poisson_predictions = poisson_training_results.get_prediction(X_test) #.summary_frame() returns a pandas DataFrame predictions_summary_frame = poisson_predictions.summary_frame() print(predictions_summary_frame) predicted_counts=predictions_summary_frame['mean'] actual_counts = y_test['BB_COUNT'] #Mlot the predicted counts versus the actual counts for the test data. fig = plt.figure() fig.suptitle('Predicted versus actual bicyclist counts on the Brooklyn bridge') predicted, = plt.plot(X_test.index, predicted_counts, 'go-', label='Predicted counts') actual, = plt.plot(X_test.index, actual_counts, 'ro-', label='Actual counts') plt.legend(handles=[predicted, actual]) plt.show() #Show scatter plot of Actual versus Predicted counts plt.clf() fig = plt.figure() fig.suptitle('Scatter plot of Actual versus Predicted counts') plt.scatter(x=predicted_counts, y=actual_counts, marker='.') plt.xlabel('Predicted counts') plt.ylabel('Actual counts') plt.show()
Poisson_Regression_model_223.ipynb
# # Hyperparameter tuning by grid-search # # In the previous notebook, we saw that hyperparameters can affect the # generalization performance of a model. In this notebook, we will show how to # optimize hyperparameters using a grid-search approach. # ## Our predictive model # # Let us reload the dataset as we did previously: # + from sklearn import set_config set_config(display="diagram") # + import pandas as pd adult_census = pd.read_csv("../datasets/adult-census.csv") # - # We extract the column containing the target. target_name = "class" target = adult_census[target_name] target # We drop from our data the target and the `"education-num"` column which # duplicates the information from the `"education"` column. data = adult_census.drop(columns=[target_name, "education-num"]) data.head() # Once the dataset is loaded, we split it into a training and testing sets. # + from sklearn.model_selection import train_test_split data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=42) # - # We will define a pipeline as seen in the first module. It will handle both # numerical and categorical features. # # The first step is to select all the categorical columns. # + from sklearn.compose import make_column_selector as selector categorical_columns_selector = selector(dtype_include=object) categorical_columns = categorical_columns_selector(data) # - # Here we will use a tree-based model as a classifier # (i.e. `HistGradientBoostingClassifier`). That means: # # * Numerical variables don't need scaling; # * Categorical variables can be dealt with an `OrdinalEncoder` even if the # coding order is not meaningful; # * For tree-based models, the `OrdinalEncoder` avoids having high-dimensional # representations. # # We now build our `OrdinalEncoder` by passing it the known categories. 
# + from sklearn.preprocessing import OrdinalEncoder categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1) # - # We then use a `ColumnTransformer` to select the categorical columns and # apply the `OrdinalEncoder` to them. # + from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer([ ('cat_preprocessor', categorical_preprocessor, categorical_columns)], remainder='passthrough', sparse_threshold=0) # - # Finally, we use a tree-based classifier (i.e. histogram gradient-boosting) to # predict whether or not a person earns more than 50 k$ a year. # + from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.pipeline import Pipeline model = Pipeline([ ("preprocessor", preprocessor), ("classifier", HistGradientBoostingClassifier(random_state=42, max_leaf_nodes=4))]) model # - # ## Tuning using a grid-search # # In the previous exercise we used one `for` loop for each hyperparameter to find the # best combination over a fixed grid of values. `GridSearchCV` is a scikit-learn class # that implements a very similar logic with less repetitive code. # # Let's see how to use the `GridSearchCV` estimator for doing such search. # Since the grid-search will be costly, we will only explore the combination # learning-rate and the maximum number of nodes. # + # %%time from sklearn.model_selection import GridSearchCV param_grid = { 'classifier__learning_rate': (0.01, 0.1, 1, 10), 'classifier__max_leaf_nodes': (3, 10, 30)} model_grid_search = GridSearchCV(model, param_grid=param_grid, n_jobs=2, cv=2) model_grid_search.fit(data_train, target_train) # - # Finally, we will check the accuracy of our model using the test set. 
accuracy = model_grid_search.score(data_test, target_test)
print(
    f"The test accuracy score of the grid-searched pipeline is: "
    f"{accuracy:.2f}"
)

# <div class="admonition warning alert alert-danger">
# <p class="first admonition-title" style="font-weight: bold;">Warning</p>
# <p>Be aware that the evaluation should normally be performed through
# cross-validation by providing <tt class="docutils literal">model_grid_search</tt> as a model to the
# <tt class="docutils literal">cross_validate</tt> function.</p>
# <p class="last">Here, we used a single train-test split to evaluate <tt class="docutils literal">model_grid_search</tt>.
# In a future notebook we will go into more detail about nested cross-validation,
# when you use cross-validation both for hyperparameter tuning and model
# evaluation.</p>
# </div>

# The `GridSearchCV` estimator takes a `param_grid` parameter which defines
# all hyperparameters and their associated values. The grid-search will be in
# charge of creating all possible combinations and test them.
#
# The number of combinations will be equal to the product of the
# number of values to explore for each parameter (e.g. in our example 4 x 3
# combinations). Thus, adding new parameters with their associated values to be
# explored becomes rapidly computationally expensive.
#
# Once the grid-search is fitted, it can be used as any other predictor by
# calling `predict` and `predict_proba`. Internally, it will use the model with
# the best parameters found during `fit`.
#
# Get predictions for the 5 first samples using the estimator with the best
# parameters.

model_grid_search.predict(data_test.iloc[0:5])

# You can learn more about these parameters by looking at the `best_params_`
# attribute.
print(f"The best set of parameters is: " f"{model_grid_search.best_params_}") # The accuracy and the best parameters of the grid-searched pipeline are # similar to the ones we found in the previous exercise, where we searched the # best parameters "by hand" through a double for loop. # # In addition, we can inspect all results which are stored in the attribute # `cv_results_` of the grid-search. We will filter some specific columns # from these results. cv_results = pd.DataFrame(model_grid_search.cv_results_).sort_values( "mean_test_score", ascending=False) cv_results.head() # Let us focus on the most interesting columns and shorten the parameter # names to remove the `"param_classifier__"` prefix for readability: # get the parameter names column_results = [f"param_{name}" for name in param_grid.keys()] column_results += [ "mean_test_score", "std_test_score", "rank_test_score"] cv_results = cv_results[column_results] # + def shorten_param(param_name): if "__" in param_name: return param_name.rsplit("__", 1)[1] return param_name cv_results = cv_results.rename(shorten_param, axis=1) cv_results # - # With only 2 parameters, we might want to visualize the grid-search as a # heatmap. We need to transform our `cv_results` into a dataframe where: # # - the rows will correspond to the learning-rate values; # - the columns will correspond to the maximum number of leaf; # - the content of the dataframe will be the mean test scores. # + pivoted_cv_results = cv_results.pivot_table( values="mean_test_score", index=["learning_rate"], columns=["max_leaf_nodes"]) pivoted_cv_results # - # We can use a heatmap representation to show the above dataframe visually. 
# +
import seaborn as sns

ax = sns.heatmap(pivoted_cv_results, annot=True, cmap="YlGnBu", vmin=0.7,
                 vmax=0.9)
ax.invert_yaxis()
# -

# The above table highlights the following things:
#
# * for too high values of `learning_rate`, the generalization performance of the
#   model is degraded and adjusting the value of `max_leaf_nodes` cannot fix
#   that problem;
# * outside of this pathological region, we observe that the optimal choice
#   of `max_leaf_nodes` depends on the value of `learning_rate`;
# * in particular, we observe a "diagonal" of good models with an accuracy
#   close to the maximal of 0.87: when the value of `max_leaf_nodes` is
#   increased, one should decrease the value of `learning_rate` accordingly
#   to preserve a good accuracy.
#
# The precise meaning of those two parameters will be explained later.
#
# For now we will note that, in general, **there is no unique optimal parameter
# setting**: 4 models out of the 12 parameter configurations reach the maximal
# accuracy (up to small random fluctuations caused by the sampling of the
# training set).

# In this notebook we have seen:
#
# * how to optimize the hyperparameters of a predictive model via a
#   grid-search;
# * that searching for more than two hyperparameters is too costly;
# * that a grid-search does not necessarily find an optimal solution.
notebooks/parameter_tuning_grid_search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # MNIST digits classification with TensorFlow # - # <img src="images/mnist_sample.png" style="width:30%"> # + tags=[] import numpy as np from sklearn.metrics import accuracy_score from matplotlib import pyplot as plt # %matplotlib inline # import tensorflow as tf import tensorflow.compat.v1 as tf tf.disable_v2_behavior() tf.reset_default_graph() print("We're using TF", tf.__version__) import sys sys.path.append("../..") import grading import matplotlib_utils from importlib import reload reload(matplotlib_utils) import grading_utils reload(grading_utils) import keras_utils # from keras_utils import reset_tf_session # - # # Fill in your Coursera token and email # To successfully submit your answers to our grader, please fill in your Coursera submission token and email grader = grading.Grader(assignment_key="XtD7ho3TEeiHQBLWejjYAA", all_parts=["9XaAS", "vmogZ", "RMv95", "i8bgs", "rE763"]) # token expires every 30 min COURSERA_TOKEN = "### YOUR TOKEN HERE ###" COURSERA_EMAIL = "### YOUR EMAIL HERE ###" # # Look at the data # # In this task we have 50000 28x28 images of digits from 0 to 9. # We will train a classifier on this data. 
# + tags=[] import preprocessed_mnist X_train, y_train, X_val, y_val, X_test, y_test = preprocessed_mnist.load_dataset() # + tags=[] # X contains rgb values divided by 255 print("X_train [shape %s] sample patch:\n" % (str(X_train.shape)), X_train[1, 15:20, 5:10]) print("A closeup of a sample patch:") plt.imshow(X_train[1, 15:20, 5:10], cmap="Greys") plt.show() print("And the whole sample:") plt.imshow(X_train[1], cmap="Greys") plt.show() print("y_train [shape %s] 10 samples:\n" % (str(y_train.shape)), y_train[:10]) # - # # Linear model # # Your task is to train a linear classifier $\vec{x} \rightarrow y$ with SGD using TensorFlow. # # You will need to calculate a logit (a linear transformation) $z_k$ for each class: # $$z_k = \vec{x} \cdot \vec{w_k} + b_k \quad k = 0..9$$ # # And transform logits $z_k$ to valid probabilities $p_k$ with softmax: # $$p_k = \frac{e^{z_k}}{\sum_{i=0}^{9}{e^{z_i}}} \quad k = 0..9$$ # # We will use a cross-entropy loss to train our multi-class classifier: # $$\text{cross-entropy}(y, p) = -\sum_{k=0}^{9}{\log(p_k)[y = k]}$$ # # where # $$ # [x]=\begin{cases} # 1, \quad \text{if $x$ is true} \\ # 0, \quad \text{otherwise} # \end{cases} # $$ # # Cross-entropy minimization pushes $p_k$ close to 1 when $y = k$, which is what we want. # # Here's the plan: # * Flatten the images (28x28 -> 784) with `X_train.reshape((X_train.shape[0], -1))` to simplify our linear model implementation # * Use a matrix placeholder for flattened `X_train` # * Convert `y_train` to one-hot encoded vectors that are needed for cross-entropy # * Use a shared variable `W` for all weights (a column $\vec{w_k}$ per class) and `b` for all biases. 
# * Aim for ~0.93 validation accuracy # + tags=[] X_train_flat = X_train.reshape((X_train.shape[0], -1)) print(X_train_flat.shape) X_val_flat = X_val.reshape((X_val.shape[0], -1)) print(X_val_flat.shape) # + tags=[] import keras y_train_oh = keras.utils.to_categorical(y_train, 10) y_val_oh = keras.utils.to_categorical(y_val, 10) print(y_train_oh.shape) print(y_train_oh[:3], y_train[:3]) # + # run this again if you remake your graph # s = reset_tf_session() # For new version tf.keras.backend.clear_session() sess = tf.Session() # + # Model parameters: W and b ### MY CODE HERE ### W = tf.get_variable("W", shape=(784,10), dtype = tf.float32, initializer = tf.glorot_uniform_initializer()) b = tf.get_variable("b", shape=(10,), dtype = tf.float32, initializer = tf.glorot_uniform_initializer()) # + # Placeholders for the input data ### MY CODE HERE ### input_X = tf.placeholder(tf.float32, shape=[None, 784]) input_y = tf.placeholder(tf.float32, shape=[None, 10]) # + tags=[] # Compute predictions ### MY CODE HERE ### logits = tf.matmul(input_X, W)+b ### shape should be [input_X.shape[0], 10] probas = tf.nn.softmax(logits) ### apply tf.nn.softmax to logits classes = tf.argmax(probas, 1) ### apply tf.argmax to find a class index with highest probability # Loss should be a scalar number: average loss over all the objects with tf.reduce_mean(). # Use tf.nn.softmax_cross_entropy_with_logits on top of one-hot encoded input_y and logits. # It is identical to calculating cross-entropy on top of probas, but is more numerically friendly (read the docs). 
# logits_h = keras.utils.to_categorical(logits, 10) # input_yh = keras.utils.to_categorical(input_y) loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_y)) ### cross-entropy loss # Use a default tf.train.AdamOptimizer to get an SGD step step = tf.train.AdamOptimizer().minimize(loss) ### optimizer step that minimizes the loss # + sess.run(tf.global_variables_initializer()) BATCH_SIZE = 512 EPOCHS = 40 # for logging the progress right here in Jupyter (for those who don't have TensorBoard) simpleTrainingCurves = matplotlib_utils.SimpleTrainingCurves("cross-entropy", "accuracy") for epoch in range(EPOCHS): # we finish an epoch when we've looked at all training samples batch_losses = [] for batch_start in range(0, X_train_flat.shape[0], BATCH_SIZE): # data is already shuffled _, batch_loss = sess.run([step, loss], {input_X: X_train_flat[batch_start:batch_start+BATCH_SIZE], input_y: y_train_oh[batch_start:batch_start+BATCH_SIZE]}) # collect batch losses, this is almost free as we need a forward pass for backprop anyway batch_losses.append(batch_loss) train_loss = np.mean(batch_losses) val_loss = sess.run(loss, {input_X: X_val_flat, input_y: y_val_oh}) # this part is usually small train_accuracy = accuracy_score(y_train, sess.run(classes, {input_X: X_train_flat})) # this is slow and usually skipped valid_accuracy = accuracy_score(y_val, sess.run(classes, {input_X: X_val_flat})) simpleTrainingCurves.add(train_loss, val_loss, train_accuracy, valid_accuracy) # - # # Submit a linear model ## GRADED PART, DO NOT CHANGE! 
# Submit the linear model's tensor shapes, validation loss and accuracy.
# Testing shapes
grader.set_answer("9XaAS", grading_utils.get_tensors_shapes_string([W, b, input_X, input_y, logits, probas, classes]))

# Validation loss
grader.set_answer("vmogZ", sess.run(loss, {input_X: X_val_flat, input_y: y_val_oh}))

# Validation accuracy
grader.set_answer("RMv95", accuracy_score(y_val, sess.run(classes, {input_X: X_val_flat})))

# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)

# # MLP with hidden layers

# Previously we've coded a dense layer with matrix multiplication by hand.
# But this is not convenient, you have to create a lot of variables and your code becomes a mess.
# In TensorFlow there's an easier way to make a dense layer:
# ```python
# hidden1 = tf.layers.dense(inputs, 256, activation=tf.nn.sigmoid)
# ```
#
# That will create all the necessary variables automatically.
# Here you can also choose an activation function (remember that we need it for a hidden layer!).
#
# Now define the MLP with 2 hidden layers and restart training with the cell above.
#
# You're aiming for ~0.97 validation accuracy here.

# + tags=[]
# write the code here to get a new `step` operation and then run the cell with training loop above.
# name your variables in the same way (e.g. logits, probas, classes, etc) for safety.
### MY CODE HERE ###
# Build a fresh graph and session, then define an MLP with 2 sigmoid hidden
# layers of 256 units each. The variables are named exactly like in the
# linear model (input_X, input_y, logits, probas, classes, loss, step), so
# the training-loop cell above and the grading cells below work unchanged.
tf.keras.backend.clear_session()
sess = tf.Session()

input_X = tf.placeholder(tf.float32, shape=[None, 784])
input_y = tf.placeholder(tf.float32, shape=[None, 10])

hidden1 = tf.layers.dense(input_X, 256, activation=tf.nn.sigmoid)
hidden2 = tf.layers.dense(hidden1, 256, activation=tf.nn.sigmoid)
logits = tf.layers.dense(hidden2, 10)  # raw class scores, shape [batch, 10]

probas = tf.nn.softmax(logits)
classes = tf.argmax(probas, 1)

# Same numerically-stable cross-entropy objective and Adam step as before;
# variables are initialized by the training cell's global_variables_initializer().
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_y))
step = tf.train.AdamOptimizer().minimize(loss)
# -

# # Submit the MLP with 2 hidden layers
# Run these cells after training the MLP with 2 hidden layers

## GRADED PART, DO NOT CHANGE!
# Validation loss for MLP
grader.set_answer("i8bgs", sess.run(loss, {input_X: X_val_flat, input_y: y_val_oh}))
# Validation accuracy for MLP
grader.set_answer("rE763", accuracy_score(y_val, sess.run(classes, {input_X: X_val_flat})))

# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)

sess.close()
intro-to-dl/week2/v2/digits_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: meuambvirt # --- # # Introdução ao Python # ##### Curso de Extensão - IFG - Jun/19 # <img src="img/abertura.jpg"> # Este curso é dirigido aos alunos do IFG, dos ensinos médio/técnico e do superior (Engenharias). O objetivo principal é apresentar a Linguagem de Programação Python em nível introdutório. Essa linguagem é bastante eclética, permitindo ao usuário o desenvolvimento de aplicativos de toda sorte, desde o processamento de dados científicos até aplicações comerciais, para rodar no desktop, tablet ou smartphone, envolvendo banco de dados, servidores Web ou não, manipulando grande massa de dados (big data). # # Goiânia, Jun/2019 # <p><NAME></p> # <hr style="color: #0000FF"> # <h4 id="historico">Histórico</h4> # ## O QUE É PYTHON? # É uma linguagem de programação de alto nível, interpretada, interativa, versátil, de código aberto e legível aos seres humanos, orientada a objetos/imperativa/funcional/estruturada e de uso geral. Possui um sistema de tipificação dinâmica de variáveis, gerenciamento automático de memória e uma biblioteca padrão abrangente. Como outras linguagens dinâmicas, o Python é frequentemente usado como uma linguagem de **script**, mas também pode ser compilado em programas executáveis. # # Nos exemplos a seguir, a entrada e a saída de comandos/respostas são diferenciadas pela presença ou ausência de sinal de prontidão (_prompt_ `>>>` ou `...`), aguardando comando do usuário: para reproduzir o exemplo, você deve digitar os comandos após o _prompt_ `>>>`. As linhas que não começam com _prompt_ são geradas pelo interpretador Python, ou seja, são as respostas aos comandos do usuário. 
Observe que um _prompt_ secundário sozinho na linha, num dado exemplo, significa que você deve digitar uma linha em branco; isso é usado para encerrar um comando de várias linhas. # # Muitos dos exemplos nesta apostila, mesmo aqueles inseridos no modo interativo (digitados no _prompt_), incluem comentários. Comentários em Python começam com o caractere _hash_ **#**, e se estendem até o final da linha física. Um comentário pode aparecer no início de uma linha ou após um espaço em branco ou comando da linguagem, mas não dentro de uma _string_. Um caractere _hash_ dentro de uma _string_ é apenas mais um caracter da _string_. Como os comentários são usados no esclarecimento do código e não são interpretados pelo Python, eles podem ser omitidos ao se digitar os exemplos. # ## HISTÓRICO <a id="historico"></a> # A linguagem Python começou a ser desenvolvida por __<NAME>__ em 1989 (lançada oficialmente em 1999) na _Centrum Wiskunde & Informatica (CWI)_, Holanda, como sucessora da **linguagem ABC** (inspirada na SETL) que era capaz de lidar com exceções e interagir com o sistema operacional Amoeba que ele estava ajudando a desenvolver. # # SETL é uma Linguagem de programação de "altíssimo nível", baseada na matemática de conjuntos. Foi originalmente desenvolvida por **<NAME>** no _Courant Institute of Mathematical Sciences_ na NYU no fim dos anos 1960. Lambert Meertens passou um ano com o grupo SETL na NYU antes de finalizar o projeto da __linguagem ABC__. # # Guido era fã do grupo humorístico __Monty Python__, criador do programa de comédia _Monty Python's Flying Circus_ na televisão inglesa BBC (1969-1974). Ele quis homenagear o grupo dando o nome __Python__ à linguagem. # Então, a denominação da linguagem não é devida à serpente Python, embora o ícone utilizado para representação sejam duas cobras estilizadas. 
# # <img src="img/icone.png" width=250> # # ### Linha do Tempo das Versões dos Interpretadores: # # <img src="img/linhatempo.png" width=700> # # Fonte: http://www.trytoprogram.com/python-programming/history-of-python/ # ## FILOSOFIA # A filosofia central da linguagem Python inclui os seguintes preceitos: # - Bonito é melhor que feio. # - Explícito é melhor que implícito. # - Simples é melhor que complexo. # - Complexo é melhor que complicado. # - Legibilidade é importante. # # ## CARACTERÍSTICAS DA LINGUAGEM PYTHON # Em vez de ter todas as suas funcionalidades incorporadas no seu núcleo, o Python foi projetado para ser facilmente extensível. Essa modularidade compacta foi o principal motivo da **linguagem Python** ter se tornado tão popular, pois pode-se adicionar interfaces programáveis a aplicativos existentes. # # A intenção de Rossum era projetar uma linguagem com um pequeno núcleo, uma grande biblioteca padrão e um interpretador, e de quebra que pudesse ser facilmente extensível... Essas características foram resultado de suas frustrações com a **linguagem ABC**, que adotava uma abordagem oposta. # # ### PRINCIPAIS CARACTERÍSTICAS # 1. **Legível e Interpretada**: Python é uma linguagem muito legível e cada instrução é traduzida individualmente e executada antes da instrução seguinte. # 2. **Fácil de aprender**: Aprender Python é fácil por ela ser uma linguagem expressiva e de alto nível. # 3. **Multiplataforma**: está disponível para execução em vários sistemas operacionais, tais como: Mac OS, MS-Windows, Linux, Unix, Oracle Solaris etc. # 4. **Open Source**: Python é uma linguagem de programação de código aberto. # 5. **Biblioteca Padrão Grande**: a ling. Python vem com uma grande biblioteca padrão com códigos e funções úteis que podem ser usados enquanto se escreve código em Python. # 6. **Gratuita**: a ling. Python é gratuita para download e uso. # 7. 
**Manipulação de Exceção**: Uma exceção é um evento que pode ocorrer durante a execução do programa e que interrompe o fluxo normal do programa. A ling. Python permite o tratamento de exceções, o que significa que podemos escrever códigos menos propenso a erros e testar vários cenários que possam provocar uma exceção mais tarde. # 8. **Recursos Avançados**: geradores e abrangência de lista (_list comprehension_). Veremos esses recursos mais tarde. # 9. **Gerenciamento Automático de Memória**: a memória é limpa e liberada automaticamente. Você não precisa se preocupar em liberar memória em seus códigos. # # ## Computação Científica # # ### Necessidades dos Cientistas # - Obter dados (simulação, controle de experimentos) # - Manipular e processar dados. # - Visualizar resultados... para entender o que se está fazendo! # - Comunicar resultados: produzir figuras para relatórios ou publicações, escrever apresentações. # ### Soluções Existentes # #### Linguagens Compiladas: C, C++, Fortran etc. # # - Vantagens: # -- Muito rápidas. Compiladores muito otimizados. Para cálculos pesados, é difícil superar a performance dessas linguagens. # -- Bibliotecas científicas muito otimizadas foram escritas para essas linguagens. Exemplos: BLAS (operações vetoriais/matriciais). # - Desvantagens: # -- Uso dolorido: sem interatividade durante o desenvolvimento, passos obrigatórios de compilação, sintaxe com muitos símbolos (&, ::, }}, ; etc.), gerenciamento manual de memória (cheio de truques em C). Essas linguagens são difíceis para quem não é cientista da computação. # # #### Linguagens Interpretadas: Matlab # # - Vantagens: # - Coleção de bibliotecas bastante rica com numerosos algoritmos, para muitos diferentes domínios. Execução rápida porque essas bibliotecas são normalmente escritas numa linguagem compilada. # - Ambiente de desenvolvimento agradável: ajuda completa e bem organizada, editor integrado, facilidades de depuração etc. # - Apoio comercial e técnico disponíveis. 
#     - Desvantagens:
#         - A linguagem base é bastante pobre e pode se tornar restritiva para usuários avançados.
#         - Não gratuita.
#
# #### Outras linguagens de *script*: Scilab, Octave, Igor, R, IDL etc.
#
#     - Vantagens:
#         - Fonte aberta (*open-source*), gratuita, ou pelo menos mais barata que o Matlab.
#         - Alguns recursos podem ser bem avançados (estatísticas em R, figuras em Igor, etc.)
#     - Desvantagens:
#         - Menos algoritmos disponíveis que em Matlab, e a linguagem não é mais avançada.
#         - Alguns softwares são mais dedicados a um domínio. Ex: Gnuplot ou xmgrace para desenho de curvas. Estes programas são muito poderosos, mas eles são restritos a um único tipo de aplicação, tal como traçado gráfico (*plotting*).
#
# #### Python
#
#     - Vantagens:
#         - Bibliotecas para computação científica muito ricas (porém, um pouco menos que Matlab)
#         - Linguagem bem pensada, permite a escrita de códigos bem estruturados e muito legíveis: "codificamos o que pensamos".
#         - Muitas bibliotecas para outras tarefas além da computação científica (gerenciamento de servidor Web, acesso à porta serial etc.)
#         - Software gratuito e *open-source*, amplamente difundido, com uma comunidade vibrante.
#     - Desvantagens:
#         - Ambiente de desenvolvimento menos agradável que, por exemplo, o do Matlab. (Mais orientado ao público Geek).
#         - Não possui todos os algoritmos que podem ser encontrados em softwares mais especializados ou *toolboxes*.

# ### Python Científico - Blocos de Construção

# Diferentemente do Matlab, Scilab ou R, Python não vem com um conjunto de módulos para computação científica. A seguir são mostrados alguns blocos de construção que podem ser combinados para se obter um ambiente de computação científica:
#
# - **Python**, uma linguagem de computação genérica e moderna.
#     - Ling. Python: tipos de dados (string, int), controle de fluxo, coleções de dados (listas, dicionários), padrões etc.
# - Biblioteca padrão # - Um grande número de módulos especializados ou aplicações escritas em Python: protocolos Web, estruturas (frameworks) Web etc. e computação científica. # - Ferramentas de desenvolvimento (testes automáticos, geração de documentação) # # # - **IPython**, um *shell* Python (interface de comando de linha) avançado (http://ipython.scipy.org/moin/) # <img src="img/snapshot_ipython.png" alt="img/snapshot_ipython.png" class="align-right" style="width: 700.0px; height: 400.0px;"> # # # - **Numpy**: fornece objetos arranjos numéricos (vetor, matriz) poderosos , e rotinas para manipulá-los (http://www.numpy.org/) # # # - **Scipy**: rotinas de processamento de dados em alto nível. Otimização, regressão, interpolação etc. (http://www.scipy.org/) # # # - **Matplotlib**: visualização em 2D, gráficos "pronto-para-publicação" (http://matplotlib.sourceforge.net/) # <img src="img/random_c.jpg" alt="img/random_c.jpg" class="align-right" style="width: 340.0px; height: 210.0px;"> # # # - **Mayavi**: visualização em 3D (http://code.enthought.com/projects/mayavi/) # <img src="img/superficie_irregular.jpg" style="width: 340px; height: 210.0px;"> # ## Introdução à Linguagem Python # Trata-se de uma linguagem de programação de alto nível, interpretada, interativa, versátil, de código aberto e legível aos seres humanos, orientada a objetos/imperativa/funcional/estruturada e de uso geral. Possui um sistema de tipificação dinâmica de variáveis, gerenciamento automático de memória e uma biblioteca padrão bastante abrangente. Como outras linguagens dinâmicas, o Python é frequentemente usado na elaboração de *scripts* (interpretados) mas também pode ser compilado e montado em programas executáveis. # # Nos exemplos a seguir, a entrada (comandos) e a saída (respostas) são diferenciadas pela presença ou ausência de sinais de prontidão do interpretador. 
# - **Interpretador Python** (padrão): os *prompts* são `>>>` ou `...`, dependendo do contexto da entrada do comando do usuário. Para reproduzir o exemplo, você deve digitar o texto que aparece após o ***prompt*** do interpretador, `>>>`. As linhas que não começam com ***prompt*** são as respostas aos comandos digitados, e são geradas pelo interpretador. Observe que um ***prompt*** secundário `...` sozinho numa linha de um exemplo significa que você deve digitar uma linha em branco, isto é, ele é usado para encerrar um comando de várias linhas. # - **Interpretador IPython** (interativo): o *prompt* de entrada de comando é `In [5]:` e a saída de dados, quando houver, aparece na linha seguinte com ou sem indicação de _prompt_ `Out[5]`. # # Muitos exemplos desta apostila, mesmo aqueles inseridos no modo interativo (digitados no prompt), incluem comentários. Comentários em Python começam com o caractere ***hash*** '#', e se estendem até o final da linha física. Um comentário pode aparecer no início de uma linha ou após um espaço em branco depois do código, mas não dentro de uma *string*. Um caracter ***hash*** dentro de uma *string* é apenas mais um caracter da *string*. Como os comentários são usados apenas para esclarecer o código e não são interpretados pelo Python, eles podem ser omitidos ao se digitar os exemplos. # Exemplo de script Python: cria duas variáveis, uma do tipo 'int' e outra do tipo 'string' porta = 1 # porta aberta texto = "Deixe seu comentario em #comPythonehmaisfacil" # Inicialmente usaremos um ambiente interativo chamado **Jupyter Notebook** (acessível via *browser*), e posteriormente usaremos um IDE com editor, interpretador e outros utilitários usuais nas outras linguagens. Essa apostila foi elaborada a partir dessa tecnologia, **Jupyter Notebook**. 
# # Esse ambiente permite a apresentação simultânea de textos formatados com linguagem de marcação (tipo HTML) e de linhas de códigos Python para ser executado dinamicamente, e a característica mais importante é que o usuário pode alterar o código de acordo com a sua imaginação e verificar o resultado, tudo isso sem abandonar o navegador (*browser*). # ### Modo Interativo # O interpretador pode ser usado como uma simples calculadora: você pode digitar uma **expressão aritmética** e pressionar [*Enter*] para ver o valor calculado. A sintaxe da **expressão** é direta: os operadores artiméticos (<tt>+, -, *</tt> e <tt>/</tt>) funcionam como na maioria das outras linguagens (Pascal, Java ou C). Os parênteses, '(' e ')', são usados para alterar a hierarquia de resolução dos operadores. # # Nesse curso estamos utilizando um recurso conhecido como _`Jupyter Notebook`_ (caderno Jupyter) para apresentação e execução de textos, imagens, tabelas e códigos exemplos desse material didático. O projeto **Jupyter** visa fornecer um conjunto consistente de ferramentas para fluxos de trabalho de computação interativa em várias linguagens de programação. Os projetos Jupyter já são bem populares em todas as etapas de um projeto de pesquisa, desde a fase de exploração até a comunicação de resultados e ensino. # # <img src="img/jupyter.png" width=300> # # > O principal projeto Jupyter, o **Notebook**, e sua versão modernizada, o **JupyterLab** são aplicativos da web que permitem a criação de documentos, incluindo texto, código executável e visualizações interativas.<p></p> # > O _kernel_ é a parte do _backend_ responsável pela execução do código escrito pelo usuário no aplicativo da web. Por exemplo, no caso de um Jupyter Notebook, a execução do código normalmente é feita pelo **ipykernel**, a implementação de referência. 
# # Os __cadernos Jupyter__ são compostos de células de três tipos possíveis: Markdown (linguagem de marcação), Código interpretável (geralmente em Python, mas pode ser em outras tantas linguagens) e Crua (_Raw_, conteúdo da célula não é traduzido numa conversão via NBConvert). As células de código são identificadas por um _prompt_ que exibe a sequência de execução dos códigos entre colchetes, enquanto que as demais não possuem _prompt_. As células Markdown são convertidas em _rich text_ no __modo comando__ (bordas azuis quando a célula está selecionada) e as células cruas não sofrem alteração naquele modo... O __modo de edição__ (bordas verdes quando a célula está selecionada) permite ao usuário criar ou alterar o conteúdo das células. # # <img src="img/modos.png" width=500 height=400> # Se você estiver interessado em seguir um tutorial de qualidade sobre a **linguagem Markdown** então clique [aqui](http://www.markdowntutorial.com). # ### Usando o Caderno Jupyter: # # A célula seguinte é uma célula de código Python (veja o kernel selecionado, no canto superior direito dessa página, que executará os comandos desse caderno): 2 + 22 50 - 5*6 (50 - 5*6)/2 # + print(8/5) # divisão com operandos inteiros, retorna resultado inteiro (vs.2.7) print(8./5) # divisão com pelo menos um operando real, retorna resultado real (vs.2.7) # - 5**2 # 5 ao quadrado # Mostrando uma equação matemática numa célula de código from IPython.display import display, Math display(Math(r'\sqrt{a^2 + b^2}')) # Assim como na maioria das outras linguagens de programação, o sinal de igualdade '=' também é usado no Python para atribuir um valor a uma variável. Depois de uma atribuição nenhum resultado é exibido automaticamente: a = 10. 
b = 5 * 9 area = a * b print(area,'m2') print(type(a), type(b), type(area)) b = "IFG - Mai/19 " print(b) print(b + b) print(b * 4) # Por ser uma linguagem de tipificação dinâmica de variáveis, no Python não existe declaração de variáveis, o interpretador assume o tipo de dado mais adequado ao se fazer uma atribuição à variável. Além disso, o tipo da variável pode mudar, no sentido que em determinado instante ela pode ter um valor de um certo tipo, e noutro instante, ela pode ter outro valor de um diferente tipo do anterior: como visto para a var. <tt>b</tt> nos dois trechos de códigos anteriores. # # Se uma variável não estiver **"definida"** (atribuída a algum valor), ao se tentar usá-la um erro será reportado pelo interpretador. # # Exemplo: n # tentativa de acessar uma variável não definida # Na versão 2.7 a saída de dados é feita pelo comando <tt>print</tt>, enquanto que na versão 3.4, pela função <tt>print()</tt>. print("Oi Pessoal!","\nTudo bem?",40,22) # #### Palavras Chaves - Python 2.7 # <img src="img/palavraschave.png" width=400 height=150><center>Fonte: https://www.programiz.com/python-programming/keyword-list</center> # #### Palavras Chaves - Python 3.7 # <img src="img/palavraschave3.7.png"><center>Fonte: https://www.turbopython.com/2019/06/keywords-and-identifiers-in-python.html</center> # # Além de `False, None` e `True`, todas as outras 32 palavras-chave são escritas em minúsculas. Você deve usá-las como definidas, porque o **Python** faz distinção entre maiúsculas e minúsculas (_case-sensitive_). # # Para obter a lista de palavras-chave no Python, siga os seguintes passos: # 1. Abra um interpretador Python (_Python Shell_). # 2. Importe o módulo de palavra-chave usando `import keyword`. # 3. Mostre a lista de palavras-chave usando `keyword.kwlist`. # ou # 4. Execute o comando `help("keywords")`. # # Palavras-chave acrescentadas pela versão 3.7 em relação à versão 3.6: `async` e `await`. 
# # Se precisar de ajuda sobre as novas palavras-chave da versão 3.7 ou qual outra palavra-chave, digite: <p>`help("palavra-chave de interesse")`. help("async") help("await") from keyword import kwlist # pacote de palavras-chave (keyword) print kwlist # keywords list Vs. 2.7.9 len(kwlist) import keyword # pacote de palavras-chave (keyword) print(keyword.kwlist) # keywords list Vs. 3.7.2 len(keyword.kwlist) # <hr> # # ### Preparação do Ambiente # Para rodar cadernos Jupyter (*Jupyter Notebooks*) é necessário ter instalado o interpretador Python, pois embora o **Jupyter** execute códigos em diversas linguagens de programação, o Python é pré-requisito (Python 3.3 ou superior, ou Python 2.7) para instalar o próprio **Jupyter Notebook**. # # ### 1. Instalando o Interpretador Python Oficial # Você pode instalar o interpretador Python OFICIAL em “qualquer” Sistema Operacional..., tais como Windows, Mac OS X, Linux/Unix, Solaris e outros. Para fazer a instalação do Python em seu Sistema Operacional, siga os seguintes passos: # 1. Acesse www.python.org/downloads/ para baixar o interpretador. # # <img src="img/winp1.png"> # # ![image.png](img/winp2.png) # # ![image.png](img/winp3.png) # # 2. Este é o site oficial da linguagem Python. A página web detectará o sistema operacional instalado no seu computador, e recomendará a versão adequada a ser baixada. Como estou usando o Windows-64 no meu notebook, foram dadas as opções de download para Python-2 e Python-3 para Windows. Neste curso usaremos a versão 3.7 da ling. Python, portanto recomendo que você baixe a versão mais recente (à época da escrita desse texto era a versão Python 3.7.2 - ver figura anterior). # 3. As etapas de instalação são bem simples. Você só precisa escolher o diretório para instalação e clicar para avançar nas próximas etapas: botão \[Next >\]. # ![image.png](img/winp4.png) # ### 2. 
Instalando o Jupyter via Anaconda

# É recomendável instalar o Python e o [Jupyter](https://jupyter.org/install) usando o **Anaconda Distribution**, que inclui o Python, o Jupyter Notebook e outros pacotes comumente usados na computação científica e ciência de dados.
# 1. Baixe o Anaconda. Recomendamos o _download_ da versão mais recente do Python 3 do Anaconda.
# 2. Instale a versão do Anaconda descarregada, seguindo as instruções...
# Pronto, o Jupyter Notebook já está instalado! Para executar um _notebook_ digite o seguinte comando no terminal (Mac/Linux) ou no _prompt_ de comando (Windows):
#
# `jupyter notebook`
#
# ### 3. Instalando o Jupyter com pip
# Como usuário experiente em Python, você pode instalar o Jupyter usando o gerenciador de pacotes do Python, `pip`, em vez do Anaconda. Se você tem o Python 3 instalado (recomendado):
#
# `python3 -m pip install --upgrade pip
# python3 -m pip install jupyter`
#
# Se você tem o Python 2 instalado:
#
# `python -m pip install --upgrade pip
# python -m pip install jupyter`
#
# ### 4. "Instalando" o Jupyter via WinPython
# O WinPython é uma distribuição __portátil__ de código aberto da linguagem de programação Python para Windows XP/7/8, projetada para cientistas, suportando versões de 32 e 64 bits do Python 2 e Python 3. Desde setembro de 2014, o desenvolvimento do projeto mudou para https://winpython.github.io/
#
# Características do WinPython:
# - Projetado para usuários científicos regulares: processamento de dados interativo e visualização usando Python com Spyder
# - Projetado para usuários científicos avançados e desenvolvedores de software: desenvolvimento de aplicativos Python com Spyder, controle de versão com o Mercurial e outras ferramentas de desenvolvimento (como gettext, etc.)
# - Portátil: pré-configurado, ele deve rodar sem problemas em qualquer máquina Windows (sem qualquer requisito) e a pasta que contém o WinPython pode ser movida para qualquer dispositivo (HD local, rede ou unidade removível) com a maioria das configurações do aplicativo # - Flexível: pode-se usar quantas versões do WinPython forem necessárias (como ambientes isolados e auto-consistentes), mesmo que elas executem versões diferentes do Python (2.7, 3.x) ou arquiteturas diferentes (32 ou 64 bits) na mesma máquina # - Personalizável: O gerenciador de pacotes integrado (wppm - WinPython Package Manager) ajuda a instalar, desinstalar ou atualizar pacotes Python. Como o WPPM pode não suportar alguns pacotes, também é possível instalar ou atualizar pacotes usando "easy_install" ou "pip" no prompt de comando do WinPython. Um arquivo de configuração permite definir variáveis de ambiente em tempo de execução. # # #### Passos: # - Download: acesse https://sourceforge.net/projects/winpython/ e clique o botão verde <Download>. Aguarde a finalização do download da versão mais recente do WinPython... # - Descompactação: clique sobre o arquivo baixado e escolha o diretório em que será descompactado o arquivo-bolha do WinPython, e aguarde o final... e clique no botão <Finish>. # - Execução: clique no ícone do programa Jupyter Notebook.exe para abrir um caderno, ou clique no ícone do programa Spyder.exe para abrir o IDE. # # |<img alt="ícone spyder" src="img/spyder.png" width=200> | ;) |<img alt="ícone notebook jupyter" src="img/jupyter_notebook.png" width=200>| # | :-: | :-: | :-: | # %lsmagic pip # ## Mais Núcleos... # O Jupyter Notebook pode ter suas células de código (atalho de teclado para conversão: Y) executadas por diversas linguagens, tais como Python2, Python3, Octave etc... # # Supondo que vc instalou o Jupyter Notebook com o **WinPython** provavelmente o Python3 deve ter sido instalado automaticamente. Se vc quiser instalar o interpretador Python2 faça: # 1. 
Baixe a última versão 2.7 do Python no site oficial (www.python.org) - à época da elaboração desse documento, a última versão era a [2.7.16](https://www.python.org/downloads/release/python-2716/), de 04/03/2019. Escolha o arquivo de instalação específico para o Sistema Operacional da sua máquina. Para o MS-Windows baixe o arquivo `Windows x86-64 MSI installer`.
# 2. Instale o arquivo descarregado.
# 3. Acesse o diretório `Scripts` da versão instalada por vc... (Ex.: `E:\python\python-2.7.16\Scripts`) e execute o seguinte comando: `pip install ipykernel`.
# 4. Acesse o diretório `Lib/Site-packages` da versão instalada por vc... (Ex.: `E:\python\python-2.7.16\Lib\site-packages`) e execute o seguinte comando: `..\..\python ipykernel install --user`. O resultado deve ser:
# `Installed kernelspec python2 in C:\Users\usuario\AppData\Roaming\jupyter\kernels\python2`.
#
# Supondo que vc instalou o Jupyter Notebook com o **Anaconda** provavelmente o Python3 deve ter sido instalado automaticamente. Se vc quiser instalar o interpretador Python2 faça:
# 1. Baixe a última versão 2.7 do Python no site oficial (www.python.org) - à época da elaboração desse documento, a última versão era a [2.7.16](https://www.python.org/downloads/release/python-2716/), de 04/03/2019. Escolha o arquivo de instalação específico para o Sistema Operacional da sua máquina. Para o MS-Windows baixe o arquivo `Windows x86-64 MSI installer`.
# 2. Instale o arquivo descarregado.
# 3. Execute os seguintes comandos:

# conda create -n ipykernel_py2 python=2 ipykernel
# source activate ipykernel_py2    # no Windows, remova a palavra 'source'
# python -m ipykernel install --user

# ## Linguagem Markdown para Cadernos Jupyter
#
# #### <font color=red>Cabeçalhos</font>
# Use caracteres '#' seguidos por espaço em branco para criar títulos de cadernos e cabeçalhos de seção.
Exemplos: # + active="" # # <font color=green>Títulos # ## <font color=green>Cabeçalho de Seção # ### <font color=green>Subcabeçalhos # #### <font color=green>Subtítulos de nível</font> # - # # <font color=green>Títulos # ## <font color=green>Cabeçalho de Seção # ### <font color=green>Subcabeçalhos # #### <font color=green>Subtítulos de nível</font> # # #### <font color=red>Ênfases em Texto</font> # Para __negrito__, use este código: \_\_string\_\_ ou \*\*string\*\* # Para _Itálico_, use este código: \_string\_ ou \*string\* # Para ~~tachado~~, use este código: \~\~string\~\~ # Para <tt>fonte monoespaçada</tt>, use a tag: \< tt\>string\< \/tt\> # # #### <font color=red>Símbolos matemáticos</font> # + active="" # Use este código: $ símbolos\;matemáticos: \theta \sqrt2 $ # - # $ símbolos\;matemáticos: \theta \sqrt2 $ # `Fonte monoespaçada e cor de fundo diferente`, coloque o texto entre crases: \`string\` # Use monoespaçamento para o caminho do arquivo, nomes de arquivos, entradas de texto do usuário e comandos/funções. # #### <font color=red>Quebras de linha</font> # Às vezes, a marcação não faz quebras de linha quando você as deseja. Use 2 espaços ou a tag \<br\> para uma quebra de linha manual. # # #### <font color=red>Escape para os seguintes caracteres com a barra invertida</font>: # |Caracter| Descrição| <tt>Sequência de Caracteres</tt> | # |:---:|:---:|:---| # |\\| contra barra (barra inversa)| <tt>\\\\</tt> | # |\`| crase | <tt>\\\`</tt>| # |\*| asterisco | \\\* | # |\_ | sublinhado | \\_ | # |\{ \} | chaves | \\{ \\} | # |\[ \] | colchetes | \\[ \\] # |\( \) | parênteses | \\( \\) # |\# | sinal hash | \\# # |\+ | sinal mais | \\+ # |\- | sinal menos (hífen) | \\- # |\. | ponto | \\. # |\! | sinal de exclamação | \\! 
# | $ | cifrão | \\ \$ # #### <font color=red>Cores</font> # + active="" # Use este código: <font color=blue|red|green|pink|yellow> Texto </font> # - # Nem todo código _markdown_ funciona dentro de uma _tag_ de fonte, portanto, revise seu texto colorido com cuidado! # # #### <font color=red>Recuo para citação</font> # Use um sinal maior que (\>) e, em seguida, um espaço e digite o texto. O texto é recuado e tem uma linha horizontal cinza à esquerda até o retorno do próximo carro. # + active="" # > Exemplo de recuo: # - # > Exemplo de recuo # #### <font color=red>Marcadores</font> # Use o sinal de menos (-) com um espaço depois, ou um espaço, um traço e um espaço, para criar um marcador circular. Para criar um sub-marcador, use uma tabulação seguida de um traço e um espaço. Você também pode usar um asterisco em vez de um traço. # + active="" # - Marcador # - Sub-marcador # - # - Marcador # - Sub-marcador # #### <font color=red>Listas Numeradas</font> # Comece com 1. seguido de um espaço e, em seguida, começa a numeração automática para você. Comece cada linha com algum número e um período, depois um espaço. Use tabulação para aumentar o recuo e obter subnumeração. # + active="" # 1. Numeração # 1. Sub-numeração # 2. Outra # 4. Mais uma # - # 1. Numeração # 1. Sub-numeração # 2. Outra # 4. Mais uma # #### <font color=red>Imagem</font> # Você pode anexar arquivos de imagem diretamente a um notebook somente em células Markdown: arraste e solte suas imagens na célula do Markdown para anexá-lo ao notebook. Para adicionar imagens a outros tipos de células, você pode usar apenas gráficos hospedados na web. Você não pode adicionar legendas para gráficos no momento. 
# Use este código: # + active="" # <img src="img/logo_mkdw.png" alt="Texto alternativo que descreve a imagem" width=150 title="título"> # - # <img src = "img/logo_mkdw.png" alt = "Texto alternativo que descreve a imagem" title = "título" width=150 /> # + active="" # #### Outra forma: # - Estilo em linha: # # ![texto alternativo](https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png "Texto do título do logotipo 1") # # - Estilo de referência: # # ![texto alternativo][logo] # # [logo]: https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png "Texto do título do logotipo 2" # - # #### Outra forma: # - Estilo em linha: # # ![texto alternativo](https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png "Texto do título do logotipo 1") # # - Estilo de referência: # # ![texto alternativo][logo] # # [logo]: https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png "Texto do título do logotipo 2" # #### <font color=red>Linha Horizontal</font> # Três alternativas: # 1. Use três asteriscos: *** # 2. Use \< hr \>. # 3. Use três hífens: --- # --- # #### <font color=red>Link Interno</font> # + active="" # Para vincular a uma seção, use este código: [título da seção](#link-secao) # - # Para o texto entre parênteses, substitua espaços e caracteres especiais por hífen. Certifique-se de testar todos os links! Alternativamente, você pode adicionar um ID para uma seção logo acima do título da seção. # + active="" # Use este código: <a id="ID_secao"> </a> # - # Certifique-se de que ID_secao seja exclusivo no notebook. # Use este código para o link e certifique-se de testar todos os links! # + active="" # [título da seção](#ID_secao) # - # [título da seção](#ID_secao) # #### <font color=red>Link Externo</font> # + active="" # Use este código e teste todos os links! 
[Texto do link](http://url) # - # [Texto do link](https://medium.com/ibm-data-science-experience/markdown-for-jupyter-notebooks-cheatsheet-386c05aeebed) # #### <font color=red>Outras formas de Link</font> # # [Sou um link no estilo inline](https://www.google.com) # # [Sou um link no estilo referência](https://www.mozilla.org) # # [Você pode usar números para definições de links no estilo referência][1] # Ou deixe em branco e use o [texto do próprio link] # # URLs e URLs em colchetes serão automaticamente transformados em links. # http://www.example.com ou <http://www.example.com> e às vezes example.com (mas não no Github, por exemplo). # # Algum texto para mostrar que os links de referência podem aparecer mais tarde. # [texto de referência arbitrário, sem distinção entre maiúsculas e minúsculas]: https://www.mozilla.org # # [1] : http://slashdot.org # # [texto do próprio link]: http://www.reddit.com # #### <font color=red>Código e Realce de Sintaxe</font> # Os blocos de código fazem parte da especificação do Markdown, mas o realce de sintaxe não. No entanto, muitos renderizadores - como o Github e o Markdown Here - suportam realce de sintaxe. O Markdown Here suporta o destaque para dezenas de linguagens (e até de descrições não-linguagens, como diffs e cabeçalhos HTTP); para ver a lista completa e como escrever os nomes dos idiomas, veja a página de demonstração do [highlight.js](http://softwaremaniacs.org/media/soft/highlight/test.html). # # <tt>Código em linha (inline) tem crases (três) na abertura e outras (tres) no fechamento.</tt> # # Blocos de código são delimitados por linhas com três marcações, ou são recuadas com quatro espaços. Recomenda-se usar somente os blocos de códigos delimitados - eles são mais fáceis e somente eles suportam realce de sintaxe. 
# # Exemplos: # + active="" # ```javascript # var s = "JavaScript syntax highlighting"; # alert(s); # ``` # # ```python # s = "Python syntax highlighting" # print s # ``` # # ``` # Nenhuma linguagem foi indicada, então nenhum realce de sintaxe acontecerá. # Mas vamos colocar uma <b>tag</b>. # ``` # - # ```javascript # var s = "JavaScript syntax highlighting"; # alert(s); # ``` # # ```python # s = "Python syntax highlighting" # print s # ``` # # ``` # Nenhuma linguagem foi indicada, então nenhum realce de sintaxe acontecerá. # Mas vamos colocar uma <b>tag</b>. # ``` # + language="html" # <!-- Configuração para Tabelas --> # <style> # table td, table th, table tr {text-align:left !important; float:center} # </style> # - # #### <font color=red>Tabelas</font> # As tabelas não fazem parte da especificação principal do Markdown, mas fazem parte do __GFM__ (_Github Flavored Markdown_) e o _Markdown Here_. Eles são uma maneira fácil de adicionar tabelas ao seu e-mail - uma tarefa que, de outra forma, exigiria copiar e colar de outro aplicativo. # # Dois pontos ':' podem ser usados para alinhar colunas. # # | Tabelas | são | legais | # |---------|:---:| ------: | # | col 3 é | alinhada à direita | US$ 1600 | # | col 2 é | centrada | US\$ 12 | # | col 1 é | alinhada à esquerda | US\$ 1 | # # As traves externas (|) são opcionais e você não precisa deixar a linha bem alinhada. Você também pode usar o Markdown em linha. # # Markdown | Menos | Bonita # --- | --- | --- # * Ainda * | `renders` | **agradável** # 1 | 2 | 3 # #### <font color=red>Fórmulas Matemáticas TeX</font> # Uma descrição completa dos símbolos matemáticos do TeX está além do escopo deste notebook. Aqui está uma boa [referência](https://en.wikibooks.org/wiki/LaTeX/Mathematics), e você pode experimentar no [CodeCogs](https://www.codecogs.com/latex/eqneditor.php). 
Aqui estão alguns exemplos para experimentar: # + active="" # $-b \pm \sqrt{b^2 - 4ac} \over 2a$ # $x = a_0 + \frac{1}{a_1 + \frac{1}{a_2 + \frac{1}{a_3 + a_4}}}$ # $\forall x \in X, \quad \exists y \leq \epsilon$ # - # $-b \pm \sqrt{b^2 - 4ac} \over 2a$ # $x = a_0 + \frac{1}{a_1 + \frac{1}{a_2 + \frac{1}{a_3 + a_4}}}$ # $\forall x \in X, \quad \exists y \leq \epsilon$ # Os sinais de cifrão inicial e final ($) são os delimitadores da marcação TeX. # Fontes: # 1. https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook##UseJupyter # 2. https://www.turbopython.com/2019/06/keywords-and-identifiers-in-python.html # 3. https://ipython.readthedocs.io/en/latest/install/kernel_install.html#kernels-for-python-2-and-3 # 4. https://medium.com/ibm-data-science-experience/markdown-for-jupyter-notebooks-cheatsheet-386c05aeebed
notebooks/0_Intr/Introducao_ao_Python.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# this pip install is required to extract data from .env file
# !pip install python-dotenv

from dotenv import load_dotenv,find_dotenv

# The function will walk down the directory to find the location of dotenv file
dotenv_path=find_dotenv()

# load the entries as environment variable
load_dotenv(dotenv_path)

# extracting environment variable using os.environ.get
import os

KAGGLE_USERNAME = os.environ.get("KAGGLE_USERNAME")
KAGGLE_PASSWORD = os.environ.get("KAGGLE_PASSWORD")

print(KAGGLE_USERNAME)

print(KAGGLE_PASSWORD)

print(os.environ.get("key"))

import requests
from requests import session
import os
from dotenv import load_dotenv,find_dotenv

# +
# payload creation
## This method did not work
'''payload = {
    'action': 'login',
    'username': os.environ.get("username"),
    'password': os.environ.get("key")
}
'''
# FIX: the working payload was previously defined INSIDE the triple-quoted
# string above, so it never existed at runtime and the c.post(...) call below
# raised NameError.  Define it as real code; credentials come from the .env
# file loaded earlier via load_dotenv().
payload = {
    'action':'login',
    'username': os.environ.get("KAGGLE_USERNAME"),
    'password': os.environ.get("<PASSWORD>")
}

# url for the data
url = 'https://www.kaggle.com/c/titanic/download/train.csv'

## you don't need to use requests.session as session was imported separately
with session() as c:
    # post request
    r=c.post('https://www.kaggle.com/account/login',data=payload)
    print(r)
    # get request
    #response = c.get(url)
    # print response text
    #print(response.text)
# -

# !pip install kaggle

import os
from dotenv import load_dotenv,find_dotenv

# The kaggle CLI reads its credentials from these environment variables.
os.environ['KAGGLE_USERNAME'] = os.environ.get("username")
os.environ['KAGGLE_KEY'] = os.environ.get("key")

# !kaggle competitions download titanic -p "/DSA_Role/PluralSight/DataScience/python-data-science/module2/titanic/data/raw"

# !kaggle competitions list
notebooks/Extracting Titanic Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### CIS 9 ### # ### Numpy, Basic Statistics ### # Reading: # <br>- Python Data Science Handbook: Ch 2 except Fancy Indexing, Structured Arrays sections # <br>- Think Stats: Ch 1; Ch 2 up to and including Variance; Ch 3: Percentiles # To use numpy, we first need to import the `numpy` module: import numpy as np # **Initializing an array, size of an array** # + # 1. 1D array from list oneD = np.array([1, 2, 3, 4]) print(oneD) # print the size of the array? print() # + # 2. 2D array from list of lists twoD = np.array([[1,2,3],[4,5.1,6],[7,8,9],[10,11,12]]) print(twoD) # print the size of the array? print() # A 2D or higher dimension array must have the same number of elements across each dimension. # Example: for a 2D array, all rows must have the same number of elements and # all columns must have the same number of elements # + # 3. array of int zeros zeros = np.zeros(8, dtype=int) print(zeros) # what data type does the array store? # how to have the array store numpy's int? # + # 4. array of zeros floatZeros = np.zeros((2,2)) print(floatZeros) # what's the default data type for numpy? # - # 5. array of ones ones = np.ones(3) print(ones) # + # 6. array of same values filled = np.full((2,3),-2) print(filled) # what is (2,3)? # + # 7. array of integers, in a range countingNums = np.arange(1,11) print(countingNums) every2 = np.arange(10,0,-2) print(every2) # are the upper and lower limits of np.arange() the same as Python's range()? # - # 8. array of float random numbers, always between 0 and 1 randNums = np.random.random((3,2)) print(randNums) # + # 9. 
array of 80 int random numbers from 10 to 19 intRandNums = np.random.randint(10, 20, 80) print(intRandNums) print(intRandNums.shape, '\n') # create a 3 rows x 4 cols array of random integers from 10 to 19? # print the array and the shape? # + # 10. array from csv file import csv import random with open("sample.csv", "w", newline='') as f : writer = csv.writer(f) for i in range(3) : writer.writerow([random.randint(1,11) for i in range(4)]) data = np.loadtxt("sample.csv", delimiter=",") print(data.shape) print(data,'\n') data = np.loadtxt("sample.csv", delimiter=",", dtype=np.int8) print(data.shape) print(data, '\n') with open("sample.txt", "w") as f : f.write("one two three") data = np.genfromtxt("sample.txt", dtype=str) print(data) with open("sample.txt", "w") as f : f.write("1,2,3") # read sample.txt into a numpy array with 3 integer elements and print the array? # genfromtxt will result in a 1D array # - # **Array indexing** # 11. numeric indexing arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) print(arr, '\n') print(arr[2], '\n') print(arr[2,3], '\n') print(arr[-1], '\n') print(arr[-2,-2]) # + # 12. slice indexing arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) print(arr, '\n') print(arr[:2], '\n') print(arr[1:,1:3], '\n') # we can also mix integer indexing with slice indexing, # however, this will yield an array of lower rank than the original array print(arr[-1,:3], '\n') # 1D, lower rank than original arr print(arr[:-2,1:-1]) # 2D, same rank as arr # + # 13. Each time we create a slice of an array we get a view into the same array. # We're not creating a new array, so modifying it will modify the original array. 
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr)
view = arr[:2,1:3]
print(view)
view[0,0] = 100
print(view)
print(arr, '\n')

# to actually make a copy, use the copy() method:
copy = arr[:2,1:3].copy()
copy[0,0] = -1
print(copy)
print(arr)

# copying takes up memory so using a view is preferable if:
# a) data is for analysis only (no modification needed)
# or b) if data need to be changed but the original array must remain the same
# -

# 14. index with an array
arr = np.array([1,12,3,4,8,5])
print(arr)
print(arr[[0,2,5]])
index = [0,1,2]
print(arr[index])

# +
# 15. boolean indexing
arr = np.array([[1,12,3,4], [15,6,7,10], [2,20,8,1]])
print(arr, '\n')
print(arr[arr<10], '\n')
print(arr[arr%2==1], '\n')

# describe what the last print statement will print for any general array?
# -

# **Changing array shape**

# +
# 16. change the shape of an array, as long as the new shape has the same number of elements
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])  # 12 elements
newArr1 = arr.reshape((6,2))
newArr2 = arr.reshape((12,))
print(arr, '\n')
print(newArr1,'\n')
print(newArr2)

# will the following work? why or why not?
# newArr3 = arr.reshape((1,))
# newArr4 = arr.reshape((2,5))
# -

# 17. transpose a 2D array (matrix)
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr)
print(arr.T, '\n')
arr = np.array([[1,2,3]])
print(arr)
print(arr.T)

# 18. reverse a 1D array:
arr = np.array([1,2,3,4,5])
print(arr)
print(arr[::-1])

# **Array math**
# <br>Take advantage of numpy's ufuncs below and do not loop. Do the reading so you can see the speed difference between ufuncs and loops.

# +
# 19. basic math functions operate element-wise on arrays, and are available both as operator overloads
# and as functions in the numpy module:
# FIX: the np.float alias was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin float is the documented, equivalent replacement (float64).
x = np.array([[1,2],[3,4]], dtype=float)
y = np.array([[5,6],[7,8]], dtype=float)

print(x + y)
print(np.add(x, y))
# -

# 20.
other arithmetic operations between 2 arrays print(x - y) print(x * y) print(x / y) print(x // y) print(x % y) # 21. arithmetic operations between an array and a scalar x = np.array([[1,2],[3,4]], dtype=int) print(x - 1) print(x * 2) print(x / 3) print(x // 4) print(x % 3) print(x ** 2) # + # 22. square root print(np.sqrt(x), '\n') # absolute value print(np.abs([-1,2]), '\n') # - # **Aggregate functions:** # + # 23. Math #sum arr = np.array([[1,2],[3,4]]) print(np.sum(x)) print(np.sum(x, axis=0)) print(np.sum(x, axis=1), '\n') # min print(arr.min()) print(arr.min(0)) print(arr.min(1), '\n') # describe what the 3 statements above print? # don't copy the output, explain what the output is # max print(arr.max()) print(arr.max(axis=0)) print(arr.max(1), '\n') # + # 24. Statistics arr = np.array([[1,4,-3,2], [7,-1,3,8]]) # mean: central tendency print(arr.mean()) print(arr.mean(axis=0)) print(arr.mean(1), '\n') # variance: spread # standard devision: spread from the mean print(arr.std()) print(arr.std(axis=0)) print(arr.std(1), '\n') # median: mid-point print(np.median(arr)) print(np.median(arr, axis=0)) print(np.median(arr,1), '\n') # percentile rank: percentage of values that are less than or equal to a given value # percentile: value with a given percentile rank print(np.percentile(arr,75)) print(np.percentile(arr,25)) # - # **Broadcasting** # + # 25. 
broadcasting or extending an array happens during computation between 2 arrays of different sizes, # as long as the 2 arrays have specific dimensions that can be matched arr = np.array([[1,2,3], [4,5,6]]) print(arr + 2) # the 2 is broadcasted to: [ [2,2,2], [2,2,2]] so it can be added to arr # + arr1 = np.array([[1,2,3]]) arr2 = np.array([[1], [2], [3]]) print(arr1.shape, arr2.shape) # (1,3) (3,1) print(arr1 + arr2, '\n') # [[1,2,3], [[1,1,1], # [1,2,3], + [2,2,2], # [1,2,3]] [3,3,3]] arr3 = np.array([1,2]) print(arr2.shape, arr3.shape) # (3,1) (2,) print(arr2 + arr3, '\n') # [[1,1], [[1,2], # [2,2], + [1,2], # [3,3]] [1,2]] print(arr1.shape, arr3.shape) # (1,3) (2,) print(arr1 + arr3[:,np.newaxis], '\n') # [[1,2,3]] + [[1], => [[1,2,3], [[1,1], # [2]] [1,2,3]] + [2,2]] arr4 = np.array([[1,2],[-1,-2]]) print(arr1.shape, arr4.shape) # (1,3) (2,2) print(arr1[:,np.newaxis,:] + arr4[:,:,np.newaxis], '\n') # (1,3) => (1,1,3) (2,2) => (2,2,1) # or: print(arr1[...,np.newaxis,:] + arr4[...,np.newaxis]) # - # **Boolean operations** # + # 26. checking data in an array arr = np.array([[1,2,-2],[-3,1,0]]) print(arr<0) print(np.sum(arr<0)) print(np.sum(arr<0,axis=0), '\n') # describe the output the last 2 print statements above? # don't just copy the output for your answer print(np.any(arr<0)) print(np.all(arr<0)) print(np.all(arr<0,axis=1)) # - # **Sorting** # + # 27. sort values in the array arr = np.array([5,-2,0,2,-1,-2,4]) print(np.sort(arr),'\n') arr = np.array([[2,0,-1],[1,8,3],[7,1,0]]) print(np.sort(arr), '\n') print(np.sort(arr, axis=0), '\n') print(np.sort(arr, axis=1), '\n') # which axis is the default when no axis is specified? # - # **Get index values** # + # 18. get the index of the sorted values arr = np.array([5,-2,0,2,-1,-2,4]) print(np.argsort(arr)) ind = np.argsort(arr) print(arr[ind],'\n') arr = np.array([[2,0,-1],[1,8,3],[7,1,0]]) print(np.argsort(arr), '\n') ind = np.argsort(arr) # print the smallest value of each row by using arr and ind? 
print("smallest value of each row:", ) # + # 29. get the indices that match a boolean condition arr = np.array([5,-2,0,2,-1,-2,4, -3,1]) print(np.where(arr>0)) ind = np.where(arr>0) # print the positive values in arr by using ind? print("positive values:", ) # -
CIS009/1_Data_Analysis_with_numpy/2-Numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

df_general= pd.read_csv('../data_wedev/data_useful/linkedin.csv')

df_general.sample(5)

df_general.isnull().sum()

df_general.shape

# Forward-fill missing profile pictures.
# FIX: fillna(method="ffill") is deprecated (pandas >= 2.1) in favour of
# ffill(), and the previous chained
# `df_general['profile_picture'].fillna(..., inplace=True)` operated on a
# selection, so the fill might not propagate back to df_general.  Assigning
# the result back is the supported, reliable form.
df_general['profile_picture'] = df_general['profile_picture'].ffill()

df_general.isnull().sum()

df_general.drop(['index','linkedin','Name','skills'],axis=1, inplace=True)

df_general.isnull().sum()

df_general.sample(5)

df_general.drop(['Experience'],axis=1, inplace=True)

# Back-fill remaining gaps (fillna(method="bfill") is deprecated; bfill()
# is the equivalent replacement).
df_general = df_general.bfill()

df_general.isnull().sum()

df_general.sample(5)

general_data= df_general

general_data.drop(['location','position','clean_skills'], axis=1, inplace=True)

general_data.sample(5)

general_data.shape

general_data.describe(include='all')

general_data['category'].unique()

df_loc= pd.read_csv('../data_wedev/data_useful/loc.csv')
df_loc2= pd.read_csv('../data_wedev/data_useful/loc2.csv')

df_loc1=pd.concat([df_loc, df_loc2])

df_loc1.sample(5)

df_loc1.isnull().sum()

general_data.sample(5)

general_data.to_csv(index=False, path_or_buf = '../data_wedev/general_data.csv')

general_data.shape

df_loc1.shape
data_notebooks/general_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Configuration # _Initial steps to get the notebook ready to play nice with our repository. Do not delete this section._ # Code formatting with [black](https://pypi.org/project/nb-black/). # %load_ext lab_black import os import pathlib this_dir = pathlib.Path(os.path.abspath("")) data_dir = this_dir / "data" import pytz import glob import requests import pandas as pd import json from datetime import datetime # ## Download # Retrieve the page url = "https://services.arcgis.com/rQj5FcfuWPllzwY8/arcgis/rest/services/Mono_Regional_Case_Results/FeatureServer/0//query?where=1%3D1&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&resultType=none&distance=0.0&units=esriSRUnit_Meter&returnGeodetic=false&outFields=Name%2CConfirmed%2CDeaths&returnGeometry=true&returnCentroid=false&featureEncoding=esriDefault&multipatchOption=xyFootprint&maxAllowableOffset=&geometryPrecision=&outSR=&datumTransformation=&applyVCSProjection=false&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnExtentOnly=false&returnQueryGeometry=false&returnDistinctValues=false&cacheHint=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&returnZ=false&returnM=false&returnExceededLimitFeatures=true&quantizationParameters=&sqlFormat=none&f=pjson&token=" r = requests.get(url) data = r.json() # ## Parse dict_list = [] for item in data["features"]: d = dict( county="Mono", area=item["attributes"]["Name"], confirmed_cases=item["attributes"]["Confirmed"], ) dict_list.append(d) df = pd.DataFrame(dict_list) # Get timestamp date_url = "https://services.arcgis.com/rQj5FcfuWPllzwY8/arcgis/rest/services/Mono_Regional_Case_Results/FeatureServer/0?f=json" date_r 
= requests.get(date_url) date_data = date_r.json() timestamp = date_data["editingInfo"]["lastEditDate"] timestamp = datetime.fromtimestamp((timestamp / 1000)) latest_date = pd.to_datetime(timestamp).date() df["county_date"] = latest_date exclude = [ "Mono County", "All Regions", "Outside Mono County", "Marine Warfare Training Center", ] df = df[~df["area"].isin(exclude)] # Drop Bishop until it reaches 10 cases, then alert df = df[~(df["confirmed_cases"] <= 10) & ~(df["area"] == "Bishop")] # ## Vet try: assert not len(df) > 3 except AssertionError: raise AssertionError( f"Mono County's scraper has extra {len(export_df) - 9} rows: {list(export_df.area)}" ) try: assert not len(df) < 3 except AssertionError: raise AssertionError( f"Mono County's scraper is missing {9 - len(export_df)} rows: {list(export_df.area)}" ) # ## Export # Set date tz = pytz.timezone("America/Los_Angeles") today = datetime.now(tz).date() slug = "mono" df.to_csv(data_dir / slug / f"{today}.csv", index=False) # ## Combine csv_list = [ i for i in glob.glob(str(data_dir / slug / "*.csv")) if not str(i).endswith("timeseries.csv") ] df_list = [] for csv in csv_list: if "manual" in csv: df = pd.read_csv(csv, parse_dates=["date"]) else: file_date = csv.split("/")[-1].replace(".csv", "") df = pd.read_csv(csv, parse_dates=["county_date"]) df["date"] = file_date df_list.append(df) df = pd.concat(df_list).sort_values(["date", "area"]) df.to_csv(data_dir / slug / "timeseries.csv", index=False)
places/mono.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} tags=[] # # Accessing Hydrology and Climatology database using web services through Python # # **<NAME>** # # **ESIP IT&I 2021** # # <img src="https://raw.githubusercontent.com/cheginit/pangeo_showcase21/main/notebooks/logos.png" width="300"/> # + slideshow={"slide_type": "slide"} tags=[] from IPython.display import IFrame IFrame(src="https://hyriver.readthedocs.io/en/latest", width=950, height=600) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ## Showcase # # 1. Geometry data for plots: US states and coastlines # 2. Tidal and Estuary USGS Stations # 3. NHDPlus attributes for all stations # 4. Accumulated Dams at catchment-scale for Columbia # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ### US States and Coastlines # # **Census TIGER (Topologically Integrated Geographic Encoding and Referencing database)** # + import warnings from pathlib import Path warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*") root = Path("..", "data") root.mkdir(parents=True, exist_ok=True) BASE_PLOT = {"facecolor": "k", "edgecolor": "b", "alpha": 0.2, "figsize": (18, 9)} CRS = "esri:102008" # + slideshow={"slide_type": "slide"} tags=[] import geopandas as gpd from shapely.geometry import box conus_bounds = box(-125, 24, -65, 50) cfile = Path(root, "conus.feather") if cfile.exists(): conus = gpd.read_feather(cfile) else: tiger_url = ( lambda x: f"https://www2.census.gov/geo/tiger/TIGER2020/{x.upper()}/tl_2020_us_{x}.zip" ) coastline = gpd.read_file(tiger_url("coastline")) state = gpd.read_file(tiger_url("state")) conus = state[state.intersects(conus_bounds)].copy() conus_coastline = coastline[coastline.within(conus_bounds)] conus["coastal"] = 
conus.intersects(conus_coastline.unary_union) conus.to_feather(cfile) # + slideshow={"slide_type": "subslide"} tags=[] ax = conus.to_crs(CRS).plot(column="coastal", **BASE_PLOT) ax.axis("off") ax.margins(0) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ### Tidal and Estuary USGS stations # # <img src="https://raw.githubusercontent.com/cheginit/HyRiver-examples/main/notebooks/_static/pygeohydro_logo.png" width="400"/> # + [markdown] slideshow={"slide_type": "slide"} tags=[] # We need to look at the [Water Services](https://waterservices.usgs.gov/rest/Site-Service.html) API. # + slideshow={"slide_type": "subslide"} tags=[] import pygeohydro as gh nwis = gh.NWIS() cfile = Path(root, "coast_stations.feather") if cfile.exists(): coast_stations = gpd.read_feather(cfile) else: queries = [ { "stateCd": s.lower(), "siteType": "ST-TS,ES", "hasDataTypeCd": "dv", "outputDataTypeCd": "dv", } for s in conus.loc[conus.coastal, "STUSPS"] ] sites = nwis.get_info(queries, False) coast_stations = gpd.GeoDataFrame( sites, geometry=gpd.points_from_xy(sites.dec_long_va, sites.dec_lat_va), crs="epsg:4269", ) coast_stations.to_feather(cfile) # + slideshow={"slide_type": "subslide"} tags=[] st = coast_stations[["site_no", "site_tp_cd", "geometry"]].to_crs(CRS) ts = st[st.site_tp_cd == "ST-TS"].drop_duplicates() es = st[st.site_tp_cd == "ES"].drop_duplicates() station_ids = ts.site_no.tolist() + es.site_no.tolist() # + slideshow={"slide_type": "subslide"} tags=[] ax = conus.to_crs(CRS).plot(**BASE_PLOT) ts.plot(ax=ax, lw=3, c="r") es.plot(ax=ax, lw=3, c="g") ax.legend([f"ST-TS ({ts.shape[0]})", f"ES ({es.shape[0]})"], loc="best") ax.axis("off") ax.margins(0) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ### Mean daily discharge for all stations # + tags=[] import numpy as np import pandas as pd cfile = Path(root, "discharge.parquet") dates = ("2000-01-01", "2015-12-31") if cfile.exists(): discharge = pd.read_parquet(cfile) else: nwis = gh.NWIS() discharge = 
nwis.get_streamflow( station_ids, dates, ) discharge[discharge < 0] = np.nan discharge.to_parquet(cfile) # + slideshow={"slide_type": "subslide"} tags=[] ax = discharge.plot(legend=False, lw=0.8, figsize=(15, 6)) ax.set_ylabel("Q (cms)") ax.set_xlabel("") ax.margins(x=0) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ### River network data # # <img src="https://raw.githubusercontent.com/cheginit/HyRiver-examples/main/notebooks/_static/pynhd_logo.png" width="400"/> # # + [markdown] slideshow={"slide_type": "slide"} tags=[] # #### Main river network # + import pynhd as nhd nldi = nhd.NLDI() # + slideshow={"slide_type": "subslide"} tags=[] from pygeoogc import ZeroMatched from rich.progress import Progress cfiles = list(Path(root).glob("flowline_main_*.feather")) if all(c.exists() for c in cfiles): flws_main = {f.stem.rsplit("_", 1)[-1]: gpd.read_feather(f) for f in cfiles} else: with Progress() as progress: task = progress.add_task("Main Flowlines", total=len(station_ids)) for s in station_ids: cfile = Path(root, f"flowline_main_{s}.feather") if cfile.exists(): progress.update(task, description=f"ID: {s:<19}", advance=1) continue try: flws_main = nldi.navigate_byid( fsource="nwissite", fid=f"USGS-{s}", navigation="upstreamMain", source="flowlines", distance=2000, ) flws_main.to_feather(cfile) except (ConnectionError, ZeroMatched): pass progress.update(task, description=f"{s:<15}", advance=1) # + slideshow={"slide_type": "subslide"} tags=[] flws_main = ( pd.concat(flws_main) .reset_index() .drop(columns="level_1") .rename(columns={"level_0": "station"}) ) print( "\n".join( [ f"No. of missing stations: {len(station_ids) - len(cfiles)}/{len(station_ids)}", f"No. 
of flowlines: {len(flws_main)}", ] ) ) # + slideshow={"slide_type": "subslide"} tags=[] ax = conus.to_crs(CRS).plot(**BASE_PLOT) ts.plot(ax=ax, lw=1, c="r") es.plot(ax=ax, lw=1, c="g") flws_main.to_crs(CRS).plot(ax=ax, lw=2, color="b") ax.axis("off") ax.margins(0) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # #### Dams in upstream drainage area # + tags=[] import pickle cfile = Path(root, "nid_flw.pkl") if cfile.exists(): with open(cfile, "rb") as f: nid_flw = pickle.load(f) else: meta = nhd.nhdplus_attrs() nid_years = ( meta[meta.description.str.contains("dam", case=False)].sort_values("name").name.tolist() ) nid_flw = {n.split("_")[-1]: nhd.nhdplus_attrs(n) for n in nid_years} with open(cfile, "wb") as f: pickle.dump(nid_flw, f) # + slideshow={"slide_type": "subslide"} tags=[] comids = [int(c) for c in flws_main.nhdplus_comid.tolist()] nid_vals = { yr: df.loc[df.COMID.isin(comids), ["COMID", f"ACC_NID_STORAGE{yr}", f"ACC_NDAMS{yr}"]].rename( columns={ "COMID": "comid", f"ACC_NID_STORAGE{yr}": "smax", f"ACC_NDAMS{yr}": "ndams", } ) for yr, df in nid_flw.items() } nid_vals = pd.concat(nid_vals).reset_index().drop(columns="level_1") nid_vals = nid_vals.rename(columns={"level_0": "year"}).astype({"year": int}) # + slideshow={"slide_type": "subslide"} tags=[] nid_vals = nid_vals.set_index("comid").merge( flws_main.astype({"nhdplus_comid": int}).set_index("nhdplus_comid"), left_index=True, right_index=True, suffixes=(None, None), ) nm = len(station_ids) - len(nid_vals.station.unique()) print(f"No. 
of missing stations: {nm}/{len(station_ids)}") # + [markdown] slideshow={"slide_type": "slide"} tags=[] # #### Mean Annual Discharge # + import cytoolz as tlz queries = [ { "sites": ",".join(ss), "startDT": 1930, "endDT": 2013, "statReportType": "annual", "statTypeCd": "mean", "missingData": "on", "parameterCd": "00060", } for ss in tlz.partition_all(10, station_ids) ] q_yr = nwis.retrieve_rdb("stat", queries).astype({"year_nu": int, "mean_va": float}) q_yr = ( q_yr.sort_values("year_nu")[["year_nu", "site_no", "mean_va"]] .reset_index(drop=True) .rename(columns={"year_nu": "year", "site_no": "station", "mean_va": "q"}) ) nm = len(station_ids) - len(q_yr.station.unique()) print(f"No. of missing stations: {nm}/{len(station_ids)}") # + slideshow={"slide_type": "subslide"} tags=[] import matplotlib.pyplot as plt smax = nid_vals.groupby(["year", "station"]).sum()["smax"].unstack() q = q_yr.set_index(["year", "station"])["q"].unstack() fig, axs = plt.subplots(1, 2, figsize=(18, 5)) smax.plot(ax=axs[0], legend=False) axs[0].set_ylabel("$S_{max}$ (acre-feet)") axs[0].margins(x=0) q.plot(ax=axs[1], legend=False) axs[1].set_ylabel("$Q_{mean}$ (cfs)") axs[1].margins(x=0) # + [markdown] slideshow={"slide_type": "slide"} tags=[] # ### Columbia River # + tags=[] ax = conus.to_crs(CRS).plot(**BASE_PLOT) ts.plot(ax=ax, lw=1, c="r") es.plot(ax=ax, lw=1, c="g") flws_main.to_crs(CRS).plot(ax=ax, lw=2, color="b") ax.axis("off") ax.margins(0) # + [markdown] slideshow={"slide_type": "subslide"} tags=[] # #### Basin # + tags=[] station_id = "14246900" cfile = Path(root, "basin.feather") if cfile.exists(): basin = gpd.read_feather(cfile) else: basin = nldi.get_basins(station_id) basin.to_feather(cfile) # + [markdown] slideshow={"slide_type": "subslide"} tags=[] # #### Main # + tags=[] cfile = Path(root, "flowline_main.feather") if cfile.exists(): flw_main = gpd.read_feather(cfile) else: flw_main = nldi.navigate_byid( fsource="nwissite", fid=f"USGS-{station_id}", 
navigation="upstreamMain", source="flowlines", distance=2000, ) flw_main.to_feather(cfile) # + [markdown] slideshow={"slide_type": "subslide"} tags=[] # #### Tributaries # + tags=[] cfile = Path(root, "flowline_trib.feather") if cfile.exists(): flw_trib = gpd.read_feather(cfile) else: flw_trib = nldi.navigate_byid( fsource="nwissite", fid=f"USGS-{station_id}", navigation="upstreamTributaries", source="flowlines", distance=2000, ) flw_trib.to_feather(cfile) flw_trib["nhdplus_comid"] = flw_trib["nhdplus_comid"].astype("float").astype("Int64") # + slideshow={"slide_type": "subslide"} tags=[] ax = basin.plot(**BASE_PLOT) flw_trib.plot(ax=ax) flw_main.plot(ax=ax, lw=3, color="r") ax.legend(["Tributaries", "Main"]) ax.axis("off") ax.margins(0) # + [markdown] slideshow={"slide_type": "subslide"} tags=[] # #### Accumulated Dams # + tags=[] comids = [int(c) for c in flw_trib.nhdplus_comid.tolist()] nid_vals = { yr: df.loc[df.COMID.isin(comids), ["COMID", f"ACC_NID_STORAGE{yr}", f"ACC_NDAMS{yr}"]].rename( columns={ "COMID": "comid", f"ACC_NID_STORAGE{yr}": "smax", f"ACC_NDAMS{yr}": "ndams", } ) for yr, df in nid_flw.items() } nid_vals = pd.concat(nid_vals).reset_index().drop(columns="level_1") nid_vals = nid_vals.rename(columns={"level_0": "year"}).astype({"year": int}) # + [markdown] slideshow={"slide_type": "subslide"} tags=[] # #### Accumulated Max Storage # + tags=[] nid_vals = ( nid_vals.set_index("comid") .merge( flw_trib.astype({"nhdplus_comid": int}).set_index("nhdplus_comid"), left_index=True, right_index=True, suffixes=(None, None), ) .reset_index() .rename(columns={"index": "comid"}) ) smax = nid_vals.groupby(["year", "comid"]).sum()["smax"].unstack() smax = gpd.GeoDataFrame( smax.T.merge( flw_trib.astype({"nhdplus_comid": int}).set_index("nhdplus_comid"), left_index=True, right_index=True, suffixes=(None, None), ) ) # + slideshow={"slide_type": "subslide"} tags=[] yr = 2013 ax = basin.plot(**BASE_PLOT) smax.plot(ax=ax, scheme="Quantiles", k=2, column=yr, 
cmap="coolwarm", lw=0.5, legend=False) ax.set_title(f"Accumulated Maximum Storage Capacity of Dams up to {yr}") ax.axis("off") ax.margins(0)
notebooks/esip.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Diophantine Equation # # Goal - finding the integer solutions # # $a \in \mathbb{Z}$ and $b \in \mathbb{Z}$ # # Then the following diophantine equation has a solution. # # $a.x + b.y = gcd(a,b)$ # # ---- # # Example: # 47x + 30y = 1 # Use Euclidean Algorithm # a / b = q + r => a = b.q + r # # # 47/30 = 1, r 17 <br/><br/> # # $\begin{equation} # \begin{split} # 47 &= 30(1) &+ 17 &\implies 17 &= 47(1) &+ 30(-1) \\ # 30 &= 17(1) &+ 13 &\implies 13 &= 30(1) &+ 17(-1) \\ # 17 &= 13(1) &+ 4 &\implies 4 &= 17(1) &+ 13(-1) \\ # 13 &= 4(3) &+ 1 &\implies 1 &= 13(1) &+ 4(-3) \\ # \end{split} # \end{equation}$ # # # # $\begin{equation} # \begin{split} # 1 &= 13(1) &+ 4(-3) &&\text{ substitute 4} \\ # &= 13(1) &+ \Big(17(1) &+ 13(-1)\Big)&(-3) \\ # &= 13(1) &+ 17(-3) &+ 13(3) \\ # &= 17(-3) &+ 13(4) &&\text{ substitute 13} \\ # &= 17(-3) &+ \Big( 30(1) &+ 17(-1) \Big)&(4) \\ # &= 17(-3) &+ 30(4) &+ 17(-4) \\ # &= 30(4) &+ 17(-7) &&\text{ substitute 17}\\ # &= 30(4) &+ \Big( 47(1) &+ 30(-1) \Big)&(-7) \\ # &= 30(4) &+ 47(-7) &+ 30(7) \\ # &= 47(-7) &+ 30(11) \\ # \end{split} # \end{equation}$ # # 47x + 30y = 1 has a solution, x=-7, y=11 # #
notebooks/math/number_theory/diophantine-equation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <small><small><i> # All the IPython Notebooks in this lecture series by Dr. <NAME> are available @ **[GitHub](https://github.com/milaan9/08_Python_Date_Time_Module)** # </i></small></small> # # How to get current date and time in Python? # # In this class, you will learn to get today's date and current date and time in Python. We will also format the date and time in different formats using **`strftime()`** method. # # There are a number of ways you can take to get the current date. We will use the **`date`** class of the **[datetime](https://github.com/milaan9/08_Python_Date_Time_Module/blob/main/001_Python_datetime_Module.ipynb)** module to accomplish this task. # ### Example 1: Python get today's date # + # Example 1: Python get today's date from datetime import date today = date.today() print("Today's date:", today) # When you run the program, the output will be something like below: # - # > **Note:** Here, we imported the **`date`** class from the **`datetime`** module. Then, we used the **`date.today()`** method to get the current local date. # # By the way, **`date.today()`** returns a **`date`** object, which is assigned to the today variable in the above program. Now, you can use the **[strftime()](https://github.com/milaan9/08_Python_Date_Time_Module/blob/main/002_Python_strftime%28%29.ipynb)** method to create a string representing date in different formats. 
# ### Example 2: Current date in different formats # + # Example 2: Current date in different formats from datetime import date today = date.today() # dd/mm/YY d1 = today.strftime("%d/%m/%Y") print("d1 =", d1) # Textual month, day and year d2 = today.strftime("%B %d, %Y") print("d2 =", d2) # mm/dd/y d3 = today.strftime("%m/%d/%y") print("d3 =", d3) # Month abbreviation, day and year d4 = today.strftime("%b-%d-%Y") print("d4 =", d4) # When you run the program, the output will be something like below: # + [markdown] cell_style="center" # If you need to get the current date and time, you can use **`datetime`** class of the **`datetime`** module. # - # ### Example 3: Get the current date and time # + # Example 3: Get the current date and time from datetime import datetime # datetime object containing current date and time now = datetime.now() print("now =", now) # dd/mm/YY H:M:S dt_string = now.strftime("%d/%m/%Y %H:%M:%S") print("date and time =", dt_string) # - # **Explanation:** # # Here, we have used **`datetime.now()`** to get the current date and time. Then, we used **`strftime()`** to create a string representing date and time in another format.
004_Python_current_date_and_time.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: finlab # language: python # name: finlab # --- # # 參數優化 - 將歷史報酬提升數倍的方式! # 這個單元中,我們首先要來撰寫一個獲利的策略 # # 寫一個策略,首先要來將我們之前爬到的數據拿出來 # # ## 拿出歷史大盤數據 # + from finlab.data import Data data = Data() twii = data.get('發行量加權股價指數') twii = twii[(twii.index.second == 0)]['台股指數'] # + import matplotlib.pyplot as plt plt.style.use("ggplot") # %matplotlib inline twii.plot() # + sma = twii.rolling(500).mean() bias = twii / sma ub =1+ bias.rolling(1000).std() * 2 lb = 1 - bias.rolling(1000).std() * 2 import matplotlib.pyplot as plt plt.plot(bias['2008-03'].values) plt.plot(ub['2008-03'].values) plt.plot(lb['2008-03'].values) plt.show() plt.plot(twii['2008-03'].values) # - # ## 乖離率策略 # + import numpy as np import pandas as pd # %matplotlib inline def strategy(m1=500, m2=1000, m3=1, m4=1, draw_plot=False): sma = twii.rolling(m1).mean() bias = (twii / sma) ub = 1 + bias.rolling(m2).std() * m3 lb = 1 - bias.rolling(m2).std() * m4 buy = (bias < lb) sell = (bias > ub) hold = pd.Series(np.nan, index=sell.index) hold[buy] = 0 hold[sell] = 1 hold = hold.ffill() returns = twii.shift(-2) - twii.shift(-1) returns[hold == 0] = 0 returns -= hold.diff().abs() * 3 creturn = returns.cumsum() if draw_plot: (hold*1000).plot() creturn.plot() return creturn.dropna()[-1] strategy(draw_plot=True) # - # ## 乖離率優化 vmax = 0 for m1 in range(500, 2000, 200): for m2 in range(400, 800, 20): v = strategy(m1, m2, 1, 1) if v > vmax: vmax = v print(vmax, m1, m2, 1, 1) # ## 繪製出歷史報酬 strategy(1300, 780, 1, 1, draw_plot=True)
finlab/u04_parameter_optimization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Copyright © 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# # Configure an Amazon Web Services (AWS) Container Publishing Destination
#
# In order to create an Amazon Web Services (AWS) destination, you must complete the following steps:
#
# 1. Find a valid domain with AWS credentials.
# 2. Create credentials for a specific user or group and define a credential domain.
# 3. Submit an API post to create an AWS destination.

# ## Establish a Connection and Define URLs

# +
import sys
sys.path.append('..')
import mmAuthorization

import requests
import json, os, pprint
import base64

# +
host_name= "localhost"
port = "8080"

host_url="http://" + host_name + ":" + port

mm_auth = mmAuthorization.mmAuthorization("myAuth")

admin_userId = '<SAS_user_admin_ID>'
user_id = '<SAS_user_ID>'
# NOTE(review): the same placeholder password is used below for both the admin
# and the regular user token -- verify this is intended before running against
# a real deployment.
user_passwd = '<<PASSWORD>>'

auth_token = mm_auth.get_auth_token(host_url, user_id, user_passwd)
admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)
# -

# ## Create User Credentials and Define Domain

# +
credential_put_header = {
    'Content-Type': "application/vnd.sas.credential+json",
    'Accept': "application/vnd.sas.credential+json",
    'Authorization': 'Bearer ' + admin_auth_token}

user_credential_name = user_id
# NOTE(review): ``my_domain_url`` and ``domain_name`` (used here and below) are
# never defined in this notebook -- presumably they come from a separate
# "define credential domain" cell that is missing.  As written, this raises
# NameError; restore the domain-creation step before running.
my_credential_url = my_domain_url + "/users/" + user_credential_name

key_id = "<AWS_key_id>"
secret_access_Key = "<AWS_secret_access_key>"

# Base64-encode the AWS key pair, as required by the SAS credential service.
encoded_key_id = str(base64.b64encode(key_id.encode("utf-8")), "utf-8")
encoded_access_key = str(base64.b64encode(secret_access_Key.encode("utf-8")), "utf-8")
# NOTE(review): this prints a (merely base64-encoded) secret into the notebook
# output -- remove before using real credentials.
print(encoded_access_key)

credential_attrs = {
    "domainId":domain_name,
    "identityType":"user",
    "identityId":user_credential_name,
    "domainType":"base64",
    "properties":{"accessKeyId":encoded_key_id},
    "secrets":{"secretAccessKey":encoded_access_key}
}

domain = requests.put(my_credential_url,
                      data=json.dumps(credential_attrs), headers=credential_put_header)
print(domain)
pprint.pprint(domain.json())
# -

# ## Get Credential Headers

# +
headersGet = {
    'Authorization': 'Bearer ' + admin_auth_token}

# NOTE(review): ``domains_url`` is also undefined in this notebook -- see the
# note above about the missing domain-definition cell.
credentialURL = domains_url + domain_name + '/credentials'
print(credentialURL)
credentialGet = requests.get(credentialURL, headers=headersGet)
print(credentialGet)
for i, domain in enumerate(credentialGet.json()['items']):
    print(f"{i}. Domain ID: {domain['domainId']}, {domain['identityId']}, {domain['identityType']}")
# -

# ## Create an AWS Destination

# +
destination_aws_post_headers = {
    "Content-Type":"application/vnd.sas.models.publishing.destination.aws+json",
    mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}

dest_name = "AWS_Demo"
domainName = "aws0521"
awsRegion = "us-east-1"
k8sClusterName = "mm-docker-models-eks"

destination_attrs = {
    "name":dest_name,
    "destinationType":"aws",
    "properties":[
        {"name": "credDomainId", "value": domainName},
        {"name": "region", "value": awsRegion},
        {"name": "kubernetesCluster", "value": k8sClusterName}
    ]
}

destination = requests.post(host_url + '/modelPublish/destinations',
                            data=json.dumps(destination_attrs), headers=destination_aws_post_headers)
print(destination)
pprint.pprint(destination.json())
# -
addons/destinations/CreateAWSDestination.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="dzLKpmZICaWN" from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import matplotlib.pyplot as plt # TensorFlow and Keras import tensorflow as tf import tensorflow_datasets as tfds # + # Construct a tf.data.Dataset (train_images, train_labels), (test_images, test_labels) = tfds.as_numpy(tfds.load( 'fashion_mnist', split=['train', 'test'], batch_size=-1, as_supervised=True, )) train_images = np.squeeze(train_images) test_images = np.squeeze(test_images) classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # + colab={} colab_type="code" id="zW5k_xz1CaWX" print("Training dataset shape =", train_images.shape) print("Training labels length =", len(train_labels)) print("Some training labels =", train_labels[:5]) print("Test dataset shape =", test_images.shape) print("Test labels length =", len(test_labels)) # + colab={} colab_type="code" id="m4VEw8Ud9Quh" plt.figure() plt.imshow(train_images[0]) plt.colorbar() plt.grid(False) plt.show() # + colab={} colab_type="code" id="bW5WzIPlCaWv" train_images = train_images / 255.0 test_images = test_images / 255.0 # + colab={} colab_type="code" id="oZTImqg_CaW1" plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(classes[train_labels[i]]) plt.show() # + colab={} colab_type="code" id="9ODch-OFCaW4" model = tf.keras.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10) ]) # + colab={} colab_type="code" id="Lhan11blCaW7" model.compile(optimizer='adam', 
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) # + colab={} colab_type="code" id="xvwvpA64CaW_" model.fit(train_images, train_labels, epochs=10) # + colab={} colab_type="code" id="VflXLEeECaXC" test_loss, test_accuracy = model.evaluate(test_images, test_labels, verbose=2) print('\nTest accuracy:', test_accuracy) # + colab={} colab_type="code" id="3DmJEUinCaXK" probability_model = tf.keras.Sequential([model, tf.keras.layers.Softmax()]) predictions = probability_model.predict(test_images) print(predictions[0:3]) # + colab={} colab_type="code" id="qsqenuPnCaXO" print("Class ID, predicted | real =", np.argmax(predictions[0]), "|", test_labels[0]) # + colab={} colab_type="code" id="DvYmmrpIy6Y1" def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array, true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'blue' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(classes[predicted_label], 100*np.max(predictions_array), classes[true_label]), color=color) # - def plot_value_array(i, predictions_array, true_label): predictions_array, true_label = predictions_array, true_label[i] plt.grid(False) plt.xticks(range(10)) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array, color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') # + colab={} colab_type="code" id="HV5jw-5HwSmO" i = 0 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(i, predictions[i], test_labels, test_images) plt.subplot(1,2,2) plot_value_array(i, predictions[i], test_labels) plt.show() # + colab={} colab_type="code" id="hQlnbqaw2Qu_" # Plot the first X test images, their predicted labels, and the true labels. 
# Color correct predictions in blue and incorrect predictions in red. num_rows = 5 num_cols = 3 num_images = num_rows*num_cols plt.figure(figsize=(2*2*num_cols, 2*num_rows)) for i in range(num_images): plt.subplot(num_rows, 2*num_cols, 2*i+1) plot_image(i, predictions[i], test_labels, test_images) plt.subplot(num_rows, 2*num_cols, 2*i+2) plot_value_array(i, predictions[i], test_labels) plt.tight_layout() plt.show()
Activity01/Activity01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Manifest # This is the script used to create the Space Fluff project manifest. I had a list of the names of the images, coupled with some physical variables (coordinates and color). import re import itertools import os.path from os import path from PIL import Image filename_list = '/home/anna/Desktop/SUNDIAL/images/aux_data.txt' def read(path): with open(path, 'r', encoding="utf-8") as f: return f.read().splitlines() variable_list = read(filename_list) # I want to add a copy all the images in photo_new into the launch forlder, and put a cross on them, and then write the manifest image_try = '/home/anna/Desktop/SUNDIAL/images/launch/UDGcand_401_insp.png' pil_im = Image.open(image_try) pil_im # + from pylab import * im = array(pil_im) imshow(im) plot(im.shape[0]/2,im.shape[1]/2,'x' ) plt.axis('off') #savefig('/home/anna/Desktop/SUNDIAL/images/try_out.png',bbox_inches = 'tight', dpi=300) # - # We decided to add images with a superimposed cross at the center, following comments from the Beta phase feedback. 
# +
# Pass 1: overlay a centre cross on every catalogue image that exists on disk.
# The launch directory is named once instead of repeating the long literal.
launch_dir = '/home/anna/Desktop/SUNDIAL/images/launch/'

counter = 0
for myline in itertools.islice(variable_list, 400, None):
    line = myline.split()
    x = re.findall('[0-9]+', line[0])
    insp_path = launch_dir + 'UDGcand_' + str(x[0]) + '_insp.png'
    # Some ids in the aux data have no corresponding image file; skip those.
    if os.path.exists(insp_path):
        counter += 1
        im = array(Image.open(insp_path))
        imshow(im)
        plot(im.shape[0]/2, im.shape[1]/2, 'x')
        plt.axis('off')
        # Saving is disabled on purpose -- re-enable to regenerate the
        # cross-annotated copies.
        #savefig(launch_dir + 'UDGcand_' + str(x[0]) + '_insp_cross.png', bbox_inches='tight', dpi=300)
        close()
# This counts the images that WERE found and annotated (the old message
# mislabelled them as "missed images").
print('We have a total of: ', counter, ' images')
# -

# creating a manifest

# +
# Pass 2: write one manifest row per id for which both the plain and the
# crossed image exist.  The manifest is opened once and closed via the
# context manager, instead of re-opening (and leaking) a file handle per row.
counter = 0
with open(launch_dir + "manifest.csv", "a") as manifest:
    for myline in itertools.islice(variable_list, 400, None):
        line = myline.split()
        x = re.findall('[0-9]+', line[0])
        insp_path = launch_dir + 'UDGcand_' + str(x[0]) + '_insp.png'
        cross_path = launch_dir + 'UDGcand_' + str(x[0]) + '_insp_cross.png'
        if os.path.exists(insp_path) and os.path.exists(cross_path):
            counter += 1
            # Row format: id, plain image, crossed image, then the four
            # physical variables from the aux data (coordinates and colour).
            print(str(x[0]) + ',' + line[0] + '_insp.png,' + line[0] + '_insp_cross.png,'
                  + line[1] + ',' + line[2] + ',' + line[3] + ',' + line[4],
                  file=manifest)
        else:
            print('something is wrong with: ', 'UDGcand_' + str(x[0]) + '_insp.png')

print('We have a total of: ', counter, ' images')
# -
Manifest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import cv2 import time import cache from sklearn.cluster import KMeans from sklearn.neighbors import NearestNeighbors import data_manager as dm # # Bag of Words Based Image Retrieval with ORB Features # by [<NAME>](http://perso.ens-lyon.fr/tristan.sterin/) / [GitHub](https://github.com/tcosmo/) # Please refer to the [report](https://github.com/tcosmo/dresscode/blob/master/report.pdf) if you want to know exactly all what this is about and part of. # In order to judge our Neural Features methods we implement the 'old school' approach to our problem. # We collect ORB features on each images, learn a dictionary upon it and acheive retrieval with Bag Of Words technique. # Please find more information on ORB features [here](http://www.dabi.temple.edu/~hbling/Teaching/12S_8543/Presentation/Rublee11ORB.pdf), and more information on BoW techniques [here](https://arxiv.org/abs/1304.5168). # The ORB method will give us 500 32-dimensional feature vectors per image. # ## Getting our Data # Please refer to the repo's readme for a complete description of the data we are using. # We collect our catalogue item and queries in dictionnaries where the key is the id present in the files names. # As we ignore some of them, id's are not continuous. What we call "class" is the corresponding number when only counting the images we kept. # We refer to the dresses catalogue with **cat** and to the set of queries with **quer**. 
# + # some data is ill-formated, we dont want it to_ignore_file = "to_ignore.txt" db_path = 'db/robes/' cat_by_id = dm.get_images_by_id(db_path+'cat/',dm.id_getter_cat,dm.id_select_cat,to_ignore_file) quer_by_id = dm.get_images_by_id(db_path+'mod/',dm.id_getter_quer,dm.id_select_quer,to_ignore_file) class_of_id, id_of_class = dm.get_id_class_correspondance(cat_by_id) print("\nThe database contains "+str(len(cat_by_id))+" query/item pairs such as:\n\t"+str(cat_by_id[0])+"\n\t"+str(quer_by_id[0])+"\n") id_img_ex = 1 img_cat_ex = dm.img_getter(cat_by_id[id_img_ex]) img_quer_ex = dm.img_getter(quer_by_id[id_img_ex]) plt.figure(figsize=(10,10)) plt.subplot(121) plt.imshow(img_cat_ex) plt.subplot(122) plt.imshow(img_quer_ex) plt.show() # - # ## Orb Features Collection on the Catalogue # We compute the set of orb features for each image in the catalogue. # We dump it to a file to avoid calculus when re-using the notebook. orb = cv2.ORB_create() def compute_ORB_features(img_set): ''' Routine to extract orb features for any image set, that is a dictionary id -> image_path. Returns a dictionary id -> list of features. ''' to_return = {} for id_ in img_set: gray = dm.img_getter_gray(img_set[id_]) kp, des = orb.detectAndCompute(gray,None) to_return[id_] = des return to_return cat_ORB_per_id = cache.cache("dumps/cat_ORB_per_id.dump",compute_ORB_features,cat_by_id) # For instance, the ORB feature vector of catalogue image 0 are: print(cat_ORB_per_id[0]) print(cat_ORB_per_id[0].shape) # ### Feature Plotting # We can plot the ORB descriptors in a standard way, they are the green dots on the following image: gray = dm.img_getter_gray(cat_by_id[id_img_ex]) kp,_ = orb.detectAndCompute(gray,None) img_cat_ex_orb = cv2.drawKeypoints(img_cat_ex, kp, None, color=(0,255,0), flags=0) plt.figure(figsize=(10,10)) plt.imshow(img_cat_ex_orb) plt.show() # ### Numpy Matrix # We aggregate all the features of all the images in one big matrix. 
# Stack every image's ORB descriptors into one big (n_descriptors, 32) matrix.
# A single vstack is linear; the previous version grew the array with np.r_
# inside the loop, recopying the whole accumulator each iteration (O(n^2)).
# Keep id 0 first, then the remaining ids in dict order, so the row order is
# exactly what the incremental version produced.
ordered_ids = [0] + [id_ for id_ in cat_ORB_per_id if id_ != 0]
all_ORB = np.vstack([cat_ORB_per_id[id_] for id_ in ordered_ids])
print(all_ORB.shape)

# ## Dictionary Learning

# We then aggregate our 97012 feature vectors into 2048 clusters with k-means. 2048 was chosen to mimic the dimension of our Neural Features. Even with 12 processors this process is quite long (12m). We dump the resulting clusters even if they would be different at each run.

# ### K-means Aggregation

# %%time
kmeans = KMeans(n_clusters=2048,n_jobs=12,verbose=10)
kmeans_result = cache.cache("dumps/cat_ORB_2048_kmeans.dump", kmeans.fit, all_ORB)

# ### Dictionary Creation

# For each of our images, we compute a visual histogram out of its ORB features. It consists in calculating a histogram of the clusters to which these features belong.

def get_visual_hist(kmeans, orb_descs):
    '''Return the 'visual histogram' of a set of ORB descriptors.

    Each descriptor is assigned to its nearest k-means cluster; the result
    is a #clusters-dimensional integer vector counting how many descriptors
    fall into each cluster.

    Parameters
    ----------
    kmeans : fitted clustering model exposing ``predict`` and ``n_clusters``.
    orb_descs : array-like of shape (n_descriptors, 32)

    Returns
    -------
    numpy.ndarray of shape (kmeans.n_clusters,), dtype int
    '''
    labels = kmeans.predict(orb_descs)
    # np.bincount counts in one vectorized pass instead of a Python loop;
    # minlength pads zeros for clusters that received no descriptor.
    return np.bincount(labels, minlength=kmeans.n_clusters).astype(int)

# %%time
cat_ORB_dict = {}
for id_ in cat_by_id:
    cat_ORB_dict[id_] = get_visual_hist(kmeans_result, cat_ORB_per_id[id_])

# For instance here's a visualization of the final feature vector of image 0:

plt.figure(figsize=(15,5))
im = plt.imshow(cat_ORB_dict[0].reshape((2**5,2**6)))
plt.colorbar(im)
plt.show()

# ### Numpy Matrix

# We merge our final dictionary into a np matrix.

# Same O(n) stacking fix as above: one histogram row per catalogue image,
# id 0 first, remaining ids in dict order.
cat_ORB_dict_np = np.vstack(
    [cat_ORB_dict[0]] + [cat_ORB_dict[id_] for id_ in cat_ORB_dict if id_ != 0])
print(cat_ORB_dict_np.shape)

# ## Retrieval

# ### Computing ORB Features on Queries

quer_ORB_per_id = cache.cache("dumps/quer_ORB_per_id.dump",compute_ORB_features,quer_by_id)

# ### 10-Nearest Neighbours Model

# We construct a 10-NN model over our catalogue dictionary in order to proceed with retrieval and compute 10-accuracy score.
ten_nbrs = NearestNeighbors(n_neighbors=10).fit(cat_ORB_dict_np) # # Results # First we test retrieval for query image 0: quer_visual_hist = get_visual_hist(kmeans_result,quer_ORB_per_id[0]) distances,indices = ten_nbrs.kneighbors([quer_visual_hist]) print("Top-10 retrieved ids and distances:") print("\t",indices) print("\t",distances) print("\nquery image 0 correct match:",class_of_id[0] in indices[0]) k_accuracy = [1,3,5,10] success_k = np.zeros(len(k_accuracy)) time_per_query = [] for id_ in quer_ORB_per_id: t = time.time() quer_visual_hist = get_visual_hist(kmeans_result,quer_ORB_per_id[id_]) distances,indices = ten_nbrs.kneighbors([quer_visual_hist]) for i,k in enumerate(k_accuracy): success_k[i] += class_of_id[id_] in indices[0][:k] time_per_query.append(time.time()-t) success_k /= len(quer_ORB_per_id) for i,k in enumerate(k_accuracy): print(str(k)+"-accuracy score:",success_k[i]) print("mean time per request:",str(np.array(time_per_query).mean())+"s") # The traditionnal Bag Of Word method achieves a **25% success rate at 10-accuracy**. # ## License (MIT) # # Copyright (c) 2017 by <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.
Results_ORB_BagOfWords.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # <!--HEADER--> # [*NBBinder test on a collection of notebooks about some thermodynamic properperties of water*](https://github.com/rmsrosa/nbbinder) # + [markdown] slideshow={"slide_type": "skip"} # <!--BADGES--> # <a href="https://colab.research.google.com/github/rmsrosa/nbbinder/blob/master/tests/nb_builds/nb_water/04.00-High_Dim_Fittings.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Google Colab" title="Open in Google Colab"></a><a href="https://mybinder.org/v2/gh/rmsrosa/nbbinder/master?filepath=tests/nb_builds/nb_water/04.00-High_Dim_Fittings.ipynb"><img align="left" src="https://mybinder.org/badge.svg" alt="Open in binder" title="Open in binder"></a><a href="https://nbviewer.jupyter.org/github/rmsrosa/nbbinder/blob/master/tests/nb_builds/nb_water/04.00-High_Dim_Fittings.ipynb"><img align="left" src="https://img.shields.io/badge/view%20in-nbviewer-orange" alt="View in NBViewer" title="View in NBViewer"></a><a href="https://nbviewer.jupyter.org/github/rmsrosa/nbbinder/blob/master/tests/nb_builds/nb_water_slides/04.00-High_Dim_Fittings.slides.html"><img align="left" src="https://img.shields.io/badge/view-slides-darkgreen" alt="View Slides" title="View Slides"></a>&nbsp; # + [markdown] slideshow={"slide_type": "skip"} # <!--NAVIGATOR--> # [<- Low-Dimensional Fittings](03.00-Low_Dim_Fittings.ipynb) | [Water Contents](00.00-Water_Contents.ipynb) | [References](BA.00-References.ipynb) | [Choosing the Best Fit with AIC ->](05.00-Best_AIC_Fitting.ipynb) # # --- # # + [markdown] slideshow={"slide_type": "slide"} # # High-Dimensional Fittings # # Now we fit higher degree polynomials to the data and compare the results and errors. 
# + [markdown] slideshow={"slide_type": "skip"} # ## Importing the libraries # + slideshow={"slide_type": "skip"} import csv import numpy as np import matplotlib.pyplot as plt # + [markdown] slideshow={"slide_type": "slide"} # ### Loading the data # # We load the data and define the header and the respective vectors with the temperature and with the density values. # + slideshow={"slide_type": "fragment"} water_csv = list(csv.reader(open('water.csv',"r"), delimiter=",")) header = dict([(water_csv[0][i],water_csv[1][i]) for i in range(3)]) T, f = np.loadtxt(open('water.csv', "r"), delimiter=",", skiprows=2, usecols=(0,1), unpack=True) N = len(T) N_half = int(N/2) # + [markdown] slideshow={"slide_type": "slide"} # ### The Vandermonde matrices # # We build a number of Vandermonde matrices, up to the number of data points available. # + slideshow={"slide_type": "fragment"} A = list() for j in range(N_half): A.append(np.vstack([T**i for i in range(j+1)]).T) # + [markdown] slideshow={"slide_type": "slide"} # ### Solving the least-square problems # + slideshow={"slide_type": "fragment"} a = list() for j in range(N_half): a.append(np.linalg.lstsq(A[j], f, rcond=None)[0]) # + [markdown] slideshow={"slide_type": "slide"} # ### Building the approximating polynomials # + slideshow={"slide_type": "fragment"} p = list() for j in range(N_half): p.append(np.array(sum([a[j][i]*T**i for i in range(j+1)]))) # + [markdown] slideshow={"slide_type": "slide"} # ### Plotting the approximations # + slideshow={"slide_type": "fragment"} plt.figure(figsize=(10,5)) plt.plot(T, f, 'o', label='Data', color='tab:blue') for j in range(N_half): plt.plot(T, p[j], label=f'degree {j}') plt.title('Plot of the data and of the polynomial approximations', fontsize=14) plt.xlabel(header['temp'], fontsize=12) plt.ylabel(header['density'], fontsize=12) plt.legend() plt.show() # - # ### Calculating the mean quadratic errors Err = list() for j in range(N_half): Err.append(np.linalg.lstsq(A[j], f, 
rcond=None)[1][0]/N) print(f'j={j}: Error={Err[j]:.2e}') # + [markdown] slideshow={"slide_type": "slide"} # ### Plotting the mean quadratic errors # + slideshow={"slide_type": "fragment"} plt.figure(figsize=(10,5)) plt.plot(range(len(Err)), Err, 'o', color='tab:red', markersize=10) plt.grid(True) plt.yscale('log') plt.ylim(10**(-10), 10**(-3)) plt.title('Mean quadratic error in terms of the degree of the approximating polynomial', fontsize=14) plt.xlabel('degree', fontsize=12) plt.ylabel('error', fontsize=12) plt.show() # + [markdown] slideshow={"slide_type": "fragment"} # Notice how there is not much advantage going beyond degree four. # + [markdown] slideshow={"slide_type": "slide"} # <!--NAVIGATOR--> # # --- # [<- Low-Dimensional Fittings](03.00-Low_Dim_Fittings.ipynb) | [Water Contents](00.00-Water_Contents.ipynb) | [References](BA.00-References.ipynb) | [Choosing the Best Fit with AIC ->](05.00-Best_AIC_Fitting.ipynb)
tests/nb_builds/nb_water/04.00-High_Dim_Fittings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/PANDASANG1231/522_Ramen/blob/main/037_Finetune.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="xnFXuqV2lQSx" colab={"base_uri": "https://localhost:8080/"} outputId="9fabcc5a-2a97-4ac0-bfdf-dc4b4318f6f3"
import os
import sys

from google.colab import drive

# Mount Google Drive so the shared helper module can be imported.
drive.mount('/content/drive', force_remount=True)
sys.path.append('/content/drive/MyDrive/Colab Notebooks/deeplearning_note')
# NOTE(review): star-import presumably provides train_p2 and the
# torch/torchvision/nn names used below — confirm against tool.py.
from tool import *

# + id="W1PWmio23Hin"
# ! pip install d2l

# + colab={"base_uri": "https://localhost:8080/"} id="5SemXnflYY1X" outputId="8479fdd8-e232-4510-8935-c175d5dbfc23"
# ! nvidia-smi

# + [markdown] id="MDarzT3O41nL"
# ## ResNet finetune vs ResNet from scratch
#
# - Finetuning uses the ResNet pretrained model, so the inputs need the same preprocessing:
#   - the pretrained ResNet expects ImageNet-normalized inputs, so we normalize here as well;
#   - the pretrained ResNet expects 224x224 inputs, so we resize/crop to 224.

# + id="MFbTCpKB3Dq_"
from d2l import torch as d2l

# Register and download the hotdog/not-hotdog binary classification dataset.
d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip',
                          'fba480ffa8aa7e0febbb511d181409f899b9baa5')
data_dir = d2l.download_extract('hotdog')

# + id="aLwIFWNGwJMk"
# ImageNet channel means/stds — required by the pretrained ResNet weights.
normalize = torchvision.transforms.Normalize(
    [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

train_augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomResizedCrop(224),
    torchvision.transforms.ToTensor(),
    normalize])

test_augs = torchvision.transforms.Compose([
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    normalize])

train_dataset = torchvision.datasets.ImageFolder(
    os.path.join(data_dir, 'train'), transform=train_augs)
test_dataset = torchvision.datasets.ImageFolder(
    os.path.join(data_dir, 'test'), transform=test_augs)

train_dataloader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=64, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=64, shuffle=False)

device = torch.device('cuda')

# + [markdown] id="bazmo924fKWO"
# ### No finetune
#
# - No pretraining; Xavier weight initialization
# - learning_rate = 0.05, final accuracy is 0.834
# - training speed: 608 examples/sec on cuda

# + id="EGdtTSUWIGOu" colab={"base_uri": "https://localhost:8080/"} outputId="09ad055a-3371-40bf-eb66-f19d379b7e07"
resnet = torchvision.models.resnet18(pretrained=False)
# Replace the 1000-class ImageNet head with a 2-class head.
resnet.fc = nn.Linear(resnet.fc.in_features, 2)


def init_xavier(m):
    # Xavier-initialize every linear and convolutional layer.
    if type(m) == nn.Linear or type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight)


resnet.apply(init_xavier)

loss = nn.CrossEntropyLoss()
learning_rate = 0.05
optimizer = torch.optim.Adam(params=resnet.parameters(), lr=learning_rate)

train_p2(num_epochs=10, net=resnet, loss=loss,
         train_iter=train_dataloader, test_iter=test_dataloader,
         device=device, optimizer=optimizer)

# + [markdown] id="dk7BdjXVfesr"
# ### Finetune (per-layer learning rates)
#
# - Only the new final fully-connected layer is Xavier-initialized
# - Different learning rate per layer group: the head trains 10x faster than the backbone
# - Base learning rate is 0.0001, accuracy is 0.963 (far better)
# - Training speed is the same, 608 examples/sec on cuda

# + id="9JDRaSE7A2mS" colab={"base_uri": "https://localhost:8080/"} outputId="efebda40-c810-45db-bf1b-6803a5b2b320"
resnet = torchvision.models.resnet18(pretrained=True)
resnet.fc = nn.Linear(resnet.fc.in_features, 2)
nn.init.xavier_uniform_(resnet.fc.weight)

loss = nn.CrossEntropyLoss()
learning_rate = 0.0001

# Backbone parameters (everything except the new head) use the base LR;
# the freshly-initialized head gets a 10x larger LR.
param_norm = [param for name, param in resnet.named_parameters()
              if name not in ['fc.weight', 'fc.bias']]
optimizer = torch.optim.Adam(
    params=[{"params": param_norm},
            {"params": resnet.fc.parameters(), "lr": learning_rate * 10}],
    lr=learning_rate)

train_p2(num_epochs=10, net=resnet, loss=loss,
         train_iter=train_dataloader, test_iter=test_dataloader,
         device=device, optimizer=optimizer)

# + [markdown] id="bpCBYAOk6utL"
# ### Finetune (frozen backbone)
# - Only the new final fully-connected layer is Xavier-initialized
# - Gradients are updated only in the last layer (backbone frozen)
# - Learning rate is 0.0035, accuracy is 0.926 (far better than scratch)
# - Training is about 3x faster: 1650 examples/sec on cuda

# + colab={"base_uri": "https://localhost:8080/"} id="x_XtUVcvA7X_" outputId="a2d1a9d1-a4a3-4d23-d5b0-16165dace85d"
resnet = torchvision.models.resnet18(pretrained=True)
resnet.fc = nn.Linear(resnet.fc.in_features, 2)
nn.init.xavier_uniform_(resnet.fc.weight)

loss = nn.CrossEntropyLoss()
learning_rate = 0.0035

# Freeze every parameter except the new classification head.
for name, param in resnet.named_parameters():
    if name not in ['fc.weight', 'fc.bias']:
        param.requires_grad = False

optimizer = torch.optim.Adam(params=resnet.parameters(), lr=learning_rate)

train_p2(num_epochs=10, net=resnet, loss=loss,
         train_iter=train_dataloader, test_iter=test_dataloader,
         device=device, optimizer=optimizer)
037_Finetune.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## box plot
# * box-and-whisker plot, box-and-whisker diagram
# * Not drawn from the raw data directly; instead it is built from a
#   five-number summary computed from the data.
# * The five-number summary is the five descriptive statistics below:
#
# 1. minimum
# 2. first quartile (Q1)
# 3. second quartile (Q2) == median
# 4. third quartile (Q3)
# 5. maximum

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

# %matplotlib inline
mpl.rcParams['axes.unicode_minus'] = False
# -

df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
df

df.describe()  # the box plot is drawn from these summary statistics

df.plot.box()

# +
# Per-element colors for the box plot artists.
color = {"boxes": "DarkGreen",
         "whiskers": "DarkOrange",
         "medians": "DarkBlue",
         "caps": "Gray"}

df.plot.box(color=color, sym='r+')  # outliers drawn as red '+'
# -

df.plot.box(vert=False, positions=[1, 4, 5, 6, 8])  # horizontal boxes

df = pd.DataFrame(np.random.rand(10, 5))
df.head(1)

bp = df.boxplot()

df = pd.DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df.head(2)

df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
df

bp = df.boxplot(by='X')  # one panel per column, grouped by 'X'

np.random.seed(1234)
df_box = pd.DataFrame(np.random.randn(50, 2))
df_box

df_box['g'] = np.random.choice(['A', 'B'], size=50)
df_box.loc[df_box['g'] == 'B', 1] += 3  # shift group B in column 1

bp = df_box.boxplot(by='g')

bp = df_box.groupby('g').boxplot()  # one figure per group instead
chapter_1/.ipynb_checkpoints/Pandas box plot-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# Bidirectional GRU over concatenated FastText + GloVe-Twitter embeddings for
# the Jigsaw toxic-comment challenge (6 binary labels), with a Norvig-style
# spell checker to recover out-of-vocabulary words.
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
import numpy as np
np.random.seed(42)

import pandas as pd
import string
import re
import gensim
from collections import Counter
import pickle
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Conv1D, Embedding, SpatialDropout1D, concatenate
from keras.layers import GRU, LSTM, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers import CuDNNLSTM, CuDNNGRU
from keras.preprocessing import text, sequence
from keras.callbacks import Callback
from keras import optimizers
from keras.layers import Lambda

import warnings
warnings.filterwarnings('ignore')

from nltk.corpus import stopwords

import os
os.environ['OMP_NUM_THREADS'] = '4'
import gc
from keras import backend as K
from sklearn.model_selection import KFold
from unidecode import unidecode
import time

eng_stopwords = set(stopwords.words("english"))

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# 1. preprocessing
train = pd.read_csv("../input/processing-helps-boosting-about-0-0005-on-lb/train_processed.csv")
test = pd.read_csv("../input/processing-helps-boosting-about-0-0005-on-lb/test_processed.csv")

# + _cell_guid="7b04e352-b723-4287-8b17-d3f9be960f5c" _uuid="b4475b7eaffa730fc571a442caeeb565245e210d"
# 2. remove non-ascii
special_character_removal = re.compile(r'[^A-Za-z\.\-\?\!\,\#\@\% ]', re.IGNORECASE)


def clean_text(x):
    """Transliterate to ASCII and drop every character outside the whitelist."""
    x_ascii = unidecode(x)
    x_clean = special_character_removal.sub('', x_ascii)
    return x_clean


train['clean_text'] = train['comment_text'].apply(lambda x: clean_text(str(x)))
test['clean_text'] = test['comment_text'].apply(lambda x: clean_text(str(x)))

X_train = train['clean_text'].fillna("something").values
y_train = train[["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]].values
X_test = test['clean_text'].fillna("something").values


# + _cell_guid="e4afa12c-f7cc-4c71-877b-ed7957878cdb" _uuid="dd51878df2b540f4ca21269d5df60332c56e2c27"
def add_features(df):
    """Append hand-crafted text statistics used as dense side features.

    Adds (in place, and returns df): total_length, capitals, caps_vs_length,
    num_words, num_unique_words and words_vs_unique.
    """
    df['comment_text'] = df['comment_text'].apply(lambda x: str(x))
    df['total_length'] = df['comment_text'].apply(len)
    df['capitals'] = df['comment_text'].apply(
        lambda comment: sum(1 for c in comment if c.isupper()))
    df['caps_vs_length'] = df.apply(
        lambda row: float(row['capitals']) / float(row['total_length']), axis=1)
    df['num_words'] = df.comment_text.str.count('\S+')
    df['num_unique_words'] = df['comment_text'].apply(
        lambda comment: len(set(w for w in comment.split())))
    # May produce NaN for empty comments (num_words == 0); filled with 0 below.
    df['words_vs_unique'] = df['num_unique_words'] / df['num_words']
    return df


train = add_features(train)
test = add_features(test)

features = train[['caps_vs_length', 'words_vs_unique']].fillna(0)
test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0)

# Standardize the two dense features jointly over train + test.
ss = StandardScaler()
ss.fit(np.vstack((features, test_features)))
features = ss.transform(features)
test_features = ss.transform(test_features)

# + _cell_guid="602faa61-83bd-463f-a221-692b760809c7" _uuid="9bb33ed2eaf58bdb48ad1e85d5467c9bd8887bcb"
# For best score (Public: 9869, Private: 9865), change to max_features = 283759, maxlen = 900
max_features = 10000
maxlen = 50

tokenizer = text.Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(X_train) + list(X_test))
X_train_sequence = tokenizer.texts_to_sequences(X_train)
X_test_sequence = tokenizer.texts_to_sequences(X_test)
x_train = sequence.pad_sequences(X_train_sequence, maxlen=maxlen)
x_test = sequence.pad_sequences(X_test_sequence, maxlen=maxlen)
print(len(tokenizer.word_index))

# + _cell_guid="8d3b0728-bc6c-46e9-96ea-bf216d4c1891" _uuid="8fe8a034e5f0da11c1f3af5afe8b080e624e9c95"
# Load the FastText Web Crawl vectors
EMBEDDING_FILE_FASTTEXT = "../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec"
EMBEDDING_FILE_TWITTER = "../input/glove-twitter-27b-200d-txt/glove.twitter.27B.200d.txt"


def get_coefs(word, *arr):
    """Split one pretrained-vector line into (word, float32 vector)."""
    return word, np.asarray(arr, dtype='float32')


embeddings_index_ft = dict(
    get_coefs(*o.rstrip().rsplit(' '))
    for o in open(EMBEDDING_FILE_FASTTEXT, encoding='utf-8'))
embeddings_index_tw = dict(
    get_coefs(*o.strip().split())
    for o in open(EMBEDDING_FILE_TWITTER, encoding='utf-8'))

# + _cell_guid="ec916961-60af-4e30-b3a8-5f3990b76681" _uuid="73db6660e9f3380335b63d8432a8574e947a5b4f"
spell_model = gensim.models.KeyedVectors.load_word2vec_format(EMBEDDING_FILE_FASTTEXT)

# + _cell_guid="0a14b1e3-eff5-4079-b5cc-d485a9fe0f24" _uuid="f27accdd49c359f12521bbac2c6e5500f371d3f0"
# This code is based on: Spellchecker using Word2vec by CPMP
# https://www.kaggle.com/cpmpml/spell-checker-using-word2vec
words = spell_model.index2word
w_rank = {}
for i, word in enumerate(words):
    w_rank[word] = i
WORDS = w_rank


# Use fast text as vocabulary
def words(text):
    """Tokenize into lowercase word characters (Norvig helper, kept for reference)."""
    return re.findall(r'\w+', text.lower())


def P(word):
    "Probability of `word`."
    # use inverse of rank as proxy (negated rank: more frequent => larger value)
    # returns 0 if the word isn't in the dictionary
    return - WORDS.get(word, 0)


def correction(word):
    "Most probable spelling correction for word."
    return max(candidates(word), key=P)


def candidates(word):
    "Generate possible spelling corrections for word."
    return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])


def known(words):
    "The subset of `words` that appear in the dictionary of WORDS."
    return set(w for w in words if w in WORDS)


def edits1(word):
    "All edits that are one edit away from `word`."
    letters = 'abcdefghijklmnopqrstuvwxyz'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)


def edits2(word):
    "All edits that are two edits away from `word`."
    return (e2 for e1 in edits1(word) for e2 in edits1(e1))


def singlify(word):
    """Collapse runs of repeated letters, e.g. 'coool' -> 'col'."""
    return "".join([letter for i, letter in enumerate(word)
                    if i == 0 or letter != word[i - 1]])


# + _cell_guid="c5cb1175-0d64-40aa-ba8e-844478cc0f54" _uuid="38a44f6c963283dac6d1cefc46d186b4c301b568"
word_index = tokenizer.word_index
# BUG FIX (off-by-one): Keras tokenizer indices start at 1, so the matrix needs
# len(word_index) + 1 rows when the vocabulary is smaller than max_features.
nb_words = min(max_features, len(word_index) + 1)
# BUG FIX: allocate max_features rows so the shape always matches the
# Embedding layer below (input_dim=max_features), even for small vocabularies.
# Unused rows simply stay zero.
embedding_matrix = np.zeros((max_features, 501))

# Fallback vector for words we cannot resolve at all.
something_tw = embeddings_index_tw.get("something")
something_ft = embeddings_index_ft.get("something")
something = np.zeros((501,))
something[:300, ] = something_ft
something[300:500, ] = something_tw
something[500, ] = 0


def all_caps(word):
    """True for multi-letter words written entirely in capitals."""
    return len(word) > 1 and word.isupper()


def embed_word(embedding_matrix, i, word):
    """Fill row `i`: FastText in [0:300), GloVe-Twitter in [300:500), all-caps flag at [500]."""
    embedding_vector_ft = embeddings_index_ft.get(word)
    if embedding_vector_ft is not None:
        if all_caps(word):
            last_value = np.array([1])
        else:
            last_value = np.array([0])
        embedding_matrix[i, :300] = embedding_vector_ft
        embedding_matrix[i, 500] = last_value
        embedding_vector_tw = embeddings_index_tw.get(word)
        if embedding_vector_tw is not None:
            embedding_matrix[i, 300:500] = embedding_vector_tw


# (1) Assuming we have both fasttext 300d and glove 300d pre-trained word vector
# (2) Use fasttext first and if the word is not found in fasttext then use its glove pre-trained vector
# (3) Finally, extend all the 300d word vector to 301d, the extra dimension is 1 if the word
#     is written in all capital letters, else 0.
# Fasttext vector is used by itself if there is no glove vector but not the other way around.
for word, i in word_index.items():
    if i >= max_features:
        continue
    if embeddings_index_ft.get(word) is not None:
        embed_word(embedding_matrix, i, word)
    else:
        # change to > 20 for better score.
        # NOTE(review): as written (> 0) every OOV word takes the fallback
        # vector and the spell-correction branch below is unreachable;
        # the author's comment suggests > 20 is the intended production value.
        if len(word) > 0:
            embedding_matrix[i] = something
        else:
            word2 = correction(word)
            if embeddings_index_ft.get(word2) is not None:
                embed_word(embedding_matrix, i, word2)
            else:
                word2 = correction(singlify(word))
                if embeddings_index_ft.get(word2) is not None:
                    embed_word(embedding_matrix, i, word2)
                else:
                    embedding_matrix[i] = something


# + _cell_guid="77676ed2-6a21-4c89-8a6c-90c502079f2d" _uuid="30cf8af108c8e43539847cddcd728ab7a8214f62"
class RocAucEvaluation(Callback):
    """Keras callback: validation ROC-AUC per epoch, checkpoint best weights,
    early-stop after 4 consecutive non-improving epochs."""

    def __init__(self, validation_data=(), interval=1):
        # BUG FIX: original called super(Callback, self).__init__(), which
        # skips Callback's own initializer entirely.
        super(RocAucEvaluation, self).__init__()
        self.interval = interval
        self.X_val, self.y_val = validation_data
        self.max_score = 0
        self.not_better_count = 0

    def on_epoch_end(self, epoch, logs=None):
        if epoch % self.interval == 0:
            y_pred = self.model.predict(self.X_val, verbose=1)
            score = roc_auc_score(self.y_val, y_pred)
            print("\n ROC-AUC - epoch: %d - score: %.6f \n" % (epoch + 1, score))
            if (score > self.max_score):
                print("*** New High Score (previous: %.6f) \n" % self.max_score)
                # BUG FIX: save via self.model (the model being trained),
                # not the module-level global `model`.
                self.model.save_weights("best_weights.h5")
                self.max_score = score
                self.not_better_count = 0
            else:
                self.not_better_count += 1
                if self.not_better_count > 3:
                    print("Epoch %05d: early stopping, high score = %.6f"
                          % (epoch, self.max_score))
                    self.model.stop_training = True


# + _cell_guid="cbc38b37-1c92-4e69-b444-4e1c6ed90a09" _uuid="54e58c905ec4916f7f9a492d823f06f4d4b76229"
def get_model(features, clipvalue=1., num_filters=40, dropout=0.5, embed_size=501):
    """Build the Bi-GRU model over token sequences plus two dense side features.

    Returns a compiled Keras Model with 6 sigmoid outputs (one per label).
    """
    features_input = Input(shape=(features.shape[1],))
    inp = Input(shape=(maxlen, ))

    # Layer 1: concatenated fasttext and glove twitter embeddings.
    x = Embedding(max_features, embed_size, weights=[embedding_matrix],
                  trainable=False)(inp)
    # Uncomment for best result
    # Layer 2: SpatialDropout1D(0.5)
    #x = SpatialDropout1D(dropout)(x)

    # Uncomment for best result
    # Layer 3: Bidirectional CuDNNLSTM
    #x = Bidirectional(LSTM(num_filters, return_sequences=True))(x)

    # Layer 4: Bidirectional CuDNNGRU
    x, x_h, x_c = Bidirectional(GRU(num_filters, return_sequences=True,
                                    return_state=True))(x)

    # Layer 5: A concatenation of the last state, maximum pool, average pool and
    # two features: "Unique words rate" and "Rate of all-caps words"
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    x = concatenate([avg_pool, x_h, max_pool, features_input])

    # Layer 6: output dense layer.
    outp = Dense(6, activation="sigmoid")(x)

    model = Model(inputs=[inp, features_input], outputs=outp)
    # FIX: use the class name `Adam` — the lowercase `adam` alias was removed
    # from keras.optimizers in newer Keras releases.
    adam = optimizers.Adam(clipvalue=clipvalue)
    model.compile(loss='binary_crossentropy', optimizer=adam,
                  metrics=['accuracy'])
    return model


# + _cell_guid="663b6d15-7071-483c-a58c-d13c1bfe8fb8" _uuid="518f6fd237f88d8c15368f2b70f17e1cd2f87445"
model = get_model(features)

batch_size = 32
# Used epochs=100 with early exiting for best score.
epochs = 1
gc.collect()
K.clear_session()

# Change to 10
num_folds = 2  # number of folds

predict = np.zeros((test.shape[0], 6))

# Uncomment for out-of-fold predictions
#scores = []
#oof_predict = np.zeros((train.shape[0],6))

kf = KFold(n_splits=num_folds, shuffle=True, random_state=239)

for train_index, test_index in kf.split(x_train):
    kfold_y_train, kfold_y_test = y_train[train_index], y_train[test_index]
    kfold_X_train = x_train[train_index]
    kfold_X_features = features[train_index]
    kfold_X_valid = x_train[test_index]
    kfold_X_valid_features = features[test_index]

    gc.collect()
    K.clear_session()

    # Fresh model per fold; best epoch's weights are checkpointed by the callback.
    model = get_model(features)

    ra_val = RocAucEvaluation(
        validation_data=([kfold_X_valid, kfold_X_valid_features], kfold_y_test),
        interval=1)

    model.fit([kfold_X_train, kfold_X_features], kfold_y_train,
              batch_size=batch_size, epochs=epochs, verbose=1,
              callbacks=[ra_val])
    gc.collect()

    #model.load_weights(bst_model_path)
    model.load_weights("best_weights.h5")

    # Average fold predictions on the test set.
    predict += model.predict([x_test, test_features],
                             batch_size=batch_size, verbose=1) / num_folds

    #gc.collect()
    # uncomment for out of fold predictions
    #oof_predict[test_index] = model.predict([kfold_X_valid, kfold_X_valid_features],batch_size=batch_size, verbose=1)
    #cv_score = roc_auc_score(kfold_y_test, oof_predict[test_index])
    #scores.append(cv_score)
    #print('score: ',cv_score)

print("Done")
#print('Total CV score is {}'.format(np.mean(scores)))

sample_submission = pd.read_csv("../input/jigsaw-toxic-comment-classification-challenge/sample_submission.csv")
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
sample_submission[class_names] = predict
sample_submission.to_csv('model_9872_baseline_submission.csv', index=False)

# uncomment for out of fold predictions
#oof = pd.DataFrame.from_dict({'id': train['id']})
#for c in class_names:
#    oof[c] = np.zeros(len(train))
#
#oof[class_names] = oof_predict
#for c in class_names:
#    oof['prediction_' + c] = oof[c]
#oof.to_csv('oof-model_9872_baseline_submission.csv', index=False)

# + _cell_guid="52b9b444-4d2a-4507-9965-224ae88d52fb" _uuid="f269d6206590f6f40b6a6524d411b5b579546f97"
kaggle-quora-insincere-question/toxic-comments-code-for-alexander-s-9872-9d324f.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Collocate Saildrone USV salinity with SMAP L2 sea-surface-salinity orbits
# from two producers (JPL and RSS), matching each USV observation to the
# nearest satellite cell in space and time.
import sys
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import xarray as xr
import cartopy.crs as ccrs
from pyresample.geometry import AreaDefinition
from pyresample.geometry import GridDefinition
from pyresample import image, geometry, load_area, save_quicklook, SwathDefinition, area_def2basemap
from pyresample.kd_tree import resample_nearest
from scipy import spatial

sys.path.append('../saildrone/subroutines/')
from read_routines import read_all_usv, read_one_usv, add_coll_vars, get_filelist_l2p, get_orbital_data_l2p
from read_routines import add_coll_vars_ds_jplrss

import warnings
warnings.simplefilter('ignore')  # filter some warning messages
from glob import glob

# +
dir_data = 'C:/Users/gentemann/Google Drive/public/ALL_Saildrone_Data/'  # 'f:/data/cruise_data/saildrone/saildrone_data/'
dir_data_pattern = 'C:/Users/gentemann/Google Drive/public/ALL_Saildrone_Data/*west*.nc'
#dir_data_pattern = 'f:/data/cruise_data/saildrone/saildrone_data/*.nc'

# get list of all filenames in directory
files = glob(dir_data_pattern)
print('number of file:', len(files))
#for ifile,file in enumerate(files):
#    print(ifile,file)

ds_usv = xr.open_dataset(files[4]).rename({'latitude': 'lat', 'longitude': 'lon'})

# +
# Exploratory cell: open one USV cruise and a matching pair of JPL/RSS orbits.
ds_usv, name_usv = read_one_usv(files[0])
usv_day = ds_usv.time[0]
# NOTE(review): here JPL uses source index 0 and RSS index 1, but the main
# collocation loop below uses the opposite mapping — verify against
# get_filelist_l2p before trusting either cell.
filelist_jpl = get_filelist_l2p(0, usv_day)
filelist_rss = get_filelist_l2p(1, usv_day)

file = filelist_jpl[0]
ii = file.find('_r')
iorb = int(file[ii + 2:ii + 7])  # orbit/rev number parsed from the filename
file_rss = [tem for tem in filelist_rss if str(iorb) in tem]

ds = xr.open_dataset(filelist_jpl[0])
ds['cellon'] = (ds['cellon'] + 180) % 360 - 180  # wrap 0..360 -> -180..180
ds.close()
ds2 = xr.open_dataset(file_rss[0])
ds2.close()
# -

print()

x = ds.cellon.data
y = ds.cellat.data
z = ds.sss_smap.data
ax = plt.axes(projection=ccrs.PlateCarree())
cs1 = ax.scatter(x, y, s=1.0, c=z, edgecolor='none', cmap='jet')
#minlon,maxlon,minlat,maxlat = ds_usv.lon.min().data,ds_usv.lon.max().data,ds_usv.lat.min().data,ds_usv.lat.max().data
#ax.plot([minlon,maxlon,maxlon,minlon,minlon],[minlat,minlat,maxlat,maxlat,minlat])
#ax.plot(ds_usv.lon,ds_usv.lat,'ro')
ax.coastlines()
#ax.set_xlim(-130,-100)
#ax.set_ylim(20,40)

x = ds2.lon.data
y = ds2.lat.data
z = ds2.smap_sss.data
ax = plt.axes(projection=ccrs.PlateCarree())
cs1 = ax.scatter(x, y, s=1.0, c=z, edgecolor='none', cmap='jet')
#minlon,maxlon,minlat,maxlat = ds_usv.lon.min().data,ds_usv.lon.max().data,ds_usv.lat.min().data,ds_usv.lat.max().data
#ax.plot([minlon,maxlon,maxlon,minlon,minlon],[minlat,minlat,maxlat,maxlat,minlat])
#ax.plot(ds_usv.lon,ds_usv.lat,'ro')
ax.coastlines()
#ax.set_xlim(-130,-100)
#ax.set_ylim(20,40)

# # Read in All Saildrone cruises downloaded from https://data.saildrone.com/data/sets
# - 2017 onwards, note that earlier data is going to lack instruments and be poorer data quality in general
# - For this code I want to develop a routine that reads in all the different datasets and creates a standardized set
# - It may work best to first read each of the files individually into a dictionary
# - then go through each dataset finding all variable names
# - I decided to put all SST into TEMP_CTD_MEAN and same for Salinity so there is a single variable name
# - this still preserves all the dataset information

# +
dir_data = 'C:/Users/gentemann/Google Drive/public/ALL_Saildrone_Data/'
dir_data_pattern = 'C:/Users/gentemann/Google Drive/public/ALL_Saildrone_Data/*west*.nc'
#dir_data_pattern = 'f:/data/cruise_data/saildrone/saildrone_data/*.nc'

# get list of all filenames in directory
files = [x for x in glob(dir_data_pattern)]
print('number of file:', len(files))
for ifile, file in enumerate(files):
    print(ifile, file)
# -

# ## An example showing how the using matplotlib maps orbital data quickly and easily

# +
#ds_tem = xr.open_dataset('C:/Users/gentemann/Google Drive/public/2019_saildrone/saildrone_arctic_sd1037_2019.nc')
#ds_tem2 = xr.open_dataset('f:/data/cruise_data/saildrone/2019_arctic/post_mission/saildrone-gen_5-arctic_misst_2019-sd1037-20190514T230000-20191011T183000-1_minutes-v1.1575487464625.nc')
#ds_tem = ds_tem.isel(row=slice(60,-1))
#ds_tem2 = ds_tem2.isel(obs=slice(60*24,-1))
#print(ds_tem.time[0].data,ds_tem.time[-1].data)
#print(ds_tem2.time[0,0].data,ds_tem2.time[0,-1].data)
# -

adir = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/'
ds_usv = xr.open_dataset(files[4]).rename({'latitude': 'lat', 'longitude': 'lon'})

#file = 'F:/data/sat_data/smap/SSS/L2/JPL/V4.3/2016/002/SMAP_L2B_SSS_04909_20160102T044855_R16010_V4.3.h5'
file = 'F:/data/sat_data/smap/SSS/L2/RSS/V3/40km/2018/115/RSS_SMAP_SSS_L2C_40km_r17250_20180425T004136_2018115_FNL_V03.0.nc'
ds = xr.open_dataset(file)
ds.close()

x = ds.cellon.data
y = ds.cellat.data
z = ds.sss_smap.data
ax = plt.axes(projection=ccrs.PlateCarree())
cs1 = ax.scatter(x, y, s=1.0, c=z, edgecolor='none', cmap='jet')
minlon, maxlon, minlat, maxlat = ds_usv.lon.min().data, ds_usv.lon.max().data, ds_usv.lat.min().data, ds_usv.lat.max().data
ax.plot([minlon, maxlon, maxlon, minlon, minlon], [minlat, minlat, maxlat, maxlat, minlat])
#ax.plot(ds.cellon[jj,ii],ds.cellat[jj,ii,0],'b*')
#ax.plot(ds_usv.lon[1000],ds_usv.lat[1000],'ro')
ax.plot(ds_usv.lon, ds_usv.lat, 'ro')
ax.coastlines()
ax.set_xlim(-130, -110)
ax.set_ylim(25, 40)

# # test read in a file to look at dimension names
file = 'F:/data/sat_data/smap/SSS/L2/JPL/V4.3/2016/002/SMAP_L2B_SSS_04909_20160102T044855_R16010_V4.3.h5'
#file = 'F:/data/sat_data/smap/SSS/L2/RSS/V4/SCI/2018/115/RSS_SMAP_SSS_L2C_r17250_20180425T004136_2018115_FNL_V04.0.nc'
ds = xr.open_dataset(file, decode_cf=False)
ds

# # What lon range for satellite & insitu? are we going 0-360 or -180 to 180?
print(ds.cellon.min().data, ds.cellon.max().data)
print(ds_usv.lon.min().data, ds_usv.lon.max().data)

# ## First let's figure out what orbital files actually have data in our area of interest. To do this, use the pyresample software
#
# - read in the in situ data
# - calculate the in situ min/max dates to know what files to check
#
# Now we have our time of interest
#
# - loop through the satellite data
# - calculate the in situ min/max lat/lon on the same day to define a small box of interest
# - use pyresample to map the data onto a predefined 0.1 deg resolution spatial grid
# - subset the gridded map to the area of interest
# - see if there is any valid data in that area
# - if there is any valid data, go to next step
#
# ## Use the fast search kdtree which is part of pyresample software, but I think maybe comes originally from sci-kit-learn.
#
# - read in the in situ data
# - read in a single orbit of satellite data
# - kdtree can't handle it when lat/lon are set to nan. I frankly have no idea why there is orbital data for both the JPL and RSS products that have nan for the geolocation. That isn't normal. But, okay, let's deal with it.
# - stack the dataset scanline and cell positions into a new variable 'z'
# - drop all variables from the dataset when the longitude is nan
# - set up the tree
# - loop through the orbital data
# - only save a match if it is less than 0.25 deg distance AND time is less than any previous match
# - save the satellite indices & some basic data onto the USV grid

# +
def get_time_start_end(isat, ds):
    """Return (start, end) of the collocation time window for one orbit.

    isat=0: use the file's time_coverage attributes, padded +/- 24 h (SSS).
    isat=1: use the first/last time values in the file, padded +/- 12 h.
    """
    if isat == 0:
        orbit_time = np.datetime64(ds.attrs['time_coverage_start']) - np.timedelta64(24, 'h')  # changed to 24 hr for sss
        orbit_time2 = np.datetime64(ds.attrs['time_coverage_end']) + np.timedelta64(24, 'h')
    if isat == 1:
        orbit_time = ds.time[0].data - np.timedelta64(12, 'h')
        orbit_time2 = ds.time[-1].data + np.timedelta64(12, 'h')
    return orbit_time, orbit_time2


area_def = load_area('areas.cfg', 'pc_world')
rlon = np.arange(-180, 180, .1)
rlat = np.arange(90, -90, -.1)

# +
iname = 1  # set number of cruise to process

ds_usv, name_usv = read_one_usv(files[iname])
ds_usv = add_coll_vars_ds_jplrss(ds_usv)
fileout_rss = 'F:/data/cruise_data/saildrone/sss/2sat_sss_collocations_orbital/' + name_usv + 'jplv04.3_rssv04.0_orbital.nc'

# search usv data
minday, maxday = ds_usv.time[0], ds_usv.time[-1]
usv_day = minday
print(iname, name_usv)
print(minday.data, maxday.data)

while usv_day <= maxday:
    print(usv_day.data, maxday.data)
    ds_day = ds_usv.sel(time=slice(usv_day - np.timedelta64(1, 'D'),
                                   usv_day + np.timedelta64(1, 'D')))
    ilen = ds_day.time.size
    if ilen < 1:  # don't run on days without any data
        usv_day += np.timedelta64(1, 'D')
        continue
    minlon, maxlon, minlat, maxlat = ds_day.lon.min().data, ds_day.lon.max().data, ds_day.lat.min().data, ds_day.lat.max().data

    filelist_jpl = get_filelist_l2p(1, usv_day)
    filelist_rss = get_filelist_l2p(0, usv_day)
    x, y, z = [], [], []

    for ifile, file in enumerate(filelist_jpl):
        ds = xr.open_dataset(file)
        ds.close()
        ds = ds.rename({'row_time': 'time', 'ice_concentration': 'fice'})

        # Find the RSS file for the SAME orbit as the current JPL file.
        # BUG FIX: the original re-assigned file = filelist_jpl[0] here,
        # so every iteration paired with the FIRST file's orbit number.
        ii = file.find('SSS_')
        iorb = int(file[ii + 4:ii + 9])
        file_rss = [tem for tem in filelist_rss if str(iorb) in tem]
        ds2 = xr.open_dataset(file_rss[0])
        ds2.close()
        ds2 = ds2.rename({'iqc_flag': 'quality_flag', 'cellon': 'lon', 'cellat': 'lat',
                          'sss_smap': 'smap_sss', 'sss_smap_40km': 'smap_sss_40km',
                          'ydim_grid': 'phony_dim_0', 'xdim_grid': 'phony_dim_1'})
        ds2['lon'] = np.mod(ds2.lon + 180, 360) - 180
        ds_rss = ds2

        # First do a quick check using resample to project the orbit onto a grid
        # and quickly see if there is any data in the cruise area on that day.
        # If there is, then continue to collocation.
        x = ds['lon'].fillna(-89).data
        y = ds['lat'].fillna(-89).data
        z = ds['smap_sss'].data
        lons, lats, data = x, y, z
        swath_def = SwathDefinition(lons, lats)

        # Resample swath to a fixed grid covering just the cruise's daily box.
        # https://stackoverflow.com/questions/58065055/floor-and-ceil-with-number-of-decimals
        grid_def_lon_min, grid_def_lon_max = np.round(ds_day.lon.min().data - 0.5 * 10**(-2), 2), np.round(ds_day.lon.max().data + 0.5 * 10**(-2), 2)
        grid_def_lat_min, grid_def_lat_max = np.round(ds_day.lat.min().data - 0.5 * 10**(-2), 2), np.round(ds_day.lat.max().data + 0.5 * 10**(-2), 2)
        grid_def_lons, grid_def_lats = np.arange(grid_def_lon_min, grid_def_lon_max + 0.1, 0.1), np.arange(grid_def_lat_max, grid_def_lat_min - 0.1, -0.1)
        grid_mesh_lons, grid_mesh_lats = np.meshgrid(grid_def_lons, grid_def_lats)

        # https://pyresample.readthedocs.io/en/latest/geo_def.html#griddefinition
        grid_def = GridDefinition(lons=grid_mesh_lons, lats=grid_mesh_lats)
        result1 = resample_nearest(swath_def, data, grid_def,
                                   radius_of_influence=20000, fill_value=None)
        da = xr.DataArray(result1, name='sss',
                          coords={'lat': grid_def_lats, 'lon': grid_def_lons},
                          dims=('lat', 'lon'))
        numdata = np.isfinite(da).sum()
        if numdata < 1:
            continue

        # Stack the JPL dataset, then drop cells with nan geolocation
        # (the KD-tree cannot handle nan coordinates).
        ds2 = ds.stack(z=('phony_dim_0', 'phony_dim_1')).reset_index('z')
        ds_drop = ds2.where(np.isfinite(ds2.lon), drop=True)
        lats = ds_drop.lat.data
        lons = ds_drop.lon.data
        inputdata = list(zip(lons.ravel(), lats.ravel()))
        tree = spatial.KDTree(inputdata)

        orbit_time, orbit_time2 = get_time_start_end(1, ds)
        cond = (ds_usv.time.data > orbit_time) & (ds_usv.time.data < orbit_time2)
        item = np.argwhere(cond)
        # BUG FIX: was `item.sum() < 1` — summing the matched indices is the
        # wrong emptiness test (a single match at index 0 sums to 0).
        if item.size < 1:  # no data within the orbit's time window
            continue

        # BUG FIX (off-by-one): include the last matching index (+ 1).
        for iusv_index in range(int(item[0]), int(item[-1]) + 1):
            pts = np.array([ds_usv.lon[iusv_index], ds_usv.lat[iusv_index]])
            tree.query(pts, k=1)
            i = tree.query(pts)[1]
            rdist = tree.query(pts)[0]
            if rdist > .25:  # don't use matchups more than 25 km away
                continue

            # Use .where to recover the original 2-D indices of the matched
            # point — match on sss and lat (any pair of variables would do).
            result = np.where((ds.smap_sss == ds_drop.smap_sss[i].data) &
                              (ds.lat == ds_drop.lat[i].data))
            listOfCoordinates = list(zip(result[0], result[1]))
            if len(listOfCoordinates) == 0:
                continue
            ii, jj = listOfCoordinates[0][0], listOfCoordinates[0][1]

            deltaTa = ((ds_usv.time[iusv_index] - ds.time[ii]).data) / np.timedelta64(1, 'm')
            if np.abs(deltaTa) < np.abs(ds_usv.deltaT[iusv_index].data):
                # Closer in time than any previous match: store the JPL values.
                ds_usv.deltaT[iusv_index] = deltaTa
                ds_usv.smap_SSS_jpl[iusv_index] = ds.smap_sss[ii, jj]
                ds_usv.smap_rev_number_jpl[iusv_index] = int(ds.attrs['REVNO'])
                ds_usv.smap_iqc_flag_jpl[iusv_index] = ds.quality_flag[ii, jj]
                ds_usv.smap_name_jpl[iusv_index] = str(file)
                ds_usv.smap_fice_jpl[iusv_index] = ds.fice[ii, jj]
                ds_usv.smap_dist_jpl[iusv_index] = rdist
                ds_usv.smap_ydim_jpl[iusv_index] = ii
                ds_usv.smap_xdim_jpl[iusv_index] = jj

                # Match with JPL found; now pull the nearest RSS cell.
                mlat = ds.lat[ii, jj]
                mlon = ds.lon[ii, jj]
                dist = ((ds_rss.lat.isel(look=0) - mlat)**2 +
                        (ds_rss.lon.isel(look=0) - mlon)**2)**.5
                dd = ds_rss.isel(dist.argmin(dim=["phony_dim_1", "phony_dim_0"]))
                ds_usv.smap_SSS_rss_40km[iusv_index] = dd.smap_sss_40km.mean('look')
                ds_usv.smap_SSS_rss[iusv_index] = dd.smap_sss.mean('look')
                ds_usv.smap_iqc_flag_rss[iusv_index] = dd.quality_flag.mean('look')
                ds_usv.smap_fice_rss[iusv_index] = dd.fice
                ds_usv.smap_fland_rss[iusv_index] = dd.fland.mean('look')

    usv_day += np.timedelta64(1, 'D')

# BUG FIX: was ds_usv.to_netcdf(fileout) — `fileout` is never defined; the
# output path created above is fileout_rss (NameError at runtime).
ds_usv.to_netcdf(fileout_rss)
# -

dist = ((ds2.lat.isel(look=0) - mlat)**2 + (ds2.lon.isel(look=0) - mlon)**2)**.5
dd = ds2.isel(dist.argmin(dim=["phony_dim_1", "phony_dim_0"]))

dist = ((ds2.lat.isel(look=0) - mlat)**2 + (ds2.lon.isel(look=0) - mlon)**2)**.5

# rename files from filesave4 to orbital since later I did 8day collocations as well,
import os
dir_data_pattern = 'F:/data/cruise_data/saildrone/sss/2sat_sss_collocations_orbital_norepeat/*.nc'
files = glob(dir_data_pattern)
print('number of file:', len(files))
for file in files:
    file2 = file.replace('filesave4', 'orbital')
    print(file, file2)
    os.rename(file, file2)
.ipynb_checkpoints/Collocate_two_orbital_satellites_with_sss_all_saildrone-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory plots over the car dataset in ``carInfo.csv``
# (manufacturing year, engine power, and body-style distribution).

import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset and peek at the first rows.
carDataFrame = pd.read_csv("carInfo.csv")
print(carDataFrame.head())

# Histogram of manufacturing years.
carDataFrame = carDataFrame.sort_values(by=["Started"])
plt.hist(carDataFrame["Started"], 10, facecolor='blue', alpha=0.5)
plt.xlabel('Year')
plt.ylabel('Cars manufactured')
plt.show()

# ### This shows that the database consists of cars which were mostly introduced after year 2000

# Histogram of engine power.
carDataFrame = carDataFrame.sort_values(by=["Power"])
plt.hist(carDataFrame["Power"], 5, facecolor='blue', alpha=0.5)
plt.xlabel('Power (hp)')
plt.ylabel('Number of cars')
plt.show()

# ### This shows that the database consists of cars which mostly have lesser Power (hp)

# +
from collections import Counter

# Share of each body style.
carTypes = Counter(carDataFrame["Car type"])
labels = carTypes.keys()
sizes = carTypes.values()
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
# BUG FIX: ``explode`` was hard-coded to a length-4 tuple, so plt.pie
# raises ValueError whenever the CSV contains a different number of car
# types. Derive one offset per actual label instead.
explode = tuple(0.1 for _ in labels)

# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True)
plt.title("Car Types")
plt.axis('equal')
plt.show()
# -

# ### The database consists of a majority of coupes and convertibles! (Like we even care about the other types)

# +
year = carDataFrame["Started"]
power = carDataFrame["Power"]

plt.bar(year, power, width=2, color='#8080ff')
plt.xlabel('Year manufactured')
plt.ylabel('Power (hp)')
plt.show()
# -

# ### This shows that more powerful cars started coming up in the last 5-7 years
src/carinfo_plots.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] tags=["remove_cell"]
# # 1量子ビットゲートの合成の基礎
# -

from qiskit import *
from qiskit.tools.visualization import plot_histogram
# %config InlineBackend.figure_format = 'svg' # 画像をいい感じに表示する
import numpy as np

# + [markdown] colab_type="text" id="pmm5uV8cQQN6"
# ## 1
#
# アダマールゲートが次の2つの形で書けることを示してください。
#
#
# $$H = \frac{X+Z}{\sqrt{2}} \equiv \exp\left(i \frac{\pi}{2} \, \frac{X+Z}{\sqrt{2}}\right).$$
#
#
# ここで、$\equiv$はグローバル位相の違いを除いて等しいということを示すために使用されています。
# したがって、得られたゲートは物理的に等価です。
#
# ヒント: 固有値が $\pm 1$ の任意の行列 $M$ が、$M^2=I$ を満たしていることと、 $e^{i\frac{\pi}{2} M} \equiv M$ が成り立つことを証明するのが簡単かもしれません。

# + [markdown] colab_type="text" id="CJyxxSDUQQN9"
# ## 2
#
# アダマールゲートは次のように `rx` と `rz` の操作から構成することが出来ます。
#
#
# $$ R_x(\theta) = e^{i\frac{\theta}{2} X}, ~~~ R_z(\theta) = e^{i\frac{\theta}{2} Z},\\ H \equiv \lim_{n\rightarrow\infty} \left( ~R_x\left(\frac{\theta}{n}\right) ~~R_z \left(\frac{\theta}{n}\right) ~\right)^n.$$
#
#
#
# この等式は適切に選んだ $\theta$ について成り立ちます。 有限の $n$ について実装をすると、得られるゲートは $n$ が大きくなるとアダマールゲートに漸近していく近似となっているでしょう。
#
# 次で、誤った$\theta$ を選んで、Qiskit で実装した例を示しています。(グローバル位相は無視しています。)
#
# * $\theta$ の正しい値を決定してください。
#
# * (正しい $\theta$ を用いているときに)エラーが $n$ が増えるにつれて二次的に減少していくことを示してください。

# + colab={} colab_type="code" id="4yqeQMlZQQN_"
q = QuantumRegister(1)
c = ClassicalRegister(1)

error = {}
for n in range(1, 11):
    # 空の量子回路を作る
    qc = QuantumCircuit(q, c)

    # 近似アダマールを実装する
    theta = np.pi  # ここで theta として誤った値 π を設定
    for j in range(n):
        qc.rx(theta/n, q[0])
        qc.rz(theta/n, q[0])

    # 上の近似がどのくらい良いか測る必要があります。これを行う簡単な方法があります。
    # Step 1: 上の近似アダマールをキャンセルする実際のアダマールを使います。
    # 良い近似ならば, 量子ビットは0状態になるでしょう。悪い近似ならば、何かしらの重ね合わせ状態になっているでしょう。
    qc.h(q[0])

    # Step 2: 量子回路を実行して、どのくらい測定値 1 が出るのかを調べます。
    # (良い近似ならば)確実に0 が出るはずなので、1 が出る割合は誤差の指標となります。
    qc.measure(q, c)
    shots = 20000
    job = execute(qc, Aer.get_backend('qasm_simulator'), shots=shots)
    try:
        error[n] = (job.result().get_counts()['1']/shots)
    except KeyError:
        # FIX: narrowed from a bare ``except`` — only a missing '1' key
        # (i.e. zero measured errors for this n) should be skipped; any
        # other failure (backend error, bad result) must surface instead
        # of being silently swallowed.
        pass

plot_histogram(error)

# + [markdown] colab_type="text" id="Yr5vuU_eQQOU"
# ## 3
#
# 近似の改善版が次の式から得られます。
#
#
#
# $$H \equiv \lim_{n\rightarrow\infty} \left( ~ R_z \left(\frac{\theta}{2n}\right)~~ R_x\left(\frac{\theta}{n}\right) ~~ R_z \left(\frac{\theta}{2n}\right) ~\right)^n.$$
#
#
# これを実装し、エラーのスケーリングを調べてみてください。
#
# -

import qiskit
qiskit.__qiskit_version__
i18n/locales/ja/ch-ex/ex2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Convert human 1000 genomes phase 3 data from VCF to Zarr # # This notebook has example code for converting variant data from the 1000 genomes project phase 3 to Zarr format. # # This notebook uses a single chromosome as an example, however code could be adapted to run all chromosomes. from pathlib import Path import sys import functools import numpy as np # Use scikit-allel for the VCF to Zarr conversion. import allel allel.__version__ # The numcodecs package holds the various compressors that can be used with Zarr. import numcodecs from numcodecs import Blosc numcodecs.__version__ import zarr zarr.__version__ # ## Download and inspect source data # This is a local directory where we will download VCF files to, and also write Zarr outputs. data_path = Path('../data/1000genomes/release/20130502') # !mkdir -pv {data_path} # + # There is one VCF file per chromosome. vcf_fn_template = 'ALL.chr{chrom}.phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz' # Local path to download VCF to. vcf_path_template = str(data_path / vcf_fn_template) # Remote FTP location. ftp_path = 'ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502' vcf_ftp_path_template = ftp_path + '/' + vcf_fn_template # - # Download data for chromosome 22. vcf_ftp_path = vcf_ftp_path_template.format(chrom='22') vcf_ftp_path # !cd {data_path} && wget --no-clobber {vcf_ftp_path} vcf_path = vcf_path_template.format(chrom='22') vcf_path # Inspect file size for interest. # !ls -lh {vcf_path} # Inspect which INFO fields are present, for interest. # !zcat {vcf_path} | head -n1000 | grep INFO= # Inspect which FORMAT fields are present, for interest. 
# !zcat {vcf_path} | head -n1000 | grep FORMAT= # ## Convert VCF to Zarr # + # For a lossless conversion from VCF to Zarr, we will need to know the # maximum number of alternate alleles found in any variant. This will be used to # determine the shape of some arrays, like ALT for example. @functools.lru_cache(maxsize=None) def find_alt_number(chrom): """Scan a VCF to find the maximum number of alleles in any variant.""" vcf_path = vcf_path_template.format(chrom=chrom) callset = allel.read_vcf(vcf_path, fields=['numalt'], log=sys.stdout) numalt = callset['variants/numalt'] return np.max(numalt) # - # Demonstrate finding max number of alternate alleles. find_alt_number('22') def build_zarr(zarr_path, chrom, compressor, fields='*'): """Run VCF to Zarr conversion for the given chromosome.""" # Determine VCF path for this chromosome. vcf_path = vcf_path_template.format(chrom=chrom) # Zarr can't handle pathlib.Path, ensure string zarr_path = str(zarr_path) # Determine max number of ALT alleles. alt_number = find_alt_number(chrom) # Run VCF to Zarr converation. For all the options that this function supports, see # http://alimanfoo.github.io/2017/06/14/read-vcf.html allel.vcf_to_zarr(vcf_path, zarr_path, group=chrom, fields=fields, alt_number=alt_number, log=sys.stdout, compressor=compressor) # Choose a compressor - this one is a good allrounder, good compression ratio and reasonable speed. # Should work well on both local and networked storage. compressor = Blosc(cname='zstd', clevel=1, shuffle=Blosc.AUTOSHUFFLE) zarr_path = data_path / 'zarr' build_zarr(zarr_path, chrom='22', compressor=compressor) # ## Inspect Zarr output # Inspect total size of Zarr data. # !du -hs {str(zarr_path)} # Inspect size breakdown of Zarr data. # !du -hs {str(zarr_path / '*' / '*' / '*')} # Open the Zarr data and inspect the hierarchy. 
store = zarr.DirectoryStore(str(zarr_path)) callset = zarr.Group(store=store, read_only=True) callset.tree(expand=True) # Get some diagnostics on the genotype data. gtz = callset['22/calldata/GT'] gtz.info # + # %%time # Do a quick benchmark of time to compute allele counts over whole chromosome and cohort. # Wrap Zarr array with scikit-allel class. gt = allel.GenotypeDaskArray(gtz) # It helps to know the max number of ALT alleles to expect. max_allele = callset['22/variants/ALT'].shape[1] # Run the computation. ac = gt.count_alleles(max_allele=max_allele).compute() # - # What does an allele counts array look like? # Rows are variants, columns are alleles, each cell holds the count of observations of an allele for a variant. ac
notebooks/1000-genomes-vcf-to-zarr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import random
# %matplotlib inline

# +
x1 = [5,1,3,7]
y1 = [4,5,2,9]

x2 = [1,4,10]
y2 = [4,6,20]
z2 = [5,5,20]
# -

# Calculate the Centroids ~ Slide 21)
def centroid(data):
    '''
    Return the centroid of a point cloud.

    Parameters
    ----------
    data : list of n dimensions, where each dimension is its own list of
        coordinates (e.g. [xs, ys] for 2-D points).

    Returns
    -------
    list of per-dimension means, each rounded to 2 decimal places.
    '''
    return [round(sum(dim) / len(dim), 2) for dim in data]

print(centroid([x1,y1]))
print(centroid([x2,y2,z2]))

#Cluster points around a centroid ---Slide 32
x3 = [1,3,5,9,1,0,10]
y3 = [4,2,8,10,1,2,7]
cent1 = [2,2]
cent2 = [8,10]

# +
def calc_distance(centers, points):
    """Euclidean distance from every point to every center.

    Parameters
    ----------
    centers : list of center coordinate lists, e.g. [[x, y], ...].
    points : list of per-dimension coordinate lists, e.g. [xs, ys].

    Returns
    -------
    One sub-list per center holding its distance (rounded to 2 decimals)
    to each point.

    GENERALIZED: the original hard-coded a 2-D formula even though it
    already computed ``num_dimensions``; distances are now summed over
    however many dimensions are supplied (identical results for 2-D data).
    """
    num_dimensions = len(points)
    distances = [[] for _ in centers]
    for cen in range(len(centers)):
        for loc in range(len(points[0])):
            sq_sum = sum((centers[cen][d] - points[d][loc]) ** 2
                         for d in range(num_dimensions))
            distances[cen].append(round(np.sqrt(sq_sum), 2))
    return distances
# -

distance = calc_distance([cent1,cent2], [x3,y3])
distance

# +
def calc_closer(centers, distances, points):
    """Assign each point to its nearest center (ties go to the lowest index).

    Returns one [xs, ys, ...] group of coordinate lists per center.
    GENERALIZED from 2-D to any number of dimensions; behavior is
    unchanged for the 2-D data used in this notebook.
    """
    num_dimensions = len(points)
    new_sets = [[[] for _ in range(num_dimensions)] for _ in range(len(centers))]
    # Create an intermediate set of centroids: for each point, pick the
    # center with the smallest distance.
    for i in range(len(distances[0])):
        cents_to_points = [dist[i] for dist in distances]
        val, idx = min((val, idx) for (idx, val) in enumerate(cents_to_points))
        for d in range(num_dimensions):
            new_sets[idx][d].append(points[d][i])
    return new_sets
# -

new_sets = calc_closer([cent1,cent2], distance, [x3,y3])
new_sets

# Three rounds of Lloyd's algorithm: re-assign points, then recompute centroids.
for i in range(3):
    print (cent1, cent2)
    distance = calc_distance([cent1,cent2], [x3,y3])
    new_sets = calc_closer([cent1,cent2], distance, [x3,y3])
    cent1 = centroid(new_sets[0])
    cent2 = centroid(new_sets[1])
Data_Science_Mathematics_In_class_4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Python для анализа данных

# ### Домашнее задание NumPy

# Дан массив, содержащий баллы слушателей курса по английскому языку за вступительный экзамен. Слушатели распределены по 15 группам по 10 человек.

import numpy as np

scores = np.array([[20, 40, 56, 80, 0, 5, 25, 27, 74, 1],
                   [0, 98, 67, 100, 8, 56, 34, 82, 100, 7],
                   [78, 54, 23, 79, 100, 0, 0, 42, 95, 83],
                   [51, 50, 47, 23, 100, 94, 25, 48, 38, 77],
                   [90, 87, 41, 89, 52, 0, 5, 17, 28, 99],
                   [32, 18, 21, 18, 29, 31, 48, 62, 76, 22],
                   [6, 0, 65, 78, 43, 22, 38, 88, 94, 100],
                   [77, 28, 39, 41, 0, 81, 45, 54, 98, 12],
                   [66, 0, 88, 0, 44, 0, 55, 100, 12, 11],
                   [17, 70, 86, 96, 56, 23, 32, 49, 70, 80],
                   [20, 24, 76, 50, 29, 40, 3, 2, 5, 11],
                   [33, 63, 28, 40, 51, 100, 98, 87, 22, 30],
                   [16, 54, 78, 12, 25, 35, 10, 19, 67, 0],
                   [100, 88, 24, 33, 47, 56, 62, 34, 77, 53],
                   [50, 89, 70, 72, 56, 29, 15, 20, 0, 0]])

# ### Задание 1
#
# Посчитать, сколько слушателей получили 0 за вступительный экзамен.

### YOUR CODE HERE ###
# A summed boolean mask counts the matching cells.
print(np.sum(scores == 0))

# ### Задание 2
#
# Посчитать, сколько слушателей получили балл выше 50.

### YOUR CODE HERE ###
print(np.sum(scores > 50))

# ### Задание 3
#
# Посчитать, сколько человек получили балл не ниже 50, но не выше 70.

### YOUR CODE HERE ###
# BUG FIX: the original computed size - count(>= 70) - count(<= 50), which
# is the strictly-between count 50 < x < 70 and drops the boundary scores
# 50 and 70 themselves. The task asks for the inclusive range
# ("не ниже 50, но не выше 70" means 50 <= x <= 70).
print(np.sum((scores >= 50) & (scores <= 70)))

# ### Задание 4
#
# Определить, в какой группе средний балл за вступительный экзамен выше.

### YOUR CODE HERE ###
# Vectorized row means replace the explicit Python loop; the printed output
# (list of group means, then the index of the best group) is unchanged.
x = scores.mean(axis=1).tolist()
print(x)
print(x.index(max(x)))

# ### Задание 5
#
# Сохранить баллы слушателей выше 0 в массив `nonzero`.
# +
### YOUR CODE HERE ###
# Keep every non-zero score. A boolean mask gathers the same values, in
# the same row-major order, as indexing with ``np.nonzero``.
non_zero_ar = np.nonzero(scores)      # (row, col) indices of non-zero cells
nonzero = scores[scores != 0]
# -

# ### Задание 6
#
# Используя массив `nonzero`, определить минимальный балл за вступительный балл по всем группам.

# +
### YOUR CODE HERE ###
non_zero_ar = np.nonzero(scores)
nonzero = scores[scores != 0]
print(nonzero.min())
# -

# ### Задание 7
#
# Выбрать из массива `nonzero` только те значения, которые соответствуют продвинутому уровню знания языка – баллу за экзамен выше 80. Сохранить полученный результат в массив `advanced`. В заданиях 8-10 нужно работать с массивом `advanced`.

### YOUR CODE HERE ###
cond = nonzero > 80
advanced = nonzero[cond]              # same as np.extract(cond, nonzero)
print(advanced)

# ### Задание 8
#
# Определить размерность массива `advanced`.

### YOUR CODE HERE ###
# Number of axes of the array.
advanced.ndim

# ### Задание 9
#
# Определить форму массива `advanced`.

### YOUR CODE HERE ###
# Length along each axis.
advanced.shape

# ### Задание 10
#
# Определить общее число элементов в массиве `advanced`.

### YOUR CODE HERE ###
# Total element count.
advanced.size

# ### Задание 11
#
# На основе исходного массива `scores` создать булев массив `sto`, где `True` соответствует баллам за экзамен, равным 100, а `False` – всем остальным баллам.

### YOUR CODE HERE ###
sto = (scores == 100)
print(sto)

# ### Задание 12
#
# На основе исходного массива `scores` вывести на экран оценки слушателей первых семи групп (включительно, должны быть 7 списков оценок).

# +
### YOUR CODE HERE ###
print(scores[0:7])
6_Homework.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.2 # language: julia # name: julia-1.5 # --- # # Factorizations and other fun # Based on work by <NAME> # # ## Outline # - Factorizations # - Special matrix structures # - Generic linear algebra # Before we get started, let's set up a linear system and use `LinearAlgebra` to bring in the factorizations and special matrix structures. using LinearAlgebra A = rand(3, 3) x = fill(1, (3,)) b = A * x # ## Factorizations # # #### LU factorizations # In Julia we can perform an LU factorization # ```julia # PA = LU # ``` # where `P` is a permutation matrix, `L` is lower triangular unit diagonal and `U` is upper triangular, using `lufact`. # # Julia allows computing the LU factorization and defines a composite factorization type for storing it. Alu = lu(A) typeof(Alu) # The different parts of the factorization can be extracted by accessing their special properties Alu.P Alu.L Alu.U # Julia can dispatch methods on factorization objects. # # For example, we can solve the linear system using either the original matrix or the factorization object. A\b Alu\b # Similarly, we can calculate the determinant of `A` using either `A` or the factorization object det(A) ≈ det(Alu) # #### QR factorizations # # In Julia we can perform a QR factorization # ``` # A=QR # ``` # # where `Q` is unitary/orthogonal and `R` is upper triangular, using `qrfact`. Aqr = qr(A) # Similarly to the LU factorization, the matrices `Q` and `R` can be extracted from the QR factorization object via Aqr.Q Aqr.R # #### Eigendecompositions # The results from eigendecompositions, singular value decompositions, Hessenberg factorizations, and Schur decompositions are all stored in `Factorization` types. 
# # The eigendecomposition can be computed Asym = A + A' AsymEig = eigen(Asym) # The values and the vectors can be extracted from the Eigen type by special indexing AsymEig.values AsymEig.vectors # Once again, when the factorization is stored in a type, we can dispatch on it and write specialized methods that exploit the properties of the factorization, e.g. that $A^{-1}=(V\Lambda V^{-1})^{-1}=V\Lambda^{-1}V^{-1}$. inv(AsymEig)*Asym # ## Special matrix structures # Matrix structure is very important in linear algebra. To see *how* important it is, let's work with a larger linear system n = 1000 A = randn(n,n); # Julia can often infer special matrix structure Asym = A + A' issymmetric(Asym) # but sometimes floating point error might get in the way. Asym_noisy = copy(Asym) Asym_noisy[1,2] += 5eps() issymmetric(Asym_noisy) # Luckily we can declare structure explicitly with, for example, `Diagonal`, `Triangular`, `Symmetric`, `Hermitian`, `Tridiagonal` and `SymTridiagonal`. Asym_explicit = Symmetric(Asym_noisy); # Let's compare how long it takes Julia to compute the eigenvalues of `Asym`, `Asym_noisy`, and `Asym_explicit` @time eigvals(Asym); @time eigvals(Asym_noisy); @time eigvals(Asym_explicit); # In this example, using `Symmetric()` on `Asym_noisy` made our calculations about `5x` more efficient :) # #### A big problem # Using the `Tridiagonal` and `SymTridiagonal` types to store tridiagonal matrices makes it possible to work with potentially very large tridiagonal problems. The following problem would not be possible to solve on a laptop if the matrix had to be stored as a (dense) `Matrix` type. n = 1_000_000; A = SymTridiagonal(randn(n), randn(n-1)); @time eigmax(A) # ## Generic linear algebra # The usual way of adding support for numerical linear algebra is by wrapping BLAS and LAPACK subroutines. For matrices with elements of `Float32`, `Float64`, `Complex{Float32}` or `Complex{Float64}` this is also what Julia does. 
# # However, Julia also supports generic linear algebra, allowing you to, for example, work with matrices and vectors of rational numbers. # #### Rational numbers # Julia has rational numbers built in. To construct a rational number, use double forward slashes: 1//2 3/7 + 3/7 + 1/7 # #### Example: Rational linear system of equations # The following example shows how linear system of equations with rational elements can be solved without promoting to floating point element types. Overflow can easily become a problem when working with rational numbers so we use `BigInt`s. Arational = Matrix{Rational{BigInt}}(rand(1:10, 3, 3))/10 x = fill(1, 3) b = Arational*x Arational\b lu(Arational) # ### Exercises # # #### 11.1 # What are the eigenvalues of matrix A? # # ``` # A = # [ # 140 97 74 168 131 # 97 106 89 131 36 # 74 89 152 144 71 # 168 131 144 54 142 # 131 36 71 142 36 # ] # ``` # and assign it a variable `A_eigv` A = [ 140 97 74 168 131 97 106 89 131 36 74 89 152 144 71 168 131 144 54 142 131 36 71 142 36 ] using LinearAlgebra A_eigv = eigen(A).values # + deletable=false editable=false hide_input=true nbgrader={"checksum": "f9f16fdef201ed372323a291f1dd1346", "grade": true, "grade_id": "cell-4d5f60c8a814c789", "locked": true, "points": 0, "schema_version": 1, "solution": false} @assert A_eigv == [-128.49322764802145, -55.887784553056875, 42.7521672793189, 87.16111477514521, 542.4677301466143] # - # #### 11.2 # Create a `Diagonal` matrix from the eigenvalues of `A`. 
# Solution 11.2: wrap the eigenvalues computed above in a `Diagonal` matrix.
# (This replaces a stray placeholder token, "adsf", that was left where the
# solution belongs.)
# NOTE(review): the locked grading cell below compares against eigenvalues
# rounded for display (6 significant figures); confirm with the course
# autograder whether `A_eigv` is expected to be stored pre-rounded.
A_diag = Diagonal(A_eigv)

# + deletable=false editable=false hide_input=true nbgrader={"checksum": "3ca676f6282c1a7c214ab2cb9f9b322d", "grade": true, "grade_id": "cell-3b000a3710c9c263", "locked": true, "points": 1, "schema_version": 1, "solution": false}
@assert A_diag == [-128.493 0.0 0.0 0.0 0.0; 0.0 -55.8878 0.0 0.0 0.0; 0.0 0.0 42.7522 0.0 0.0; 0.0 0.0 0.0 87.1611 0.0; 0.0 0.0 0.0 0.0 542.468]
# -

# #### 11.3
# Create a `LowerTriangular` matrix from `A` and store it in `A_lowertri`

# Solution 11.3: `LowerTriangular` views the lower triangle (including the
# diagonal) of `A`, zeroing everything above it — exactly the matrix the
# grading cell expects. (This solution cell was missing entirely.)
A_lowertri = LowerTriangular(A)

# + deletable=false editable=false hide_input=true nbgrader={"checksum": "b3b1a272343a05082f378a5e1aa3426d", "grade": true, "grade_id": "cell-b76cee2b4a8777da", "locked": true, "points": 0, "schema_version": 1, "solution": false}
@assert A_lowertri == [140 0 0 0 0; 97 106 0 0 0; 74 89 152 0 0; 168 131 144 54 0; 131 36 71 142 36]
# -

# ### Please let us know how we're doing!
# https://tinyurl.com/introJuliaFeedback

# Please click on `Validate` on the top, once you are done with the exercises.
introductory-tutorials/intro-to-julia/12. Factorizations and other fun.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Link prediction with GCN # + [markdown] nbsphinx="hidden" tags=["CloudRunner"] # <table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/link-prediction/gcn-link-prediction.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/link-prediction/gcn-link-prediction.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table> # - # In this example, we use our implementation of the [GCN](https://arxiv.org/abs/1609.02907) algorithm to build a model that predicts citation links in the Cora dataset (see below). The problem is treated as a supervised link prediction problem on a homogeneous citation network with nodes representing papers (with attributes such as binary keyword indicators and categorical subject) and links corresponding to paper-paper citations. # # To address this problem, we build a model with the following architecture. First we build a two-layer GCN model that takes labeled node pairs (`citing-paper` -> `cited-paper`) corresponding to possible citation links, and outputs a pair of node embeddings for the `citing-paper` and `cited-paper` nodes of the pair. These embeddings are then fed into a link classification layer, which first applies a binary operator to those node embeddings (e.g., concatenating them) to construct the embedding of the potential link. 
Thus obtained link embeddings are passed through the dense link classification layer to obtain link predictions - probability for these candidate links to actually exist in the network. The entire model is trained end-to-end by minimizing the loss function of choice (e.g., binary cross-entropy between predicted link probabilities and true link labels, with true/false citation links having labels 1/0) using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' links fed into the model. # + nbsphinx="hidden" tags=["CloudRunner"] # install StellarGraph if running on Google Colab import sys if 'google.colab' in sys.modules: # %pip install -q stellargraph[demos]==1.2.1 # + nbsphinx="hidden" tags=["VersionCheck"] # verify that we're using the correct version of StellarGraph for this notebook import stellargraph as sg try: sg.utils.validate_notebook_version("1.2.1") except AttributeError: raise ValueError( f"This notebook requires StellarGraph version 1.2.1, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>." ) from None # + import stellargraph as sg from stellargraph.data import EdgeSplitter from stellargraph.mapper import FullBatchLinkGenerator from stellargraph.layer import GCN, LinkEmbedding from tensorflow import keras from sklearn import preprocessing, feature_extraction, model_selection from stellargraph import globalvar from stellargraph import datasets from IPython.display import display, HTML # %matplotlib inline # - # ## Loading the CORA network data # + [markdown] tags=["DataLoadingLinks"] # (See [the "Loading from Pandas" demo](../basics/loading-pandas.ipynb) for details on how data can be loaded.) 
# + tags=["DataLoading"] dataset = datasets.Cora() display(HTML(dataset.description)) G, _ = dataset.load(subject_as_feature=True) # - print(G.info()) # We aim to train a link prediction model, hence we need to prepare the train and test sets of links and the corresponding graphs with those links removed. # # We are going to split our input graph into a train and test graphs using the EdgeSplitter class in `stellargraph.data`. We will use the train graph for training the model (a binary classifier that, given two nodes, predicts whether a link between these two nodes should exist or not) and the test graph for evaluating the model's performance on hold out data. # Each of these graphs will have the same number of nodes as the input graph, but the number of links will differ (be reduced) as some of the links will be removed during each split and used as the positive samples for training/testing the link prediction classifier. # From the original graph G, extract a randomly sampled subset of test edges (true and false citation links) and the reduced graph G_test with the positive test edges removed: # + # Define an edge splitter on the original graph G: edge_splitter_test = EdgeSplitter(G) # Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G, and obtain the # reduced graph G_test with the sampled links removed: G_test, edge_ids_test, edge_labels_test = edge_splitter_test.train_test_split( p=0.1, method="global", keep_connected=True ) # - # The reduced graph G_test, together with the test ground truth set of links (edge_ids_test, edge_labels_test), will be used for testing the model. # # Now repeat this procedure to obtain the training data for the model. 
# From the reduced graph G_test, extract a randomly sampled subset of train edges (true and false citation links) and the reduced graph G_train with the positive train edges removed:

# +
# Define an edge splitter on the reduced graph G_test:
edge_splitter_train = EdgeSplitter(G_test)

# Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G_test, and obtain the
# reduced graph G_train with the sampled links removed:
G_train, edge_ids_train, edge_labels_train = edge_splitter_train.train_test_split(
    p=0.1, method="global", keep_connected=True
)
# -

# G_train, together with the train ground truth set of links (edge_ids_train, edge_labels_train), will be used for training the model.

# ## Creating the GCN link model

# Next, we create the link generators for the train and test link examples to the model. The link generators take the pairs of nodes (`citing-paper`, `cited-paper`) that are given in the `.flow` method to the Keras model, together with the corresponding binary labels indicating whether those pairs represent true or false links.
#
# The number of epochs for training the model:

epochs = 50

# For training we create a generator on the `G_train` graph, and make an iterator over the training links using the generator's `flow()` method:

train_gen = FullBatchLinkGenerator(G_train, method="gcn")
train_flow = train_gen.flow(edge_ids_train, edge_labels_train)

test_gen = FullBatchLinkGenerator(G_test, method="gcn")
# BUG FIX: the test iterator was built from ``train_gen`` (leaving
# ``test_gen`` created but unused), so evaluation batches carried G_train's
# adjacency instead of G_test's. Build it from ``test_gen`` so the test
# links are evaluated on the test graph.
test_flow = test_gen.flow(edge_ids_test, edge_labels_test)

# Now we can specify our machine learning model, we need a few more parameters for this:
#
# * the `layer_sizes` is a list of hidden feature sizes of each layer in the model. In this example we use two GCN layers with 16-dimensional hidden node features at each layer.
# * `activations` is a list of activations applied to each layer's output
# * `dropout=0.3` specifies a 30% dropout at each layer.
# We create a GCN model as follows: gcn = GCN( layer_sizes=[16, 16], activations=["relu", "relu"], generator=train_gen, dropout=0.3 ) # To create a Keras model we now expose the input and output tensors of the GCN model for link prediction, via the `GCN.in_out_tensors` method: x_inp, x_out = gcn.in_out_tensors() # Final link classification layer that takes a pair of node embeddings produced by the GCN model, applies a binary operator to them to produce the corresponding link embedding (`ip` for inner product; other options for the binary operator can be seen by running a cell with `?LinkEmbedding` in it), and passes it through a dense layer: prediction = LinkEmbedding(activation="relu", method="ip")(x_out) # The predictions need to be reshaped from `(X, 1)` to `(X,)` to match the shape of the targets we have supplied above. prediction = keras.layers.Reshape((-1,))(prediction) # Stack the GCN and prediction layers into a Keras model, and specify the loss # + model = keras.Model(inputs=x_inp, outputs=prediction) model.compile( optimizer=keras.optimizers.Adam(lr=0.01), loss=keras.losses.binary_crossentropy, metrics=["acc"], ) # - # Evaluate the initial (untrained) model on the train and test set: # + init_train_metrics = model.evaluate(train_flow) init_test_metrics = model.evaluate(test_flow) print("\nTrain Set Metrics of the initial (untrained) model:") for name, val in zip(model.metrics_names, init_train_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nTest Set Metrics of the initial (untrained) model:") for name, val in zip(model.metrics_names, init_test_metrics): print("\t{}: {:0.4f}".format(name, val)) # - # Train the model: history = model.fit( train_flow, epochs=epochs, validation_data=test_flow, verbose=2, shuffle=False ) # Plot the training history: sg.utils.plot_history(history) # Evaluate the trained model on test citation links: # + train_metrics = model.evaluate(train_flow) test_metrics = model.evaluate(test_flow) print("\nTrain Set Metrics of 
the trained model:") for name, val in zip(model.metrics_names, train_metrics): print("\t{}: {:0.4f}".format(name, val)) print("\nTest Set Metrics of the trained model:") for name, val in zip(model.metrics_names, test_metrics): print("\t{}: {:0.4f}".format(name, val)) # + [markdown] nbsphinx="hidden" tags=["CloudRunner"] # <table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/link-prediction/gcn-link-prediction.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/link-prediction/gcn-link-prediction.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
demos/link-prediction/gcn-link-prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homework 7 # ## Due Date: Wednesday, October 25th at 11:59 PM # # Problem 1: Linked List Class # Write a linked list class called `LinkedList`. Remember, a singly linked list is made up of nodes each of which contain a value and a pointer. The first node is called the "head node". # # Here are the required methods: # * `__init__(self, head)` where `head` is the value of the head node. You could make the head node an attribute. # * `__len__(self)`: Returns the number of elements in the linked list. # * `__getitem__(self, index)` returns the value of the node corresponding to `index`. Include checks to make sure that `index` is not out of range and that the user is not trying to index and empty list. # * `__repr__(self)` returns `LinkedList(head_node)`. # * `insert_front(self, element)` inserts a new node with value `element` at the beginning of the list. # * `insert_back(self, element)` inserts a new node with value `element` at the end of the list. # # Note: An alternative implementation is to create a `Node` class. You are not required to make a `Node` class but you may if you prefer that implementation. Please don't steal that implementation from the online forums. I've seen those too. # # Problem 2: Binary Tree Class # A binary search tree is a binary tree with the invariant that for any particular node the left child is smaller and the right child is larger. Create the class `BinaryTree` with the following specifications: # # `__init__(self)`: Constructor takes no additional arguments # # `insert(self, val)`: This method will insert `val` into the tree # # (Optional) `remove(self, val)`: This will remove `val` from the tree. # 1. If the node to be deleted has no children then just remove it. # 2. 
If the node to be deleted has only one child, remove the node and replace it with its child. # 3. If the node to be deleted has two children, replace the node to be deleted with the maximum value in the left subtree. Finally, delete the node with the maximum value in the left-subtree. # # `getValues(self. depth)`: Return a list of the entire row of nodes at the specified depth with `None` at the index if there is no value in the tree. The length of the list should therefore be $2^{\text{depth}}$. # Here is a sample output: # # ```python # bt = BinaryTree() # arr = [20, 10, 17, 14, 3, 0] # for i in arr: # bt.insert(i) # # print("Height of binary tree is {}.\n".format(len(bt))) # for i in range(len(bt)): # print("Level {0} values: {1}".format(i, bt.getValues(i))) # ``` # # ``` # Height of binary tree is 4. # # Level 0 values: [20] # Level 1 values: [10, None] # Level 2 values: [3, 17, None, None] # Level 3 values: [0, None, 14, None, None, None, None, None] # ``` # # Note that you do not need to format your output in this way. Nor are you required to implement a `__len__` method to compute the height of the tree. I did this because it was convenient for illustration purposes. This example is simply meant to show you some output at each level of the tree. # # Problem 3: Peer Evaluations # Evaluate the members of your group for Milestone 1. Please follow the instructions in the provided survey. The survey can be found here: [Milestone 1 Peer Evaluation](https://harvard.az1.qualtrics.com/jfe/form/SV_0JnuXbE5QjLCrKB). # # Problem 4: Course Evaluation # Please take the [Course Evaluation](https://docs.google.com/forms/d/e/1FAIpQLSdDyrtf_aByU4xNeLMSmDrFCJ2OLDrK1Q7ZoeTd2Whf_cdRrw/viewform?usp=sf_link).
Homework/HW7/HW7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction à la compression d'images # Nous observons actuellement une augmentation importante du volume de données vidéo disponibles, par exemple sous forme de séries qui sont diffusées en _web-streaming_ (Netflix pour n'en citer qu'un). # La vidéo est en train de devenir une des principales sources d’informations (YouTube délivre plus de 100 millions de séquences vidéo chaquejour sur Internet) et tend à être de plus en plus omniprésente. L'entreprise Cisco prévoyait qu’en 2012, la vidéo représenterai 50% du trafic Internet et qu’en 2015, 100 millions de minutes de vidéo etaient diffusées sur Internet chaque seconde, en 2019 on est passé au dela de cette limite. Aujourd'hui la plupart des recherches concernant des pratiques se font directement à partir de Youtube. # La diffusion de cette masse de données nécessite d'extraire de chaque image individuelle "la substantifique moelle", c'est à dire les informations qui permettent de la décrire le plus succintement possible. Dans ce cours nous allons voire les bases de la compression d'images en se servant d'un exemple issu du monde réel. Celui de la compression par regroupemment de domaines adjacents, grâce à un graphe. Un domaine est un ensemble connexe de pixels ayant des propriétés similaires (par exemple : deux couleurs similaires, on dit alors qu'elles sont plates). Un graphe est un outil mathématique qui sert avant tout à manipuler des concepts, et à établir un lien entre ces concepts. Il s'agit d'une notion d'algorithmique qui a été très utilisée dans la théorie des reseaux sociaux. # %matplotlib inline # # ## Regrouppement des images en se servant d'un graphe # # Cet exemple construit le graphe des zones quasi plates (appellé en Anglais Region Adjacency Graph ou RAG). 
# It progressively merges regions that are similar in colour so as to turn
# the image into a mosaic of uniformly coloured tiles.
# Merging two adjacent regions produces a new region gathering all the pixels
# of both under a single colour — a bit like a conquest.
# Regions are merged until no two similar regions remain.
#
# We start by importing some functions from
# the scikit-image library https://scikit-image.org/

from skimage import data, io, segmentation, color
from skimage.future import graph
import numpy as np

# We start from a cup of coffee, one of the sample images bundled with the library.

img = data.coffee()
io.imshow(img)

# +
# This cell defines the two callbacks used to
# manipulate the region colours during merging.


def _weight_mean_color(graph, src, dst, n):
    """Callback to handle merging nodes by recomputing mean color.

    The method expects that the mean color of `dst` is already computed.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    n : int
        A neighbor of `src` or `dst` or both.

    Returns
    -------
    data : dict
        A dictionary with the `"weight"` attribute set as the absolute
        difference of the mean color between node `dst` and `n`.
    """
    # NOTE(review): `Graph.node` was removed in networkx >= 2.4; recent
    # scikit-image/networkx combinations require `graph.nodes` instead —
    # confirm the pinned versions before upgrading.
    diff = graph.node[dst]['mean color'] - graph.node[n]['mean color']
    diff = np.linalg.norm(diff)
    return {'weight': diff}


def merge_mean_color(graph, src, dst):
    """Callback called before merging two nodes of a mean color distance graph.

    This method computes the mean color of `dst`.

    Parameters
    ----------
    graph : RAG
        The graph under consideration.
    src, dst : int
        The vertices in `graph` to be merged.
    """
    # Accumulate src's totals into dst, then refresh dst's mean colour.
    graph.node[dst]['total color'] += graph.node[src]['total color']
    graph.node[dst]['pixel count'] += graph.node[src]['pixel count']
    graph.node[dst]['mean color'] = (graph.node[dst]['total color'] /
                                     graph.node[dst]['pixel count'])


# +
# Now run the segmentation of the image;
# note what happens when the image is labelled with 400 segments
labels = segmentation.slic(img, compactness=30, n_segments=400)

# Please do not modify the following code
g = graph.rag_mean_color(img, labels)

labels2 = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False,
                                   in_place_merge=True,
                                   merge_func=merge_mean_color,
                                   weight_func=_weight_mean_color)

out = color.label2rgb(labels2, img, kind='avg')
out = segmentation.mark_boundaries(out, labels2, (0, 0, 0))
io.imshow(out)
io.show()

# +
# Now run the segmentation again;
# note what happens when the image is labelled with 800 segments
labels = segmentation.slic(img, compactness=30, n_segments=800)

# Please do not modify the following code
g = graph.rag_mean_color(img, labels)

labels2 = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False,
                                   in_place_merge=True,
                                   merge_func=merge_mean_color,
                                   weight_func=_weight_mean_color)

out = color.label2rgb(labels2, img, kind='avg')
out = segmentation.mark_boundaries(out, labels2, (0,0,0))
io.imshow(out)
io.show()
# -

# This second case shows two things:
# > - The circle of the rim of the cup is complete, and a circle can be laid
# >   over it without discontinuity
# > - A small white spot corresponding to a glint of light has appeared on
# >   the spoon.

# ## Exercise
# Can you describe in words the essential parts of the image below, which
# has been simplified to the extreme?
labels = segmentation.slic(img, compactness=5, n_segments=10) # + # On vous demande de ne pas modifier le code suivant g = graph.rag_mean_color(img, labels) labels2 = graph.merge_hierarchical(labels, g, thresh=35, rag_copy=False, in_place_merge=True, merge_func=merge_mean_color, weight_func=_weight_mean_color) out = color.label2rgb(labels2, img, kind='avg') out = segmentation.mark_boundaries(out, labels2, (0, 0, 0)) io.imshow(out) io.show() # - # Ecrivez votre réponse dans cette case :
Regrouppement-IMAGES_DILLMANN-NDO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="aaada6ec-2d46-46de-9b8e-d3b3cf7677d8" _uuid="08eb34d05b01758bcd9623ca5fc6c2cac368545f" # # Titanic Analysis: Collide with destiny # + [markdown] _uuid="5475d6e617f1f0646623a35a55d798b558eda1c5" # ![](https://i.imgur.com/rE1OxtK.png) # + [markdown] _cell_guid="a7a3db49-9111-4536-8a9c-2ee2cd37cacf" _uuid="75f0ba63a67e57f82cae7dc7bc2b01fdb0cfc0bc" # ## Table of Contents # - [Overview](#overview) # - [Wrangling Data](#wranglingData) # - [Developing Model](#developingModel) # - [Validating Model](#validatingModel) # - [Conclusion](#conclusion) # > <B>NOTE</B>: This is my first Kaggle comptition kernel. Any feedback or suggestions will be warmly appreciated. # + [markdown] _uuid="6e23e2573f6c5468c9c0a0a8417548806f7d2879" # ---- # <a id='overview'></a> # ## Overview # # The data has been split into two groups: # # - training set (train.csv) # - test set (test.csv) # # **The training set** should be used to build your machine learning models. For the training set, we provide the outcome (also known as the `ground truth`) for each passenger. Your model will be based on `features` like passengers’ gender and class. You can also use feature engineering to create new [features](https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/). # # **The test set** should be used to see how well your model performs on unseen data. For the test set, we do not provide the ground truth for each passenger. It is your job to predict these outcomes. For each passenger in the test set, use the model you trained to predict whether or not they survived the sinking of the Titanic. 
# # > ### Variable Notes # **pclass**: A proxy for socio-economic status (SES)<br> # 1st = Upper<br> # 2nd = Middle<br> # 3rd = Lower<br> # **age**: Age is fractional if less than 1. If the age is estimated, is it in the form of xx.5<br> # **sibsp**: The dataset defines family relations in this way... <br> # Sibling = brother, sister, stepbrother, stepsister<br> # Spouse = husband, wife (mistresses and fiancés were ignored)<br> # **parch**: The dataset defines family relations in this way...<br> # Parent = mother, father<br> # Child = daughter, son, stepdaughter, stepson<br> # Some children travelled only with a nanny, therefore parch=0 for them. # + _uuid="63a1575787ade35a529041baef9a01a7e52b44bd" # importning libraries import pandas as pd import numpy as np #data visualization library import seaborn as sns import matplotlib.pyplot as plt # models libraries from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from xgboost import XGBClassifier import warnings warnings.filterwarnings('ignore') # + _uuid="427997b4ed3ffebb5ae9d208f758ac74eb627a03" # reading csv file test_df = pd.read_csv('data/test.csv') train_df = pd.read_csv('data/train.csv') # + [markdown] _uuid="668d1961f7bad1ecd1541afcb046064e111d8202" # ---- # <a id='wranglingData'></a> # ## Wrangling Data # + _uuid="7764af22b50a78d95b806c4985878ac30ec81491" train_df.head() # + _uuid="3d9f454263ad79026178ec0fe19a9dd6b10ee5a9" train_df.tail() # + [markdown] _uuid="3c28ac60dace14661843c5026d69410ca686a344" # From above we can see that, Name, Sex, Ticket, Cabin, Embarked colunms have Object (String) values. # + _uuid="568c940f83d9d85f8f30f1bf3fabb357de463e18" train_df.info() # + [markdown] _uuid="e62c4b6e9e6d4669cde854ae2caf424e2333f61e" # We have total, 2 columns float type, 5 columns have integer values and 5 columns have object values. 
# + _uuid="059572a05fa3ea9063ca6b31a5871bfabb72aca1"
train_df.describe()

# + [markdown] _uuid="d5b75506c4f7b9503466e5954ea8a6809a964524"
# From above we can see that the Age column has 177 missing values, and gender does not have numeric values. <br> Adding numeric values to the gender.

# + _uuid="508e2bdacb12d06a8dd25a55a66fec7128590401"
# Encode 'Sex' as an integer feature (male -> 0, female -> 1) in both frames.
genders = {"male": 0, "female": 1}
data = [train_df, test_df]

for dataset in data:
    dataset['Sex'] = dataset['Sex'].map(genders)

# + [markdown] _uuid="e6f3a4fb78c643a3f7b9f32a25971f7a4faba424"
# We have added numeric values to the `Sex` column; now filling the null values of the `Age` column.

# + _uuid="08354592532e630ba0fd30e63b0237d1429dbb4b"
# Impute missing ages with random integers drawn from
# [mean - std, mean + std) of the *training* ages, so both frames are
# filled from the same distribution.
data = [train_df, test_df]

for dataset in data:
    mean = train_df["Age"].mean()
    # BUGFIX: the std was previously taken from test_df while the mean
    # came from train_df; use the training statistics consistently.
    std = train_df["Age"].std()
    is_null = dataset["Age"].isnull().sum()
    # compute random numbers between the mean, std and is_null
    rand_age = np.random.randint(mean - std, mean + std, size=is_null)
    # fill NaN values in Age column with random values generated
    age_slice = dataset["Age"].copy()
    age_slice[np.isnan(age_slice)] = rand_age
    dataset["Age"] = age_slice
    # BUGFIX: this previously assigned train_df's ages into *every*
    # dataset (clobbering test_df with misaligned values); cast the
    # frame's own column instead.
    dataset["Age"] = dataset["Age"].astype(int)

train_df["Age"].isnull().sum()

# + _uuid="13f0dc3235a1d5afc7d31bcdf727d2972d8972b7"
# Drop the 'Ticket' column from both frames.
train_df = train_df.drop(['Ticket'], axis=1)
test_df = test_df.drop(['Ticket'], axis=1)

# + _uuid="48a94629ec359847fabe0cbafb0ff0d2d9c7fe0b"
# Fare: replace missing fares with 0 and cast to int.
data = [train_df, test_df]

for dataset in data:
    dataset['Fare'] = dataset['Fare'].fillna(0)
    dataset['Fare'] = dataset['Fare'].astype(int)

# + [markdown] _uuid="3fe470805d8ad1f3a3317f94e5c535abe9c533d5"
# Same as the `Sex` column, we can add numeric values to the `Embarked` column. This column has 3 values: S, C and Q.
# + _uuid="95c69804197293b76c3fbf8704124d33333cf1b0"
# Embarked: fill missing values with the most common port ('S'),
# then encode the three ports as integers.
common_value = 'S'
ports = {"S": 0, "C": 1, "Q": 2}
data = [train_df, test_df]

for dataset in data:
    dataset['Embarked'] = dataset['Embarked'].fillna(common_value)
    dataset['Embarked'] = dataset['Embarked'].map(ports)

# + _uuid="debb3b195d668ecbb76107e6ac53587bfa13b076"
# Family features: 'relatives' = SibSp + Parch, plus a binary
# 'not_alone' flag (1 = travelling with no relatives).
data = [train_df, test_df]
for dataset in data:
    dataset['relatives'] = dataset['SibSp'] + dataset['Parch']
    dataset.loc[dataset['relatives'] > 0, 'not_alone'] = 0
    dataset.loc[dataset['relatives'] == 0, 'not_alone'] = 1
    dataset['not_alone'] = dataset['not_alone'].astype(int)
train_df['not_alone'].value_counts()

# + _uuid="e7a4cc5b34ea76c31f303a2edc7dc26be6384a63"
# Deck: take the leading letters of the cabin code ("U0" for unknown
# cabins) and map them to the integers 1..8.
import re
deck = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7, "U": 8}
data = [train_df, test_df]

for dataset in data:
    dataset['Cabin'] = dataset['Cabin'].fillna("U0")
    # NOTE(review): re.compile runs once per row inside this lambda;
    # hoisting the compiled pattern out of the loop would avoid that.
    dataset['Deck'] = dataset['Cabin'].map(lambda x: re.compile("([a-zA-Z]+)").search(x).group())
    dataset['Deck'] = dataset['Deck'].map(deck)
    dataset['Deck'] = dataset['Deck'].fillna(0)
    dataset['Deck'] = dataset['Deck'].astype(int)
# we can now drop the cabin feature
train_df = train_df.drop(['Cabin'], axis=1)
test_df = test_df.drop(['Cabin'], axis=1)

# + _uuid="4dcfdcca4e0c307c00878134f263aecb07592f97"
# Title: extract the honorific from the name, collapse rare titles into
# 'Rare', normalise variant spellings, then encode as integers 1..5.
data = [train_df, test_df]
titles = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}

for dataset in data:
    # extract titles
    # NOTE(review): prefer a raw string r' ([A-Za-z]+)\.' to avoid the
    # invalid-escape-sequence deprecation warning.
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
    # replace titles with a more common title or as Rare
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr',\
                                            'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
    # convert titles into numbers
    dataset['Title'] = dataset['Title'].map(titles)
    # filling NaN with 0, to be safe
    dataset['Title'] = dataset['Title'].fillna(0)
train_df = train_df.drop(['Name'], axis=1)
test_df = test_df.drop(['Name'], axis=1)

# + _uuid="915255395a364a794cce5e160f25b026ae5d4189"
# Interaction feature: age multiplied by passenger class.
data = [train_df, test_df]
for dataset in data:
    dataset['Age_Class']= dataset['Age']* dataset['Pclass']

# + _uuid="b8035f8c86ff52cf3ae705aed62f52b329bb6365"
# Fare per person: fare divided by party size (relatives + self).
for dataset in data:
    dataset['Fare_Per_Person'] = dataset['Fare']/(dataset['relatives']+1)
    dataset['Fare_Per_Person'] = dataset['Fare_Per_Person'].astype(int)

# Let's take a last look at the training set, before we start training the models.
train_df.head()

# + _uuid="cc0904edf2c6b9e09214141653d32f358e280250"
# PassengerId is an arbitrary identifier; drop it from the training set.
train_df = train_df.drop(['PassengerId'], axis=1)

# + _uuid="66488f15e9c2b94943f80b279c8fee7332b93357"
# Split features/target; keep PassengerId out of the test features.
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
test_df.head()

# + _uuid="04b7dbb1bdefd224881cd50c970e995360d5f28e"
X_train.head()

# + _uuid="3c91a315503ac7955c2f3b83493aa75b6962770f"
Y_train.head()

# + _uuid="b769bcf06d38d8a7a8d1391f57cf13eb2b21e5ce"
X_test.head()

# + [markdown] _uuid="cafe6ba7244930881810dc2f1c69ec65359a6504"
# Up to now we have been wrangling the dataset; both `X_train` and `X_test` contain the same feature columns.
# + [markdown] _uuid="b1e00b77126d2d4e9f510e81c9c80085e5dfb569" # ---- # <a id='developingModel'></a> # ## Developing Model # + _uuid="9b6de8c8cb9b1b153b1e1750e9bd79b0bcce2e91" random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, Y_train) Y_prediction = random_forest.predict(X_test) random_forest.score(X_train, Y_train) # + _uuid="06e78b7b68db00f1d63006e6b5ff4e23b32abdef" logisticRegression = LogisticRegression() logisticRegression.fit(X_train, Y_train) Y_prediction = logisticRegression.predict(X_test) logisticRegression.score(X_train, Y_train) # + _uuid="0004f853a621600adda0b7915ae62a58ef2b7c03" xgBoost = XGBClassifier() xgBoost.fit(X_train, Y_train) Y_prediction = xgBoost.predict(X_test) xgBoost.score(X_train, Y_train) # + [markdown] _uuid="e8bb178aababfb2715a4c26ad6aa427e6ff67454" # ---- # <a id='validatingModel'></a> # ## Validating Model # + [markdown] _uuid="cfdb3df476b77a407e1af035b73a005bf9b133eb" # I have evaluated 3 models and it's score. You can see from above score that random forest classifier is the best model out of 3 for the dataset. 
# + _uuid="9134ec5dea3eb8e9310ddd1282a2ed9d1f1d7321" acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2) print(round(acc_random_forest,2,), "%") # + _uuid="e5082b52752ca8218a1682f1bd94e522fdf7edc0" random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, Y_train) Y_prediction = random_forest.predict(X_test) random_forest.score(X_train, Y_train) # + _uuid="0ec1d17b9107d9868f71a44f416bad44dabc7ae4" from sklearn.metrics import precision_recall_curve # getting the probabilities of our predictions y_scores = random_forest.predict_proba(X_train) y_scores = y_scores[:,1] precision, recall, threshold = precision_recall_curve(Y_train, y_scores) def plot_precision_and_recall(precision, recall, threshold): plt.plot(threshold, precision[:-1], "r-", label="precision", linewidth=5) plt.plot(threshold, recall[:-1], "b", label="recall", linewidth=5) plt.xlabel("threshold", fontsize=19) plt.legend(loc="upper right", fontsize=19) plt.ylim([0, 1]) plt.figure(figsize=(14, 7)) plot_precision_and_recall(precision, recall, threshold) plt.show() # + _uuid="df2539851f8f0bd2ed9f3351de92cdc51082d200" from sklearn.metrics import roc_curve # compute true positive rate and false positive rate false_positive_rate, true_positive_rate, thresholds = roc_curve(Y_train, y_scores) # plotting them against each other def plot_roc_curve(false_positive_rate, true_positive_rate, label=None): plt.plot(false_positive_rate, true_positive_rate, linewidth=2, label=label) plt.plot([0, 1], [0, 1], 'r', linewidth=4) plt.axis([0, 1, 0, 1]) plt.xlabel('False Positive Rate (FPR)', fontsize=16) plt.ylabel('True Positive Rate (TPR)', fontsize=16) plt.figure(figsize=(14, 7)) plot_roc_curve(false_positive_rate, true_positive_rate) plt.show() # + _uuid="5bb3bd1b7e72aad5b68fe7c3e3fdee2c6f8c7c9b" from sklearn.metrics import roc_auc_score r_a_score = roc_auc_score(Y_train, y_scores) print("ROC-AUC-Score:", r_a_score) # + [markdown] 
_uuid="d19feb1269669a248f2ddf9fd37a2e6e8738d83f" # ---- # <a href='conclusion'></a> # ## Conclusion # Here I have implemented three ML algorithms and found the best model *random forest regression*.<br> # # #### Inspired from the [End to End Project](https://www.kaggle.com/niklasdonges/end-to-end-project-with-python) with Python and it's Medium article [Predicting the survival of titanic passengers](https://towardsdatascience.com/predicting-the-survival-of-titanic-passengers-30870ccc7e8). # # What will be in next version # - Improvement accuracy of model # - Explanatory analysis # # To know more about me go to my website [https://krunal3kapadiya.app/](https://krunal3kapadiya.app/ ) <br> # If you like this kernel, don't forgot to **upvote** it.
Titanic-Analysis-Collide-with-destiny/Titanic Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 ('este') # language: python # name: python3 # --- # # Beautiful Soup Tutorial # Como científico de datos, tarde o temprano llegarás a un punto en el que tendrás que recopilar grandes cantidades de datos. Ya sea un proyecto o por pasatiempo y no siempre podremos contar con las API, pero tranquilo tenemos el web scraping... ¡Y una de las mejores herramientas de web scraping es Beautiful Soup! # ## ¿Pero.... qué es el web scraping? # # En pocas palabras, el web scraping es la recopilación automatizada de datos de sitios web (para ser más precisos, del contenido HTML de los sitios web). # # En este Jupyter, aprenderás los conceptos básicos sobre cómo extraer datos de HTML. # # Lo harás extrayendo datos de la página de libros más vendidos de Book Depository, y para lograr esto, también tendrá que hacer uso de un poco de pandas principalmente.. # ### Conoce a tus nuevos mejores amigos: # - Beautiful Soup # - Requests # !pip install beautifulsoup4 # Para obtener la experiencia completa de Beautiful Soup, también deberás instalar un parswer, dentro de ellos tenemos.. # # - html.parser # - lxml # - html5lib # # Vamos a utilizar el lxml ya que es el mas rápido # !pip install lxml # Se necesita una cosa más para que podamos comenzar a hacer web scraping, y es la biblioteca de ```requests```. Con ```requests``` podemos solicitar páginas web de sitios web. # !pip install requests # Ahora asi manos a la obra.. # ## Mi primer scraping # Como siempre lo primero es importar las librerías from bs4 import BeautifulSoup as bs import requests import pandas as pd from splinter import Browser import numpy as np # Ahora, estamos listos para solicitar nuestra primera página web. 
No es nada complicado: guardamos la URL que queremos raspar en la variable URL, luego solicitamos la URL (requests.get (url)) y guardamos la respuesta en la variable de respuesta: url = "https://www.bookdepository.com/bestsellers" response = requests.get(url) # Cómo saber si se guardo correctamente el sitio web? print(response) # Posibles respuestas: # # - [Respuestas informativas](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#information_responses) (100–199) # - [Respuestas exitosas](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#successful_responses) (200–299) # - [Mensajes de redirección](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#redirection_messages) (300–399) # - [Respuestas de error del cliente](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses) (400–499) # - [Respuestas de error del servidor](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses) (500–599) # Pero necesitamos el contenido HTML de la página web solicitada, así que como siguiente paso guardamos el contenido de la respuesta a html: html = response.content # Lo podemos imprimir para ver su estructura print(html) # Este es el resultado obtenido en HTML de la página de los libros más vendidos, pero es realmente difícil de leer... # Pero para eso usamos BeautifulSoup y lxml # Cómo lo hacemos?.. # Creamos un objeto BeautifulSoup llamado soup con la siguiente línea de código: soup = bs(html, "lxml") # bs? # # > from bs4 import BeautifulSoup as bs # # El primer parámetro del método bs() es html (que fue la variable en la que guardamos ese contenido HTML difícil de leer de la URL de los libros más vendidos) # # El segundo parámetro ('lxml'), es el parser que se usa en html # Ahora vamos a ver el cambio print(soup) # ## Cómo navegar por un objeto de Beautiful Soup # HTML consta de elementos como enlaces, párrafos, encabezados, bloques, etc. 
Estos elementos están envueltos entre etiquetas; dentro de la etiqueta de apertura y cierre se puede encontrar el contenido del elemento. # ![image](img\html-content-web-scraping.png) # Los elementos HTML también pueden tener atributos que contienen información adicional sobre el elemento. Los atributos se definen en las etiquetas de apertura con la siguiente sintaxis: nombre del atributo = "valor del atributo". # ![image](img\attribute-example-for-web-scraping-1536x386.png) # Ahora que hemos aprendido algo de HTML básico, finalmente podemos comenzar a extraer datos de soup. Simplemente escriba un nombre de etiqueta después de soup y un punto (como soup.title), y observe cómo se desarrolla la magia: soup.title soup.h1 # Y sí queremos solo el texto?.. soup.h1.get_text() # ¿Qué sucede si solo necesita el atributo de un elemento? Tampoco hay problema: soup.a['href'] # También podemos.. # > soup.a.get("href") # La sintaxis de soup.```cualquier_etiqueta``` devuelve solo el primer elemento con ese nombre de etiqueta. En lugar de soup.```cualquier_etiqueta```, también puedes usar el método .find() y obtendrás exactamente el mismo resultado: print("Sin utilizar .find()") print(soup.h1) print("Utilizando .find()") print(soup.find("h1")) # A menudo, no solo necesitas uno, sino todos los elementos (por ejemplo, cada enlace en una página). Para eso es bueno el método .find_all(): soup.find_all('a') # Si nos fijamos podemos ver que lo que nos devuelve es una lista.. # Qué podemos hacer con una lista?.. all_a = soup.find_all('a') for a in all_a[:5]: print(a) # Ok.. Pero como extraigo la data con BeautifilSoup?.. # La página contiene 30 libros con información relacionada con ellos. 
De los datos disponibles extraeremos los siguientes: # # - book titles # - formats (paperback or hardback) # - publication dates # - prices # Mientras trabajamos con BeautifulSoup, el flujo general de extracción de datos será un enfoque de dos pasos: # # * Inspeccionar en el navegador los elementos HTML que queremos extraer # * Luego encontrar los elementos HTML con BeautifulSoup. # ## Suficiente información... # Manos a la obra # ## Obtener los titulos de los libros (find_all + get_text) # Para ello vamos a inspeccionar en el navegador (click derecho sobre un titulo de un libro y elegimos inspeccionar) # title all_h3 = soup.find_all("h3", class_="") for h3 in all_h3: print(h3.get_text(strip=True)) # ## Tips importantes # soup.find_all(“h3”) encuentra cada elemento h3 en la página web; con class_=”title” especificamos que buscamos específicamente etiquetas h3 que contengan el atributo class_=”title” (nota importante: el “_” en class**__**=”title” no es un error tipográfico, se requiere en Beautiful Soup cuando seleccionando atributos de clase). # Guardamos los elementos h3 en all_h3, que se comporta como una lista, por lo que podemos recorrerlos con un bucle for. En cada iteración extraemos solo el texto del elemento h3 con .get_text(), y con el parámetro strip=True nos aseguramos de eliminar cualquier espacio en blanco innecesario. # ## Obtener los formatos de los libros # Del paso anterior tenemos todos los títulos de libros de la página de los más vendidos. Pero, ¿qué sabemos acerca de sus formatos? ¿Hay más libros de tapa dura o tapa blanda? # # Averigüémoslo inspeccionando el elemento de formato de libro: # Como siempre inspeccionamos y buscamos el formato.. 
# Y como queremos saber la cantidad de cada formato lo metemos en dataframe # div.item-info p.format formats = soup.select("") # div y p son etiquetas donde se encuentran formats_series = pd.Series(formats) formats_series.value_counts() # ## Obtener las fechas de publicación (find_all + get_text) # Al igual que antes inspeccionamos.... # published dates = soup.find_all("p", class_="") dates = [date.get_text()[-4:] for date in dates] #con esta list comprehension obtenemos solo el año dates_series = pd.Series(dates) dates_series.value_counts() # ## Obtener los precios (find_all + get_text) # Inspeccionamos.. # price prices = soup.find_all("p", class_="") print(prices) prices[0] prices[5].get_text(strip=True).split(' ') final_prices = [] for price in prices: original_price = price.find("span", class_="rrp") if original_price: current_price = str(original_price.previousSibling).strip() # nos quedamos solo con el numero sin etiquetas current_price = float(current_price.split("€")[0].replace(",", ".")) # quitamos el signo de euro y reemplazamos la coma por el punto para que python lo reconozca con float final_prices.append(current_price) else: current_price = float(price.get_text(strip=True).split("€")[0].replace(",", ".")) final_prices.append(current_price) print(final_prices) # ## Recolectar información de un libro # Primero creamos un soup en la pagína 'principal' libros = soup.find_all(class_='item-img') # Guardamos en una variable la url principal url_principal = 'https://www.bookdepository.com/' # Creamos una lista con los urls de los libros # + lista_URLs = [] for libro in libros: URL_libro = libro.find('a')['href'] lista_URLs.append(url_principal+URL_libro) lista_URLs # - # Vamos a analizar el url de un libro primero # + # Hacemos un nuevo request para el primer libro: r = requests.get(lista_URLs[11]) # Creamos una sopa específica con la info de cada libro soup_libro = bs(r.text, "lxml") # - # creamos un soup del primer libro soup_libro # Obtenemos el titulo del 
libro name = soup_libro.find('h1').text print(name) # El rating rating = soup_libro.find(class_ = 'rating-wrap hidden-md')('span')[5].text.split(' ')[-1].replace(',','.') rating = float(rating) rating # Cantidad de votaciones para el rating rating_count = soup_libro.find(class_ = 'rating-wrap hidden-md')('span')[-1].text.split(' ')[-6].replace('(','').replace('.','') rating_count = int(rating_count) rating_count # Tipo de formato formats = soup_libro.find(class_ = 'meta-info hidden-md')('li')[0].text formats # Autor author1 = soup_libro.find(class_ = 'item-info')('span')[-1].text.split('\n ')[-1] author1 author = soup_libro.find(class_ = 'item-annotation-wrap')('h2')[2].text[6:] author # Precio price = soup_libro.find(class_ = 'sale-price').text price # Url de la portada imagen = soup_libro.find(class_ = 'book-img')['src'] imagen # Ahora lo automatizamos para hacer un web scraping # + pages = np.arange(1, 2) count = 1 lista_libros = [] for page in pages: URL = 'https://www.bookdepository.com/es/bestsellers?page=' + str(page) r = requests.get(URL) soup = bs(r.text, 'lxml') libros_grid = soup.find_all(class_='item-img') count_libro = 1 # para el print de seguimiento de descarga for libro in libros_grid: # Print de seguimiento de descarga: print('Libro {} de {}, pag {}/{}'.format( count_libro, len(libros_grid), page, len(pages))) URL_libro = libro.find('a')['href'] r = requests.get('https://www.bookdepository.com/' + URL_libro) soup_libro = bs(r.text, 'lxml') id_libro = 'lb_' + str(count) name = soup_libro.find('h1').text try: price = soup_libro.find(class_ = 'sale-price').text except: price = None try : author = soup_libro.find(class_ = 'item-annotation-wrap')('h2')[2].text[6:] except: author = soup_libro.find(class_ = 'item-info')('span')[-1].text.split('\n ')[-1] formats = soup_libro.find(class_ = 'meta-info hidden-md')('li')[0].text try: rating = soup_libro.find(class_ = 'rating-wrap hidden-md')('span')[5].text.split(' ')[-1].replace(',','.') rating = 
float(rating) except: rating = None try: rating_count = soup_libro.find(class_ = 'rating-wrap hidden-md')('span')[-1].text.split(' ')[-6].replace('(','').replace('.','') rating_count = int(rating_count) except: rating_count = None imagen = soup_libro.find(class_ = 'book-img')['src'] data = {"id_libro": id_libro, "name": name, "price": price, "author": author, "format": formats, "rating": rating, "rating_count": rating_count, "imagen": imagen} lista_libros.append(data) # Pasamos al siguiente id count += 1 count_libro += 1 # - # --------------- df = pd.DataFrame(lista_libros) df
BeautifulSoup/BeautifulSoup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import libraries import ee import geemap # Create an interactive map by specifying the center (lat, lon) and zoom level (1-18). Map = geemap.Map(center=[40, -100], zoom=4) Map # + # Import the NLCD collection. dataset = ee.ImageCollection('USGS/NLCD_RELEASES/2019_REL/NLCD') # Filter the collection to the 2016 product. nlcd2016 = dataset.filter(ee.Filter.eq('system:index', '2019')).first() # Select the land cover band. landcover = nlcd2016.select('landcover') # Display land cover on the map. Map.addLayer(landcover, {}, 'NLCD 2019') # - # Add the NLCD legend to the map. Map.add_legend(title='NLCD Land Cover Classification', builtin_legend='NLCD') # + # # To add a custom legend to the map, uncomment the following code and modify the legend dictionary. # legend_dict = { # '11 Open Water': '466b9f', # '12 Perennial Ice/Snow': 'd1def8', # '21 Developed, Open Space': 'dec5c5', # '22 Developed, Low Intensity': 'd99282', # '23 Developed, Medium Intensity': 'eb0000', # '24 Developed High Intensity': 'ab0000', # '31 Barren Land (Rock/Sand/Clay)': 'b3ac9f', # '41 Deciduous Forest': '68ab5f', # '42 Evergreen Forest': '1c5f2c', # '43 Mixed Forest': 'b5c58f', # '51 Dwarf Scrub': 'af963c', # '52 Shrub/Scrub': 'ccb879', # '71 Grassland/Herbaceous': 'dfdfc2', # '72 Sedge/Herbaceous': 'd1d182', # '73 Lichens': 'a3cc51', # '74 Moss': '82ba9e', # '81 Pasture/Hay': 'dcd939', # '82 Cultivated Crops': 'ab6c28', # '90 Woody Wetlands': 'b8d9eb', # '95 Emergent Herbaceous Wetlands': '6c9fb8' # } # Map.add_legend(title="NLCD Land Cover Classification", legend_dict=legend_dict) # - # Print the list of system ids of all available NLCD images. dataset.aggregate_array("system:id").getInfo() # Select the seven NLCD epoches after 2000. 
years = ['2001', '2004', '2006', '2008', '2011', '2013', '2016', '2019'] # Get an NLCD image by year. def getNLCD(year): # Import the NLCD collection. dataset = ee.ImageCollection('USGS/NLCD_RELEASES/2019_REL/NLCD') # Filter the collection by year. nlcd = dataset.filter(ee.Filter.eq('system:index', year)).first() # Select the land cover band. landcover = nlcd.select('landcover') return landcover ## Create an NLCD image collection for the selected years. collection = ee.ImageCollection(ee.List(years).map(lambda year: getNLCD(year))) # Print the list of system ids of selected NLCD images. collection.aggregate_array('system:id').getInfo() # Create a list of labels to populate the dropdown list. labels = [f'NLCD {year}' for year in years] labels # Add a split-panel map for visualizing NLCD land cover change. Map.ts_inspector( left_ts=collection, right_ts=collection, left_names=labels, right_names=labels ) Map
examples/notebooks/nlcd_app.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

def insertionSort(A):
    """Sort the list *A* in place, ascending, using insertion sort.

    Each element is shifted left past every strictly larger neighbour
    until it reaches its sorted position.  O(n^2) time, O(1) extra
    space, stable; returns None (mutates its argument).
    """
    for pos in range(1, len(A)):
        current = A[pos]
        scan = pos - 1
        # Shift larger elements one slot right to open a gap for `current`.
        while scan >= 0 and A[scan] > current:
            A[scan + 1] = A[scan]
            scan -= 1
        A[scan + 1] = current

A = list(range(100, -100, -1))
insertionSort(A)
print(A)
python/insertionSort/insertion-sort-refreshed.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + --- layout: project title: Project slug: /project items: - title: Third Prize (Team Award) image: src: /assets/img/interests/interests-art.png alt: interests-art description: 2019<br />Xi‘an Xingfu Woodland Landscape and Lighting Design International Competition, Comprehensive Reconstruction Management Committee of Xingfu Road District, Xi’an - title: Winning Prize (Team Award) image: src: /assets/img/interests/interests-design.png alt: interests-design description: 2014 to present<br />I enjoy ecological planning and planting design. And I would like to make my planning more rational.<br /> --- <br /> <br />
project_p.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ctarrington/try-colab/blob/master/binomial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="oRZwjpMWXMtO" colab_type="code" colab={} from math import factorial import numpy as np from scipy.stats import beta from matplotlib.pyplot import plot, fill_between from IPython import display # + [markdown] id="RNZJXzv3IY82" colab_type="text" # #Combinations # Given a set of n items how many distinct subsets of size k can you form? Order does not matter. # # EX: How many ways can you get 2 heads when tossing a coin 3 times? # # Spoken as "n choose k" # # Written as # $n \choose k$ = $ \frac{n!}{k! (n-k)!}$ # # So for the example, $ 3 \choose 2$ = $ \frac 6 2 $ = 3 # # {HHT, HTH, THH} # # Note: The "order does not matter" followed by an example where the order or position of the T seems to be the only difference seems contradictory. The most coherent explanation I have found is [combinations and permutations in coin tossing](https://math.stackexchange.com/questions/1243182/combinations-and-permutations-in-coin-tossing) # + id="I7M1ves3a1zM" colab_type="code" colab={} def comb(n,k): return factorial(n)/(factorial(k) * factorial(n-k)) # + [markdown] id="KDxOMpeiSwZF" colab_type="text" # # Binomial Distribution # Given n binary trials, what is the probability of k successes assuming that the probability of success in a single trial is p? # # If you got all of the successes first and then all of the failures the probability would be $ p^k (1-p)^{(n-k)}$ # # But the successes don't have to come first, there are many ways that we can get k successes in n trials. 
Specifically, there are $ n \choose k $ ways. # # So, if the probability of success in a single trial is p then the total probability of k successes in n trials is: # # Binomial = ${n \choose k} p^k (1-p)^{(n-k)}$ # + id="HfvVaNh4cFdk" colab_type="code" colab={} def binomial(n,p): def distribution(k): return comb(n, k) * p**k * (1-p)**(n-k) return distribution # + [markdown] id="U4ogupV21Cyk" colab_type="text" # ## Examples # + id="PlyRghzadNj5" colab_type="code" colab={} # n = 7, p = 1/5 p = binomial(7,1/5) # + id="SNnp5vFsdmre" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3a1d4b41-6025-4170-e6eb-ec41e6b51975" # Probability of at least 2 successes sum([p(k) for k in range(2,8)]) # + id="Q1bLpn2vdq_i" colab_type="code" colab={} # n = 25, p = 1/10 p2 = binomial(25,1/10) # + id="l62BSbwtfTCh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3d281e3b-7a3e-4963-b254-66155ca7bc21" # Probability of at least 2 successes sum([p2(k) for k in range(2,26)]) # + [markdown] id="yeyaaLi7v7oi" colab_type="text" # # Working Backwards from data # Given a sample outcome, what can we learn about the single trial probability of success? # # EX: If we had 60 heads in 100 trials what does that tell us about a single throw? # # We generate a new function that takes the single trial probability as an argument and answers with the probabilty of getting the given number of successes in the given number of trials. 
# # # + id="dbZ1D6M6wxgw" colab_type="code" colab={} def binomial_p(n,k): def distribution(single_trial_p): return comb(n, k) * single_trial_p**k * (1-single_trial_p)**(n-k) return distribution # + id="_XP_VucUw48A" colab_type="code" colab={} num_points = 2000 trial_ps = [p/num_points for p in range(0,num_points+1, 1)] # + id="dzs93JBaxFSl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="9c56b320-e43c-4dfb-a23d-f92b097c7cf2" binomial_100_60 = binomial_p(100, 60) probabilities = [binomial_100_60(p) for p in trial_ps] plot(trial_ps, probabilities, 'o') area_under_curve = sum(probabilities) print('Each dot indicates how likely the 60 successes in 100 trials is at the specified probability of a success in a single trial') # + [markdown] id="7qctb3bnzEI0" colab_type="text" # Note: The shape is useful, but this is not a pmf - it doesn't sum to 1. # # + id="WhSXOMaH5iFq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9b2073b6-b6f2-40ff-afe8-adf60374cb9d" print('approximate area under curve', area_under_curve) # + id="1Xi3wPJpxYh_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="dc9f927c-6781-4ce0-9df6-3813ed5de5ea" normalized_probabilities = [p/area_under_curve for p in probabilities] plot(trial_ps, normalized_probabilities, 'o') print('area under normalized curve', sum(normalized_probabilities)) # + [markdown] id="UytuKf1z1XbO" colab_type="text" # If our goal is the normalized distribution or pmf then we are doing extra work - each value is scaled up by the ${n \choose k} $ then scaled by the sum of all of the values. # # The shape of the curve is entirely determined by $p^k (1-p)^{(n-k)}$ # # I wouldn't believe me either, let's try it out... 
# + id="6ZZCfh37ywIx" colab_type="code" colab={} def simplified_binomial_p(n,k): def distribution(p): return p**k * (1-p)**(n-k) return distribution # + id="4_41ZjQ82ev7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="f80e216a-7734-41ae-bf83-424425245124" simplified_binomial_100_60 = simplified_binomial_p(100, 60) simplified_probabilities = [simplified_binomial_100_60(p) for p in trial_ps] area_under_simplified_curve = sum(simplified_probabilities) normalized_simplified_probablities = [p/area_under_simplified_curve for p in simplified_probabilities] print('approximate area under curve', area_under_simplified_curve) print('approximate area under normalized curve', sum(normalized_probabilities)) plot(trial_ps, normalized_simplified_probablities, 'o') # + id="N11TMK6W3P84" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d79ca008-592f-44a9-cca2-740d759b3098" print(normalized_probabilities[100], normalized_simplified_probablities[100]) # + [markdown] id="uQc6j2cITKHw" colab_type="text" # ##Sample usage of normalized binomial # # Data is 4 heads in 10 flips # What is the probability that the per trial probability of heads is more than 60%? 
# + id="sY_I37FV3myB" colab_type="code" colab={} trial_ps = [p/num_points for p in range(0,num_points+1, 1)] simplified_binomial_10_4 = simplified_binomial_p(10, 4) simplified_probabilities = [simplified_binomial_10_4(p) for p in trial_ps] area_under_simplified_curve = sum(simplified_probabilities) normalized_simplified_probablities = [p/area_under_simplified_curve for p in simplified_probabilities] # + id="HYhjY6-zUNvy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="27ec9a3f-b58b-4da7-fae9-7459fe58a3f7" sixty_percent_of_points = round(num_points*.6) probs_less_than_60 = normalized_simplified_probablities[0:sixty_percent_of_points] fill_between(trial_ps[0:sixty_percent_of_points], probs_less_than_60) print('sum P(trial_p < 60 = ', sum(probs_less_than_60)) probs_greater_than_60 = normalized_simplified_probablities[sixty_percent_of_points:] fill_between(trial_ps[sixty_percent_of_points:], probs_greater_than_60) print('sum P(trial_p > 60 = ', sum(probs_greater_than_60)) # + [markdown] id="VSj3wJYtnUgh" colab_type="text" # ## Beta Distribution # Our normalized discrete pmf from the Binomial distribution works. But of course there is a smoother and easier continous solution: The Beta distribution which gives you a nice PDF. # # Beta($ p, \alpha, \beta) = \frac {p^{\alpha-1} (1-p)^{\beta-1}} {beta(\alpha,\beta)}$ # # Where beta is $ \int_0^1 p^{\alpha-1} (1-p)^{\beta-1} dp$ # # Hopefully this looks familiar - our normalization of the Binomial was just numerical integration of the area under the Binomial curve and we too abandoned the $ n\choose k $ when we normalized. # # I am going to wave my hands over the $ \alpha-1 $ and $ \beta-1 $ and walk away. It totally works... Python wouldn't lie... 
# + id="f78mfRzbjW28" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="29672499-74f7-4e46-b05d-5394c93d6e7f" from_beta = 1 - beta.cdf([.6], 4, 6)[0] print('per beta distribution, P(trial_p > 60) = ', from_beta) print('difference = ', from_beta - sum(probs_greater_than_60)) # + id="iL__WlgajzFx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="39de6f3a-3d37-4aac-879b-aa8ddb79d240" rv = beta(4,6) left_points = trial_ps[0:round(0.6*num_points)] right_points = trial_ps[round(0.6*num_points):num_points] fill_between(left_points, rv.pdf(left_points)) fill_between(right_points, rv.pdf(right_points)) # + [markdown] id="64m8NFKelyDs" colab_type="text" # The scale is completely different for the Beta distribution. Sampling is strange. Trust the Beta, not our little discrete hack. # + [markdown] id="oF7JG27BvDUI" colab_type="text" # ## More samples # # Data is 9 heads and 11 tails. What is the probability that the coin is fair by the definition of 0.45 < p < 0.55? # + id="UGTcCDjFksko" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ab7908e1-1059-4e61-ac54-6842c74d34e9" probabilities = beta.cdf([0.45, 0.55], 9, 11) probabilities[1] - probabilities[0] # + [markdown] id="rotpICT7wLjO" colab_type="text" # Data is 109 heads and 111 tails. # # + id="cfgRRWJcvf9A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3a346ffb-e674-45e9-c07c-482563caed9b" probabilities = beta.cdf([0.45, 0.55], 109, 111) probabilities[1] - probabilities[0] # + id="_AUy68_7wY8T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e056e2e3-c57b-4eb5-a4da-22a29aec89d9" # could it happen with at least a 0.55 bias on the coin? probabilities = beta.cdf([0.55, 1], 109, 111) probabilities[1] - probabilities[0] # + id="ABWzUp-mxUQY" colab_type="code" colab={}
binomial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import builtwith import whois import requests from bs4 import BeautifulSoup import pandas as pd import datetime # ### Página web URL = 'https://tarifaluzhora.es/' # ### Evaluación inicial # Check robots.txt url_robots = URL + '/robots.txt' page = requests.get(url_robots) print(page.text) # Observamos en el archivo robots.txt que para cualquier rastreador se habilitan todos los elementos gráficos, código y estilos y lo que se deshabilita son la parte para registro de usuarios y configuraciones de la web. # Tecnologia del Sitio print(builtwith.builtwith(URL)) # Propietario del Sitio print(whois.whois(URL)) # ### Scraping por fecha # + def scrap_data(tipo_tarifa,d,m,y): url = 'https://tarifaluzhora.es/?tarifa=' + tipo_tarifa + '&fecha=' + d + '%2F' + m + '%2F' + y # Obtenemos página raw_html = requests.get(url).text # La parseamos data = BeautifulSoup(raw_html, 'html.parser') # Generamos dataframe mientras buscamos datos y los limpiamos df = pd.DataFrame() df['Fecha'] = [data.select("span")[6].string] df['Franja más cara'] = [data.select("span")[9].string] df['Franja más barata'] = [data.select("span")[7].string] tags_hora=data.find_all(itemprop="description") tags_precio=data.find_all(itemprop="price") a=zip(tags_hora, tags_precio) for i,j in a: df[i.string] = [j.string.replace(' €/kWh','')] return df # Tarifas: normal, discriminacion (discriminacion horaria), coche_electrico (vehiculos electricos) tarifa = 'normal' # Creamos dataset dataset = pd.DataFrame() # Obtenemos los datos de Enero for i in range (1,32): dataset =pd.concat([dataset, scrap_data(tarifa, str(i).zfill(2), '01', '2021')]) # Obtenemos los datos de Febrero for i in range (1,29): dataset =pd.concat([dataset, scrap_data(tarifa, str(i).zfill(2), '02', '2021')]) # Obtenemos los datos de 
Marzo for i in range (1,32): dataset =pd.concat([dataset, scrap_data(tarifa, str(i).zfill(2), '03', '2021')]) # + # dataset # - dataset.to_csv(r'tarifa_electrica_EneMar2021.csv',index=False)
src/webscraping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="ZDpiolvXjLyK" outputId="f3ee5730-8dd8-4dd4-b7eb-1f60ffc188cd" pip install Sastrawi # + colab={"base_uri": "https://localhost:8080/"} id="IkzR9zCXjQdf" outputId="87faab77-aea3-4fd9-bc00-b10a66a929ec" import nltk nltk.download('punkt') nltk.download('stopwords') # + id="yaSkYVi7UFAo" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer from sklearn.linear_model import LinearRegression from keras.preprocessing.sequence import pad_sequences from collections import Counter import nltk import seaborn as sns import string from nltk.corpus import stopwords # import re # from autocorrect import spell import regex as re from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import Dense from keras.backend import eval from keras.optimizers import Adam from keras.layers import LSTM from keras.layers.embeddings import Embedding from keras.layers import Dropout from keras.layers.convolutional import Conv1D,MaxPooling1D # Input data files are available in the "../input/" directory. 
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os # + colab={"base_uri": "https://localhost:8080/"} id="uizAKDxdc_Hm" outputId="7ffafa9c-e4d6-4d25-a81d-5b7ee5946a38" from google.colab import drive drive.mount('/content/gdrive') # + colab={"base_uri": "https://localhost:8080/", "height": 197} id="94yOE7sGesom" outputId="580f6236-a550-41de-d863-5011bc759ead" import numpy as np import pandas as pd rating = pd.read_csv("/content/gdrive/My Drive/bangkit/kpp_serpong.csv") rating.head(5) # + colab={"base_uri": "https://localhost:8080/", "height": 197} id="jO3rj6Obmfzt" outputId="84826de2-4ced-48bd-8a17-26f80628dc88" sentiment = {1: 0, 2: 0, 3: 0, 4: 1, 5: 1} rating["sentiment"] = rating["rating"].map(sentiment) #map sentiment to reviews rating["sentiment"] = pd.to_numeric(rating["sentiment"], errors='coerce') #set invalid parsing as NaN rating = rating.dropna(subset=["sentiment"]) rating["sentiment"] = rating["sentiment"] .astype(int) rating.head() # + colab={"base_uri": "https://localhost:8080/", "height": 278} id="SL-4Y-8tnYw9" outputId="4038a857-b72a-44be-8ced-d53cd574c5bf" rating['sentiment'].value_counts().plot( kind='bar', label="sentiment(numeric)").legend() # + colab={"base_uri": "https://localhost:8080/", "height": 161} id="jiBq_Er6wSuH" outputId="598013bd-deed-409d-92db-1a038a68d1c0" rating.head() # + colab={"base_uri": "https://localhost:8080/"} id="tRcrLC-LyN3h" outputId="08344988-9ab8-4e15-b647-edda69dc3c96" rating.shape # + colab={"base_uri": "https://localhost:8080/", "height": 197} id="44H1Mb2Gxf9d" outputId="cb6bd027-25ac-4785-eae9-487ed46b44e7" permanent = rating[['ulasan' , 'rating' , 'sentiment']] mpermanent=permanent.dropna() mpermanent.head() # + id="oYb-LQFbv7fD" y = mpermanent['sentiment'] x = mpermanent['ulasan'].reset_index() # X =x[xindex(False)] # + colab={"base_uri": "https://localhost:8080/"} id="zEiH0TDKxRJ1" outputId="49b576d8-2cdb-41f8-c1bc-342b7d95a76e" len(y) # 
len(X) # + colab={"base_uri": "https://localhost:8080/"} id="B7-mVImLyotA" outputId="cd5ed9fe-66ad-47bd-ee36-df5016573391" X = x['ulasan'] print(X) # + [markdown] id="8cNB-GxYoI9m" # **PRA-PROCESSING** # + [markdown] id="OJZYP23VsLKZ" # Stopword Removal # + id="_SXoQsvX1stW" colab={"base_uri": "https://localhost:8080/"} outputId="4443051d-175a-4c69-a4d0-da2e8a4620bd" import nltk nltk.download('punkt') from nltk.corpus import stopwords nltk.download('stopwords') # + id="LQrTke8pxpy8" vocab_size = 122 embedding_dim = 64 max_length = 50 trunc_type = 'post' padding_type = 'post' oov_tok = '<OOV>' #OOV = Out of Vocabulary training_portion = 0.2 # + colab={"base_uri": "https://localhost:8080/", "height": 227} id="zqwNaengxwp_" outputId="9d6d6fca-4f71-4a8b-8c53-0d655bda65f4" articles = [] labels = [] with open("kpp_serpong.csv", 'r') as csvfile: reader = csv.reader(csvfile, delimiter=',') next(reader) for row in reader: labels.append(row[0]) article = row[1] for word in stopwords.words('indonesian'): token = ' ' + word + ' ' article = article.replace(token, ' ') article = article.replace(' ', ' ') articles.append(article) print(len(labels)) print(len(articles))
ProgressML/notebook/Bangkit2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Ruby 2.5.3 # language: ruby # name: ruby # --- require 'daru/view' Daru::View.plotting_library = :googlecharts data = [ ['Year', 'Sales', 'Expenses'], ['2013', 1000, 400], ['2014', 1170, 460], ['2015', 660, 1120], ['2016', 1030, 540] ] area_chart_table = Daru::View::Table.new(data, {}, chart_class: 'Chartwrapper') area_chart_table.show_in_iruby area_chart_options = { type: :area } area_chart_chart = Daru::View::Plot.new(area_chart_table.table, area_chart_options, chart_class: 'ChartWrapper') area_chart_chart.show_in_iruby # + line_basic_options = { title: 'Company Performance', curveType: 'function', legend: { position: 'bottom' } } line_basic_chart = Daru::View::Plot.new(data, line_basic_options, chart_class: 'Chartwrapper') line_basic_chart.show_in_iruby # - area_chart_options = { type: :area, view: {columns: [0, 1]} } area_chart_chart = Daru::View::Plot.new(area_chart_table.table, area_chart_options, chart_class: 'ChartWrapper') area_chart_chart.show_in_iruby area_chart_options = { type: :area, view: {columns: [1, 2]} } area_chart_chart = Daru::View::Plot.new(area_chart_table.table, area_chart_options, chart_class: 'ChartWrapper') area_chart_chart.show_in_iruby data_str = 'https://docs.google.com/spreadsheets/d/1aXns2ch8y_rl9ZLxSYZIU5ewUB1ZNAg5O6iPLZLApZI/gviz/tq?header=1&tq=' table = Daru::View::Table.new(data_str, {width: 500}, chart_class: 'Chartwrapper') table.show_in_iruby data_str = 'https://docs.google.com/spreadsheets/d/<KEY>5O6iPLZLApZI/gviz/tq?header=1&tq=' table = Daru::View::Plot.new(data_str, {width: 500}, chart_class: 'Chartwrapper') table.show_in_iruby data_str = 'https://docs.google.com/spreadsheets/d/1aXns2ch8y_rl9ZLxSYZIU5ewUB1ZNAg5O6iPLZLApZI/gviz/tq?header=1&tq=' table = Daru::View::Plot.new(data_str, {width: 500, view: {columns: [0, 1]}}, chart_class: 'Chartwrapper') 
table.show_in_iruby idx = Daru::Index.new ['City', '2010 Population',] data_rows = [ ['New York City, NY', 8175000], ['Los Angeles, CA', 3792000], ['Chicago, IL', 2695000], ['Houston, TX', 2099000], ['Philadelphia, PA', 1526000] ] df_city_pop = Daru::DataFrame.rows(data_rows) df_city_pop.vectors = idx df_city_pop bar_basic_table = Daru::View::Table.new(df_city_pop, {}, chart_class: 'Chartwrapper') bar_basic_table.show_in_iruby bar_basic_options = { title: 'Population of Largest U.S. Cities', type: :bar } bar_basic_chart = Daru::View::Plot.new(df_city_pop, bar_basic_options, chart_class: 'Chartwrapper') bar_basic_chart.show_in_iruby bar_basic_options = { title: 'Population of Largest U.S. Cities', type: :column } bar_basic_chart = Daru::View::Plot.new(bar_basic_table.table, bar_basic_options, chart_class: 'Chartwrapper') bar_basic_chart.show_in_iruby bar_basic_table = Daru::View::Table.new(df_city_pop, {view: {columns: [0]}}, chart_class: 'Chartwrapper') bar_basic_table.show_in_iruby query_string = 'SELECT A, H, O, Q, R, U LIMIT 5 OFFSET 8' data_spreadsheet = 'https://docs.google.com/spreadsheets/d/1XWJLkAwch5GXAt_7zOFDcg8Wm8Xv29_8PWuuW15qmAE/gviz/tq?gid=0&headers=1&tq=' data_spreadsheet << query_string table_spreadsheet = Daru::View::Table.new(data_spreadsheet, {width: 800}, chart_class: 'Chartwrapper') table_spreadsheet.show_in_iruby
spec/dummy_iruby/Google Charts - Chartwrapper.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!-- BEGIN QUESTION --> # # This markdown cell should also remain. **Look at the plot** of this function: # # $$\Large # f(x) = cos(2x) + e^x$$ # # <!-- END QUESTION --> # <!-- BEGIN QUESTION q1 --> # # Let's test a multi-cell question. # This is the second cell. # This is the third cell. # # <!-- END QUESTION --> # # <!-- BEGIN QUESTION q2 --> # # Let's ensure that this **code cell is included.** plt.plot(x,y**4) # <!-- END QUESTION -->
test/html-output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## 9.4 设置主题样式 # # 使用Beamer制作幻灯片的一道特色就是有现成的模板的主题样式可供选择和直接使用,其中,主题样式对于幻灯片的演示效果而言十分重要,简言之,主题样式就是幻灯片的“外观”。Beamer中提供的每种主题样式都具有良好的可用性和可读性,这也使得Beamer制作出来的幻灯片看起来十分专业,同时,反复使用的难度也不大。 # # ### 9.4.1 基本介绍 # # 使用Beamer制作幻灯片时,我们可以选择很多已经封装好的幻灯片主题样式,不同样式可以达到不同的视觉效果。其实,使用这些主题样式的方法非常简单。通常来说,在前导代码中插入`\usetheme{}`命令即可,例如使用`Copenhagen`(哥本哈根主题样式)只需要在前导代码中申明`\usetheme{Copenhagen}`,这种方式调用主题样式是非常省事。 # # 在Beamer文档类型中,有几十种主题样式可供选择和使用,比较常用的主题样式包括以下这些: # # - `Berlin`:柏林主题样式,默认样式为蓝色调。 # - `Copenhagen`: 哥本哈根主题样式,默认样式为蓝色调。 # - `CambridgeUS`:美国剑桥主题样式,默认样式为红色调。 # - `Berkeley`:伯克利主题样式,默认样式为蓝色调。 # - `Singapore`:新加坡主题样式。 # - `Warsaw`:默认样式为蓝色调。 # # 【**例9-37**】在`beamer`文档类型中使用`CambridgeUS`主题样式制作一个简单的幻灯片。 # # ```tex # \documentclass{beamer} # \usetheme{CambridgeUS} # # \begin{document} # # \begin{frame}{Example} # # This is a simple example for the CambridgeUS theme. # # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.4.1所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_1.png" width="450" /> # </p> # # <center><b>图9.4.1</b> 编译后的幻灯片效果</center> # # 当然,在这些主题样式基础上,我们也能够调整主题样式的色调,使用`\usecolortheme{}`命令即可,这些色调包括`beetle`、`beaver`、`orchid`、`whale`、`dolphin`等,因此,基于上述几种主题样式,设置特定的颜色主题,即色调后,我们能够得到更多的组合样式,具体可参考[https://hartwork.org/beamer-theme-matrix/](https://hartwork.org/beamer-theme-matrix/)网站提供的组合样式矩阵。 # # 【**例9-38**】在`beamer`文档类型中使用`CambridgeUS`主题样式和`dolphin`色调制作一个简单的幻灯片。 # # ```tex # \documentclass{beamer} # \usetheme{CambridgeUS} # \usecolortheme{dolphin} # # \begin{document} # # \begin{frame}{Example} # # This is a simple example for the CambridgeUS theme with dolphin (color theme). 
# # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.4.2所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_2.png" width="450" /> # </p> # # <center><b>图9.4.2</b> 编译后的幻灯片效果</center> # # ### 9.5.2 字体设置 # # 实际上,对于幻灯片的文本字体,我们可以调用字体样式对其进行调整,具体来说,在前导代码中要用到的命令为`\usefonttheme{A}`,位置A填写的一般是字体类型,例如`serif`。 # # 【**例9-39**】使用`beamer`文档类型创建一个简单的幻灯片,并在前导代码中申明使用`serif`对应的字体样式。 # # ```tex # \documentclass{beamer} # \usefonttheme{serif} # # \begin{document} # # \begin{frame} # # This is a simple example for using \alert{serif} font theme. # # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.3所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_3.png" width="450" /> # </p> # # <center><b>图9.5.3</b> 编译后的幻灯片效果</center> # # 我们知道:在常规文档中,可以使用各种字体对应的宏包达到调用字体的作用,使用规则为`\usepackage{A}`,位置A填写的一般是字体类型,包括serif、avant、bookman、chancery、charter、euler、helvet、mathtime、mathptm、mathptmx、newcent、palatino、pifont、utopia等。 # # 【**例9-40**】使用`beamer`文档类型创建一个简单的幻灯片,并在前导代码中申明使用字体`palatino`对应的宏包。 # # ```tex # \documentclass{beamer} # \usepackage{palatino} # # \begin{document} # # \begin{frame} # # This is a simple example for using \alert{palatino} font. 
# # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.4所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_4.png" width="450" /> # </p> # # <center><b>图9.5.4</b> 编译后的幻灯片效果</center> # # ### 9.5.3 表格字体大小 # # 在Beamer中制作表格,当我们想对表头或者表格内容文字大小进行调整时,可以使用在前导代码中申明使用`caption`宏包,即`\usepackage{caption}`,然后设置具体的字体大小即可,如`\captionsetup{font = scriptsize, labelfont = scriptsize}`可以将表头和表格内容字体大小调整为`scriptsize`。 # # 【**例9-41**】使用`\begin{table} \end{table}`环境创建一个简单表格,并使用`caption`宏包将表头字体大小设置为`Large`、将表格内容字体大小设置为`large`。 # # ```tex # \documentclass{beamer} # \usepackage{booktabs} # \usepackage{caption} # \captionsetup{font = large, labelfont = Large} # # \begin{document} # # \begin{frame} # # \begin{table} # \caption{A simple table.} # \begin{tabular}{l|ccc} # \toprule # & \textbf{header3} & \textbf{header4} & \textbf{header5} \\ # \midrule # \textbf{header1} & cell1 & cell2 & cell3 \\ # \midrule # \textbf{header2} & cell4 & cell5 & cell6 \\ # \bottomrule # \end{tabular} # \end{table} # # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.5所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_4_0.png" width="450" /> # </p> # # <center><b>图9.5.5</b> 编译后的幻灯片效果</center> # # 其中,单就设置表头字体大小而言,除了使用`caption`宏包之外,还可以通过对幻灯片设置全局参数达到调整字体大小的效果,例如`\setbeamerfont{caption}{size = \Large}`。 # # ### 9.5.4 样式调整 # # 在Beamer文档类型中,除了可以使用各种主题样式,另外也可以根据幻灯片组成部分,分别对侧边栏、导航栏以及Logo等进行调整。其中,侧边栏是由所选幻灯片主题样式自动生成的,主要用于显示幻灯片目录。有时为了显示幻灯片的层次,使用侧边栏进行目录索引。 # # 【**例9-42**】使用`Berkeley`主题样式,并将侧边栏显示在右侧。 # # ```tex # \documentclass{beamer} # \PassOptionsToPackage{right}{beamerouterthemesidebar} # \usetheme{Berkeley} # \usefonttheme{professionalfonts} # # \begin{document} # # \begin{frame} # \frametitle{Parent function} # \framesubtitle{A short list} # # Please check out the following parent function list. 
# \begin{enumerate} # \item $y=x$ # \item $y=|x|$ # \item $y=x^{2}$ # \item $y=x^{3}$ # \item $y=x^{b}$ # \end{enumerate} # # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.6所示。 # # <p align="center"> # <img align="middle" src="graphics/example_sec2_6.png" width="450" /> # </p> # # <center><b>图9.5.6</b> 编译后的幻灯片效果</center> # # 很多时候我们会发现,在各类学术汇报中,幻灯片的首页通常会有主讲人所在的研究机构Logo。在Beamer文档类型中,有`\logo`和`\titlegraphic`两个命令可供使用,使用`\logo`命令添加的Logo会在每一页幻灯片中都显示,而使用`\titlegraphic`命令添加的Logo只出现在标题页。 # # 【**例9-43**】使用`\logo`命令在幻灯片中添加Logo。 # # ```tex # \documentclass{beamer} # \usefonttheme{professionalfonts} # # \title{A Simple Beamer Example} # \author{<NAME>} # \institute{Author's Institute} # # \logo{\includegraphics[width=2cm]{logopolito}} # # \begin{document} # # \begin{frame} # \titlepage # \end{frame} # # \begin{frame}{Parent function}{A short list} # Please check out the following parent function list. # \begin{enumerate} # \item $y=x$ # \item $y=|x|$ # \item $y=x^{2}$ # \item $y=x^{3}$ # \item $y=x^{b}$ # \end{enumerate} # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.7所示。 # # <p align="center"> # <table> # <tr> # <td><img align="middle" src="graphics/example_sec2_7_0.png" width="450"></td> # <td><img align="middle" src="graphics/example_sec2_7_1.png" width="450"></td> # </tr> # </table> # </p> # # <center><b>图9.5.7</b> 编译后的幻灯片效果</center> # # 【**例9-44**】使用`\titlegraphic`命令在幻灯片的标题页添加Logo。 # # ```tex # \documentclass{beamer} # \usefonttheme{professionalfonts} # # \title{A Simple Beamer Example} # \author{Author's Name} # \institute{Author's Institute} # # \titlegraphic{\includegraphics[width=2cm]{logopolito}\hspace*{4.75cm}~ # \includegraphics[width=2cm]{logopolito} # } # # \begin{document} # # \begin{frame} # \titlepage # \end{frame} # # \begin{frame}{Parent function}{A short list} # Please check out the following parent function list. 
# \begin{enumerate} # \item $y=x$ # \item $y=|x|$ # \item $y=x^{2}$ # \item $y=x^{3}$ # \item $y=x^{b}$ # \end{enumerate} # \end{frame} # # \end{document} # ``` # # 编译上述代码,得到幻灯片如图9.5.8所示。 # # <p align="center"> # <table> # <tr> # <td><img align="middle" src="graphics/example_sec2_8_0.png" width="450"></td> # <td><img align="middle" src="graphics/example_sec2_8_1.png" width="450"></td> # </tr> # </table> # </p> # # <center><b>图9.5.8</b> 编译后的幻灯片效果</center> # # ### 参考资料 # # - <NAME>, <NAME>. [Fun with Beamer: An Epic Quest To Create the Perfect Presentation](http://web.mit.edu/rsi/www/pdfs/beamer-tutorial.pdf), June 28, 2017. # - [Beamer: change size of figure caption](https://tex.stackexchange.com/questions/52132). # - [logo in the first page only](https://tex.stackexchange.com/questions/61051). # 【回放】[**9.3 块与盒子——添加框元素**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-9/section3.ipynb) # # 【继续】[**9.5 插入程序源代码**](https://nbviewer.jupyter.org/github/xinychen/latex-cookbook/blob/main/chapter-9/section5.ipynb) # ### License # # <div class="alert alert-block alert-danger"> # <b>This work is released under the MIT license.</b> # </div>
chapter-9/section4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Some badges # # In github's notebook view, these links do not work # # [![build status](http://img.shields.io/travis/altair-viz/altair/master.svg?style=flat)](https://travis-ci.org/altair-viz/altair) # [![Binder](https://beta.mybinder.org/badge.svg)](https://beta.mybinder.org/v2/gh/altair-viz/altair_notebooks/master) # # - Using ``target=_blank``: <a href="https://travis-ci.org/altair-viz/altair" target="_blank"><img src="http://img.shields.io/travis/altair-viz/altair/master.svg?style=flat"></a> # - Using ``target=_parent``: <a href="https://travis-ci.org/altair-viz/altair" target="_parent"><img src="http://img.shields.io/travis/altair-viz/altair/master.svg?style=flat"></a>
test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Scoring customer propensity to purchase
# Training a model based on a shopper's actions on a website to identify the best prospects who did not purchase yesterday.
#
# **Notes** : This data is sampled and all UserIDs are dummies.
#
# <br><br>
# ### Introduction
# We have many visitors to our website every day; some purchase but many do not. We spend money re-targeting past visitors, and we'd like to optimise this activity by targeting the visitors who are more likely to convert. To do this, we've taken data showing which parts of our website users interacted with. Our questions are:
# <br><br>
# 1. Which of these interactions affect a user's likelihood to purchase?<br>
# 2. Can we score visitors from yesterday who did not purchase, to see who the most valuable prospects are?
#

import pandas as pd
import numpy as np
import os
import matplotlib.pylab as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
import sklearn.metrics

train = pd.read_csv('Customer_propensity_to_purchase/training_sample.csv')

train.head(5)

train.dtypes

train.describe()

train.info()

# Here we can see a clear snapshot of the data: we have 1's or 0's in the columns, indicating whether or not a user interacted with these areas of the website. The last column shows whether the user ordered or not — this will be important!

# ## Is there any correlation
# In order to answer our first question, we can start by exploring any correlation between these individual website actions and an order, since we have all these fields in our data.
#
# We've got quite a few fields, so let's start with a heatmap to view correlations...
corr = train.corr()
corr

train.corr()['ordered']

corr = train.corr()
plt.figure(figsize=(16, 14))
sns.heatmap(corr, vmax=0.5, center=0, square=True, linewidths=2, cmap='Blues')
plt.show()

# Interesting - there seems to be a strong correlation between visitors who ordered and visitors who saw the checkout, which makes sense! There are also strong correlations for people who checked out delivery times and added items to their shopping cart - let's get a closer look at the correlations for orders...

# Alright! Looks like our initial insights from the heatmap were correct: users who checked out the delivery options on a product detail page have an almost 80% correlation to orders, so there's definitely something in here we can use! But wait... it looks like there isn't much correlation between users on a mobile and orders, so we should probably remove this field from our predictor.

# ## Let's get predicting!
# First we build our predictor and target variables. We're going to drop 'ordered' from our predictors, as it is our target variable. We'll also remove 'UserID', as it has no impact on likelihood to order, and 'device_mobile', as we've seen it has a negative correlation to orders.

# Dropping columns with low correlation
predictors = train.drop(['ordered','UserID','device_mobile'], axis=1)
targets = train.ordered

predictors.columns

# Let's take a look at our predictor columns to check we've included everything we wanted, and not left in something we shouldn't have...

predictors.corr()

X_train, X_test, y_train, y_test = train_test_split(predictors, targets, test_size=.3)
print( "Predictor - Training : ", X_train.shape, "Predictor - Testing : ", X_test.shape )

# For our model we are going to use a naive Bayes classifier; below we instantiate it, fit it, then predict using it, then we can analyse the accuracy of our predictions...
#
#

# +
from sklearn.naive_bayes import GaussianNB

classifier=GaussianNB()
classifier=classifier.fit(X_train,y_train)

predictions=classifier.predict(X_test)

#Analyze accuracy of predictions
sklearn.metrics.confusion_matrix(y_test,predictions)
# -

# And apply an accuracy score to our model...

sklearn.metrics.accuracy_score(y_test, predictions)

yesterday_prospects = pd.read_csv('Customer_propensity_to_purchase/training_sample.csv')
yesterday_prospects.head(5)

#Now let's explore this DataFrame and check everything is as expected...
yesterday_prospects.info()

# We're going to drop UserID before we predict on this data, so that it matches our training set, but before we do let's pop it into another variable, so we can pull back this identifier later. Once that's done we can drop our unwanted fields and print the head() to check our data... looking good?

userids = yesterday_prospects.UserID
yesterday_prospects = yesterday_prospects.drop(['ordered','UserID','device_mobile'], axis=1)
yesterday_prospects.head(5)

yesterday_prospects.shape

# Now we'll run our predictions and insert them into a field called 'propensity', print the head, and check it's all come together...

yesterday_prospects['propensity'] = classifier.predict_proba(yesterday_prospects)[:,1]
yesterday_prospects.head()

pd.DataFrame(userids)

results = pd.concat([userids, yesterday_prospects], axis=1)
results.head(10)
Customer_propensity_to_purchase.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys sys.path.append('../python_packages_static/') import os, glob, shutil import numpy as np import pandas as pd import matplotlib.pyplot as plt import flopy import flopy.utils as fu import platform # # 1. MODPATH file setup # ### 1.0 Set paths and pointers to MODFLOW6 executable and namefile home = os.getcwd() upone = os.path.dirname(home) model_ws = os.path.join(upone, 'neversink_mf6') modflow_executable = os.path.join(upone, 'neversink_mf6', 'mf6') # modpath executable in neversink_mf6 directory simname = 'mfsim.nam' # ### 1.1 Make MODFLOW Object # Make a flopy modlfow object using the `.nam` file sim = flopy.mf6.MFSimulation.load(simname, 'mf6', sim_ws=model_ws, exe_name=modflow_executable) # ### 1.2 Run the modflow model # can skip if this if model has already been run in `neversink_mf6` # + #sim.run_simulation() # - # ### 1.3 Load model simulation m = sim.get_model() nlay = m.dis.nlay.data nrow = m.dis.nrow.data ncol = m.dis.ncol.data print(nlay, nrow, ncol) # ### 1.4 Get priority wells and determine MODPATH nodes wells_df = pd.read_csv('../processed_data/2009-2016_ave_pumping.csv') wells_df = wells_df.loc[wells_df.WellType == 'Priority'] priority_wells = wells_df.ID_Well.tolist() priority_wells # + # make well id dict to help keep id's particle_ids = [] idx = 0 for idx in range(0, len(priority_wells)): particle_ids.append(idx) idx += 1 modpath_id_dict = dict(zip(particle_ids, priority_wells)) modpath_id_dict # - # **1.4.1 All Neversink model wells from `stress_period_data`** # + # if we want MODPATH particles in all wells -- we probably don't particle_locations_allwells = m.wel.stress_period_data.get_data()[0]['cellid'] particle_locations_allwells # - # **1.4.2 Priority wells from `well_000.dat` with zero-based adjustment** # + 
# Read the MODFLOW6 WEL package input and keep only the priority wells.
well_dat_df = pd.read_csv('../neversink_mf6/wel_000.dat', delim_whitespace=True)
well_dat_df = well_dat_df.loc[well_dat_df.boundname.isin(priority_wells)]

# MODFLOW input is 1-based; make zero-based layer/row/column columns for flopy/MODPATH.
well_dat_df['k_zb'] = well_dat_df['#k'] - 1
well_dat_df['i_zb'] = well_dat_df['i'] - 1
well_dat_df['j_zb'] = well_dat_df['j'] - 1

particle_locations = [(x, y, z) for x, y, z in
                      zip(well_dat_df['k_zb'], well_dat_df['i_zb'], well_dat_df['j_zb'])]
particle_locations
# -

# **1.4.3 convert well locations to nodes for modpath**

def get_nodes(locs):
    """Convert zero-based (layer, row, col) tuples to flat MODPATH node numbers.

    Parameters
    ----------
    locs : iterable of (k, i, j) tuples
        Zero-based layer, row, column indices.

    Returns
    -------
    list of int
        Flat node numbers computed as ``k * nrow * ncol + i * ncol + j``,
        using the module-level grid dimensions ``nrow`` and ``ncol``.
    """
    return [k * nrow * ncol + i * ncol + j for k, i, j in locs]

dest_nodes = get_nodes(particle_locations)
dest_nodes  # these are important for particle tracking

# ### 1.4.4 Update nodes in particle tracking script

# +
get_endpoints_script = '../scripts/get_endpoints.py'
# NOTE(review): the leading whitespace here must match the indentation of the
# 'dest_nodes =' assignment inside get_endpoints.py — confirm against that script.
update_line = f' dest_nodes = {dest_nodes}\n'

# Read-then-rewrite: replace the existing 'dest_nodes =' line in place.
# Use context managers so the read handle is closed (the original leaked it),
# and a plain loop instead of a side-effect-only list comprehension.
with open(get_endpoints_script, 'r') as ifp:
    infile = ifp.readlines()
with open(get_endpoints_script, 'w') as ofp:
    for line in infile:
        ofp.write(update_line if line.startswith(' dest_nodes =') else line)
# -

# ### 1.5 set starting locations for forward particles
# we can use particle locations to set up forward tracking subset of grid.
# Row/column indices of the priority wells (zero-based) — used below only to
# plot an 'x' at each well location on top of the zone maps.
well_i,well_j = well_dat_df['i_zb'],well_dat_df['j_zb']
# idomain array of the bottom model layer (presumably 1 = active, 0 = inactive
# — standard MODFLOW6 convention; confirm against the model's DIS package).
idm = m.dis.idomain.data[-1]

# **Add setup particle zones for NE, W and S priority well "clusters" -- for running on the array**

# +
# NE zone: copy the active-cell mask and zero everything outside the
# hard-coded window (keeps rows 150-374, columns 375 and up).
partlox_NE = idm.copy().astype(float)
partlox_NE[0:150,:]=0
partlox_NE[375:,:]=0
partlox_NE[:,0:375]=0
# Report the fraction of all model cells that will receive a particle.
print('partlox_NE particles in {} % of model cells'.format(round(sum(sum(partlox_NE))/(nlay*nrow*ncol) * 100, 2)))
# Mark each priority-well location on the map.
[plt.plot(j,i,'x') for i,j in zip(well_i,well_j)]
# Blank out any values > 1 for display purposes only (NaN renders as blank).
partlox_NE[partlox_NE>1]=np.nan
plt.imshow(partlox_NE, cmap='bone')
plt.colorbar()
plt.show()

# +
# W zone: the negative index zeroes all but the last 500 rows, then the
# window is trimmed to rows < 440 and columns < 350.
partlox_W = idm.copy().astype(float)
partlox_W[0:-500,:]=0
partlox_W[440:,:]=0
partlox_W[:,350:]=0
print('partlox_W particles in {} % of model cells'.format(round(sum(sum(partlox_W))/(nlay*nrow*ncol) * 100, 2)))
[plt.plot(j,i,'x') for i,j in zip(well_i,well_j)]
partlox_W[partlox_W>1]=np.nan
plt.imshow(partlox_W, cmap='bone')
plt.colorbar()
plt.show()

# +
# S zone: keeps rows 450-624, columns 325 and up.
partlox_S = idm.copy().astype(float)
partlox_S[0:450,:]=0
partlox_S[625:,:]=0
partlox_S[:,0:325]=0
print('partlox_S particles in {} % of model cells'.format(round(sum(sum(partlox_S))/(nlay*nrow*ncol) * 100, 2)))
[plt.plot(j,i,'x') for i,j in zip(well_i,well_j)]
partlox_S[partlox_S>1]=np.nan
plt.imshow(partlox_S, cmap='bone')
plt.colorbar()
plt.show()
# -

# function to get modpath starting particle nodes from 1/0 arrays (1 for particle, 0 for no particle)
def get_starting_nodes(partlox):
    """Return flat MODPATH node numbers for every nonzero cell of `partlox`.

    The layer index for each cell is taken from the recharge package's
    ``irch`` array (presumably the layer receiving recharge at that cell —
    confirm against the RCH package), and the (k, i, j) triples are
    flattened with the module-level ``get_nodes`` helper.
    """
    cells = np.where(partlox>0)
    pi,pj = cells
    irch = m.rch.irch.data
    pk = irch[pi,pj]
    forward_cells = list(zip(pk,pi,pj))
    forward_nodes = get_nodes(forward_cells)
    return forward_nodes

forward_nodes_partlox_NE = get_starting_nodes(partlox_NE)
forward_nodes_partlox_W = get_starting_nodes(partlox_W)
forward_nodes_partlox_S = get_starting_nodes(partlox_S)

# Ordered to match the mpnamf_zones list defined in the next cell.
all_forward_nodes = [forward_nodes_partlox_NE,forward_nodes_partlox_W,forward_nodes_partlox_S]

# ### 1.6 Create NE, W and S zone modpath files

# flag if setting up files for run on linux
setup_for_linux = True

# +
# create NE, W and S zone
modpath files mpnamf_zones = ['neversink_mp_forward_weak_NE', 'neversink_mp_forward_weak_W', 'neversink_mp_forward_weak_S'] if setup_for_linux is not True: if sys.platform == 'win32': modpath_executable = 'mp7.exe' # path to windows MODPATH7 executable, assume same directory as modpath files else: modpath_executable='mp7' # path to mac/linux MODPATH7 executable, assume same directory as modpath files if setup_for_linux is True: modpath_executable='mp7' for zone, forward_nodes in zip(mpnamf_zones, all_forward_nodes): mp = flopy.modpath.Modpath7.create_mp7( modelname=zone, trackdir='forward', flowmodel=m, model_ws=model_ws, rowcelldivisions=1, columncelldivisions=1, layercelldivisions=1, nodes=forward_nodes, exe_name=modpath_executable ) # write modpath datasets mp.write_input() print('Wrote {} MODPATH input files'.format(zone)) # - # fix mpbas to set iface for SFR (we don't care about EVT) -- same as above for cf in glob.glob('../neversink_mf6/*.mpbas'): cbas = [i.strip() for i in open(cf, 'r').readlines()] with open(cf, 'w') as ofp: [ofp.write('{}\n'.format(i.replace('EVT','SFR'))) for i in cbas] # modify to make models weak sink and set Drape = 1 option for placing particles for zone in mpnamf_zones: with open('../neversink_mf6/{}.mpsim'.format(zone), 'r') as ofp: inmpsim = ofp.readlines() inmpsim[3] = '4 1 2 1 0 0\n' tmp = inmpsim[22].strip().split() inmpsim[22] = ' '.join(tmp[:2]) + ' 1\n' with open('../neversink_mf6/{}.mpsim'.format(zone), 'w') as ofp: ofp.writelines(inmpsim) print('{}.mpsim updated to weak sink setting'.format(zone))
notebooks_workflow_complete/4.0_setup_modpath-forward.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab_type="code" # !pip install -r https://raw.githubusercontent.com/datamllab/automl-in-action-notebooks/master/requirements.txt # + [markdown] colab_type="text" # ## Load MNIST dataset # # + colab_type="code" from tensorflow.keras.datasets import cifar10, fashion_mnist, mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() print("Training image shape:", x_train.shape) # (60000, 28, 28) print("Training label shape:", y_train.shape) # (60000,) print("First five training labels:", y_train[:5]) # array([5 0 4 1 9], dtype=uint8) # + [markdown] colab_type="text" # ## ResNetBlock # # + colab_type="code" import timeit import autokeras as ak input_node = ak.ImageInput() output_node = ak.Normalization()(input_node) output_node = ak.ResNetBlock()(output_node) output_node = ak.ClassificationHead()(output_node) resnet_auto_model = ak.AutoModel( inputs=input_node, outputs=output_node, max_trials=3, overwrite=True, seed=42 ) start_time = timeit.default_timer() # Use the first 100 training samples for 1 epoch with batch_size=8 as a quick demo. # You may run with the full dataset with 10 epochs and a larger batch size, but expect a longer training time. resnet_auto_model.fit(x_train[:100], y_train[:100], epochs=1, batch_size=8) stop_time = timeit.default_timer() print("Total time: {time} seconds.".format(time=round(stop_time - start_time, 2))) # + [markdown] colab_type="text" # ### Get the summarized results during the tuning process # # + colab_type="code" resnet_auto_model.tuner.results_summary() # + [markdown] colab_type="text" # ### Display best model # # + colab_type="code" best_resnet_model = resnet_auto_model.export_model() best_resnet_model.summary() # + [markdown] colab_type="text" # ### Evaluate the best resnet model on the test data. 
#
# + colab_type="code"
# Only evaluating the first 100 samples as a quick demo
test_loss, test_acc = resnet_auto_model.evaluate(
    x_test[:100], y_test[:100], batch_size=8
)
print("Accuracy: {accuracy}%".format(accuracy=round(test_acc * 100, 2)))

# + [markdown] colab_type="text"
# ## XceptionBlock
#

# + colab_type="code"
import autokeras as ak

# Same search pipeline as the ResNet section, but with an XceptionBlock.
input_node = ak.ImageInput()
output_node = ak.Normalization()(input_node)
output_node = ak.XceptionBlock()(output_node)
output_node = ak.ClassificationHead()(output_node)
xception_auto_model = ak.AutoModel(
    inputs=input_node, outputs=output_node, max_trials=3, overwrite=True, seed=42
)
start_time = timeit.default_timer()
# Use the first 100 training samples for 1 epoch with batch_size=8 as a quick demo.
# You may run with the full dataset with 10 epochs and a larger batch size, but expect a longer training time.
xception_auto_model.fit(x_train[:100], y_train[:100], epochs=1, batch_size=8)
stop_time = timeit.default_timer()
print("Total time: {time} seconds.".format(time=round(stop_time - start_time, 2)))

# + [markdown] colab_type="text"
# ### Display the best xception model
#

# + colab_type="code"
import tensorflow as tf

best_xception_model = xception_auto_model.export_model()
tf.keras.utils.plot_model(
    best_xception_model, show_shapes=True, expand_nested=True
)  # rankdir='LR'

# + colab_type="code"
best_xception_model.summary()

# + [markdown] colab_type="text"
# ### Evaluate the best xception model on the test data.
#

# + colab_type="code"
# Only evaluating the first 100 samples as a quick demo.
# Fix: evaluate the Xception model here — the original mistakenly called
# resnet_auto_model.evaluate, reporting the previous section's model.
test_loss, test_acc = xception_auto_model.evaluate(x_test[:100], y_test[:100])
print("Accuracy: {accuracy}%".format(accuracy=round(test_acc * 100, 2)))

# + [markdown] colab_type="text"
# ## HyperBlock for image classification (ImageBlock)
#

# + colab_type="code"
import timeit

import autokeras as ak

# ImageBlock searches over multiple vision architectures automatically.
input_node = ak.ImageInput()
output_node = ak.ImageBlock(
    # Normalize the dataset.
    normalize=True,
    # Do not do data augmentation.
    augment=False,
)(input_node)
output_node = ak.ClassificationHead(dropout=0.0)(output_node)
auto_model = ak.AutoModel(
    inputs=input_node, outputs=output_node, max_trials=3, overwrite=True, seed=42
)
start_time = timeit.default_timer()
# Use the first 100 training samples for 1 epoch and batch_size=8 as a quick demo.
# You may run with the full dataset with 10 epochs with a larger batch size, but expect a longer training time.
auto_model.fit(x_train[:100], y_train[:100], epochs=1, batch_size=8)
stop_time = timeit.default_timer()
print("Total time: {time} seconds.".format(time=round(stop_time - start_time, 2)))

# + colab_type="code"
auto_model.tuner.results_summary(num_trials=1)

# + colab_type="code"
best_model = auto_model.export_model()
best_model.summary()

# + colab_type="code"
# Only evaluating the first 100 samples as a quick demo
test_loss, test_acc = auto_model.evaluate(x_test[:100], y_test[:100], batch_size=8)
print("Accuracy: {accuracy}%".format(accuracy=round(test_acc * 100, 2)))
5.3.1-Automated-Pipeline-Search-Image-Model-Selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #default_exp tabular.core # - #export from fastai2.torch_basics import * from fastai2.data.all import * from nbdev.showdoc import * #export pd.set_option('mode.chained_assignment','raise') # # Tabular core # # > Basic function to preprocess tabular data before assembling it in a `DataBunch`. # ## Initial preprocessing #export def make_date(df, date_field): "Make sure `df[date_field]` is of the right date type." field_dtype = df[date_field].dtype if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype): field_dtype = np.datetime64 if not np.issubdtype(field_dtype, np.datetime64): df[date_field] = pd.to_datetime(df[date_field], infer_datetime_format=True) df = pd.DataFrame({'date': ['2019-12-04', '2019-11-29', '2019-11-15', '2019-10-24']}) make_date(df, 'date') test_eq(df['date'].dtype, np.dtype('datetime64[ns]')) #export def add_datepart(df, field_name, prefix=None, drop=True, time=False): "Helper function that adds columns relevant to a date in the column `field_name` of `df`." 
make_date(df, field_name) field = df[field_name] prefix = ifnone(prefix, re.sub('[Dd]ate$', '', field_name)) attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'] if time: attr = attr + ['Hour', 'Minute', 'Second'] for n in attr: df[prefix + n] = getattr(field.dt, n.lower()) df[prefix + 'Elapsed'] = field.astype(np.int64) // 10 ** 9 if drop: df.drop(field_name, axis=1, inplace=True) return df df = pd.DataFrame({'date': ['2019-12-04', '2019-11-29', '2019-11-15', '2019-10-24']}) df = add_datepart(df, 'date') test_eq(df.columns, ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start', 'Elapsed']) df.head() #export def _get_elapsed(df,field_names, date_field, base_field, prefix): for f in field_names: day1 = np.timedelta64(1, 'D') last_date,last_base,res = np.datetime64(),None,[] for b,v,d in zip(df[base_field].values, df[f].values, df[date_field].values): if last_base is None or b != last_base: last_date,last_base = np.datetime64(),b if v: last_date = d res.append(((d-last_date).astype('timedelta64[D]') / day1)) df[prefix + f] = res return df #export def add_elapsed_times(df, field_names, date_field, base_field): "Add in `df` for each event in `field_names` the elapsed time according to `date_field` grouped by `base_field`" field_names = list(L(field_names)) #Make sure date_field is a date and base_field a bool df[field_names] = df[field_names].astype('bool') make_date(df, date_field) work_df = df[field_names + [date_field, base_field]] work_df = work_df.sort_values([base_field, date_field]) work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'After') work_df = work_df.sort_values([base_field, date_field], ascending=[True, False]) work_df = _get_elapsed(work_df, field_names, date_field, base_field, 'Before') for a in ['After' + f for 
f in field_names] + ['Before' + f for f in field_names]: work_df[a] = work_df[a].fillna(0).astype(int) for a,s in zip([True, False], ['_bw', '_fw']): work_df = work_df.set_index(date_field) tmp = (work_df[[base_field] + field_names].sort_index(ascending=a) .groupby(base_field).rolling(7, min_periods=1).sum()) tmp.drop(base_field,1,inplace=True) tmp.reset_index(inplace=True) work_df.reset_index(inplace=True) work_df = work_df.merge(tmp, 'left', [date_field, base_field], suffixes=['', s]) work_df.drop(field_names,1,inplace=True) return df.merge(work_df, 'left', [date_field, base_field]) df = pd.DataFrame({'date': ['2019-12-04', '2019-11-29', '2019-11-15', '2019-10-24'], 'event': [False, True, False, True], 'base': [1,1,2,2]}) df = add_elapsed_times(df, ['event'], 'date', 'base') df #export def cont_cat_split(df, max_card=20, dep_var=None): "Helper function that returns column names of cont and cat variables from given `df`." cont_names, cat_names = [], [] for label in df: if label == dep_var: continue if df[label].dtype == int and df[label].unique().shape[0] > max_card or df[label].dtype == float: cont_names.append(label) else: cat_names.append(label) return cont_names, cat_names # ## Tabular - #export class _TabIloc: "Get/set rows by iloc and cols by name" def __init__(self,to): self.to = to def __getitem__(self, idxs): df = self.to.items if isinstance(idxs,tuple): rows,cols = idxs cols = df.columns.isin(cols) if is_listy(cols) else df.columns.get_loc(cols) else: rows,cols = idxs,slice(None) return self.to.new(df.iloc[rows, cols]) # + #export class Tabular(CollBase, GetAttr, FilteredBase): "A `DataFrame` wrapper that knows which cols are cont/cat/y, and returns rows in `__getitem__`" _default,with_cont='procs',True def __init__(self, df, procs=None, cat_names=None, cont_names=None, y_names=None, block_y=CategoryBlock, splits=None, do_setup=True): if splits is None: splits=[range_of(df)] df = df.iloc[sum(splits, [])].copy() self.databunch = 
delegates(self._dl_type.__init__)(self.databunch) super().__init__(df) self.y_names = L(y_names) if block_y is not None: if callable(block_y): block_y = block_y() procs = L(procs) + block_y.type_tfms self.cat_names,self.cont_names,self.procs = L(cat_names),L(cont_names),Pipeline(procs, as_item=True) self.split = len(splits[0]) if do_setup: self.setup() def subset(self, i): return self.new(self.items[slice(0,self.split) if i==0 else slice(self.split,len(self))]) def copy(self): self.items = self.items.copy(); return self def new(self, df): return type(self)(df, do_setup=False, block_y=None, **attrdict(self, 'procs','cat_names','cont_names','y_names')) def show(self, max_n=10, **kwargs): display_df(self.all_cols[:max_n]) def setup(self): self.procs.setup(self) def process(self): self.procs(self) def loc(self): return self.items.loc def iloc(self): return _TabIloc(self) def targ(self): return self.items[self.y_names] def all_col_names (self): return self.cat_names + self.cont_names + self.y_names def n_subsets(self): return 2 def new_empty(self): return self.new(self.items[:1]) properties(Tabular,'loc','iloc','targ','all_col_names','n_subsets') # - #export class TabularPandas(Tabular): def transform(self, cols, f): self[cols] = self[cols].transform(f) # + #export def _add_prop(cls, nm): @property def f(o): return o[list(getattr(o,nm+'_names'))] @f.setter def fset(o, v): o[getattr(o,nm+'_names')] = v setattr(cls, nm+'s', f) setattr(cls, nm+'s', fset) _add_prop(Tabular, 'cat') _add_prop(Tabular, 'cont') _add_prop(Tabular, 'y') _add_prop(Tabular, 'all_col') # - df = pd.DataFrame({'a':[0,1,2,0,2], 'b':[0,0,0,0,1]}) to = TabularPandas(df, cat_names='a') t = pickle.loads(pickle.dumps(to)) test_eq(t.items,to.items) test_eq(to.all_cols,to[['a']]) to.show() # only shows 'a' since that's the only col in `TabularPandas` #export class TabularProc(InplaceTransform): "Base class to write a non-lazy tabular processor for dataframes" def setup(self, items=None): 
super().setup(getattr(items,'train',items)) # Procs are called as soon as data is available return self(items.items if isinstance(items,DataSource) else items) #export def _apply_cats (voc, add, c): if not is_categorical_dtype(c): return pd.Categorical(c, categories=voc[c.name][add:]).codes+add return c.cat.codes+add #if is_categorical_dtype(c) else c.map(voc[c.name].o2i) def _decode_cats(voc, c): return c.map(dict(enumerate(voc[c.name].items))) #export class Categorify(TabularProc): "Transform the categorical variables to that type." order = 1 def setups(self, to): self.classes = {n:CategoryMap(to.iloc[:,n].items, add_na=(n in to.cat_names)) for n in to.cat_names} def encodes(self, to): to.transform(to.cat_names, partial(_apply_cats, self.classes, 1)) def decodes(self, to): to.transform(to.cat_names, partial(_decode_cats, self.classes)) def __getitem__(self,k): return self.classes[k] # + #export @Categorize def setups(self, to:Tabular): if len(to.y_names) > 0: self.vocab = CategoryMap(getattr(to, 'train', to).iloc[:,to.y_names[0]].items) self.c = len(self.vocab) return self(to) @Categorize def encodes(self, to:Tabular): to.transform(to.y_names, partial(_apply_cats, {n: self.vocab for n in to.y_names}, 0)) return to @Categorize def decodes(self, to:Tabular): to.transform(to.y_names, partial(_decode_cats, {n: self.vocab for n in to.y_names})) return to # - show_doc(Categorify, title_level=3) df = pd.DataFrame({'a':[0,1,2,0,2]}) to = TabularPandas(df, Categorify, 'a') cat = to.procs.categorify test_eq(cat['a'], ['#na#',0,1,2]) test_eq(to['a'], [1,2,3,1,3]) df1 = pd.DataFrame({'a':[1,0,3,-1,2]}) to1 = to.new(df1) to1.process() #Values that weren't in the training df are sent to 0 (na) test_eq(to1['a'], [2,1,0,0,3]) to2 = cat.decode(to1) test_eq(to2['a'], [1,0,'#na#','#na#',2]) #test with splits cat = Categorify() df = pd.DataFrame({'a':[0,1,2,3,2]}) to = TabularPandas(df, cat, 'a', splits=[[0,1,2],[3,4]]) test_eq(cat['a'], ['#na#',0,1,2]) test_eq(to['a'], [1,2,3,0,3]) 
df = pd.DataFrame({'a':pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True)}) to = TabularPandas(df, Categorify, 'a') cat = to.procs.categorify test_eq(cat['a'], ['#na#','H','M','L']) test_eq(to.items.a, [2,1,3,2]) to2 = cat.decode(to) test_eq(to2['a'], ['M','H','L','M']) #test with targets cat = Categorify() df = pd.DataFrame({'a':[0,1,2,3,2], 'b': ['a', 'b', 'a', 'b', 'b']}) to = TabularPandas(df, cat, 'a', splits=[[0,1,2],[3,4]], y_names='b') test_eq(to.vocab, ['a', 'b']) test_eq(to['b'], [0,1,0,1,1]) to2 = to.procs.decode(to) test_eq(to2['b'], ['a', 'b', 'a', 'b', 'b']) #test with targets and train cat = Categorify() df = pd.DataFrame({'a':[0,1,2,3,2], 'b': ['a', 'b', 'a', 'c', 'b']}) to = TabularPandas(df, cat, 'a', splits=[[0,1,2],[3,4]], y_names='b') test_eq(to.vocab, ['a', 'b']) #export class NormalizeTab(TabularProc): "Normalize the continuous variables." order = 2 def setups(self, dsrc): self.means,self.stds = dsrc.conts.mean(),dsrc.conts.std(ddof=0)+1e-7 def encodes(self, to): to.conts = (to.conts-self.means) / self.stds def decodes(self, to): to.conts = (to.conts*self.stds ) + self.means # + #export @Normalize def setups(self, to:Tabular): self.means,self.stds = getattr(to, 'train', to).conts.mean(),getattr(to, 'train', to).conts.std(ddof=0)+1e-7 return self(to) @Normalize def encodes(self, to:Tabular): to.conts = (to.conts-self.means) / self.stds return to @Normalize def decodes(self, to:Tabular): to.conts = (to.conts*self.stds ) + self.means return to # - norm = Normalize() df = pd.DataFrame({'a':[0,1,2,3,4]}) to = TabularPandas(df, norm, cont_names='a') x = np.array([0,1,2,3,4]) m,s = x.mean(),x.std() test_eq(norm.means['a'], m) test_close(norm.stds['a'], s) test_close(to['a'].values, (x-m)/s) df1 = pd.DataFrame({'a':[5,6,7]}) to1 = to.new(df1) to1.process() test_close(to1['a'].values, (np.array([5,6,7])-m)/s) to2 = norm.decode(to1) test_close(to2['a'].values, [5,6,7]) norm = Normalize() df = pd.DataFrame({'a':[0,1,2,3,4]}) to = 
TabularPandas(df, norm, cont_names='a', splits=[[0,1,2],[3,4]]) x = np.array([0,1,2]) m,s = x.mean(),x.std() test_eq(norm.means['a'], m) test_close(norm.stds['a'], s) test_close(to['a'].values, (np.array([0,1,2,3,4])-m)/s) #export class FillStrategy: "Namespace containing the various filling strategies." def median (c,fill): return c.median() def constant(c,fill): return fill def mode (c,fill): return c.dropna().value_counts().idxmax() #export class FillMissing(TabularProc): "Fill the missing values in continuous columns." def __init__(self, fill_strategy=FillStrategy.median, add_col=True, fill_vals=None): if fill_vals is None: fill_vals = defaultdict(int) store_attr(self, 'fill_strategy,add_col,fill_vals') def setups(self, dsrc): self.na_dict = {n:self.fill_strategy(dsrc[n], self.fill_vals[n]) for n in pd.isnull(dsrc.conts).any().keys()} def encodes(self, to): missing = pd.isnull(to.conts) for n in missing.any().keys(): assert n in self.na_dict, f"nan values in `{n}` but not in setup training set" to[n].fillna(self.na_dict[n], inplace=True) if self.add_col: to.loc[:,n+'_na'] = missing[n] if n+'_na' not in to.cat_names: to.cat_names.append(n+'_na') show_doc(FillMissing, title_level=3) # + fill1,fill2,fill3 = (FillMissing(fill_strategy=s) for s in [FillStrategy.median, FillStrategy.constant, FillStrategy.mode]) df = pd.DataFrame({'a':[0,1,np.nan,1,2,3,4]}) df1 = df.copy(); df2 = df.copy() tos = TabularPandas(df, fill1, cont_names='a'),TabularPandas(df1, fill2, cont_names='a'),TabularPandas(df2, fill3, cont_names='a') test_eq(fill1.na_dict, {'a': 1.5}) test_eq(fill2.na_dict, {'a': 0}) test_eq(fill3.na_dict, {'a': 1.0}) for t in tos: test_eq(t.cat_names, ['a_na']) for to_,v in zip(tos, [1.5, 0., 1.]): test_eq(to_['a'].values, np.array([0, 1, v, 1, 2, 3, 4])) test_eq(to_['a_na'].values, np.array([0, 0, 1, 0, 0, 0, 0])) # - dfa = pd.DataFrame({'a':[np.nan,0,np.nan]}) tos = [t.new(o) for t,o in zip(tos,(dfa,dfa.copy(),dfa.copy()))] for t in tos: t.process() for to_,v in 
zip(tos, [1.5, 0., 1.]): test_eq(to_['a'].values, np.array([v, 0, v])) test_eq(to_['a_na'].values, np.array([1, 0, 1])) # ## TabularPandas Pipelines - # + procs = [Normalize, Categorify, FillMissing, noop] df = pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4]}) to = TabularPandas(df, procs, cat_names='a', cont_names='b') #Test setup and apply on df_main test_eq(to.cat_names, ['a', 'b_na']) test_eq(to['a'], [1,2,3,2,2,3,1]) test_eq(to['b_na'], [1,1,2,1,1,1,1]) x = np.array([0,1,1.5,1,2,3,4]) m,s = x.mean(),x.std() test_close(to['b'].values, (x-m)/s) test_eq(to.classes, {'a': ['#na#',0,1,2], 'b_na': ['#na#',False,True]}) # + #Test apply on y_names df = pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}) to = TabularPandas(df, procs, 'a', 'b', y_names='c') test_eq(to.cat_names, ['a', 'b_na']) test_eq(to['a'], [1,2,3,2,2,3,1]) test_eq(to['b_na'], [1,1,2,1,1,1,1]) test_eq(to['c'], [1,0,1,0,0,1,0]) x = np.array([0,1,1.5,1,2,3,4]) m,s = x.mean(),x.std() test_close(to['b'].values, (x-m)/s) test_eq(to.classes, {'a': ['#na#',0,1,2], 'b_na': ['#na#',False,True]}) test_eq(to.vocab, ['a','b']) # + df = pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}) to = TabularPandas(df, procs, 'a', 'b', y_names='c') test_eq(to.cat_names, ['a', 'b_na']) test_eq(to['a'], [1,2,3,2,2,3,1]) test_eq(df.a.dtype,int) test_eq(to['b_na'], [1,1,2,1,1,1,1]) test_eq(to['c'], [1,0,1,0,0,1,0]) # + df = pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,np.nan,1,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}) to = TabularPandas(df, procs, cat_names='a', cont_names='b', y_names='c', splits=[[0,1,4,6], [2,3,5]]) test_eq(to.cat_names, ['a', 'b_na']) test_eq(to['a'], [1,2,2,1,0,2,0]) test_eq(df.a.dtype,int) test_eq(to['b_na'], [1,2,1,1,1,1,1]) test_eq(to['c'], [1,0,0,0,1,0,1]) # - #export def _maybe_expand(o): return o[:,None] if o.ndim==1 else o #export class ReadTabBatch(ItemTransform): order = -1 #run before cuda 
def __init__(self, to): self.to = to # TODO: use float for cont targ def encodes(self, to): if not to.with_cont: return tensor(to.cats).long(), tensor(to.targ) return tensor(to.cats).long(),tensor(to.conts).float(), tensor(to.targ) def decodes(self, o): o = [_maybe_expand(o_) for o_ in to_np(o) if o_.size != 0] vals = np.concatenate(o, axis=1) df = pd.DataFrame(vals, columns=self.to.all_col_names) to = self.to.new(df) to = self.to.procs.decode(to) return to #export @typedispatch def show_batch(x: Tabular, y, its, max_n=10, ctxs=None): x.show() # + #export @delegates() class TabDataLoader(TfmdDL): do_item = noops def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs): if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTabBatch(dataset) super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs) def create_batch(self, b): return self.dataset.iloc[b] TabularPandas._dl_type = TabDataLoader # - # ## Integration example path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') df_main,df_test = df.iloc[:10000].copy(),df.iloc[10000:].copy() df_main.head() cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['age', 'fnlwgt', 'education-num'] procs = [Categorify, FillMissing, Normalize] splits = RandomSplitter()(range_of(df_main)) # %time to = TabularPandas(df_main, procs, cat_names, cont_names, y_names="salary", splits=splits) dbch = to.databunch() dbch.valid_dl.show_batch() to_tst = to.new(df_test) to_tst.process() to_tst.all_cols.head() # ## Other target types # ### Multi-label categories # #### one-hot encoded label def _mock_multi_label(df): sal,sex,white = [],[],[] for row in df.itertuples(): sal.append(row.salary == '>=50k') sex.append(row.sex == ' Male') white.append(row.race == ' White') df['salary'] = np.array(sal) df['male'] = np.array(sex) df['white'] = np.array(white) return df path = 
untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') df_main,df_test = df.iloc[:10000].copy(),df.iloc[10000:].copy() df_main = _mock_multi_label(df_main) df_main.head() # + #export @EncodedMultiCategorize def encodes(self, to:Tabular): return to @EncodedMultiCategorize def decodes(self, to:Tabular): to.transform(to.y_names, lambda c: c==1) return to # - cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['age', 'fnlwgt', 'education-num'] procs = [Categorify, FillMissing, Normalize] splits = RandomSplitter()(range_of(df_main)) y_names=["salary", "male", "white"] # %time to = TabularPandas(df_main, procs, cat_names, cont_names, y_names=y_names, block_y=MultiCategoryBlock(encoded=True, vocab=y_names), splits=splits) dbch = to.databunch() dbch.valid_dl.show_batch() # #### Not one-hot encoded def _mock_multi_label(df): targ = [] for row in df.itertuples(): labels = [] if row.salary == '>=50k': labels.append('>50k') if row.sex == ' Male': labels.append('male') if row.race == ' White': labels.append('white') targ.append(' '.join(labels)) df['target'] = np.array(targ) return df path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') df_main,df_test = df.iloc[:10000].copy(),df.iloc[10000:].copy() df_main = _mock_multi_label(df_main) df_main.head() # + @MultiCategorize def encodes(self, to:Tabular): #to.transform(to.y_names, partial(_apply_cats, {n: self.vocab for n in to.y_names}, 0)) return to @MultiCategorize def decodes(self, to:Tabular): #to.transform(to.y_names, partial(_decode_cats, {n: self.vocab for n in to.y_names})) return to # - cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['age', 'fnlwgt', 'education-num'] procs = [Categorify, FillMissing, Normalize] splits = RandomSplitter()(range_of(df_main)) # %time to = TabularPandas(df_main, procs, cat_names, cont_names, y_names="target", block_y=MultiCategoryBlock(), 
splits=splits) to.procs[2].vocab # ### Regression path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') df_main,df_test = df.iloc[:10000].copy(),df.iloc[10000:].copy() df_main = _mock_multi_label(df_main) cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['fnlwgt', 'education-num'] procs = [Categorify, FillMissing, Normalize] splits = RandomSplitter()(range_of(df_main)) # %time to = TabularPandas(df_main, procs, cat_names, cont_names, y_names='age', block_y=TransformBlock(), splits=splits) to.procs[-1].means dbch = to.databunch() dbch.valid_dl.show_batch() # ## Not being used now - for multi-modal # + class TensorTabular(Tuple): def get_ctxs(self, max_n=10, **kwargs): n_samples = min(self[0].shape[0], max_n) df = pd.DataFrame(index = range(n_samples)) return [df.iloc[i] for i in range(n_samples)] def display(self, ctxs): display_df(pd.DataFrame(ctxs)) class TabularLine(pd.Series): "A line of a dataframe that knows how to show itself" def show(self, ctx=None, **kwargs): return self if ctx is None else ctx.append(self) class ReadTabLine(ItemTransform): def __init__(self, proc): self.proc = proc def encodes(self, row): cats,conts = (o.map(row.__getitem__) for o in (self.proc.cat_names,self.proc.cont_names)) return TensorTabular(tensor(cats).long(),tensor(conts).float()) def decodes(self, o): to = TabularPandas(o, self.proc.cat_names, self.proc.cont_names, self.proc.y_names) to = self.proc.decode(to) return TabularLine(pd.Series({c: v for v,c in zip(to.items[0]+to.items[1], self.proc.cat_names+self.proc.cont_names)})) class ReadTabTarget(ItemTransform): def __init__(self, proc): self.proc = proc def encodes(self, row): return row[self.proc.y_names].astype(np.int64) def decodes(self, o): return Category(self.proc.classes[self.proc.y_names][o]) # + # tds = TfmdDS(to.items, tfms=[[ReadTabLine(proc)], ReadTabTarget(proc)]) # enc = tds[1] # test_eq(enc[0][0], tensor([2,1])) # test_close(enc[0][1], 
tensor([-0.628828])) # test_eq(enc[1], 1) # dec = tds.decode(enc) # assert isinstance(dec[0], TabularLine) # test_close(dec[0], pd.Series({'a': 1, 'b_na': False, 'b': 1})) # test_eq(dec[1], 'a') # test_stdout(lambda: print(show_at(tds, 1)), """a 1 # b_na False # b 1 # category a # dtype: object""") # - # ## Export - #hide from nbdev.export import notebook2script notebook2script()
nbs/40_tabular.core.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory data analysis (EDA) of a house-price dataset:
# numeric summaries first, then one example of each common seaborn plot type.

import pandas as pd
import numpy as np

# Load the dataset and peek at the first rows.
data = pd.read_csv('house_price.csv')
data.head()

# Print the dtype of every column to separate numeric from categorical features.
for col in data.columns:
    print(col, ':', data[col].dtypes)

# # Non-graphical EDA

# Summary statistics (count/mean/std/min/quartiles/max) for numeric columns.
data.describe()

# Pairwise correlations between columns.
# NOTE(review): on pandas >= 2.0 this raises for non-numeric columns unless
# numeric_only=True is passed — confirm the pandas version this runs on.
data.corr()

# Central tendency and spread of the target variable SalePrice.
data['SalePrice'].mean()
data['SalePrice'].median()
data['SalePrice'].mode()
data['SalePrice'].var()
data['SalePrice'].std()
data['SalePrice'].min()
data['SalePrice'].max()
data['SalePrice'].quantile(q=0.25)
data['SalePrice'].quantile(q=0.75)

# Covariance and correlation matrices for two hand-picked features.
np.cov(data['LotArea'], data['OverallQual'])
np.corrcoef(data['LotArea'], data['OverallQual'])

# # Graphical EDA

import seaborn as sns

# ### Histogram

sns.displot(x='YearBuilt', kde=True, data=data)
sns.histplot(x='YearBuilt', y='SalePrice', data=data)

# ### Box plot

sns.boxplot(x='YearBuilt', data=data)
sns.boxplot(x='YearBuilt', y='SaleCondition', data=data)

# ### Violin Plot

sns.violinplot(x='SalePrice', data=data)
sns.violinplot(x='YearBuilt', y='CentralAir', data=data)

# ### Bar plot

sns.countplot(x='SaleCondition', data=data)
sns.countplot(x='SaleCondition', hue='CentralAir', data=data)

# ### Line plot

# Only the first 101 rows, to keep the line readable.
sns.lineplot(x=data.loc[0:100].index, y='MSSubClass', data=data.loc[0:100])

# ### Scatter plot

sns.scatterplot(x="LotFrontage", y="SalePrice", data=data)
sns.scatterplot(x="LotFrontage", y="SalePrice", hue="CentralAir", data=data)

# ### Bubble plot

# Marker size encodes a third variable (YearBuilt).
sns.scatterplot(x="LotFrontage", y="SalePrice", data=data, size="YearBuilt")

# ### Pair plot

sns.pairplot(data=data[['LotFrontage', 'SalePrice', 'YearBuilt']])
sns.pairplot(data=data[['LotFrontage', 'SalePrice', 'YearBuilt', 'CentralAir']], hue='CentralAir')

# ### Joint Plot

sns.jointplot(x='SalePrice', y='YearBuilt', data=data)
sns.jointplot(x='SalePrice', y='YearBuilt', data=data, hue='CentralAir')

# ### Heat map

# Count of sales per (month sold, year sold) cell; months/years with no
# sales become 0 instead of NaN so the heat map renders cleanly.
heat_data = pd.pivot_table(data, index='MoSold',
                           columns='YrSold', aggfunc=np.count_nonzero)['SalePrice'].fillna(0)
heat_data
sns.heatmap(heat_data)
House-price-EDA/House-price-EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Extract an MFCC feature vector for every audio file listed in audio_path.xlsx.

import pandas as pd
import numpy as np  # FIX: np.mean is used below but numpy was never imported (NameError)
import librosa
import librosa.display

df = pd.read_excel('audio_path.xlsx')
# Drop the spreadsheet's re-imported index column.
del df['Unnamed: 0']
df.head()
# If all the above code runs successfully it will print a dataframe containing
# the given column names, i.e. path and Interference.

# +
def cal_mfcc(path):
    """Load the audio file at `path` and return its 40-coefficient MFCC
    feature vector, averaged over time frames."""
    X, sample_rate = librosa.load(path, res_type='kaiser_fast')
    return np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)

df['mfcc'] = df['path'].apply(cal_mfcc)
df.head()
# If all of the above ran successfully, df now has an 'mfcc' feature column.
AUDIO_TWR/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/datascience-1.0 # --- # # Use Your Own Inference Code with Amazon SageMaker XGBoost Algorithm # _**Customized inference for computing SHAP values with Amazon SageMaker XGBoost script mode**_ # # --- # # ## Contents # 1. [Introduction](#Introduction) # 2. [Setup](#Setup) # 3. [Training the XGBoost model](#Training-the-XGBoost-model) # 4. [Deploying the XGBoost endpoint](#Deploying-the-XGBoost-endpoint) # # --- # ## Introduction # # This notebook shows how you can configure the SageMaker XGBoost model server by defining the following three functions in the Python source file you pass to the XGBoost constructor in the SageMaker Python SDK: # - `input_fn`: Takes request data and deserializes the data into an object for prediction, # - `predict_fn`: Takes the deserialized request object and performs inference against the loaded model, and # - `output_fn`: Takes the result of prediction and serializes this according to the response content type. # We will write a customized inference script that is designed to illustrate how [SHAP](https://github.com/slundberg/shap) values enable the interpretion of XGBoost models. # # We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In this libsvm converted version, the nominal feature (Male/Female/Infant) has been converted into a real valued feature as required by XGBoost. Age of abalone is to be predicted from eight physical measurements. 
# # This notebook uses the Abalone dataset to deploy a model server that returns SHAP values, which enable us to create model explanation such as the following plots that show each features contributing to push the model output from the base value. # # <table><tr> # <td> <img src="images/shap_young_abalone.png" alt="Drawing"/> </td> # <td> <img src="images/shap_old_abalone.png" alt="Drawing"/> </td> # </tr></table> # # --- # # ## Setup # # This notebook was tested in Amazon SageMaker Studio on a ml.t3.medium instance with Python 3 (Data Science) kernel. # # Let's start by specifying: # 1. The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting. # 2. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regex with a the appropriate full IAM role arn string(s). # + # %%time import io import os import boto3 import sagemaker import time role = sagemaker.get_execution_role() region = boto3.Session().region_name # S3 bucket for saving code and model artifacts. # Feel free to specify a different bucket here if you wish. bucket = sagemaker.Session().default_bucket() prefix = 'sagemaker/DEMO-xgboost-inference-script-mode' # - # ### Fetching the dataset # # The following methods download the Abalone dataset and upload files to S3. # %%time s3 = boto3.client("s3") # Load the dataset FILE_DATA = 'abalone' s3.download_file("sagemaker-sample-files", f"datasets/tabular/uci_abalone/abalone.libsvm", FILE_DATA) sagemaker.Session().upload_data(FILE_DATA, bucket=bucket, key_prefix=prefix+'/train') # ## Training the XGBoost model # # SageMaker can now run an XGboost script using the XGBoost estimator. 
A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later. In this notebook, we use the same training script [abalone.py](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/abalone.py) from [Regression with Amazon SageMaker XGBoost algorithm](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb). Refer to [Regression with Amazon SageMaker XGBoost algorithm](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb) for details on the training script. # # After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between few minutes. # # To run our training script on SageMaker, we construct a `sagemaker.xgboost.estimator.XGBoost` estimator, which accepts several constructor arguments: # # * __entry_point__: The path to the Python script SageMaker runs for training and prediction. # * __role__: Role ARN # * __framework_version__: SageMaker XGBoost version you want to use for executing your model training code, e.g., `0.90-1`, `0.90-2`, `1.0-1`, or `1.2-1`. # * __train_instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types. # * __sagemaker_session__ *(optional)*: The session used to train on Sagemaker. # * __hyperparameters__ *(optional)*: A dictionary passed to the train function as hyperparameters. 
# + from sagemaker.inputs import TrainingInput from sagemaker.xgboost.estimator import XGBoost job_name = 'DEMO-xgboost-inference-script-mode-' + time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) print("Training job", job_name) hyperparameters = { "max_depth": "5", "eta": "0.2", "gamma": "4", "min_child_weight": "6", "subsample": "0.7", "objective": "reg:squarederror", "num_round": "50", "verbosity": "2", } instance_type = "ml.c5.xlarge" xgb_script_mode_estimator = XGBoost( entry_point="abalone.py", hyperparameters=hyperparameters, role=role, instance_count=1, instance_type=instance_type, framework_version="1.2-1", output_path="s3://{}/{}/{}/output".format(bucket, prefix, job_name), ) content_type = "text/libsvm" train_input = TrainingInput("s3://{}/{}/{}/".format(bucket, prefix, "train"), content_type=content_type) # - # ### Train XGBoost Estimator on Abalone Data # # Training is as simple as calling `fit` on the Estimator. This will start a SageMaker Training job that will download the data, invoke the entry point code (in the provided script file), and save any model artifacts that the script creates. In this case, the script requires a `train` and a `validation` channel. Since we only created a `train` channel, we re-use it for validation. xgb_script_mode_estimator.fit({'train': train_input, 'validation': train_input}, job_name=job_name) # ## Deploying the XGBoost endpoint # # After training, we can host the newly created model in SageMaker, and create an Amazon SageMaker endpoint – a hosted and managed prediction service that we can use to perform inference. If you call `deploy` after you call `fit` on an XGBoost estimator, it will create a SageMaker endpoint using the training script (i.e., `entry_point`). You can also optionally specify other functions to customize the behavior of deserialization of the input request (`input_fn()`), serialization of the predictions (`output_fn()`), and how predictions are made (`predict_fn()`). 
If any of these functions are not specified, the endpoint will use the default functions in the SageMaker XGBoost container. See the [SageMaker Python SDK documentation](https://sagemaker.readthedocs.io/en/stable/frameworks/xgboost/using_xgboost.html#sagemaker-xgboost-model-server) for details. # # In this notebook, we will run a separate inference script and customize the endpoint to return [SHAP](https://github.com/slundberg/shap) values in addition to predictions. The inference script that we will run in this notebook is provided as the accompanying file (`inference.py`) and also shown below: # # ```python # import json # import os # import pickle as pkl # # import numpy as np # # import sagemaker_xgboost_container.encoder as xgb_encoders # # # def model_fn(model_dir): # """ # Deserialize and return fitted model. # """ # model_file = "xgboost-model" # booster = pkl.load(open(os.path.join(model_dir, model_file), "rb")) # return booster # # # def input_fn(request_body, request_content_type): # """ # The SageMaker XGBoost model server receives the request data body and the content type, # and invokes the `input_fn`. # # Return a DMatrix (an object that can be passed to predict_fn). # """ # if request_content_type == "text/libsvm": # return xgb_encoders.libsvm_to_dmatrix(request_body) # else: # raise ValueError( # "Content type {} is not supported.".format(request_content_type) # ) # # # def predict_fn(input_data, model): # """ # SageMaker XGBoost model server invokes `predict_fn` on the return value of `input_fn`. # # Return a two-dimensional NumPy array where the first columns are predictions # and the remaining columns are the feature contributions (SHAP values) for that prediction. 
# """ # prediction = model.predict(input_data) # feature_contribs = model.predict(input_data, pred_contribs=True, validate_features=False) # output = np.hstack((prediction[:, np.newaxis], feature_contribs)) # return output # # # def output_fn(predictions, content_type): # """ # After invoking predict_fn, the model server invokes `output_fn`. # """ # if content_type == "text/csv": # return ','.join(str(x) for x in predictions[0]) # else: # raise ValueError("Content type {} is not supported.".format(content_type)) # ``` # # ### transform_fn # # If you would rather not structure your code around the three methods described above, you can instead define your own `transform_fn` to handle inference requests. An error is thrown if a `transform_fn` is present in conjunction with any `input_fn`, `predict_fn`, and/or `output_fn`. In our case, the `transform_fn` would look as follows: # ```python # def transform_fn(model, request_body, content_type, accept_type): # dmatrix = xgb_encoders.libsvm_to_dmatrix(request_body) # prediction = model.predict(dmatrix) # feature_contribs = model.predict(dmatrix, pred_contribs=True, validate_features=False) # output = np.hstack((prediction[:, np.newaxis], feature_contribs)) # return ','.join(str(x) for x in predictions[0]) # ``` # where `model` is the model object loaded by `model_fn`, `request_body` is the data from the inference request, `content_type` is the content type of the request, and `accept_type` is the request content type for the response. # # # ### Deploy to an endpoint # # Since the inference script is separate from the training script, here we use `XGBoostModel` to create a model from s3 artifacts and specify `inference.py` as the `entry_point`. 
# +
from sagemaker.xgboost.model import XGBoostModel

# S3 location of the artifacts produced by the training job above.
model_data = xgb_script_mode_estimator.model_data
print(model_data)

# Build a model from the trained artifacts, swapping in the custom
# inference script so the endpoint returns SHAP values with each prediction.
xgb_inference_model = XGBoostModel(
    model_data=model_data,
    role=role,
    entry_point="inference.py",
    framework_version="1.2-1",
)
# -

predictor = xgb_inference_model.deploy(
    initial_instance_count=1,
    instance_type="ml.c5.xlarge",
)

# ### Explain the model's predictions on each data point

# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

def plot_feature_contributions(prediction):
    """Render a SHAP waterfall-style bar chart for one endpoint response.

    `prediction` is a 1-D array laid out as
    [model prediction, per-feature SHAP contributions ..., bias].
    """
    attribute_names = [
        "Sex",  # nominal / -- / M, F, and I (infant)
        "Length",  # continuous / mm / Longest shell measurement
        "Diameter",  # continuous / mm / perpendicular to length
        "Height",  # continuous / mm / with meat in shell
        "Whole weight",  # continuous / grams / whole abalone
        "Shucked weight",  # continuous / grams / weight of meat
        "Viscera weight",  # continuous / grams / gut weight (after bleeding)
        "Shell weight",  # continuous / grams / after being dried
    ]
    # The discarded `_` appears to be the contribution of a phantom 0-th
    # column created by the 1-based libsvm feature indices — TODO confirm
    # against the endpoint's actual output width.
    prediction, _, *shap_values, bias = prediction
    if len(shap_values) != len(attribute_names):
        raise ValueError("Length mismatch between shap values and attribute names.")
    # One row per attribute, sorted so the largest negative contribution
    # sits at the top of the chart.
    df = pd.DataFrame(data=[shap_values], index=["SHAP"], columns=attribute_names).T
    df.sort_values(by="SHAP", inplace=True)
    # Each bar starts where the cumulative sum of prior contributions ends.
    df["bar_start"] = bias + df.SHAP.cumsum().shift().fillna(0.0)
    df["bar_end"] = df.bar_start + df.SHAP
    df[["bar_start", "bar_end"]] = np.sort(df[["bar_start", "bar_end"]].values)
    df["hue"] = df.SHAP.apply(lambda x: 0 if x > 0 else 1)
    sns.set(style="white")
    # Foreground bars: full extent of each contribution.
    ax1 = sns.barplot(x=df.bar_end, y=df.index, data=df, orient="h", palette="vlag")
    for idx, patch in enumerate(ax1.patches):
        x_val = patch.get_x() + patch.get_width() + 0.8
        y_val = patch.get_y() + patch.get_height() / 2
        shap_value = df.SHAP.values[idx]
        # NOTE(review): for negative shap_value this prepends "-" to an
        # already-negative formatted number ("--1.23") — verify intended.
        value = "{0}{1:.2f}".format("+" if shap_value > 0 else "-", shap_value)
        ax1.annotate(value, (x_val, y_val), ha="right", va="center")
    # Background (white) bars mask the portion before each bar's start,
    # leaving only the contribution segment visible.
    ax2 = sns.barplot(x=df.bar_start, y=df.index, data=df, orient="h", color="#FFFFFF")
    ax2.set_xlim(
        df[["bar_start", "bar_end"]].values.min() - 1, df[["bar_start", "bar_end"]].values.max() + 1
    )
    # Dashed reference line at the model's base value.
    ax2.axvline(x=bias, color="#000000", alpha=0.2, linestyle="--", linewidth=1)
    ax2.set_title("base value: {0:.1f} → model output: {1:.1f}".format(bias, prediction))
    ax2.set_xlabel("Abalone age")
    sns.despine(left=True, bottom=True)
    plt.tight_layout()
    plt.show()

def predict_and_plot(predictor, libsvm_str):
    """Strip the true label from a libsvm row, query the endpoint, and plot
    the returned SHAP contributions."""
    label, *features = libsvm_str.strip().split()
    predictions = predictor.predict(" ".join(["-99"] + features))  # use dummy label -99
    np_array = np.array([float(x) for x in predictions[0]])
    plot_feature_contributions(np_array)
# -

# The below figure shows features each contributing to push the model output from the base value (9.9 rings) to the model output (6.9 rings). The primary indicator for a young abalone according to the model is low shell weight, which decreases the prediction by 3.0 rings from the base value of 9.9 rings. Whole weight and shucked weight are also powerful indicators. The whole weight pushes the prediction lower by 0.84 rings, while shucked weight pushes the prediction higher by 1.6 rings.

a_young_abalone = "6 1:3 2:0.37 3:0.29 4:0.095 5:0.249 6:0.1045 7:0.058 8:0.067"
predict_and_plot(predictor, a_young_abalone)

# The second example shows feature contributions for another sample, an old abalone. We again see that the primary indicator for the age of abalone according to the model is shell weight, which increases the model prediction by 2.36 rings. Whole weight and shucked weight also contribute significantly, and they both push the model's prediction higher.

an_old_abalone = "15 1:1 2:0.655 3:0.53 4:0.175 5:1.2635 6:0.486 7:0.2635 8:0.415"
predict_and_plot(predictor, an_old_abalone)

# ### (Optional) Delete the Endpoint
#
# If you're done with this exercise, please run the `delete_endpoint` line in the cell below. This will remove the hosted endpoint and avoid any charges from a stray instance being left on.
predictor.delete_endpoint()
introduction_to_amazon_algorithms/xgboost_abalone/xgboost_inferenece_script_mode.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Minimal Amazon Polly demo: synthesize speech from text, save the MP3,
# and play it back inline in the notebook.

import boto3

polly = boto3.client('polly')

# Use Amazon Polly to convert text to speech
res = polly.synthesize_speech(
    Text="Hello, how are you?",
    OutputFormat='mp3',
    VoiceId='Joanna')

# Save the audio stream returned by Amazon Polly into the mp3 file.
# FIX: use a context manager so the file handle is closed even if
# reading the stream raises, instead of open()/write()/close().
audiofile = 'myaudio.mp3'
with open(audiofile, 'wb') as file:
    file.write(res['AudioStream'].read())

# Play the mp3 file
import IPython
IPython.display.Audio(audiofile)
Amazon-Polly/amazon_polly.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# +
# #!/usr/bin/env python
import numpy as np
import json
from funcy import project

# %run ../Python_files/util.py

# TMC -> reference speed, produced by an earlier preprocessing step.
tmc_ref_speed_dict = zload('../temp_files/tmc_ref_speed_dict_journal.pkz')

# Time-of-day periods used throughout:
# AM: 7:00 am - 9:00 am
# MD: 11:00 am - 13:00 pm
# PM: 17:00 pm - 19:00 pm
# NT: 21:00 pm - 23:00 pm

data_folder = '/home/jzh/INRIX/All_INRIX_2012_filtered_journal/'

# Month number -> abbreviation used in the per-month JSON file names.
# (Only these four representative months were extracted.)
_MONTH_ABBR = {1: 'Jan', 4: 'Apr', 7: 'Jul', 10: 'Oct'}

# Period label -> the two hours belonging to it.
_PERIOD_HOURS = {'AM': (7, 8), 'MD': (11, 12), 'PM': (17, 18), 'NT': (21, 22)}


# load the dict for each month
def month_dict(month, idx):
    """Load chunk `idx` of the (tmc_month_day_hour_minute -> speed/travel
    time) dict for `month`.

    Raises IOError for months other than 1, 4, 7, 10 (kept from the
    original implementation so existing callers see the same exception).
    """
    if month not in _MONTH_ABBR:
        raise IOError("Invalid input; please input another month.")
    file_name = (data_folder +
                 'tmc_%s_%s_day_hour_minute_speed_travelTime_dict_journal.json'
                 % (_MONTH_ABBR[month], idx))
    with open(file_name, 'r') as json_file:
        return json.load(json_file)


# filter speed dict for each month (tmc, day, hour, minute), corresponding
# to different periods
def filter_dict(month, idx):
    """Split one month/chunk speed dict into AM/MD/PM/NT period dicts and
    write each one out as JSON next to the input data."""
    month_dict_ = month_dict(month, idx)

    # Build, per period, the list of keys that fall inside its hours.
    period_keys = {'AM': [], 'MD': [], 'PM': [], 'NT': []}
    for tmc in tmc_ref_speed_dict.keys():
        days_ = days(month)
        # NOTE(review): range(days_)[1:] covers days 1 .. days_-1, i.e. it
        # skips day == days_; confirm `days()` already accounts for that.
        for day in range(days_)[1:]:
            for period, hours in _PERIOD_HOURS.items():
                for hour in hours:
                    for minute in range(60):
                        key = (str(tmc) + '_' + str(month) + '_' + str(day) +
                               '_' + str(hour) + '_' + str(minute))
                        period_keys[period].append(key)

    # Writing JSON data: one filtered dict per period, same filenames as
    # the original four-way duplicated implementation produced.
    for period, keys in period_keys.items():
        filtered = project(month_dict_, keys)
        input_file = (data_folder +
                      'filtered_month_%s_%s_%s_dict_journal.json'
                      % (month, idx, period))
        with open(input_file, 'w') as json_file:
            json.dump(filtered, json_file)
# -

# Process every extracted month and every chunk index 1..8.
for month in [1, 4, 7, 10]:
    for idx in range(9)[1:]:
        filter_dict(month, idx)
01_INRIX_data_preprocessing_journal18/INRIX_data_preprocessing_07_extract_speed_data_filter_dict_journal.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linear basis function models with PyMC4 # + import logging import pymc4 as pm import numpy as np import arviz as az import tensorflow as tf import tensorflow_probability as tfp print(pm.__version__) print(tf.__version__) print(tfp.__version__) # Mute Tensorflow warnings ... logging.getLogger('tensorflow').setLevel(logging.ERROR) # - # ## Linear basis function models # # The following is a PyMC4 implementation of [Bayesian regression with linear basis function models](https://nbviewer.jupyter.org/github/krasserm/bayesian-machine-learning/blob/dev/bayesian-linear-regression/bayesian_linear_regression.ipynb). To recap, a linear regression model is a linear function of the parameters but not necessarily of the input. Input $x$ can be expanded with a set of non-linear basis functions $\phi_j(x)$, where $(\phi_1(x), \dots, \phi_M(x))^T = \boldsymbol\phi(x)$, for modeling a non-linear relationship between input $x$ and a function value $y$. # # $$ # y(x, \mathbf{w}) = w_0 + \sum_{j=1}^{M}{w_j \phi_j(x)} = w_0 + \mathbf{w}_{1:}^T \boldsymbol\phi(x) \tag{1} # $$ # # For simplicity I'm using a scalar input $x$ here. Target variable $t$ is given by the deterministic function $y(x, \mathbf{w})$ and Gaussian noise $\epsilon$. # # $$ # t = y(x, \mathbf{w}) + \epsilon \tag{2} # $$ # # Here, we can choose between polynomial and Gaussian basis functions for expanding input $x$. 
# +
from functools import partial
from scipy.stats import norm


def polynomial_basis(x, power):
    """Monomial basis function: the input raised to `power`."""
    return x ** power


def gaussian_basis(x, mu, sigma):
    """Gaussian (RBF) basis function centered at `mu` with width `sigma`."""
    density = norm(loc=mu, scale=sigma).pdf(x)
    return density.astype(np.float32)


def _expand(x, bf, bf_args):
    """Build a design matrix with one column per basis-function argument."""
    columns = [bf(x, bf_arg) for bf_arg in bf_args]
    return np.stack(columns, axis=1)


def expand_polynomial(x, degree=3):
    """Design matrix with monomial columns x^1 ... x^degree."""
    return _expand(x, bf=polynomial_basis, bf_args=range(1, degree + 1))


def expand_gaussian(x, mus=np.linspace(0, 1, 9), sigma=0.3):
    """Design matrix with one Gaussian bump per center in `mus`."""
    return _expand(x, bf=partial(gaussian_basis, sigma=sigma), bf_args=mus)


# Choose between polynomial and Gaussian expansion
# (by switching the comment on the following two lines)
expand = expand_polynomial
#expand = expand_gaussian
# -

# For example, to expand two input values `[0.5, 1.5]` into a polynomial
# design matrix of degree `3` we can use

expand_polynomial(np.array([0.5, 1.5]), degree=3)

# The power of `0` is omitted here and covered by a $w_0$ in the model.

# ## Example dataset
#
# The example dataset consists of `N` noisy samples from a sinusoidal function `f`.

# +
import matplotlib.pyplot as plt
# %matplotlib inline

from bayesian_linear_regression_util import (
    plot_data,
    plot_truth
)


def f(x, noise=0):
    """Sinusoidal function with optional Gaussian noise."""
    clean = 0.5 + np.sin(2 * np.pi * x)
    return clean + np.random.normal(scale=noise, size=x.shape)


N = 10          # number of samples
noise = 0.3     # constant noise level

# Noisy samples
x = np.linspace(0, 1, N, dtype=np.float32)
t = f(x, noise=noise)

# Noise-free ground truth
x_test = np.linspace(0, 1, 100).astype(np.float32)
y_true = f(x_test)

plot_data(x, t)
plot_truth(x_test, y_true)
# -

# ## Implementation with PyMC4
#
# ### Model definition
# The model definition directly follows from Eq. $(1)$ and Eq. $(2)$ with normal
# priors over parameters. The size of parameter vector `w_r` ($\mathbf{w}_{1:}$
# in Eq. $(1)$) is determined by the number of basis functions and set via the
# `batch_stack` parameter. With the above default settings, it is 3 for
# polynomial expansion and 9 for Gaussian expansion.

# +
import tensorflow as tf


@pm.model
def model(Phi, t, sigma=noise):
    """Linear model generator.

    Args:
    - Phi: design matrix (N,M)
    - t: noisy target values (N,)
    - sigma: known noise of t
    """
    w_0 = yield pm.Normal(name='w_0', loc=0, scale=10)
    w_r = yield pm.Normal(name='w_r', loc=0, scale=10, batch_stack=Phi.shape[1])
    mu = w_0 + tf.tensordot(w_r, Phi.T, axes=1)
    yield pm.Normal(name='t_obs', loc=mu, scale=sigma, observed=t)
# -

# ### Inference
# Tensorflow will automatically run inference on a GPU if available. With the
# current version of PyMC4, inference on a GPU is quite slow compared to a
# multi-core CPU (need to investigate that in more detail). To enforce inference
# on a CPU set environment variable `CUDA_VISIBLE_DEVICES` to an empty value.
# There is no progress bar visible yet during sampling but the following
# shouldn't take longer than a minute.

trace = pm.sample(model(expand(x), t), num_chains=3, burn_in=100, num_samples=1000)

az.plot_trace(trace);

az.plot_posterior(trace, var_names="model/w_0");

az.plot_posterior(trace, var_names="model/w_r");

# ### Prediction
#
# To obtain posterior predictive samples for a test set `x_test` we simply call
# the model generator function again with the expanded test set. This is a nice
# improvement over PyMC3 which required to setup a shared Theano variable for
# setting test set values. Target values are ignored during predictive
# sampling, only the shape of the target array `t` matters.

draws_posterior = pm.sample_posterior_predictive(model(expand(x_test), t=np.zeros_like(x_test)), trace, inplace=False)
draws_posterior.posterior_predictive

# The predictive mean and standard deviation is obtained by averaging over
# chains (axis `0`) and predictive samples (axis `1`) for each of the 100 data
# points in `x_test` (axis `2`).

# +
predictive_samples = draws_posterior.posterior_predictive.data_vars['model/t_obs'].values

m = np.mean(predictive_samples, axis=(0, 1))
s = np.std(predictive_samples, axis=(0, 1))
# -

# These statistics can be used to plot model predictions and their
# uncertainties (together with the ground truth and the noisy training dataset).

# +
plt.fill_between(x_test, m + s, m - s, alpha=0.5, label='Predictive std. dev.')
plt.plot(x_test, m, label='Predictive mean');

plot_data(x, t)
plot_truth(x_test, y_true, label=None)

plt.legend();
# -

# Try running the example again with Gaussian expansion i.e. setting
# `expand = expand_gaussian` and see how it compares to polynomial expansion.
# Also try running with a different number of basis functions by overriding the
# default arguments of `expand_polynomial` and `expand_gaussian`. You can find
# more PyMC4 examples in the
# [notebooks](https://github.com/pymc-devs/pymc4/tree/master/notebooks)
# directory of the PyMC4 project.
bayesian-linear-regression/bayesian_linear_regression_pymc4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''base'': conda)'
#     language: python
#     name: python37664bitbasecondaaf8fdd0c69a6418182a62a0cf729af6c
# ---

# # EDA

# ## Importing Libraries and Data

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv(r"D:\Utkarsh Mathur\Career\Data Science\Datasets\melbourne housing snapshot\melb_data.csv")

# ## Initial View of the Data

# 1. **Rooms**: Number of rooms<br>
# <br>
# 2. **Price**: Price in dollars<br>
# <br>
# 3. **Method**: <br>
# **S** - property sold; **SP** - property sold prior; **PI** - property passed in; **PN** - sold prior not disclosed; **SN** - sold not disclosed; **NB** - no bid; **VB** - vendor bid; **W** - withdrawn prior to auction; **SA** - sold after auction; **SS** - sold after auction price not disclosed. **N/A** - price or highest bid not available. <br>
# <br>
# 4. **Type**: <br>
# **br** - bedroom(s); **h** - house, cottage, villa, semi, terrace; **u** - unit, duplex; **t** - townhouse; dev site - development site; **o res** - other residential. <br>
# <br>
# 5. **SellerG**: Real Estate Agent<br>
# <br>
# 6. **Date**: Date sold <br>
# <br>
# 7. **Distance**: Distance from CBD<br>
# <br>
# 8. **Regionname**: General Region (West, North West, North, North east ...etc)<br>
# <br>
# 9. **Propertycount**: Number of properties that exist in the suburb.<br>
# <br>
# 10. **Bedroom2**: Scraped # of Bedrooms (from different source)<br>
# <br>
# 11. **Bathroom**: Number of Bathrooms<br>
# <br>
# 12. **Car**: Number of carspots<br>
# <br>
# 13. **Landsize**: Land Size<br>
# <br>
# 14. **BuildingArea**: Building Size<br>
# <br>
# 15. **CouncilArea**: Governing council for the area<br>

data.head()

data.describe(include="all")

data.isnull().sum()

data.shape

data.columns

# ## Information Stored in all the Columns

# List of all the suburbs
data.Suburb.value_counts()

# No. of Rooms.
data.Rooms.value_counts()

# Types of houses
data.Type.value_counts()

# Methods through which the houses have been sold.
data.Method.value_counts()

data.Postcode.value_counts()

# The names of the sellers or brokers of the houses.
data.SellerG.value_counts()

# The distance distribution of houses from CBD
data.Distance.describe()

# List of all the postal codes included.
data.Postcode.value_counts()

# No. of scraped bedrooms in the house
data.Bedroom2.value_counts()

# No. of bathrooms
data.Bathroom.value_counts()

# Data on the car parking spots in the house.
data.Car.value_counts(), data.Car.mean(), data.Car.median()

# The value trend of Landsize of the houses.
data.Landsize.describe()

# The general trends of Council Area
data.CouncilArea.value_counts()

# Now we know that latitude and longitude are location specific, so we need not
# worry about how they change.

# The list of region names
data.Regionname.value_counts()

# Understanding the Propertycount column
data.Propertycount.value_counts()

data.Propertycount.describe()

# And finally the trend of prices of the houses
data.Price.describe()

# ## Filling the Data null values

# Impute missing car-spot counts with the column median.
data.Car = data.Car.fillna(data.Car.median())

data.isnull().sum()

# ## Data Preparation

# Initially the data has 20 features and a price column. But we need to remove
# some of the unnecessary and overlapping features so as to make a better
# dataset.<br>
# <br>
# Why each feature is removed:<br>
# 1) **Date**: as the data covers a very short span of time, the variance in prices with time will be negligible.<br>
# 2) **BuildingArea**: due to a large number of empty values.<br>
# 3) **YearBuilt**: due to a large number of empty values.<br>
# 4) **CouncilArea**: within a single state the council area does not make much of a difference to house prices.<br>
# 5) **Address**: the address is a string that is different for every house, so it does not make sense to use it for predicting price.<br>

data1 = data.drop(['Address','BuildingArea', 'YearBuilt', 'CouncilArea', 'Date'], axis=1)

data1.head()

from sklearn.preprocessing import LabelEncoder

lb = LabelEncoder()
# Encode every remaining categorical column as integer labels.
for column in ['Suburb', 'Type', 'Method', 'SellerG', 'Regionname']:
    data1[column] = lb.fit_transform(data1[column])

data1.head()

data1.describe(include="all")

# ## Visualizing the data

# One bar plot of Price against each categorical/count feature.
for feature in ['Rooms', 'Type', 'Method', 'Bedroom2', 'Bathroom', 'Car', 'Regionname']:
    sns.barplot(x=feature, y='Price', data=data1)
EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # My exercise sheet
#
# ## Lesson 1
#
# ### A first try with **print()**

# Print a literal, then rebuild the same output from variables.
print("<NAME>")

nome = "Marco"
print(nome)

cognome = "<NAME>"
print(cognome)

# Concatenation without, and then with, a separating space.
print(f"{nome}{cognome}")
print(nome, cognome)

# # Exercise

# +
# Rebinding: each name keeps only the value assigned to it last.
a = 5
b = 5
b = 0
a = 3
Lezioni/Esercizi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): line breaks and indentation below were reconstructed from a
# whitespace-collapsed export; all code tokens are unchanged.

import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import calendar
import imageio
import joblib
from shutil import copy2, rmtree
from datetime import datetime, timedelta
from IPython.display import clear_output
# Star imports supply join_list_names, clean_nan, get_max_min, get_max_day,
# get_max_cummulative, make_mp4, change_df_names, cdns_states,
# inverse_dict_for_name_states, patient_data_keys — TODO confirm against
# data_helpers/constant modules.
from data_helpers import *
from constant import *

plt.style.use('seaborn-whitegrid')

# Directory holding the daily per-state CSV exports (one file per report day).
data_path = r'C:\Users\jupol\Desktop\TensorTut\covid\legacyCovidMexico'
data_dir = os.listdir(data_path)

# Map from English case-type label to the filename prefix of the daily CSVs.
sufx_dict = {
    'Deaths': 'Casos_Diarios_Estado_Nacional_Defunciones_2020',
    'Confirmed': 'Casos_Diarios_Estado_Nacional_Confirmados_2020',
    'Suspicious': 'Casos_Diarios_Estado_Nacional_Sospechosos_2020',
    'Negatives': 'Casos_Diarios_Estado_Nacional_Negativos_2020',
    'Actives': 'Casos_Diarios_Estado_Nacional_Activos_2020',
    'Hospitalized': 'Casos_Diarios_Estado_Nacional_Hospitalizados_2020',
    'Ongoing': 'Casos_Diarios_Estado_Nacional_Ambulantes_2020'
}

# +
def plot_single_discrete(index, plot_index, plot_data, state, file, dtype, max_day, trim):
    """Render and save the daily-cases bar chart for one report day.

    `state` may be a single name or a list of names (one bar series each).
    The month/day in the figure title is parsed from the filename `file`.
    """
    plt.close('all')
    plt.rcParams['figure.figsize'] = (14, 6)
    plt.rcParams['figure.constrained_layout.use'] = True
    plt.ylabel(f'Number of {dtype} Cases', fontsize=18)
    # Fixed y-limit (30% headroom over the final maximum) keeps frames comparable.
    plt.ylim(0, max_day + (max_day * 0.3))
    plt.xlim(trim, len(plot_index))
    plt.xticks(rotation=70)
    if type(state) == list:
        for ind, local in enumerate(state):
            # join_names is recomputed each iteration; only the last title sticks.
            join_names = join_list_names(state)
            plt.title(f'Daily {dtype} Cases for Covid19 in {join_names}', fontsize=20)
            plt.bar(plot_index, plot_data[ind], label=f'{local}: {max(clean_nan(np.array(plot_data[ind])))}')
    else:
        plt.bar(plot_index, plot_data, label=f'Number of {dtype} Cases: {max(clean_nan(np.array(plot_data)))}')
        plt.title(f'Daily {dtype} Cases for Covid19 in {state}', fontsize=20)
    plt.suptitle(f'{calendar.month_name[int(file[-8:-6])]} {file[-6:-4]}', fontsize=22)
    plt.legend(loc=0, fontsize=20)
    # Frame filename is the ISO-like date parsed out of the CSV filename.
    if type(state) == list:
        join_names = join_list_names(state)
        plt.savefig(f'plots/{join_names}/discrete/{dtype}/{file[-12:-8]+"-"+file[-8:-6]+"-"+file[-6:-4]}.jpg')
    else:
        plt.savefig(f'plots/{state}/discrete/{dtype}/{file[-12:-8]+"-"+file[-8:-6]+"-"+file[-6:-4]}.jpg')


def plot_single_cummulative(index, plot_index, plot_data, state, file, dtype, max_cummulative, trim):
    """Render and save the cumulative-cases line chart for one report day."""
    if type(plot_data[0]) == list:
        # Multi-state input: one cumulative series per state.
        # NOTE(review): join_names is only defined on this branch; the
        # type(state)==list branches below rely on it — confirm callers always
        # pass list plot_data together with list state.
        join_names = join_list_names(state)
        cummulative_record = []
        for local in plot_data:
            cummulative_record.append(get_cummulative_record(local))
    else:
        cummulative_record = get_cummulative_record(plot_data)
    plt.close('all')
    plt.rcParams['figure.figsize'] = (14, 6)
    plt.rcParams['figure.constrained_layout.use'] = True
    if type(state) == list:
        plt.title(f'Cummulative register of {dtype} Cases by Covid19 in {join_names}', fontsize=20)
    else:
        plt.title(f'Cummulative register of {dtype} Cases by Covid19 in {state}', fontsize=20)
    plt.ylabel(f'Number of {dtype} Cases', fontsize=18)
    plt.ylim(0, max_cummulative + (max_cummulative * 0.3))
    plt.xticks(rotation=70)
    plt.xlim(trim, len(plot_index))
    if type(state) == list:
        for ind, local in enumerate(state):
            plt.plot(plot_index, cummulative_record[ind], label=local + ': ' + str(max(clean_nan(cummulative_record[ind]))))
            plt.fill_between(plot_index, cummulative_record[ind], alpha=.2)
    else:
        plt.plot(plot_index, cummulative_record, label=dtype + ': ' + str(max(clean_nan(cummulative_record))))
        plt.fill_between(plot_index, cummulative_record, color='b', alpha=.2)
    plt.suptitle(f'{calendar.month_name[int(file[-8:-6])]} {file[-6:-4]}', fontsize=22)
    plt.legend(loc=0, fontsize=18)
    if type(state) == list:
        plt.savefig(f'plots/{join_names}/cummulative/{dtype}/{file[-12:-8]+"-"+file[-8:-6]+"-"+file[-6:-4]}.jpg')
    else:
        plt.savefig(f'plots/{state}/cummulative/{dtype}/{file[-12:-8]+"-"+file[-8:-6]+"-"+file[-6:-4]}.jpg')


def plot_multi_discrete(index, plot_index, plot_data, days, dtypes, state, max_day, trim):
    """Overlayed daily bar chart for several case types in one state.

    `plot_data` maps case-type label -> per-day values; `days` holds the CSV
    filenames for this report day (date is parsed from days[0]).
    """
    today = calendar.month_name[int(days[0][-8:-6])] + '-' + days[0][-6:-4]
    plt.close('all')
    plt.rcParams['figure.figsize'] = (14, 6)
    plt.rcParams['figure.constrained_layout.use'] = True
    plt.title(f'Daily {dtypes} Cases for Covid19 in {state}', fontsize=20)
    plt.ylabel(f'Number of Cases', fontsize=18)
    plt.ylim(0, max_day + (max_day * 0.3))
    plt.xlim(trim, len(plot_index))
    plt.xticks(rotation=70)
    for data in plot_data.keys():
        plt.bar(plot_index, plot_data[data], label=f'Number of {data} Cases: {max(clean_nan(np.array(plot_data[data])))}', alpha=0.5)
    plt.suptitle(f'{today}', fontsize=22)
    plt.legend(loc=0, fontsize=20)
    plt.savefig(f'plots/{state}/discrete/{dtypes}/{days[0][-12:-8]}-{days[0][-8:-6]}-{days[0][-6:-4]}.jpg')


def plot_multi_cummulative(index, plot_index, plot_data, days, dtypes, state, max_cummulative, trim):
    """Overlayed cumulative line chart for several case types in one state."""
    today = calendar.month_name[int(days[0][-8:-6])] + '-' + days[0][-6:-4]
    # Running sums per case type; NaN days are carried through as NaN without
    # advancing the running total.
    cum_data = {key: [] for key in plot_data.keys()}
    for key in plot_data.keys():
        last_valid = int()
        for i in plot_data[key]:
            if len(cum_data[key]) == 0:
                if not np.isnan(i):
                    cum_data[key].append(i)
                    last_valid = i
                else:
                    cum_data[key].append(np.nan)
                    last_valid = 0
            else:
                if not np.isnan(i):
                    cum_data[key].append(i + last_valid)
                    last_valid += i
                else:
                    cum_data[key].append(np.nan)
    plt.close('all')
    plt.rcParams['figure.figsize'] = (14, 6)
    plt.rcParams['figure.constrained_layout.use'] = True
    plt.title(f'Cummulative register of {dtypes} Cases by Covid19 in {state}', fontsize=20)
    plt.ylabel(f'Number of {dtypes} Cases', fontsize=18)
    plt.ylim(0, max_cummulative + (max_cummulative * 0.3))
    plt.xticks(rotation=70)
    plt.xlim(trim, len(plot_index))
    for key in cum_data:
        plt.plot(plot_index, cum_data[key], label=key + ': ' + str(max(clean_nan(cum_data[key]))))
        plt.fill_between(plot_index, cum_data[key], alpha=.2)
    plt.suptitle(f'{calendar.month_name[int(days[0][-8:-6])]} {days[0][-6:-4]}', fontsize=22)
    plt.legend(loc=0, fontsize=18)
    plt.savefig(f'plots/{state}/cummulative/{dtypes}/{days[0][-12:-8]}-{days[0][-8:-6]}-{days[0][-6:-4]}.jpg')
# -

# +
def make_multi_state_plots(index, plot_index, state, file, dtype, max_day, max_cummulative, trim):
    """Plot one case type for a LIST of states from a single day's CSV."""
    join_names = join_list_names(state)
    if not os.path.exists(f'plots/{join_names}/cummulative/{dtype}'):
        os.makedirs(f'plots/{join_names}/cummulative/{dtype}')
    if not os.path.exists(f'plots/{join_names}/discrete/{dtype}'):
        os.makedirs(f'plots/{join_names}/discrete/{dtype}')
    data = []
    for local in state:
        # NOTE: the same CSV is re-read once per state; could be hoisted.
        local_data = pd.read_csv(os.path.join(data_path, file))
        local_data = local_data[local_data['nombre'] == local]
        plot_data = []
        for day in index:
            try:
                # Rebuilds the 'dd-mm-yyyy' column name from the Timestamp repr;
                # missing columns become NaN.
                plot_data.append(local_data[str(day)[-11:-9]+str(day)[-15:-11]+str(day)[:4]].values[0])
            except:
                plot_data.append(np.nan)
        data.append(plot_data)
    plot_single_cummulative(index, plot_index, data, state, file, dtype, max_cummulative, trim)
    plot_single_discrete(index, plot_index, data, state, file, dtype, max_day, trim)
    print(f'Plots of {dtype} for {join_names} ready for day: {calendar.month_name[int(file[-8:-6])]} {file[-6:-4]}')


def make_plots(index, plot_index, state, file, dtype, max_day, max_cummulative, trim):
    """Plot one case type for a single state from a single day's CSV."""
    if not os.path.exists(f'plots/{state}/cummulative/{dtype}'):
        os.makedirs(f'plots/{state}/cummulative/{dtype}')
    if not os.path.exists(f'plots/{state}/discrete/{dtype}'):
        os.makedirs(f'plots/{state}/discrete/{dtype}')
    data = pd.read_csv(os.path.join(data_path, file))
    data = data[data['nombre'] == state]
    plot_data = []
    for day in index:
        try:
            plot_data.append(data[str(day)[-11:-9]+str(day)[-15:-11]+str(day)[:4]].values[0])
        except:
            plot_data.append(np.nan)
    plot_single_discrete(index, plot_index, plot_data, state, file, dtype, max_day, trim)
    plot_single_cummulative(index, plot_index, plot_data, state, file, dtype, max_cummulative, trim)
    print(f'Plots of {dtype} ready for day: {calendar.month_name[int(file[-8:-6])]} {file[-6:-4]}')


def make_multi_plots(index, plot_index, state, days, sufx_items, max_day, max_cummulative, trim, dtypes):
    """Plot SEVERAL case types for one state from one day's set of CSVs."""
    if not os.path.exists(f'plots/{state}/cummulative/{dtypes}'):
        os.makedirs(f'plots/{state}/cummulative/{dtypes}')
    if not os.path.exists(f'plots/{state}/discrete/{dtypes}'):
        os.makedirs(f'plots/{state}/discrete/{dtypes}')
    data = [pd.read_csv(os.path.join(data_path, x)) for x in days]
    data = [x[x['nombre'] == state] for x in data]
    plot_data = {key: [] for key in sufx_items.keys()}
    # Relies on days being ordered the same as sufx_items' keys — TODO confirm.
    for ind, key in enumerate(plot_data.keys()):
        for day in index:
            try:
                plot_data[key].append(data[ind][str(day)[-11:-9]+str(day)[-15:-11]+str(day)[:4]].values[0])
            except:
                plot_data[key].append(np.nan)
    plot_multi_discrete(index, plot_index, plot_data, days, dtypes, state, max_day, trim)
    plot_multi_cummulative(index, plot_index, plot_data, days, dtypes, state, max_cummulative, trim)
    print(f'Plots of {dtypes} ready for day: {calendar.month_name[int(days[0][-8:-6])]} {days[0][-6:-4]}')
# -

# +
def get_indexes(sufx, data_dir, state):
    """Date axis and axis limits for one case type, from oldest/newest CSVs."""
    newest_date = get_max_min(sufx, data_dir, 'max')
    oldest_date = get_max_min(sufx, data_dir, 'min')
    newest_date = pd.read_csv(os.path.join( data_path, sufx + str(newest_date)[-14:-12]+str(newest_date)[-11:-9]+'.csv') )
    oldest_date = pd.read_csv(os.path.join( data_path, sufx + str(oldest_date)[-14:-12]+str(oldest_date)[-11:-9]+'.csv') )
    max_day = get_max_day(state, newest_date)
    max_cummulative = get_max_cummulative(state, newest_date)
    # Columns 0-2 are metadata; column 3 is the first date column.
    index = pd.date_range(start=pd.to_datetime(oldest_date.columns[3], dayfirst=True), end=pd.to_datetime(newest_date.columns[-1], dayfirst=True))
    plot_index = [calendar.month_name[int(str(x)[5:-12])] + ' / ' + str(x)[8:-9] for x in index]
    return index, plot_index, max_day, max_cummulative


def get_multi_indexes(files, data_dir, state, sufx_items):
    """Date axis and y-limits shared across several case types."""
    newest_date = max([get_max_min(sufx_items[sufx], data_dir, 'max') for sufx in sufx_items.keys()])
    oldest_date = min([get_max_min(sufx_items[sufx], data_dir, 'min') for sufx in sufx_items.keys()])
    longest_data_check = 0
    longest_data = ''
    max_day = 0
    max_cummulative = 0
    for dtype in files.keys():
        data_check = pd.read_csv(os.path.join( data_path, sufx_items[dtype] + str(newest_date)[-14:-12]+str(newest_date)[-11:-9]+'.csv'))
        max_discrete = max(data_check[data_check['nombre'] == state].values[0][3:])
        sum_cummulative = sum(data_check[data_check['nombre'] == state].values[0][3:])
        if len(data_check.columns[3:]) > longest_data_check:
            # NOTE(review): BUG — '==' compares and discards; longest_data_check
            # is never updated, so longest_data ends up being the LAST dtype's
            # frame, not the longest one. Should be '='.
            longest_data_check == len(data_check.columns[3:])
            longest_data = data_check[data_check['nombre'] == state]
        if max_discrete > max_day:
            max_day = max_discrete
        if sum_cummulative > max_cummulative:
            max_cummulative = sum_cummulative
    index = pd.date_range(start=pd.to_datetime(longest_data.columns[3], dayfirst=True), end=pd.to_datetime(longest_data.columns[-1], dayfirst=True))
    plot_index = [calendar.month_name[int(str(x)[5:-12])] + ' / ' + str(x)[8:-9] for x in index]
    return index, plot_index, max_day, max_cummulative
# -

def make_animation(data_dir, dtype, state, trim=0):
    """Generate all per-day frames for (dtype, state) and assemble the MP4.

    dtype: one case-type label, or a list of labels (overlayed plots).
    state: one state name, or a list of names (only valid with a single dtype).
    trim:  number of leading days cut from the x-axis.
    Already-plotted days are skipped by comparing against existing frame files.
    """
    if type(dtype) == list:
        sufx_items = {key: sufx_dict[key] for key in dtype}
        dtypes = [x for x in sufx_items.keys()]
        dtypes = join_list_names(dtypes)
        files = {}
        for key in sufx_items.keys():
            files[key] = [file for file in data_dir if file.startswith(sufx_items[key])]
        if os.path.exists(f'plots/{state}/cummulative/{dtypes}'):
            # Resume: keep only report days that have no frame on disk yet.
            plot_files = os.listdir(f'plots/{state}/cummulative/{dtypes}')
            plot_file_dates = [pd.to_datetime(plot[:-4]) for plot in plot_files]
            for key in list(files.keys()):
                # NOTE(review): filtered_days is reset per key, so only the
                # LAST key's missing days survive the loop — confirm intended.
                filtered_days = []
                for item in files[key]:
                    date = pd.to_datetime(item[-12:-8]+'-'+item[-8:-6]+'-'+item[-6:-4])
                    if date not in plot_file_dates:
                        filtered_days.append(date)
            if len(filtered_days) == 0:
                # Nothing new: rebuild the video from existing frames and
                # remove intermediate files (those with '_' in the name).
                make_mp4(state, dtypes, max(plot_file_dates))
                # NOTE(review): this loop rebinds the local name 'files'.
                for root, dirs, files in os.walk("plots", topdown=False):
                    for name in files:
                        if '_' in name:
                            os.remove(os.path.join(root, name))
                return
            for key in sufx_items.keys():
                files[key] = [sufx_items[key]+str(x)[-14:-12]+str(x)[-11:-9]+'.csv' for x in filtered_days]
        # All case types must have the same number of daily files.
        assert len(set([len(files[key]) for key in files.keys()])) == 1
        index, plot_index, max_day, max_cummulative = get_multi_indexes(files, data_dir, state, sufx_items)
        for instance in range(len(files[dtype[0]])):
            days = []
            for key in files.keys():
                days.append(files[key][instance])
            make_multi_plots(index, plot_index, state, days, sufx_items, max_day, max_cummulative, trim, dtypes)
            clear_output(wait=True)
        plots = [x[:-4] for x in os.listdir(f'plots/{state}/cummulative/{dtypes}')]
        max_date = str(max(pd.to_datetime(plots)))[:10]
        make_mp4(state, dtypes, max_date)
        for root, dirs, files in os.walk("plots", topdown=False):
            for name in files:
                if '_' in name:
                    os.remove(os.path.join(root, name))
    else:
        # Single case type; state may still be a list of states.
        sufx = sufx_dict[dtype]
        index, plot_index, max_day, max_cummulative = get_indexes(sufx, data_dir, state)
        files = [file for file in data_dir if file.startswith(sufx)]
        if type(state) == list:
            join_names = join_list_names(state)
            if os.path.exists(f'plots/{join_names}/cummulative/{dtype}'):
                plot_files = os.listdir(f'plots/{join_names}/cummulative/{dtype}')
                plot_file_dates = [pd.to_datetime(plot[:-4]) for plot in plot_files]
                filtered_days = []
                for day in files:
                    date = pd.to_datetime(day[-12:-8]+'-'+day[-8:-6]+'-'+day[-6:-4])
                    if date not in plot_file_dates:
                        filtered_days.append(sufx+str(date)[-14:-12]+str(date)[-11:-9]+'.csv')
                if len(filtered_days) == 0:
                    make_mp4(state, dtype, max(plot_file_dates))
                    for root, dirs, files in os.walk("plots", topdown=False):
                        for name in files:
                            if '_' in name:
                                os.remove(os.path.join(root, name))
                    return
                files = filtered_days
            for file in files:
                make_multi_state_plots(index, plot_index, state, file, dtype, max_day, max_cummulative, trim)
                clear_output(wait=True)
            plots = [x[:-4] for x in os.listdir(f'plots/{join_names}/cummulative/{dtype}')]
            max_date = str(max(pd.to_datetime(plots)))[:10]
            make_mp4(join_names, dtype, max_date)
        else:
            for file in files:
                make_plots(index, plot_index, state, file, dtype, max_day, max_cummulative, trim)
                clear_output(wait=True)
            plots = [x[:-4] for x in os.listdir(f'plots/{state}/cummulative/{dtype}')]
            max_date = str(max(pd.to_datetime(plots)))[:10]
            make_mp4(state, dtype, max_date)
            for root, dirs, files in os.walk("plots", topdown=False):
                for name in files:
                    if '_' in name:
                        os.remove(os.path.join(root, name))


# Batch runs: national level, then the state of Oaxaca.
make_animation(data_dir = data_dir, dtype = ['Ongoing','Hospitalized'], state = 'Nacional', trim=40)

make_animation(data_dir = data_dir, dtype = 'Deaths', state = 'Nacional', trim=40)

make_animation(data_dir = data_dir, dtype = ['Confirmed','Suspicious'], state = 'Nacional', trim=40)

make_animation(data_dir = data_dir, dtype = 'Actives', state = 'Nacional', trim=40)

make_animation(data_dir = data_dir, dtype = ['Deaths','Hospitalized'], state = 'Nacional', trim=40)

make_animation(data_dir = data_dir, dtype = ['Ongoing','Hospitalized'], state = 'OAXACA', trim=40)

make_animation(data_dir = data_dir, dtype = 'Deaths', state = 'OAXACA', trim=40)

make_animation(data_dir = data_dir, dtype = ['Confirmed','Suspicious'], state = 'OAXACA', trim=40)

make_animation(data_dir = data_dir, dtype = 'Actives', state = 'OAXACA', trim=40)

make_animation(data_dir = data_dir, dtype = ['Deaths','Hospitalized'], state = 'OAXACA', trim=40)

# +
#os.remove("ChangedFile.csv")
# -

# NOTE(review): scratch cell — 'state', 'dtype' and 'dis_images' are not
# defined at module level; running this raises NameError. Looks like a
# leftover fragment of make_mp4's internals.
imageio.mimsave(f'results/{state}/{dtype}_discrete_{state}.mp4', dis_images)

os.remove("ChangedFile.csv")

make_animation(data_dir = data_dir, dtype = ['Hospitalized','Ongoing'], state = 'Oaxaca', trim=40)

make_animation(data_dir = data_dir, dtype = 'Actives', state = 'OAXACA', trim=50)

make_animation(data_dir = data_dir, dtype ='Actives', state = 'Nacional', trim=10)

make_animation(data_dir = data_dir, dtype = ['Confirmed','Actives'], state = 'Nacional', trim=10)

make_animation(data_dir = data_dir, dtype = ['Confirmed','Suspicious'], state = 'OAXACA', trim=10)

make_animation(data_dir = data_dir, dtype = 'Deaths', state = 'OAXACA', trim=0)

# Per-patient line-list exports; filenames encode the date as yymmdd.
d_dir = r'C:\Users\jupol\Desktop\TensorTut\covid\pass'
pass_dir = os.listdir(r'C:\Users\jupol\Desktop\TensorTut\covid\pass')
# 'dd-mm-20yy' display date -> raw filename.
dates_pass = {date[4:6]+'-'+date[2:4]+'-'+'20'+date[:2] : date for date in pass_dir}

dtypes_states = {
    'Defunciones': 'Casos_Diarios_Estado_Nacional_Defunciones_2020',
    'Confirmados': 'Casos_Diarios_Estado_Nacional_Confirmados_2020',
    'Negativos' : 'Casos_Diarios_Estado_Nacional_Negativos_2020',
    'Sospechosos': 'Casos_Diarios_Estado_Nacional_Sospechosos_2020',
    'Activos' : 'Casos_Diarios_Estado_Nacional_Activos_2020',
    'Hospitalizados': 'Casos_Diarios_Estado_Nacional_Hospitalizados_2020',
    'Ambulantes': 'Casos_Diarios_Estado_Nacional_Ambulantes_2020',
}

# NOTE(review): this immediately overwrites the full mapping above, restricting
# the rebuild to 'Ambulantes' only — confirm that is intentional.
dtypes_states = {
    'Ambulantes': 'Casos_Diarios_Estado_Nacional_Ambulantes_2020',
}


def create_data_from_patients_files(actives_window = 14):
    """Rebuild per-state daily series from the per-patient line-list files.

    For every state, case type in dtypes_states, and report day in dates_pass,
    counts patients per day (by death date, symptom onset, or admission date,
    depending on the case type), dumps the raw dict to result_hosp.pkl, and
    writes reconstructed CSVs (same layout as the official daily exports)
    into faltantes/. actives_window is how many days a confirmed case counts
    as active after symptom onset.

    Result codes in the line list: 1=positive, 2=negative, 3=pending — TODO
    confirm against the official data dictionary.
    """
    result = {}
    for state in cdns_states:
        result[state] = {}
        for dtype in dtypes_states.keys():
            result[state][dtype] = {}
            base = pd.read_csv(os.path.join(r'C:\Users\jupol\Desktop\TensorTut\covid\legacyCovidMexico', f'{dtypes_states[dtype]}0518.csv'))
            state_data = base[base['nombre'] == state]
            for today in dates_pass.keys():
                result[state][dtype][today] = []
                if dtype == 'Defunciones':
                    # Deaths: positive patients with a real death date,
                    # counted by day of death.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    # '9999-99-99' is the sentinel for "still alive".
                    patients = patients[patients['day_of_death'] != '9999-99-99' ]
                    patients = patients[patients['result'] == 1]
                    patients = pd.to_datetime(patients['day_of_death'].copy())
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Confirmados':
                    # Confirmed: result == 1, counted by symptom-onset date.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 1 ]
                    patients = pd.to_datetime(patients['onset_symptoms'].copy())
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Negativos':
                    # Negatives: result == 2, counted by symptom-onset date.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 2 ]
                    patients = pd.to_datetime(patients['onset_symptoms'].copy())
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Sospechosos':
                    # Suspicious/pending: result == 3, counted by onset date.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 3 ]
                    patients = pd.to_datetime(patients['onset_symptoms'].copy())
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Activos':
                    # Actives: each confirmed patient counts as active for
                    # actives_window days from onset, stopping early at death.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 1 ]
                    patients['onset_symptoms'] = pd.to_datetime(patients['onset_symptoms'])
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients['onset_symptoms']),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    active_patients = {key:0 for key in local_index}
                    for ind, day_active in enumerate(patients['onset_symptoms']):
                        for _ in range(actives_window):
                            if day_active not in local_index:
                                break
                            elif patients['day_of_death'].iloc[ind] != '9999-99-99' and day_active > pd.to_datetime(patients['day_of_death'].iloc[ind]):
                                break
                            else:
                                active_patients[day_active] +=1
                                day_active = day_active + timedelta(days=1)
                    for date in local_index:
                        today_result.append(active_patients[date])
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Hospitalizados':
                    # Hospitalized: patient_type == 2, counted by admission date.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 1 ]
                    patients = patients[patients['patient_type'] == 2 ]
                    patients = pd.to_datetime(patients['admission_date'])
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
                if dtype == 'Ambulantes':
                    # Outpatients: patient_type == 1, counted by admission date.
                    today_result = []
                    patients = change_df_names(pd.read_csv(os.path.join( d_dir, dates_pass[today] ), encoding='ANSI'))
                    if state != 'Nacional':
                        patients = patients[patients['treated_at'] == inverse_dict_for_name_states[state] ]
                    patients = patients[patients['result'] == 1 ]
                    patients = patients[patients['patient_type'] == 1 ]
                    patients = pd.to_datetime(patients['admission_date'])
                    if len(patients) == 0:
                        min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
                    else:
                        min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
                    local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
                    for day in pd.to_datetime(local_index):
                        try:
                            today_result.append(list(patients).count(day))
                        except:
                            today_result.append(0)
                    result[state][dtype][today] = today_result
                    result[state][dtype][today+'_localindex'] = local_index
                    print(f' Done for: {state}-{dtype}-{today} ')
                    clear_output(wait=True)
    joblib.dump(result,'result_hosp.pkl')
    # Second pass: reshape the per-state counts into one CSV per (dtype, day),
    # mirroring the layout of the official daily exports.
    for dtype in dtypes_states.keys():
        for date in dates_pass.keys():
            base = pd.read_csv(os.path.join(r'C:\Users\jupol\Desktop\TensorTut\covid\legacyCovidMexico', f'{dtypes_states[dtype]}0518.csv'))
            data = {}
            for state in result.keys():
                data[state] = {}
                data[state] = result[state][dtype][date]
            # Left-pad shorter series with zeros so all states share one axis.
            max_len = max([len(data[x]) for x in result.keys()])
            for state in result.keys():
                if len(data[state]) != max_len:
                    data[state] = [0]*( max_len - len(data[state]) ) +data[state]
            today = pd.to_datetime(date, dayfirst=True)
            data = pd.DataFrame.from_dict(data).T
            index = pd.date_range(start=(today - timedelta(days=len(data.columns)-1) ), end = today)
            # NOTE: the comprehension variable 'date' shadows the loop variable.
            index=[str(date)[8:10]+'-'+str(date)[5:7]+'-'+str(date)[:4] for date in index]
            data.columns = index
            data['nombre'] = data.index
            data['poblacion'] = base['poblacion'].values.copy()
            data['cve_ent'] = base['cve_ent'].values.copy()
            order = ['cve_ent','poblacion', 'nombre']+index
            data = data[order]
            data = data.reset_index(drop = True)
            data.to_csv(f'faltantes/{dtypes_states[dtype]}{str(today)[5:7]+str(today)[8:10]}.csv', encoding='ANSI',index=False)


create_data_from_patients_files()

# Ad-hoc check: hospitalized vs outpatient admissions in one line-list snapshot.
a = pd.read_csv(r'C:\Users\jupol\Desktop\TensorTut\covid\legacyCovidMexico\200615COVID19MEXICO.csv',encoding='ANSI')

a= change_df_names(a)

_hosp = a[a['patient_type']==2]
_nhosp = a[a['patient_type']==1]
_hosp = _hosp[_hosp['result'] == 1]
_nhosp =_nhosp[_nhosp['result'] == 1]

hosp = pd.to_datetime(_hosp['admission_date'])
nhosp = pd.to_datetime(_nhosp['admission_date'])

max_hosp = max(hosp)
min_hosp = min(hosp)
max_nhosp = max(nhosp)
min_nhosp = min(nhosp)

hosp_index = pd.date_range(start=min_hosp, end= max_hosp)
nhosp_index = pd.date_range(start=min_nhosp, end= max_nhosp)

patient_data_keys('patient_type')

# +
# NOTE(review): 'local_index' is only defined inside
# create_data_from_patients_files; this cell only runs after executing that
# function body interactively.
hos = []
nos = []
for date in pd.to_datetime(local_index):
    print(date)
    clear_output(wait=True)
    hos.append(list(hosp).count(date))
for date in pd.to_datetime(local_index):
    print('second',date)
    clear_output(wait=True)
    nos.append(list(nhosp).count(date))
# -

plt.close('all')
plt.rcParams['figure.figsize'] = (14,6)
plt.rcParams['figure.constrained_layout.use']=True
plt.scatter(local_index,hos, label ='sí')
plt.scatter(local_index,nos, label = 'no')
plt.xticks(rotation=90)
plt.legend()
plt.show()

# Compare a rebuilt CSV (faltantes/) against the official export for Nacional.
b = pd.read_csv(r'C:\Users\jupol\Desktop\TensorTut\covid\legacyCovidMexico\Casos_Diarios_Estado_Nacional_Hospitalizados_20200615.csv')
a = pd.read_csv(r'C:\Users\jupol\Desktop\TensorTut\covid\mex animations\faltantes\Casos_Diarios_Estado_Nacional_ambulantes_20200615.csv')

a = a[a['nombre']=='Nacional']
b = b[b['nombre']=='Nacional']

plt.plot(a.values[0][3:])
plt.plot(b.values[0][3:])

b = b[b['nombre']=='Nacional']

b.values[0][3:]

plt.scatter(b.columns[3:],b.values[0][3:])
plt.xticks(rotation=90)

plt.close('all')
plt.rcParams['figure.figsize'] = (14,6)
plt.rcParams['figure.constrained_layout.use']=True
plt.plot([str(x) for x in nhosp_index],nos)

# +
# NOTE(review): scratch cell copied out of create_data_from_patients_files;
# 'patients', 'state_data', 'today' and 'today_result' are undefined here.
patients = pd.to_datetime(patients['day_of_death'].copy())
if len(patients) == 0:
    min_date = pd.to_datetime(state_data.columns[3],dayfirst=True)
else:
    min_date = min(min(patients),pd.to_datetime(state_data.columns[3],dayfirst=True))
local_index = pd.date_range(start = min_date, end = pd.to_datetime(today, dayfirst=True))
for day in pd.to_datetime(local_index):
    try:
        today_result.append(list(patients).count(day))
    except:
        today_result.append(0)
# -

# NOTE(review): 'd_hosp' is not defined anywhere in this notebook.
plt.plot([str(x) for x in d_hosp.keys()],list(d_hosp.values()))

d_hosp

patient_data_keys('patient_type')

a.keys()
Animations for Mexico Covid19 official Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Transfer Learning # # Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using [VGGNet](https://arxiv.org/pdf/1409.1556.pdf) trained on the [ImageNet dataset](http://www.image-net.org/) as a feature extractor. Below is a diagram of the VGGNet architecture. # # <img src="assets/cnnarchitecture.jpg" width=700px> # # VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes. # # You can read more about transfer learning from [the CS231n course notes](http://cs231n.github.io/transfer-learning/#tf). # # ## Pretrained VGGNet # # We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. # # This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. 
# +
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm

vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
    raise Exception("VGG directory doesn't exist!")


class DLProgress(tqdm):
    """tqdm progress bar wired to urlretrieve's reporthook protocol."""

    last_block = 0  # cumulative block count reported so far

    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve reports a *cumulative* block count, while tqdm.update()
        # expects an increment, so convert to a delta before updating.
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


if not isfile(vgg_dir + "vgg16.npy"):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
            vgg_dir + 'vgg16.npy',
            pbar.hook)
else:
    print("Parameter file already exists!")
# -

# ## Flower power
#
# Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the [TensorFlow inception tutorial](https://www.tensorflow.org/tutorials/image_retraining).

# +
import tarfile

dataset_folder_path = 'flower_photos'


class DLProgress(tqdm):
    """Reporthook-compatible progress bar, redefined so this cell can be run
    on its own without the VGG download cell above."""

    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


if not isfile('flower_photos.tar.gz'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
        urlretrieve(
            'http://download.tensorflow.org/example_images/flower_photos.tgz',
            'flower_photos.tar.gz',
            pbar.hook)

if not isdir(dataset_folder_path):
    with tarfile.open('flower_photos.tar.gz') as tar:
        # NOTE(review): extractall() trusts member paths from a downloaded
        # archive; on Python 3.12+ prefer tar.extractall(filter='data').
        tar.extractall()
        # Bug fix: the original also called tar.close() here, which is
        # redundant inside the `with` block (the context manager closes it).
# -

# ## ConvNet Codes
#
# Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
#
# Here we're using the `vgg16` module from `tensorflow_vgg`.
# The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from [the source code](https://github.com/machrisaa/tensorflow-vgg/blob/master/vgg16.py)):
#
# ```
# self.conv1_1 = self.conv_layer(bgr, "conv1_1")
# self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
# self.pool1 = self.max_pool(self.conv1_2, 'pool1')
#
# self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
# self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
# self.pool2 = self.max_pool(self.conv2_2, 'pool2')
#
# self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
# self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
# self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
# self.pool3 = self.max_pool(self.conv3_3, 'pool3')
#
# self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
# self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
# self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
# self.pool4 = self.max_pool(self.conv4_3, 'pool4')
#
# self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
# self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
# self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
# self.pool5 = self.max_pool(self.conv5_3, 'pool5')
#
# self.fc6 = self.fc_layer(self.pool5, "fc6")
# self.relu6 = tf.nn.relu(self.fc6)
# ```
#
# So what we want are the values of the first fully connected layer, after being ReLUd (`self.relu6`). To build the network, we use
#
# ```
# with tf.Session() as sess:
#     vgg = vgg16.Vgg16()
#     input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
#     with tf.name_scope("content_vgg"):
#         vgg.build(input_)
# ```
#
# This creates the `vgg` object, then builds the graph with `vgg.build(input_)`.
# Then to get the values from the layer,
#
# ```
# feed_dict = {input_: images}
# codes = sess.run(vgg.relu6, feed_dict=feed_dict)
# ```

# +
import os

import numpy as np
import tensorflow as tf

from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
# -

data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
# Each class is a subdirectory of flower_photos (daisy, roses, ...).
classes = [each for each in contents if os.path.isdir(data_dir + each)]

# Below I'm running images through the VGG network in batches.

# +
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []

codes = None

with tf.Session() as sess:
    vgg = vgg16.Vgg16()
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with tf.name_scope("content_vgg"):
        vgg.build(input_)

    for each in classes:
        print("Starting {} images".format(each))
        class_path = data_dir + each
        files = os.listdir(class_path)
        for ii, file in enumerate(files, 1):
            # Add images to the current batch
            # utils.load_image crops the input images for us, from the center
            img = utils.load_image(os.path.join(class_path, file))
            batch.append(img.reshape((1, 224, 224, 3)))
            labels.append(each)

            # Running the batch through the network to get the codes
            if ii % batch_size == 0 or ii == len(files):
                images = np.concatenate(batch)

                feed_dict = {input_: images}
                codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)

                # Here I'm building an array of the codes
                if codes is None:
                    codes = codes_batch
                else:
                    codes = np.concatenate((codes, codes_batch))

                # Reset to start building the next batch
                batch = []
                print('{} images processed'.format(ii))
# -

# +
# write codes to file
# Bug fix: ndarray.tofile() writes raw bytes, so the file must be opened in
# binary mode ('wb'); text mode can corrupt the stream (e.g. on Windows).
with open('codes', 'wb') as f:
    codes.tofile(f)

# write labels to file
import csv

with open('labels', 'w') as f:
    writer = csv.writer(f, delimiter='\n')
    writer.writerow(labels)
# -

# ## Building the Classifier
#
# Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network.
# Below I'm going to have you do most of the work.

# +
# read codes and labels from file
import csv

with open('labels') as f:
    reader = csv.reader(f, delimiter='\n')
    labels = np.array([each for each in reader if len(each) > 0]).squeeze()

# Bug fix: the codes file holds raw float32 bytes (written with tofile), so
# it must be opened in binary mode; np.fromfile on a text-mode handle is
# unreliable.
with open('codes', 'rb') as f:
    codes = np.fromfile(f, dtype=np.float32)
    # One row of 4096 VGG features per labelled image.
    codes = codes.reshape((len(labels), -1))
# -

# ### Data prep
#
# As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
#
# > **Exercise:** From scikit-learn, use [LabelBinarizer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelBinarizer.html) to create one-hot encoded vectors from the labels.

# +
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)
# -

# Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use [`StratifiedShuffleSplit`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) from scikit-learn.
#
# You can create the splitter like so:
# ```
# ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
# ```
# Then split the data with
# ```
# splitter = ss.split(x, y)
# ```
#
# `ss.split` returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use `next(splitter)` to get the indices.
# Be sure to read the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedShuffleSplit.html) and the [user guide](http://scikit-learn.org/stable/modules/cross_validation.html#random-permutations-cross-validation-a-k-a-shuffle-split).
#
# > **Exercise:** Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.

# +
from sklearn.model_selection import StratifiedShuffleSplit

# One stratified 80/20 shuffle split; the 20% is then halved below into
# validation and test sets.
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
splits = ss.split(codes, labels_vecs)
train_idx, val_idx = next(splits)

half_val_len = len(val_idx) // 2
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]

train_x = codes[train_idx]
train_y = labels_vecs[train_idx]
val_x = codes[val_idx]
val_y = labels_vecs[val_idx]
test_x = codes[test_idx]
test_y = labels_vecs[test_idx]
# -

print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)

# If you did it right, you should see these sizes for the training sets:
#
# ```
# Train shapes (x, y): (2936, 4096) (2936, 5)
# Validation shapes (x, y): (367, 4096) (367, 5)
# Test shapes (x, y): (367, 4096) (367, 5)
# ```

# ### Classifier layers
#
# Once you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
#
# > **Exercise:** With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
# +
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])

# One hidden ReLU layer on top of the 4096-d codes, then a linear output
# layer with one unit per class; softmax is applied via the loss / predictor.
fc = tf.contrib.layers.fully_connected(inputs_, 256)

logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits)
cost = tf.reduce_mean(cross_entropy)

optimizer = tf.train.AdamOptimizer().minimize(cost)

predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# -

# ### Batches!
#
# Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.


def get_batches(x, y, n_batches=10):
    """Yield up to n_batches (X, Y) batch pairs from arrays x and y.

    The last batch is extended to absorb any remainder, so every element
    of x and y is yielded exactly once.
    """
    # Bug fix: with fewer elements than batches the original computed a
    # batch_size of 0 and range() raised "arg 3 must not be zero". Clamp
    # n_batches to the data size (and yield nothing for empty input).
    n_batches = min(n_batches, len(x))
    if n_batches == 0:
        return
    batch_size = len(x) // n_batches
    for ii in range(0, n_batches * batch_size, batch_size):
        # If we're not on the last batch, grab data with size batch_size
        if ii != (n_batches - 1) * batch_size:
            X, Y = x[ii: ii + batch_size], y[ii: ii + batch_size]
        # On the last batch, grab the rest of the data
        else:
            X, Y = x[ii:], y[ii:]
        # I love generators
        yield X, Y

# ### Training
#
# Here, we'll train the network.
#
# > **Exercise:** So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help.
epochs = 10
iteration = 0
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for x, y in get_batches(train_x, train_y):
            feed = {inputs_: x,
                    labels_: y}
            loss, _ = sess.run([cost, optimizer], feed_dict=feed)
            print("Epoch: {}/{}".format(e+1, epochs),
                  "Iteration: {}".format(iteration),
                  "Training loss: {:.5f}".format(loss))
            iteration += 1

            # Evaluate on the validation set every 5 iterations.
            if iteration % 5 == 0:
                feed = {inputs_: val_x,
                        labels_: val_y}
                val_acc = sess.run(accuracy, feed_dict=feed)
                # Bug fix: this print used `e` while the training print above
                # used `e+1`, so epoch numbers were reported inconsistently.
                print("Epoch: {}/{}".format(e+1, epochs),
                      "Iteration: {}".format(iteration),
                      "Validation Acc: {:.4f}".format(val_acc))
    saver.save(sess, "checkpoints/flowers.ckpt")

# ### Testing
#
# Below you see the test accuracy. You can also see the predictions returned for images.

with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    feed = {inputs_: test_x,
            labels_: test_y}
    test_acc = sess.run(accuracy, feed_dict=feed)
    print("Test accuracy: {:.4f}".format(test_acc))

# +
# %matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): scipy.ndimage.imread was removed in SciPy 1.2; with a modern
# SciPy install, use `from imageio import imread` instead.
from scipy.ndimage import imread
# -

# Below, feel free to choose images and see how the trained classifier predicts the flowers in them.

test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)

# Run this cell if you don't have a vgg graph built
with tf.Session() as sess:
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16.Vgg16()
    vgg.build(input_)

# +
with tf.Session() as sess:
    # Get the VGG code for the single test image.
    img = utils.load_image(test_img_path)
    img = img.reshape((1, 224, 224, 3))
    feed_dict = {input_: img}
    code = sess.run(vgg.relu6, feed_dict=feed_dict)

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    feed = {inputs_: code}
    prediction = sess.run(predicted, feed_dict=feed).squeeze()
# -

plt.imshow(test_img)

plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
Transfer_Learning_Solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # in notebook '42. Lipid metabolism-I' we tackled the production of the first few fatty acids that are needed. To prevent that notebook from becoming too large, here I will include the biosynthesis of the last fatty acid chains that our model needs to make. # # Here I will ensure the Iso FAs can be made, both even and uneven. # import cameo import pandas as pd import cobra.io from cobra import Reaction, Metabolite model = cobra.io.read_sbml_model('../model/p-thermo.xml') model_e_coli = cameo.load_model('iML1515') model_b_sub = cameo.load_model('iYO844') # ### Iso-Branched, even chained fatty acids # In this category, we need to ensure the production of iso-C15:0 and isoC17:0 fatty acids (i.e. the main chain is even length). In general, the even chain iso fatty acids are made from leucine that is converted into a primer molecule that can then go through the cycles of elongation until the final chain length is achieved. # # The leucine is converted into 3-Methylbutanoyl-CoA (aka Isovaleryl-CoA, Kegg ID C02939). Checking in the model, we already have this metaboltite: ivcoa_c. I will check it is properly converted from leucine first. Leucine should be converted into 4mop_c and then into the ivcoa_c metabolite. The conversion of 4mop_c into ivcoa_c is correctly present, the conversion of leucine into 4mop_c is also there. So now I need to add a reaction that converts ivcoa_c into ivACP, which can then enter the cycles of elongation. 
# add the ivACP_c metabolite (the iso-even-chain primer)
model.add_metabolites(Metabolite(id='ivACP_c',
                                 name='3-Methylbutanoyl-ACP',
                                 compartment='c',
                                 formula='C16H29N2O8PRS',
                                 charge=-1))
# Bug fix: metabolites elsewhere in this notebook carry SBO:0000247
# ("simple chemical"); SBO:0000176 denotes a biochemical *reaction* and
# was wrong on this metabolite.
model.metabolites.ivACP_c.annotation['sbo'] = 'SBO:0000247'

# +
# then add conversion of ivcoa into ivACP
# -

model.add_reaction(Reaction(id='IVCOATA', name='3-methylbutyryl-CoA:[acyl-carrier-protein] transferase'))
model.reactions.IVCOATA.annotation['sbo'] = 'SBO:0000176'

model.reactions.IVCOATA.add_metabolites({
    model.metabolites.ACP_c: -1,
    model.metabolites.ivcoa_c: -1,
    model.metabolites.coa_c: 1,
    model.metabolites.ivACP_c: 1,
    model.metabolites.h_c: 1
})

# Reversible reaction.
model.reactions.IVCOATA.bounds = (-1000, 1000)

cobra.io.write_sbml_model(model, '../model/p-thermo.xml')

# Now that the primer molecule is ready, we can start the elongation process with malonyl-ACP to iso-C7:0.

# add condensation of ivACP with malonyl-ACP to 3-oxo-3-methylhexanoyl-ACP
# add metabolite
model.add_metabolites(Metabolite(id='3o3mhACP_c',
                                 name='3-oxo3methylhexanoyl-ACP',
                                 compartment='c',
                                 charge=-1,
                                 formula='C18H31N2O9PRS'))
# Bug fix: SBO:0000247 (simple chemical), not SBO:0000176 (reaction).
model.metabolites.get_by_id('3o3mhACP_c').annotation['sbo'] = 'SBO:0000247'

# add reaction
model.add_reaction(Reaction(id='3O3MAS70', name='3-oxo3methylhexanoyl-[acyl-carrier-protein] synthase (iso-C7:0)'))
model.reactions.get_by_id('3O3MAS70').annotation['sbo'] = 'SBO:0000176'
# assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS70').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS70').add_metabolites({
    model.metabolites.malACP_c: -1,
    model.metabolites.ivACP_c: -1,
    model.metabolites.ACP_c: 1,
    model.metabolites.co2_c: 1,
    model.metabolites.get_by_id('3o3mhACP_c'): 1,
    model.metabolites.h_c: -1
})

# next add the NADPH-dependent reduction
# first add the 3-Hydroxy-3-methylhexanoyl-[acp] metabolite
model.add_metabolites(Metabolite(id='3h3mhACP_c',
                                 name='3-Hydroxy-3-methylhexanoyl-[acp]',
                                 compartment='c',
                                 charge=-1,
                                 formula='C18H33N2O9PRS'))
model.metabolites.get_by_id('3h3mhACP_c').annotation['sbo'] = 'SBO:0000247'

# add reaction
model.add_reaction(Reaction(id='3O3MAR70', name=' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C7:0)'))
model.reactions.get_by_id('3O3MAR70').annotation['sbo'] = 'SBO:0000176'
# assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR70').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR70').add_metabolites({
    model.metabolites.get_by_id('3o3mhACP_c'): -1,
    model.metabolites.nadph_c: -1,
    model.metabolites.nadp_c: 1,
    model.metabolites.get_by_id('3h3mhACP_c'): 1,
    model.metabolites.h_c: -1
})

# next add the t3mh2eACP_c metabolite
model.add_metabolites(Metabolite(id='t3mh2eACP_c',
                                 name='Trans-3-methylhex-2-enoyl-[acyl-carrier protein]',
                                 compartment='c',
                                 charge=-1,
                                 formula='C18H31N2O8PRS'))
model.metabolites.get_by_id('t3mh2eACP_c').annotation['sbo'] = 'SBO:0000247'

# add reaction
model.add_reaction(Reaction(id='3H3MAD70', name='3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C7:0)'))
model.reactions.get_by_id('3H3MAD70').annotation['sbo'] = 'SBO:0000176'
# assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3H3MAD70').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3H3MAD70').add_metabolites({
    model.metabolites.get_by_id('3h3mhACP_c'): -1,
    model.metabolites.h2o_c: 1,
    model.metabolites.t3mh2eACP_c: 1
})

# last step: convert to 3-methylhexanoyl-ACP which can start the next cycle again
# add both with NADH and NADPH.
# first add the 3-methylhexanoyl-ACP metabolite
model.add_metabolites(Metabolite(id='3mhexACP_c',
                                 name='3-methyl-hexanoyl-ACP(iso-C7:0ACP)',
                                 compartment='c',
                                 charge=-1,
                                 formula='C18H33N2O8PRS'))
model.metabolites.get_by_id('3mhexACP_c').annotation['sbo'] = 'SBO:0000247'

# add reaction with NADH: E3MAR70x
model.add_reaction(Reaction(id='E3MAR70x', name='3-Methyl-Hexanoyl-[acp]:NAD+ trans-2-oxidoreductase'))
model.reactions.E3MAR70x.annotation['sbo'] = 'SBO:0000176'
# assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR70x.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR70x.add_metabolites({
    model.metabolites.t3mh2eACP_c: -1,
    model.metabolites.nadh_c: -1,
    model.metabolites.nad_c: 1,
    model.metabolites.get_by_id('3mhexACP_c'): 1,
    model.metabolites.h_c: -1
})

# add reaction with NADPH: E3MAR70y (comment fixed: original said NADH,
# but the reaction below consumes nadph_c).
# Bug fix: the human-readable name said "NADp+"; corrected to "NADP+".
model.add_reaction(Reaction(id='E3MAR70y', name='3-Methyl-Hexanoyl-[acp]:NADP+ trans-2-oxidoreductase'))
model.reactions.E3MAR70y.annotation['sbo'] = 'SBO:0000176'
# assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR70y.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR70y.add_metabolites({
    model.metabolites.t3mh2eACP_c: -1,
    model.metabolites.nadph_c: -1,
    model.metabolites.nadp_c: 1,
    model.metabolites.get_by_id('3mhexACP_c'): 1,
    model.metabolites.h_c: -1
})

# save&commit
cobra.io.write_sbml_model(model, '../model/p-thermo.xml')

# Finished to iso-C7:0. Now onto iso-C9:0.
# --- iso-C9:0 elongation cycle ---
# Condensation of 3-methylhexanoyl-ACP (iso-C7:0) with malonyl-ACP to
# 3-oxo-3-methyloctanoyl-ACP.
#add metabolite
model.add_metabolites(Metabolite(id='3o3moACP_c', name = '3-oxo3methyloctanoyl-ACP', compartment = 'c', charge =-1, formula = 'C20H35N2O9PRS'))
# FIX: metabolites carry SBO:0000247 (simple chemical) everywhere else in this
# notebook; SBO:0000176 (biochemical reaction) here was a copy-paste slip.
model.metabolites.get_by_id('3o3moACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAS90', name = '3-oxo3methyloctanoyl-[acyl-carrier-protein] synthase (iso-C9:0)'))
model.reactions.get_by_id('3O3MAS90').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS90').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS90').add_metabolites({
    model.metabolites.malACP_c:-1,
    model.metabolites.get_by_id('3mhexACP_c'):-1,
    model.metabolites.ACP_c:1,
    model.metabolites.co2_c:1,
    model.metabolites.get_by_id('3o3moACP_c'):1,
    model.metabolites.h_c:-1
})
# Ketoreduction with NADPH.
#first add 3-Hydroxy-3-methyloctanoyl-[acp] met
model.add_metabolites(Metabolite(id='3h3moACP_c', name = '3-Hydroxy-3-methyloctanoyl-[acp]', compartment = 'c', charge = -1, formula = 'C20H37N2O9PRS'))
model.metabolites.get_by_id('3h3moACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAR90', name = ' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C9:0)'))
model.reactions.get_by_id('3O3MAR90').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR90').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR90').add_metabolites({
    model.metabolites.get_by_id('3o3moACP_c'):-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3h3moACP_c'):1,
    model.metabolites.h_c:-1
})
# Dehydration to trans-3-methyloct-2-enoyl-ACP.
#next add the t3mo2eACP_c metabolite
model.add_metabolites(Metabolite(id='t3mo2eACP_c', name = 'Trans-3-methyloct-2-enoyl-[acyl-carrier protein]', compartment = 'c', charge = -1, formula = 'C20H35N2O8PRS'))
model.metabolites.get_by_id('t3mo2eACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3H3MAD90', name = '3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C9:0)'))
model.reactions.get_by_id('3H3MAD90').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3H3MAD90').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3H3MAD90').add_metabolites({
    model.metabolites.get_by_id('3h3moACP_c'):-1,
    model.metabolites.h2o_c:1,
    model.metabolites.t3mo2eACP_c:1
})
#last step: convert to 3-methyloctanoyl-ACP which can start the next cycle again
#add both with NADH (x) and NADPH (y) cofactor variants.
#first add the 3-methyloctanoyl-ACP metabolite
model.add_metabolites(Metabolite(id='3moctACP_c', name = '3-methyl-octanoyl-ACP(iso-C9:0ACP)', compartment = 'c', charge = -1, formula = 'C20H37N2O8PRS' ))
model.metabolites.get_by_id('3moctACP_c').annotation['sbo'] = 'SBO:0000247'
# NADH-dependent enoyl reductase: E3MAR90x
model.add_reaction(Reaction(id='E3MAR90x', name = '3-Methyl-Octanoyl-[acp]:NAD+ trans-2-oxidoreductase'))
model.reactions.E3MAR90x.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR90x.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR90x.add_metabolites({
    model.metabolites.t3mo2eACP_c:-1,
    model.metabolites.nadh_c:-1,
    model.metabolites.nad_c:1,
    model.metabolites.get_by_id('3moctACP_c'):1,
    model.metabolites.h_c:-1
})
# NADPH-dependent enoyl reductase: E3MAR90y (comment previously said NADH)
model.add_reaction(Reaction(id='E3MAR90y', name = '3-Methyl-Octanoyl-[acp]:NADp+ trans-2-oxidoreductase'))
model.reactions.E3MAR90y.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR90y.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR90y.add_metabolites({
    model.metabolites.t3mo2eACP_c:-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3moctACP_c'):1,
    model.metabolites.h_c:-1
})
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# Finished to iso-C9:0, now onto iso-C11:0.

# --- iso-C11:0 elongation cycle ---
# Condensation of 3-methyloctanoyl-ACP with malonyl-ACP to 3-oxo-3-methyl-decanoyl-ACP.
#add metabolite
model.add_metabolites(Metabolite(id='3o3mdACP_c', name = '3-oxo3methyldecanoyl-ACP', compartment = 'c', charge =-1, formula = 'C22H39N2O9PRS'))
# FIX: metabolite SBO term is SBO:0000247 (simple chemical), not SBO:0000176
# (biochemical reaction) — consistent with every other metabolite in this file.
model.metabolites.get_by_id('3o3mdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAS110', name = '3-oxo3methyldecanoyl-[acyl-carrier-protein] synthase (iso-C11:0)'))
model.reactions.get_by_id('3O3MAS110').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS110').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS110').add_metabolites({
    model.metabolites.malACP_c:-1,
    model.metabolites.get_by_id('3moctACP_c'):-1,
    model.metabolites.ACP_c:1,
    model.metabolites.co2_c:1,
    model.metabolites.get_by_id('3o3mdACP_c'):1,
    model.metabolites.h_c:-1
})
# Ketoreduction with NADPH.
#first add 3-Hydroxy-3-methyldecanoyl-[acp] met
model.add_metabolites(Metabolite(id='3h3mdACP_c', name = '3-Hydroxy-3-methyldecanoyl-[acp]', compartment = 'c', charge = -1, formula = 'C22H41N2O9PRS'))
model.metabolites.get_by_id('3h3mdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAR110', name = ' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C11:0)'))
model.reactions.get_by_id('3O3MAR110').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR110').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR110').add_metabolites({
    model.metabolites.get_by_id('3o3mdACP_c'):-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3h3mdACP_c'):1,
    model.metabolites.h_c:-1
})
# Dehydration to trans-3-methyldec-2-enoyl-ACP.
#next add the t3md2eACP_c metabolite
model.add_metabolites(Metabolite(id='t3md2eACP_c', name = 'Trans-3-methyldec-2-enoyl-[acyl-carrier protein]', compartment = 'c', charge = -1, formula = 'C22H39N2O8PRS'))
model.metabolites.get_by_id('t3md2eACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3H3MAD110', name = '3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C11:0)'))
model.reactions.get_by_id('3H3MAD110').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3H3MAD110').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3H3MAD110').add_metabolites({
    model.metabolites.get_by_id('3h3mdACP_c'):-1,
    model.metabolites.h2o_c:1,
    model.metabolites.t3md2eACP_c:1
})
#last step: convert to 3-methyldecanoyl-ACP which can start the next cycle again
#add both with NADH and NADPH.
#first add the 3-methyldecanoyl-ACP metabolite
model.add_metabolites(Metabolite(id='3mdecACP_c', name = '3-methyl-decanoyl-ACP(iso-C11:0ACP)', compartment = 'c', charge = -1, formula = 'C22H41N2O8PRS' ))
model.metabolites.get_by_id('3mdecACP_c').annotation['sbo'] = 'SBO:0000247'
# NADH-dependent enoyl reductase: E3MAR110x
model.add_reaction(Reaction(id='E3MAR110x', name = '3-Methyl-Decanoyl-[acp]:NAD+ trans-2-oxidoreductase'))
model.reactions.E3MAR110x.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR110x.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR110x.add_metabolites({
    model.metabolites.t3md2eACP_c:-1,
    model.metabolites.nadh_c:-1,
    model.metabolites.nad_c:1,
    model.metabolites.get_by_id('3mdecACP_c'):1,
    model.metabolites.h_c:-1
})
# Spot-check of mass balance (return value intentionally discarded).
model.reactions.E3MAR110x.check_mass_balance()
# NADPH-dependent enoyl reductase: E3MAR110y
model.add_reaction(Reaction(id='E3MAR110y', name = '3-Methyl-Decanoyl-[acp]:NADp+ trans-2-oxidoreductase'))
model.reactions.E3MAR110y.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR110y.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR110y.add_metabolites({
    model.metabolites.t3md2eACP_c:-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3mdecACP_c'):1,
    model.metabolites.h_c:-1
})
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# Finished to isoC11:0. Now onto isoC13:0.

# --- iso-C13:0 elongation cycle ---
# Condensation of 3-methyldecanoyl-ACP with malonyl-ACP to 3-oxo-3-methyl-dodecanoyl-ACP.
#add metabolite
model.add_metabolites(Metabolite(id='3o3mddACP_c', name = '3-oxo3methyldodecanoyl-ACP', compartment = 'c', charge =-1, formula = 'C24H43N2O9PRS'))
# FIX: metabolite SBO term corrected from SBO:0000176 to SBO:0000247.
model.metabolites.get_by_id('3o3mddACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAS130', name = '3-oxo3methyldodecanoyl-[acyl-carrier-protein] synthase (iso-C13:0)'))
model.reactions.get_by_id('3O3MAS130').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS130').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS130').add_metabolites({
    model.metabolites.malACP_c:-1,
    model.metabolites.get_by_id('3mdecACP_c'):-1,
    model.metabolites.ACP_c:1,
    model.metabolites.co2_c:1,
    model.metabolites.get_by_id('3o3mddACP_c'):1,
    model.metabolites.h_c:-1
})
# Ketoreduction with NADPH.
#first add 3-Hydroxy-3-methyldodecanoyl-[acp] met
model.add_metabolites(Metabolite(id='3h3mddACP_c', name = '3-Hydroxy-3-methyldodecanoyl-[acp]', compartment = 'c', charge = -1, formula = 'C24H45N2O9PRS'))
model.metabolites.get_by_id('3h3mddACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAR130', name = ' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C13:0)'))
model.reactions.get_by_id('3O3MAR130').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR130').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR130').add_metabolites({
    model.metabolites.get_by_id('3o3mddACP_c'):-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3h3mddACP_c'):1,
    model.metabolites.h_c:-1
})
# Dehydration to trans-3-methyldodec-2-enoyl-ACP.
#next add the t3mdd2eACP_c metabolite
model.add_metabolites(Metabolite(id='t3mdd2eACP_c', name = 'Trans-3-methyldodec-2-enoyl-[acyl-carrier protein]', compartment = 'c', charge = -1, formula = 'C24H43N2O8PRS'))
model.metabolites.get_by_id('t3mdd2eACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3H3MAD130', name = '3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C13:0)'))
model.reactions.get_by_id('3H3MAD130').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3H3MAD130').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3H3MAD130').add_metabolites({
    model.metabolites.get_by_id('3h3mddACP_c'):-1,
    model.metabolites.h2o_c:1,
    model.metabolites.t3mdd2eACP_c:1
})
#last step: convert to 3-methyldodecanoyl-ACP which can start the next cycle again
#add both with NADH and NADPH.
#first add the 3-methyldodecanoyl-ACP metabolite
model.add_metabolites(Metabolite(id='3mddecACP_c', name = '3-methyl-dodecanoyl-ACP(iso-C13:0ACP)', compartment = 'c', charge = -1, formula = 'C24H45N2O8PRS' ))
model.metabolites.get_by_id('3mddecACP_c').annotation['sbo'] = 'SBO:0000247'
# NADH-dependent enoyl reductase: E3MAR130x
model.add_reaction(Reaction(id='E3MAR130x', name = '3-Methyl-Dodecanoyl-[acp]:NAD+ trans-2-oxidoreductase'))
model.reactions.E3MAR130x.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR130x.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR130x.add_metabolites({
    model.metabolites.t3mdd2eACP_c:-1,
    model.metabolites.nadh_c:-1,
    model.metabolites.nad_c:1,
    model.metabolites.get_by_id('3mddecACP_c'):1,
    model.metabolites.h_c:-1
})
# NADPH-dependent enoyl reductase: E3MAR130y
model.add_reaction(Reaction(id='E3MAR130y', name = '3-Methyl-Dodecanoyl-[acp]:NADp+ trans-2-oxidoreductase'))
model.reactions.E3MAR130y.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR130y.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR130y.add_metabolites({
    model.metabolites.t3mdd2eACP_c:-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3mddecACP_c'):1,
    model.metabolites.h_c:-1
})
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# Finished up to iso-C13:0. Now onto isoC15:0.

# --- iso-C15:0 elongation cycle, condensation + ketoreduction head ---
#add condensation of 3mddecACP with malonyl ACP to 3-oxo-3-methyl-tetradecanoyl-ACP
#add metabolite
model.add_metabolites(Metabolite(id='3o3mtdACP_c', name = '3-oxo3methyltetradecanoyl-ACP', compartment = 'c', charge =-1, formula = 'C26H47N2O9PRS'))
# FIX: metabolite SBO term corrected from SBO:0000176 to SBO:0000247.
model.metabolites.get_by_id('3o3mtdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAS150', name = '3-oxo3methyltetradecanoyl-[acyl-carrier-protein] synthase (iso-C15:0)'))
model.reactions.get_by_id('3O3MAS150').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS150').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS150').add_metabolites({
    model.metabolites.malACP_c:-1,
    model.metabolites.get_by_id('3mddecACP_c'):-1,
    model.metabolites.ACP_c:1,
    model.metabolites.co2_c:1,
    model.metabolites.get_by_id('3o3mtdACP_c'):1,
    model.metabolites.h_c:-1
})
#next add reaction with NADPH
#first add 3-Hydroxy-3-methyltetradecanoyl-[acp] met
model.add_metabolites(Metabolite(id='3h3mtdACP_c', name = '3-Hydroxy-3-methyltetradecanoyl-[acp]', compartment = 'c', charge = -1, formula = 'C26H49N2O9PRS'))
model.metabolites.get_by_id('3h3mtdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAR150', name = ' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C15:0)'))
model.reactions.get_by_id('3O3MAR150').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR150').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR150').add_metabolites({ model.metabolites.get_by_id('3o3mtdACP_c'):-1, model.metabolites.nadph_c:-1, model.metabolites.nadp_c:1, model.metabolites.get_by_id('3h3mtdACP_c'):1, model.metabolites.h_c:-1 }) #next add the t3mtd2eACP_c metabolite model.add_metabolites(Metabolite(id='t3mtd2eACP_c', name = 'Trans-3-methyltetradec-2-enoyl-[acyl-carrier protein]', compartment = 'c', charge = -1, formula = 'C26H47N2O8PRS')) model.metabolites.get_by_id('t3mtd2eACP_c').annotation['sbo'] = 'SBO:0000247' #add reaction model.add_reaction(Reaction(id='3H3MAD150', name = '3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C15:0)')) model.reactions.get_by_id('3H3MAD150').annotation['sbo'] = 'SBO:0000176' #assume GPR is the same FA synthase: RTMO12345 model.reactions.get_by_id('3H3MAD150').gene_reaction_rule = 'RTMO12345' model.reactions.get_by_id('3H3MAD150').add_metabolites({ model.metabolites.get_by_id('3h3mtdACP_c'):-1, model.metabolites.h2o_c:1, model.metabolites.t3mtd2eACP_c:1 }) #last step: convert to 3-methyltetradecanoyl-ACP which can start the next cycle again #add both with NADH and NADPH. 
#first add the 3-methyltetradecanoyl-ACP metabolite model.add_metabolites(Metabolite(id='3mtdecACP_c', name = '3-methyl-tetradecanoyl-ACP(iso-C15:0ACP)', compartment = 'c', charge = -1, formula = 'C26H49N2O8PRS' )) model.metabolites.get_by_id('3mtdecACP_c').annotation['sbo'] = 'SBO:0000247' #add reaction with NADH: E3MAR1300x model.add_reaction(Reaction(id='E3MAR150x', name = '3-Methyl-Tetradecanoyl-[acp]:NAD+ trans-2-oxidoreductase')) model.reactions.E3MAR150x.annotation['sbo'] = 'SBO:0000176' #assume GPR is the same FA synthase: RTMO12345 model.reactions.E3MAR150x.gene_reaction_rule = 'RTMO12345' model.reactions.E3MAR150x.add_metabolites({ model.metabolites.t3mtd2eACP_c:-1, model.metabolites.nadh_c:-1, model.metabolites.nad_c:1, model.metabolites.get_by_id('3mtdecACP_c'):1, model.metabolites.h_c:-1 }) #add reaction with NADH: E3MAR150y model.add_reaction(Reaction(id='E3MAR150y', name = '3-Methyl-Tetradecanoyl-[acp]:NADp+ trans-2-oxidoreductase')) model.reactions.E3MAR150y.annotation['sbo'] = 'SBO:0000176' #assume GPR is the same FA synthase: RTMO12345 model.reactions.E3MAR150y.gene_reaction_rule = 'RTMO12345' model.reactions.E3MAR150y.add_metabolites({ model.metabolites.t3mtd2eACP_c:-1, model.metabolites.nadph_c:-1, model.metabolites.nadp_c:1, model.metabolites.get_by_id('3mtdecACP_c'):1, model.metabolites.h_c:-1 }) #save&commit cobra.io.write_sbml_model(model,'../model/p-thermo.xml') # Finished to iso-C15:0. Now onto the last, iso-C17:0. 
# --- iso-C17:0 elongation cycle ---
# Condensation of 3-methyltetradecanoyl-ACP with malonyl-ACP to
# 3-oxo-3-methyl-hexadecanoyl-ACP.
#add metabolite
model.add_metabolites(Metabolite(id='3o3mhdACP_c', name = '3-oxo3methylhexadecanoyl-ACP', compartment = 'c', charge =-1, formula = 'C28H51N2O9PRS'))
# FIX: metabolite SBO term is SBO:0000247 (simple chemical), not SBO:0000176
# (biochemical reaction) — consistent with every other metabolite in this file.
model.metabolites.get_by_id('3o3mhdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAS170', name = '3-oxo3methylhexadecanoyl-[acyl-carrier-protein] synthase (iso-C17:0)'))
model.reactions.get_by_id('3O3MAS170').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAS170').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAS170').add_metabolites({
    model.metabolites.malACP_c:-1,
    model.metabolites.get_by_id('3mtdecACP_c'):-1,
    model.metabolites.ACP_c:1,
    model.metabolites.co2_c:1,
    model.metabolites.get_by_id('3o3mhdACP_c'):1,
    model.metabolites.h_c:-1
})
# Ketoreduction with NADPH.
#first add 3-Hydroxy-3-methylhexadecanoyl-[acp] met
model.add_metabolites(Metabolite(id='3h3mhdACP_c', name = '3-Hydroxy-3-methylhexadecanoyl-[acp]', compartment = 'c', charge = -1, formula = 'C28H53N2O9PRS'))
model.metabolites.get_by_id('3h3mhdACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3O3MAR170', name = ' 3-oxo-3-methyl-acyl-[acyl-carrier-protein] reductase (iso-C17:0)'))
model.reactions.get_by_id('3O3MAR170').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3O3MAR170').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3O3MAR170').add_metabolites({
    model.metabolites.get_by_id('3o3mhdACP_c'):-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3h3mhdACP_c'):1,
    model.metabolites.h_c:-1
})
# Dehydration to trans-3-methylhexadec-2-enoyl-ACP.
#next add the t3mhd2eACP_c metabolite
model.add_metabolites(Metabolite(id='t3mhd2eACP_c', name = 'Trans-3-methylhexadec-2-enoyl-[acyl-carrier protein]', compartment = 'c', charge = -1, formula = 'C28H51N2O8PRS'))
model.metabolites.get_by_id('t3mhd2eACP_c').annotation['sbo'] = 'SBO:0000247'
#add reaction
model.add_reaction(Reaction(id='3H3MAD170', name = '3-hydroxy-3-methylacyl-[acyl-carrier-protein] dehydratase (iso-C17:0)'))
model.reactions.get_by_id('3H3MAD170').annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.get_by_id('3H3MAD170').gene_reaction_rule = 'RTMO12345'
model.reactions.get_by_id('3H3MAD170').add_metabolites({
    model.metabolites.get_by_id('3h3mhdACP_c'):-1,
    model.metabolites.h2o_c:1,
    model.metabolites.t3mhd2eACP_c:1
})
#last step: convert to 3-methylhexadecanoyl-ACP
#add both with NADH and NADPH.
#first add the 3-methylhexadecanoyl-ACP metabolite
model.add_metabolites(Metabolite(id='3mhdecACP_c', name = '3-methyl-hexadecanoyl-ACP(iso-C17:0ACP)', compartment = 'c', charge = -1, formula = 'C28H53N2O8PRS' ))
model.metabolites.get_by_id('3mhdecACP_c').annotation['sbo'] = 'SBO:0000247'
# NADH-dependent enoyl reductase: E3MAR170x
model.add_reaction(Reaction(id='E3MAR170x', name = '3-Methyl-Hexadecanoyl-[acp]:NAD+ trans-2-oxidoreductase'))
model.reactions.E3MAR170x.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR170x.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR170x.add_metabolites({
    model.metabolites.t3mhd2eACP_c:-1,
    model.metabolites.nadh_c:-1,
    model.metabolites.nad_c:1,
    model.metabolites.get_by_id('3mhdecACP_c'):1,
    model.metabolites.h_c:-1
})
# NADPH-dependent enoyl reductase: E3MAR170y
model.add_reaction(Reaction(id='E3MAR170y', name = '3-Methyl-Hexadecanoyl-[acp]:NADp+ trans-2-oxidoreductase'))
model.reactions.E3MAR170y.annotation['sbo'] = 'SBO:0000176'
#assume GPR is the same FA synthase: RTMO12345
model.reactions.E3MAR170y.gene_reaction_rule = 'RTMO12345'
model.reactions.E3MAR170y.add_metabolites({
    model.metabolites.t3mhd2eACP_c:-1,
    model.metabolites.nadph_c:-1,
    model.metabolites.nadp_c:1,
    model.metabolites.get_by_id('3mhdecACP_c'):1,
    model.metabolites.h_c:-1
})
#save&commit
cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# Finished until iso-C17:0.

# Just check the model prediction is still ok.

model.optimize()

# ## Iso-Branched, odd chained fatty acids

# In this category, we need to ensure the production of iso-C14:0 and iso-C16:0 fatty acids (i.e. the main chain is odd length). In general, the odd chain iso fatty acids are made from valine that is converted into a primer molecule that can then go through the cycles of elongation until the final chain length is achieved.
#
# In the case here, the valine is converted into isobutryl-coa, which gets bound to an ACP and can start cycles of elongation. the ibcoa_c metabolite is already present. I will check it is properly made from valine, and then make its conversion into ibACP_c, before the elongation cycles start.

# Seems the conversion from Valine is possible and correct, and so I will just add the conversion of ibcoa_c into ibACP_c. And from there start the elongation cycles.

#add ibACP_c metabolite
model.add_metabolites(Metabolite(id='ibACP_c', name = '2-Methylpropanoyl-ACP', compartment= 'c', formula = 'C15H27N2O8PRS', charge = -1))
# FIX: metabolite SBO term corrected from SBO:0000176 to SBO:0000247.
model.metabolites.ibACP_c.annotation['sbo'] = 'SBO:0000247'

# +
#then add conversion of ibcoa into ibACP
# -

model.add_reaction(Reaction(id='IBCOATA', name = 'isobutyryl-CoA:[acyl-carrier-protein] transferase'))
model.reactions.IBCOATA.annotation['sbo'] = 'SBO:0000176'
# ACP + isobutyryl-CoA <=> CoA + isobutyryl-ACP + H+
model.reactions.IBCOATA.add_metabolites({
    model.metabolites.ACP_c:-1,
    model.metabolites.ibcoa_c:-1,
    model.metabolites.coa_c:1,
    model.metabolites.ibACP_c:1,
    model.metabolites.h_c:1
})
# Reversible, like the other CoA:ACP transferases.
model.reactions.IBCOATA.bounds = (-1000,1000)

cobra.io.write_sbml_model(model,'../model/p-thermo.xml')

# Finished the primer molecule, now it can condense with Malonyl-CoA to give iso-C6:0.
# Iso-branched-chain (2-methyl) fatty acid elongation: iso-C6:0 -> iso-C16:0.
#
# Each elongation cycle extends the acyl chain by a C2 unit from malonyl-ACP
# through the canonical four FAS II steps:
#   1. 3OMASxx  condensation: primer-ACP + malonyl-ACP -> 3-oxo-2-methyl-acyl-ACP + ACP + CO2
#   2. 3OMARxx  keto reduction with NADPH: 3-oxo -> 3-hydroxy
#   3. 3HMADxx  dehydration: 3-hydroxy -> trans-2-enoyl + H2O
#   4. EMARxxx / EMARxxy  enoyl reduction with NADH (x) or NADPH (y) -> saturated acyl-ACP
#
# The six cycles were previously written out by hand and differed only in
# chain length, so they are generated here from a single table. Relative to
# the hand-written version this also fixes:
#   * the 3-oxo intermediates carried the *reaction* SBO term SBO:0000176;
#     all metabolites now get SBO:0000247 (simple chemical), as elsewhere
#     in the model, and all reactions get SBO:0000176;
#   * display-name typos ('iso-C65:0ACP' on the C6 product, '(iso-C15:0)'
#     on the C14 synthase) and inconsistent hyphenation;
#   * the redundant write + immediate re-read of the model after the first
#     cycle (the model is still saved after every completed cycle).

# one row per cycle: (reaction id suffix, metabolite id stem, acyl chain name,
#                     enoyl name stem, iso label, C2 units added before this cycle)
elongation_cycles = [
    ('60', 'pent', 'pentanoyl', 'Pent', 'iso-C6:0', 0),
    ('80', 'hept', 'heptanoyl', 'Hept', 'iso-C8:0', 1),
    ('100', 'non', 'nonanoyl', 'Non', 'iso-C10:0', 2),
    ('120', 'udec', 'undecanoyl', 'Undec', 'iso-C12:0', 3),
    ('140', 'tridec', 'tridecanoyl', 'Tridec', 'iso-C14:0', 4),
    ('160', 'pdec', 'pentadecanoyl', 'Pentadec', 'iso-C16:0', 5),
]


def add_acp_metabolite(met_id, name, formula):
    #helper: one cytosolic ACP-bound intermediate, charge -1, SBO simple chemical
    model.add_metabolites(Metabolite(id=met_id, name=name, compartment='c',
                                     charge=-1, formula=formula))
    model.metabolites.get_by_id(met_id).annotation['sbo'] = 'SBO:0000247'


def add_fas_reaction(rxn_id, name, stoichiometry):
    #helper: one FAS II reaction; assume GPR is the same FA synthase: RTMO12345
    model.add_reaction(Reaction(id=rxn_id, name=name))
    rxn = model.reactions.get_by_id(rxn_id)
    rxn.annotation['sbo'] = 'SBO:0000176'
    rxn.gene_reaction_rule = 'RTMO12345'
    rxn.add_metabolites(stoichiometry)


#the iso-C6:0 cycle is primed with isobutyryl-ACP (2-methyl-prop-ACP)
primer = model.metabolites.ibACP_c

for num, stem, chain, enoyl_stem, iso, k in elongation_cycles:
    #backbone of the intermediates: C(17+2k) H(29+4k, +2 when reduced) N2PRS,
    #O9 for the 3-oxo/3-hydroxy species and O8 after loss of water
    n_c = 17 + 2 * k
    n_h = 29 + 4 * k

    oxo_id = '3o2m{}ACP_c'.format(stem)
    hydroxy_id = '3h2m{}ACP_c'.format(stem)
    enoyl_id = 't2m{}2eACP_c'.format(stem)
    acyl_id = '2m{}ACP_c'.format(stem)

    #1. condensation with malonyl-ACP, releasing CO2 and free ACP
    add_acp_metabolite(oxo_id, '3-oxo-2-methyl-{}-ACP'.format(chain),
                       'C{}H{}O9N2PRS'.format(n_c, n_h))
    add_fas_reaction(
        '3OMAS' + num,
        '3-oxo-2-methyl-{}-[acyl-carrier-protein] synthase ({})'.format(chain, iso),
        {model.metabolites.malACP_c: -1,
         primer: -1,
         model.metabolites.ACP_c: 1,
         model.metabolites.co2_c: 1,
         model.metabolites.get_by_id(oxo_id): 1,
         model.metabolites.h_c: -1})

    #2. keto reduction with NADPH
    add_acp_metabolite(hydroxy_id, '3-Hydroxy-2-methyl-{}-[acp]'.format(chain),
                       'C{}H{}O9N2PRS'.format(n_c, n_h + 2))
    add_fas_reaction(
        '3OMAR' + num,
        '3-oxo-2-methylacyl-[acyl-carrier-protein] reductase ({})'.format(iso),
        {model.metabolites.get_by_id(oxo_id): -1,
         model.metabolites.nadph_c: -1,
         model.metabolites.nadp_c: 1,
         model.metabolites.get_by_id(hydroxy_id): 1,
         model.metabolites.h_c: -1})

    #3. dehydration to the trans-2-enoyl intermediate
    add_acp_metabolite(
        enoyl_id,
        'Trans-2-methyl-{}-2-enoyl-[acyl-carrier protein]'.format(enoyl_stem),
        'C{}H{}O8N2PRS'.format(n_c, n_h))
    add_fas_reaction(
        '3HMAD' + num,
        '3-hydroxy-2-methyl-acyl-[acyl-carrier-protein] dehydratase ({})'.format(iso),
        {model.metabolites.get_by_id(hydroxy_id): -1,
         model.metabolites.h2o_c: 1,
         model.metabolites.get_by_id(enoyl_id): 1})

    #4. enoyl reduction; both the NADH (x) and the NADPH (y) variant are added
    add_acp_metabolite(acyl_id, '2-methyl-{}-ACP({}ACP)'.format(chain, iso),
                       'C{}H{}O8N2PRS'.format(n_c, n_h + 2))
    add_fas_reaction(
        'EMAR{}x'.format(num),
        '2-methyl-{}-[acp]:NAD+ trans-2-oxidoreductase'.format(chain),
        {model.metabolites.get_by_id(enoyl_id): -1,
         model.metabolites.nadh_c: -1,
         model.metabolites.nad_c: 1,
         model.metabolites.get_by_id(acyl_id): 1,
         model.metabolites.h_c: -1})
    add_fas_reaction(
        'EMAR{}y'.format(num),
        '2-methyl-{}-[acp]:NADP+ trans-2-oxidoreductase'.format(chain),
        {model.metabolites.get_by_id(enoyl_id): -1,
         model.metabolites.nadph_c: -1,
         model.metabolites.nadp_c: 1,
         model.metabolites.get_by_id(acyl_id): 1,
         model.metabolites.h_c: -1})

    #the saturated product primes the next elongation cycle
    primer = model.metabolites.get_by_id(acyl_id)

    #save & commit after every completed cycle
    cobra.io.write_sbml_model(model, '../model/p-thermo.xml')

# finished:
# - isoC15:0: met 3mtdecACP_c
# - isoC17:0: met 3mhdecACP_c
# - iso C14:0: met 2mtridecACP_c
# - iso C16:0: met 2mpdecACP_c

#check no change in simulation
model.optimize()
notebooks/45. Lipid Metabolism - II.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fit a straight line z = alpha + beta_x * x with a fractional intrinsic
# scatter term eps: maximum likelihood first, then MCMC sampling with emcee.
#
# BUGFIXES relative to the original notebook:
#   * lnprob(p, x, z) called lnlike(p, x, z) WITHOUT the required `zerr`
#     argument -> TypeError as soon as emcee evaluated the posterior. `zerr`
#     is now threaded through lnprob and the sampler's `args`.
#   * `-inf`, `isfinite`, `random.randn` and `median` relied on a later
#     `from numpy import *`; they are now explicit `np.` calls and the
#     star-import is gone.

from astropy.table import Table, Column
import numpy as np
from numpy.linalg import inv
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import os
import urllib.request
import corner

os.chdir("/Users/<NAME>/Documents/Python Scripts")
os.getcwd()

# +
# Hogg, Bovy & Lang Table 1: ID, x, y, sigma_y, sigma_x, rho_xy
Table1 = np.loadtxt(fname="/Users/<NAME>/Documents/Python Scripts/Table1.txt")

column_names = ['ID', 'x', 'y', 'Sigma y', 'Sigma x', 'Rho xy']
#Table1 = pd.DataFrame(Table1, columns=column_names)
# -

print(Table1)

# +
# least-squares solution, used below as the optimiser's starting point
b = 213.2734919759614   # intercept (alpha)
m = 1.0767475241683222  # slope (beta_x)

print(m, b)


# +
def f(t):
    """Straight-line model evaluated with the least-squares parameters."""
    return t * m + b


t3 = np.arange(0.0, 300.0, 0.1)
t4 = np.arange(0.0, 300.0, 0.02)
# -

error = Table1[:, 3]
plt.errorbar(Table1[:, 1], Table1[:, 2], yerr=error, fmt='ko', capsize=4)
plt.plot(t4, f(t4), 'k')

np.size(Table1[:, 0])   # number of data points

Table1[:, 1]            # x "true"

b                       # alpha "true"

m                       # beta_x "true"

np.mean(Table1[:, 3])   # eps "true"

Table1[:, 2]            # z obs

#f(t)                   # z "true"

plt.figure(figsize=(12, 6))
plt.subplot(1, 1, 1)    # 1 = full size, 2 = half
plt.scatter(Table1[:, 1], Table1[:, 2], marker='o')
plt.xlabel('X')
plt.ylabel('Z')


# +
def lnprior(p):
    """Log-prior: flat in alpha and beta_x; eps must be strictly positive."""
    # The parameters are stored as a vector of values, so unpack them
    alpha, betax, eps = p
    # We're using only uniform priors, and only eps has a lower bound
    if eps <= 0:
        return -np.inf
    return 0


def lnlike(p, x, z, zerr):
    """Log-likelihood: Gaussian errors plus fractional intrinsic scatter.

    The variance of each point is zerr^2 + (exp(eps) * model)^2, i.e. the
    scatter scales with the model value.
    """
    alpha, betax, eps = p
    model = alpha + betax * x
    # the likelihood is a sum over independent normal distributions
    denom = np.power(zerr, 2) + np.power(np.exp(eps), 2) * np.power(model, 2)
    lp = -0.5 * np.sum(np.power((z - model), 2) / denom
                       + np.log(2 * np.pi * denom))
    return lp


def lnprob(p, x, z, zerr):
    """Log-posterior = log-prior + log-likelihood (zerr now passed through)."""
    lp = lnprior(p)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(p, x, z, zerr)
# -

import scipy.optimize as opt

nll = lambda *args: -lnlike(*args)
print(nll)

result = opt.minimize(nll, [b, m, np.mean(Table1[:, 3])],
                      args=(Table1[:, 1], Table1[:, 2], Table1[:, 3]))
print(result['x'])

Nwalker, Ndim = 50, 3
# start the walkers in a tiny Gaussian ball around the ML solution
p0 = [result['x'] + 1.e-4 * np.random.randn(Ndim) for i in range(Nwalker)]

import emcee

# zerr is included in args so lnprob receives all four of its arguments
sampler = emcee.EnsembleSampler(Nwalker, Ndim, lnprob,
                                args=(Table1[:, 1], Table1[:, 2], Table1[:, 3]))
pos, prob, state = sampler.run_mcmc(p0, 500)

# +
#res = plot(sampler.chain[:, :, 0].T, '-', color='k', alpha=0.3)
#axhline(alpha_true, color='blue')
# -

# discard the burn-in, then run the production chain
sampler.reset()
pos, prob, state = sampler.run_mcmc(pos, 1000)

# +
m_alpha, m_betax, m_eps = np.median(sampler.flatchain, axis=0)

plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.plot(Table1[:, 1], Table1[:, 2] - m_alpha, 'o')
plt.xlabel('X')
plt.ylabel('Z - alpha - beta_y y')

# Now plot the model
xx = np.array([Table1[:, 1].min(), Table1[:, 1].max()])
plt.plot(xx, xx * m_betax)
plt.plot(xx, xx * m_betax + m_eps, '--', color='k')
plt.plot(xx, xx * m_betax - m_eps, '--', color='k')
# -

tmp = corner.corner(sampler.flatchain,
                    labels=['alpha', 'betax', 'eps'],
                    truths=[b, m, np.mean(Table1[:, 3])])
Data analysis recipes - Fitting a model to data/Section 3 - Pruning outliers/Exercise 6 (jk).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: fe_test # language: python # name: fe_test # --- # ## Cardinality # # The values of a categorical variable are selected from a group of categories, also called labels. For example, in the variable _gender_ the categories or labels are male and female, whereas in the variable _city_ the labels can be London, Manchester, Brighton and so on. # # Different categorical variables contain different number of labels or categories. The variable gender contains only 2 labels, but a variable like city or postcode, can contain a huge number of different labels. # # The number of different labels within a categorical variable is known as cardinality. A high number of labels within a variable is known as __high cardinality__. # # # ### Are multiple labels in a categorical variable a problem? # # High cardinality may pose the following problems: # # - Variables with too many labels tend to dominate over those with only a few labels, particularly in **Tree based** algorithms. # # - A big number of labels within a variable may introduce noise with little, if any, information, therefore making machine learning models prone to over-fit. # # - Some of the labels may only be present in the training data set, but not in the test set, therefore machine learning algorithms may over-fit to the training set. # # - Contrarily, some labels may appear only in the test set, therefore leaving the machine learning algorithms unable to perform a calculation over the new (unseen) observation. # # # In particular, **tree methods can be biased towards variables with lots of labels** (variables with high cardinality). Thus, their performance may be affected by high cardinality. 
# # Below, I will show the effect of high cardinality of variables on the performance of different machine learning algorithms, and how a quick fix to reduce the number of labels, without any sort of data insight, already helps to boost performance. # ## In this Demo: # # We will: # # - Learn how to quantify cardinality # - See examples of high and low cardinality variables # - Understand the effect of cardinality when preparing train and test sets # - Visualise the effect of cardinality on Machine Learning Model performance # # We will use the Titanic dataset. # # - To download the dataset, please refer to the **Datasets** lecture in **Section 1** of the course. # + import pandas as pd import numpy as np import matplotlib.pyplot as plt # to build machine learning models from sklearn.linear_model import LogisticRegression from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier # to evaluate the models from sklearn.metrics import roc_auc_score # to separate data into train and test from sklearn.model_selection import train_test_split # + # let's load the titanic dataset data = pd.read_csv('../titanic.csv') data.head() # - # The categorical variables in this dataset are Name, Sex, Ticket, Cabin and Embarked. # # --------------- # **Note** that Ticket and Cabin contain both letters and numbers, so they could be treated as Mixed Variables. For this demonstration, I will treat them as categorical. 
# +
# let's inspect the cardinality, this is the number
# of different labels, for the different categorical variables.
# NOTE: len(unique()) is used rather than nunique() on purpose, so that
# NaN (e.g. a missing cabin) counts as its own label.
for label, column in [('Name', 'name'),
                      ('Gender', 'sex'),
                      ('Ticket', 'ticket'),
                      ('Cabin', 'cabin'),
                      ('Embarked', 'embarked')]:
    print('Number of categories in the variable {}: {}'.format(
        label, len(data[column].unique())))

print('Total number of passengers in the Titanic: {}'.format(len(data)))
# -

# While the variable Sex contains only 2 categories and Embarked 4 (low cardinality), the variables Ticket, Name and Cabin, as expected, contain a huge number of different labels (high cardinality).
#
# To demonstrate the effect of high cardinality in train and test sets and machine learning performance, I will work with the variable Cabin. I will create a new variable with reduced cardinality.

# +
# let's explore the values / categories of Cabin
# we know from the previous cell that Cabin has a large number of
# different values, therefore the variable is highly cardinal
data.cabin.unique()
# -

# Let's now reduce the cardinality of the variable. How? instead of using the entire **cabin** value, I will capture only the
# first letter.
#
# ***Rationale***: the first letter indicates the deck on which the cabin was located, and is therefore an indication of both social class status and proximity to the surface of the Titanic. Both are known to improve the probability of survival.
# +
# let's capture the first letter of Cabin
data['Cabin_reduced'] = data['cabin'].astype(str).str[0]

data[['cabin', 'Cabin_reduced']].head()

# +
print('Number of categories in the variable Cabin: {}'.format(
    len(data.cabin.unique())))

print('Number of categories in the variable Cabin reduced: {}'.format(
    len(data.Cabin_reduced.unique())))
# -

# We reduced the number of different labels from 182 to 9.

# +
# let's separate into training and testing set
# in order to build machine learning models

use_cols = ['cabin', 'Cabin_reduced', 'sex']

# this functions comes from scikit-learn
X_train, X_test, y_train, y_test = train_test_split(
    data[use_cols],
    data['survived'],
    test_size=0.3,
    random_state=0)

X_train.shape, X_test.shape
# -

# ### High cardinality leads to uneven distribution of categories in train and test sets
#
# When a variable is highly cardinal, often some categories land only on the training set, or only on the testing set. If present only in the training set, they may lead to over-fitting. If present only on the testing set, the machine learning algorithm will not know how to handle them, as it has not seen them during training.

# +
# Let's find out labels present only in the training set.
# PERF: compute the test-set cabins once, instead of re-evaluating
# X_test.cabin.unique() on every iteration of the comprehension.
# (kept as an ndarray, not a set, so NaN membership behaves as before)
test_cabins = X_test.cabin.unique()

unique_to_train_set = [
    x for x in X_train.cabin.unique() if x not in test_cabins
]

len(unique_to_train_set)
# -

# There are 113 Cabins only present in the training set, and not in the testing set.

# +
# Let's find out labels present only in the test set
# (same hoisting of the invariant unique() call)
train_cabins = X_train.cabin.unique()

unique_to_test_set = [
    x for x in X_test.cabin.unique() if x not in train_cabins
]

len(unique_to_test_set)
# -

# Variables with high cardinality tend to have values (i.e., categories) present in the training set, that are not present in the test set, and vice versa. This will bring problems at the time of training (due to over-fitting) and scoring of new data (how should the model deal with unseen categories?).
#
# This problem is almost overcome by reducing the cardinality of the variable. See below.
# +
# Labels of the reduced-cardinality Cabin variable that occur
# in the training set but not in the test set
unique_to_train_set = []
for label in X_train['Cabin_reduced'].unique():
    if label not in X_test['Cabin_reduced'].unique():
        unique_to_train_set.append(label)

len(unique_to_train_set)

# +
# ... and the other way around: labels seen only in the test set
unique_to_test_set = []
for label in X_test['Cabin_reduced'].unique():
    if label not in X_train['Cabin_reduced'].unique():
        unique_to_test_set.append(label)

len(unique_to_test_set)
# -

# Observe how by reducing the cardinality there is now only 1 label in the training set that is not present in the test set. And no label in the test set that is not contained in the training set as well.
#
# ### Effect of cardinality on Machine Learning Model Performance
#
# In order to evaluate the effect of categorical variables in machine learning models, I will quickly replace the categories by numbers. See below.

# +
# Build a category -> integer lookup table from the training set only,
# to quickly demonstrate the effect of labels on machine learning algorithms.
##############
# Note: this is neither the only nor the best way to encode categorical
# variables into numbers; there is more on these techniques in the
# section "Encoding categorical variables"
##############
cabin_dict = {}
for code, category in enumerate(X_train.cabin.unique()):
    cabin_dict[category] = code

cabin_dict

# +
# replace the labels in Cabin, using the dictionary created above
X_train.loc[:, 'Cabin_mapped'] = X_train.loc[:, 'cabin'].map(cabin_dict)
X_test.loc[:, 'Cabin_mapped'] = X_test.loc[:, 'cabin'].map(cabin_dict)

X_train[['Cabin_mapped', 'cabin']].head(10)
# -

# We see how NaN takes the value 0 in the new variable, E36 takes the value 1, C68 takes the value 2, and so on.
# + # Now I will replace the letters in the reduced cabin variable # with the same procedure # create replace dictionary cabin_dict = {k: i for i, k in enumerate(X_train['Cabin_reduced'].unique(), 0)} # replace labels by numbers with dictionary X_train.loc[:, 'Cabin_reduced'] = X_train.loc[:, 'Cabin_reduced'].map( cabin_dict) X_test.loc[:, 'Cabin_reduced'] = X_test.loc[:, 'Cabin_reduced'].map(cabin_dict) X_train[['Cabin_reduced', 'cabin']].head(20) # - # We see now that E36 and E24 take the same number, 1, because we are capturing only the letter. They both start with E. # + # re-map the categorical variable Sex into numbers X_train.loc[:, 'sex'] = X_train.loc[:, 'sex'].map({'male': 0, 'female': 1}) X_test.loc[:, 'sex'] = X_test.loc[:, 'sex'].map({'male': 0, 'female': 1}) X_train.sex.head() # + # check if there are missing values in these variables X_train[['Cabin_mapped', 'Cabin_reduced', 'sex']].isnull().sum() # - X_test[['Cabin_mapped', 'Cabin_reduced', 'sex']].isnull().sum() # In the test set, there are now 41 missing values for the highly cardinal variable. These were introduced when encoding the categories into numbers. # # How? # # Many categories exist only in the test set. Thus, when we created our encoding dictionary using only the train set, we did not generate a number to replace those labels present only in the test set. As a consequence, they were encoded as NaN. We will see in future notebooks how to tackle this problem. For now, I will fill those missing values with 0. # let's check the number of different categories in the encoded variables len(X_train.Cabin_mapped.unique()), len(X_train.Cabin_reduced.unique()) # From the above we note immediately that from the original 182 cabins in the dataset, only 147 are present in the training set. We also see how we reduced the number of different categories to just 9 in our previous step. # # Let's go ahead and evaluate the effect of labels in machine learning algorithms. 
# ### Random Forests # + # model built on data with high cardinality for cabin # call the model rf = RandomForestClassifier(n_estimators=200, random_state=39) # train the model rf.fit(X_train[['Cabin_mapped', 'sex']], y_train) # make predictions on train and test set pred_train = rf.predict_proba(X_train[['Cabin_mapped', 'sex']]) pred_test = rf.predict_proba(X_test[['Cabin_mapped', 'sex']].fillna(0)) print('Train set') print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # - # We observe that the performance of the Random Forests on the training set is quite superior to its performance in the test set. This indicates that the model is over-fitting, which means that it does a great job at predicting the outcome on the dataset it was trained on, but it lacks the power to generalise the prediction to unseen data. # + # model built on data with low cardinality for cabin # call the model rf = RandomForestClassifier(n_estimators=200, random_state=39) # train the model rf.fit(X_train[['Cabin_reduced', 'sex']], y_train) # make predictions on train and test set pred_train = rf.predict_proba(X_train[['Cabin_reduced', 'sex']]) pred_test = rf.predict_proba(X_test[['Cabin_reduced', 'sex']]) print('Train set') print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # - # We can see now that the Random Forests no longer over-fit to the training set. In addition, the model is much better at generalising the predictions (compare the roc-auc of this model on the test set vs the roc-auc of the model above also in the test set: 0.81 vs 0.80). # # **I would like to point out, that likely we can overcome the effect of high cardinality by adjusting the hyper-parameters of the random forests. That goes beyond the scope of this course. 
Here, I want to show you that given a same model, with identical hyper-parameters, high cardinality may cause the model to over-fit**. # ### AdaBoost # + # model build on data with plenty of categories in Cabin # call the model ada = AdaBoostClassifier(n_estimators=200, random_state=44) # train the model ada.fit(X_train[['Cabin_mapped', 'sex']], y_train) # make predictions on train and test set pred_train = ada.predict_proba(X_train[['Cabin_mapped', 'sex']]) pred_test = ada.predict_proba(X_test[['Cabin_mapped', 'sex']].fillna(0)) print('Train set') print('Adaboost roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Adaboost roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # + # model build on data with fewer categories in Cabin Variable # call the model ada = AdaBoostClassifier(n_estimators=200, random_state=44) # train the model ada.fit(X_train[['Cabin_reduced', 'sex']], y_train) # make predictions on train and test set pred_train = ada.predict_proba(X_train[['Cabin_reduced', 'sex']]) pred_test = ada.predict_proba(X_test[['Cabin_reduced', 'sex']].fillna(0)) print('Train set') print('Adaboost roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Adaboost roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # - # Similarly, the Adaboost model trained on the variable with high cardinality is overfit to the train set. Whereas the Adaboost trained on the low cardinal variable is not overfitting and therefore does a better job in generalising the predictions. # # In addition, building an AdaBoost on a model with less categories in Cabin, is a) simpler and b) should a different category in the test set appear, by taking just the front letter of cabin, the ML model will know how to handle it because it was seen during training. 
# ### Logistic Regression # + # model build on data with plenty of categories in Cabin variable # call the model logit = LogisticRegression(random_state=44, solver='lbfgs') # train the model logit.fit(X_train[['Cabin_mapped', 'sex']], y_train) # make predictions on train and test set pred_train = logit.predict_proba(X_train[['Cabin_mapped', 'sex']]) pred_test = logit.predict_proba(X_test[['Cabin_mapped', 'sex']].fillna(0)) print('Train set') print('Logistic regression roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Logistic regression roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # + # model build on data with fewer categories in Cabin Variable # call the model logit = LogisticRegression(random_state=44, solver='lbfgs') # train the model logit.fit(X_train[['Cabin_reduced', 'sex']], y_train) # make predictions on train and test set pred_train = logit.predict_proba(X_train[['Cabin_reduced', 'sex']]) pred_test = logit.predict_proba(X_test[['Cabin_reduced', 'sex']].fillna(0)) print('Train set') print('Logistic regression roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Logistic regression roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # - # We can draw the same conclusion for Logistic Regression: reducing the cardinality improves the performance and generalisation of the algorithm. 
# ### Gradient Boosted Classifier # + # model build on data with plenty of categories in Cabin variable # call the model gbc = GradientBoostingClassifier(n_estimators=300, random_state=44) # train the model gbc.fit(X_train[['Cabin_mapped', 'sex']], y_train) # make predictions on train and test set pred_train = gbc.predict_proba(X_train[['Cabin_mapped', 'sex']]) pred_test = gbc.predict_proba(X_test[['Cabin_mapped', 'sex']].fillna(0)) print('Train set') print('Gradient Boosted Trees roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Gradient Boosted Trees roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # + # model build on data with plenty of categories in Cabin variable # call the model gbc = GradientBoostingClassifier(n_estimators=300, random_state=44) # train the model gbc.fit(X_train[['Cabin_reduced', 'sex']], y_train) # make predictions on train and test set pred_train = gbc.predict_proba(X_train[['Cabin_reduced', 'sex']]) pred_test = gbc.predict_proba(X_test[['Cabin_reduced', 'sex']].fillna(0)) print('Train set') print('Gradient Boosted Trees roc-auc: {}'.format(roc_auc_score(y_train, pred_train[:,1]))) print('Test set') print('Gradient Boosted Trees roc-auc: {}'.format(roc_auc_score(y_test, pred_test[:,1]))) # - # Gradient Boosted trees are indeed over-fitting to the training set in those cases where the variable Cabin has a lot of labels. This was expected as tree methods tend to be biased to variables with plenty of categories. # # **That is all for this demonstration. I hope you enjoyed the notebook, and see you in the next one.**
Section-03-Variable-Characteristics/03.2-Cardinality.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Classification
#
# Example coming from [here](https://automl.github.io/auto-sklearn/master/examples/20_basic/example_classification.html#sphx-glr-examples-20-basic-example-classification-py)

# +
from pprint import pprint

import sklearn.datasets
import sklearn.metrics
# Import the submodule explicitly: `import sklearn.datasets` does NOT
# guarantee that `sklearn.model_selection` is loaded as an attribute of
# the `sklearn` package, so relying on it implicitly is fragile.
import sklearn.model_selection

import autosklearn.classification
# -

# Load the breast-cancer dataset as (features, labels) arrays.
X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)

# Fixed random_state so the train/test split is reproducible.
X_train, X_test, y_train, y_test = \
    sklearn.model_selection.train_test_split(X, y, random_state=1)

# Give the AutoML search 2 minutes overall and 30 s per candidate model.
automl = autosklearn.classification.AutoSklearnClassifier(
    time_left_for_this_task=120,
    per_run_time_limit=30,
    tmp_folder='/tmp/autosklearn_classification_example_tmp',
)
automl.fit(X_train, y_train, dataset_name='breast_cancer')

# Ranked summary of the models found during the search.
print(automl.leaderboard())

pprint(automl.show_models(), indent=4)

# Score the final ensemble on the held-out test set.
predictions = automl.predict(X_test)
print("Accuracy score:", sklearn.metrics.accuracy_score(y_test, predictions))
Auto-SKLearn_AutoML/Classification.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.5 # language: sage # name: sagemath-9.5 # --- # # Ejercicio 8 # Alumna: <NAME> # Correo: <EMAIL> # Da el número $n$ n = 2465809416452482897062922270410493919443 # ### Apartado primero # Pasa algunos test de primalidad para ver si $n$ es compuesto # #### Test de primalidad de Fermat bases = [2,3,5,7,11,13] for a in bases: print(f"Para la base {a} se tiene que a^(n-1) mod n = {pow(a,n-1,n)}") # A la vista de los resultados se tiene que $n$ es un posible primo de Fermat para las bases $2,3,5,7,11,13$. # #### Test de Solovay Strassen # # + #Código auxiliar de prácticas anteriores # Exponencial rápida def LeftToRightFastExponential(base:Integer, exp:Integer, p:Integer): b = exp.binary() a = base c = 0 acc = 1 n = p for i in b: c = 2*c acc = mod(acc ^2, n) if i == '1': c += 1 acc = mod(acc*a, n ) return acc primos = [2,3,5,7,11] def Jacobi(a :Integer,n :Integer): a %= n result = 1 while a != 0: while is_even(a): a /=2 if(n % 8 in [3, 5]): result *= -1 a, n = n,a if (a % 4 == n % 4 == 3): result *= -1 a %=n return (0,result)[n == 1] def VerboseTest_Solovay_Strassen(b:Integer,n:Integer): congruente = LeftToRightFastExponential(b, (n-1)//2, n) jacobi = Jacobi(b,n) print(f"\nTest Solovay Strassen Base {b} n ={n}") print(f"Por el algoritmo Jacobi (b/n) = {jacobi}") print(f"b^((n-1))/2 modulo n es {congruente}") posible_primo = False if (congruente == jacobi or (congruente-n) == jacobi): print(f"{n} es un posible primo Euler en base {b}") posible_primo = True else: print(f"{n} NO es primo gracias al criterio de Euler en base {b}") posible_primo = False return posible_primo for b in bases: VerboseTest_Solovay_Strassen(b,n) # - # #### Test de <NAME> # + # Código auxiliar de prácticas anteriores ModuloBonito = lambda x,n : (x-n, x)[x< (n//2)] def Test_Miller_Rabin(a: Integer, 
n:Integer)-> bool: compuesto = False # Calculamos sucesión impar m = (n-1) r = 0 while is_even(m): m //= 2 r += 1 print('\n','='*60) print(f"Test Miller Rabin base a = {a}, n = {n} \n m={m}, n = m * 2^{r}") print('='*60) anterior = 1 # Cálculo de la a-sucesión for i in range(r+1): congruente = pow(a, m * pow(2, i), n) congruente_bonito = ModuloBonito(int(congruente), int(n)) print(f" Para r={i} el término sucesión mod(a^(m 2^r), n) es: {congruente_bonito}") if( congruente_bonito == 1 and abs(anterior) != 1): compuesto = True anterior = congruente_bonito if anterior != 1: compuesto = True salida = ( f"{n} es un posible primo de Miller-Rabin para la base {a}", f"{n} es compuesto" )[compuesto] print(salida, '\n') return compuesto for p in bases: Test_Miller_Rabin(p,n) # - # ## Apartado segundo # # Factoriza $n+1$ encontrando certificados de primalidad para los factores mayores de 10000. # Vamos a factorizar el número usando la Rho Pollard, antes nos damos cuenta de que $n+1$ tiene como divisores a $2^2$ y a $19$ luego el número $m$ con el que vamos a trabajar es: m = (n+1)//4//19 # + # Algoritmo prácticas anteriores def Verbose_Rho_de_Polard(n:Integer, f) -> Integer: '''Devuelve un factor de p o n si es un fracaso ''' mf = lambda x: f(x)% n #print(f"Cálculo de Rho de Polar para n={n}") x = 1 y = 1 d = 1 paso = 0 #print(f"Paso {paso} x={x} y={y}") while d == 1: paso += 1 x = mf(x) y = mf(mf(y)) d = gcd(abs(x-y),n) #print(f"Paso {paso} x={x} y={y} mcd={d}") return d f = lambda x : x^2 +1 ## Versión de sacar todos los factores de manera manual def SacaFactores(numero_factores, n, f ): factores = [] m = n hay_factores = True while numero_factores >= 0 and hay_factores: numero_factores -= 1 d = Verbose_Rho_de_Polard(m,f) if d != m: factores.append(d) m = m // d print(f"Se tiene que n = multiplicar los elementos de {factores} y {m}\n") else: factores.append(m) print('='*60) hay_factores = False return factores factores = SacaFactores(0, m,f ) print(f"Los factores de {m} son 
{factores}") # - # Luego los factores candidatos son: factores = [2,19, 47128831, 688429143145855402034758463849 ] # Además es necesario obtener un certificado de primalidad de los últimos # El proceso sería volver a aplicar el algoritmo que estamos siguiendo ahora mismo y después dar un testigo, dando como resultado que en efecto ambos son primos. (Vamos a omitir el proceso ya que eso pertenece a prácticas anteriores.) # Código auxiliar prácticas anteriores def BuscaTestigo(n, factores, lista): m = n-1 for i in lista: if pow(i,m, n) == 1: condicion_testigo = True for f in factores: if pow(i,m//f, n) == 1: condicion_testigo = False break if condicion_testigo == True: return i return 0 # + factores_aux = dict() # Esta jerarquía muestra la dependencia de los testigos a calcular factores_que_mostrar = [factores[2], 224423, # ya descompone factores[3], 2840077 , 26297, # ya descompone 265395451 , 47819, 23909, # ya descopone 959397780037, 26489, 3018227, 79427 # ya descompone ] for p in factores_que_mostrar: factores_aux[p] = factor(p-1) # <NAME> print(f"los factores de {p-1} son {factores_aux[p]}") factores_aux[p] = list(map(lambda x:x[0], factores_aux[p])) # vamos a buscar testigos for f in factores_que_mostrar[::-1]: testigo = BuscaTestigo(f, factores_aux[f], [2,3,5,7,11,13,15, 17,19, 23, 29, 31]) mensaje = ( f"{testigo} es un testigo de la primalidad del factor ={f}", f"No se ha encontrado un testigo que demuestre la primalidad del factor = {f}" )[testigo == 0] print(mensaje) # - # A la vista de los resultados los factores : factores # son primos. testigo = BuscaTestigo(n, factores, [2,3,5,7,11,13,15]) mensaje = ( f"{testigo} es un testigo de la primalidad de n={n}", f"No se ha encontrado un testigo que demuestre la primalidad de n= {n}" )[testigo == 0] print(mensaje) # ### Apartado tercero # # Con $P=1$, encuentra el menor $Q$ natural mayor o igual que 2, tal que definan una sucesión de Lucas que certifique la primalidad de $n$. 
# We rely on the following primality characterisation:
# If there is a Lucas sequence with parameters $P$, $Q$ such that $d = P^2 - 4Q$
# is not a perfect square, $(n, 2Qd) = 1$, and the Lucas rank is
# $w(n) = n \pm 1$, then $n$ is prime.

# Define the functions we are going to use:

# +
def sLucas_modificado(P, Q, r, n):
    """Return (U_r, U_{r+1}, V_r) of the Lucas sequence (P, Q) modulo n.

    Walks the bits of r from most to least significant, applying the
    index-doubling formulas, so only O(log r) modular products are needed.
    """
    u_k, u_k1 = 0, 1                 # (U_0, U_1)
    for bit in bin(r)[2:]:           # most-significant bit first
        if bit == '0':
            # doubling step: k -> 2k
            u_k, u_k1 = ((2 * u_k * u_k1 - P * u_k * u_k) % n,
                         (u_k1 * u_k1 - Q * u_k * u_k) % n)
        else:
            # doubling-plus-one step: k -> 2k + 1
            u_k, u_k1 = ((u_k1 * u_k1 - Q * u_k * u_k) % n,
                         (P * u_k1 * u_k1 - 2 * Q * u_k * u_k1) % n)
    # V_r is determined by two consecutive U terms.
    v_r = (2 * u_k1 - P * u_k) % n
    return u_k, u_k1, v_r


def comprueba_condicionesLucas(sucesion):
    """True iff ONLY the first term of the sequence is congruent to 0.

    The first entry corresponds to U_{n+1} (which must vanish mod n);
    every later entry is U_{(n+1)/q} for a prime factor q and must not.
    An empty sequence fails the check, which primes the search loop below.
    """
    if not sucesion or sucesion[0] != 0:
        return False
    return all(term != 0 for term in sucesion[1:])


def certificado_sLucas(P, n, factores_primos):
    """Find the smallest Q >= 2 whose Lucas sequence certifies rank n + 1.

    For each candidate Q the sequence of U_{(n+1)/q} mod n values is built
    (one per entry of factores_primos) and tested with
    comprueba_condicionesLucas until a certifying Q is found.
    """
    r = n + 1
    Q = 1
    sucesion_U = []
    while not comprueba_condicionesLucas(sucesion_U):
        Q += 1
        sucesion_U = [sLucas_modificado(P, Q, r // divisor, n)[0]
                      for divisor in factores_primos]
    return P, Q, sucesion_U


# Prepend 1 so the first sampled term is U_{n+1} itself.
factores.insert(0, 1)
P = 1
P, Q, sucesion = certificado_sLucas(P, n, factores)
print("P: ", P, "Q:", Q)
for i, term in zip(factores, sucesion):
    print(f"U_(n+1)/{i}, =", term)
# -

# For $P=1$ and $Q=2$ the number $n$ has Lucas rank $n+1$.
Ejercicio-8-Blanca-Cano.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="W1_-pO-S5d3b" # # HPC 6.2 Automating the workflow on ARC # + [markdown] id="qAb-Kfgm6jkA" # We are now going to transfer this workflow over to ARC, automating parts of it as we go. # # The first step is to connect to ARC via `ssh`, as we are off campus we need to do this via `remote-access.leeds.ac.uk` first: # # `ssh <username>@remote-access.leeds.ac.uk` # `ssh <<EMAIL>>@<EMAIL>` # # We recommend to follow the instructions in our KB article to make this process a bit easier. # # + [markdown] id="hgXQAPyg7uRR" # We are going to use Anaconda to download and install our tools. First: # # `module load anaconda` # # `conda config --add channels bioconda` # # Next, we are going to create an **Anaconda Environment** just for our experiment but in order to do this we need to put a list of the tools we need inside a file called `requirements.txt` # + id="3jwlGCSR8HYC" # %%writefile requirements.txt fastqc trimmomatic bwa samtools bcftools # + [markdown] id="jZisqYbC84og" # and once we have that we need to tell Anaconda to make the environment based on what's in the file. # # `conda env create --name varcall --file requirements.txt` # # Which might take a few minutes. # # Once it has completed we can check if that environment has been created: # # `conda env list` # # and change over in to it: # # `source activate varcall` # # Just like before, we can check if the tools work at the command line: # # `bwa` # + [markdown] id="g8cNmgtoA4AU" # The final stage of the initial setup is to create a directory on `/nobackup` for our project work: # # `mkdir -p /nobackup/$USER/vc_pipeline/data/untrimmed_fastq/` # # This will give us a project directory (`vc_pipeline`) our data directory and a directory `untrimmed_fastq/` within that for our source data. 
# + [markdown] id="xo70LogEBU63" # ## Part 1: Downloading data # # Our first HPC job is to download the data and put it inside this directory we just created. We could do this manually but it's more useful to do this with a `batch job`. # # We have a special `data` queue to do this so we don't clog up the regular queues with non-computational jobs. # + id="nE62GDcTBvCc" # %%writefile 1_download_data.sh # Submission script for download job # <NAME> yyyy-mm-dd # Run from the current directory and with current environment #$ -cwd -V # Ask for some time (hh:mm:ss max of 48:00:00) #$ -l h_rt=01:00:00 # Run in the data queue #$ -P data # Ask for some memory (by default, 1G, without a request) #$ -l h_vmem=2G # Send emails when job starts and ends #$ -m be # Now run the job # cd /nobackup/$USER/vc_pipeline/data/untrimmed_fastq/ curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/004/SRR2589044/SRR2589044_1.fastq.gz curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/004/SRR2589044/SRR2589044_2.fastq.gz curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/003/SRR2584863/SRR2584863_1.fastq.gz curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/003/SRR2584863/SRR2584863_2.fastq.gz curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/006/SRR2584866/SRR2584866_1.fastq.gz curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/006/SRR2584866/SRR2584866_2.fastq.gz # + [markdown] id="0MKpXu7cHGnl" # We can now send this job to the queue to be executed by the **batch scheduler**: # # `qsub download_data.sh` # # and you can check the progress of the job with the command: # # `qstat` # # When it's completed, you'll be able to go and see if the files have been downloaded: # # `ls -l /nobackup/$USER/vc_pipeline/data/untrimmed_fastq/` # + [markdown] id="hguAm6K3WyWv" # At the momnt, the script we wrote is just in our home directory. We should put it inside our project directory in a new `scripts` subdirectory. 
# # `mkdir /nobackup/$USER/vc_pipeline/scripts/` # `mv download_data.sh /nobackup/$USER/vc_pipeline/scripts/` # + [markdown] id="yrxZS1xfWoMo" # ## Part 2: Analysing quality with fastqc # # # + [markdown] id="9j0Gn0xNX6SW" # Let's create a script using nano to run the `fastqc` part of the pipeline: # # `nano /nobackup/$USER/vc_pipeline/scripts/read_qc.sh` # # `fastqc` can use multiple cores so we can amend our submission script accordingly: # + id="m3Qz6_WZ5X_F" # %%writefile 2_read_qc.sh # Submission script for fastqc job # <NAME> yyyy-mm-dd # Run from the current directory and with current environment #$ -cwd -V # Ask for some time (hh:mm:ss max of 48:00:00) #$ -l h_rt=01:00:00 # Ask for some memory (by default, 1G, without a request) #$ -l h_vmem=4G # Request 4 cores #$ -pe smp 4 # Send emails when job starts and ends #$ -m be # Now run the job module load anaconda source activate varcall # cd /nobackup/$USER/vc_pipeline/data/untrimmed_fastq/ # echo "Running FastQC ..." fastqc *.fastq* # mkdir -p /nobackup/$USER/vc_pipeline/results/fastqc_untrimmed_reads # echo "Saving FastQC results..." # mv *.zip /nobackup/$USER/vc_pipeline/results/fastqc_untrimmed_reads/ # mv *.html /nobackup/$USER/vc_pipeline/results/fastqc_untrimmed_reads/ # cd /nobackup/$USER/vc_pipeline/results/fastqc_untrimmed_reads/ # echo "Unzipping..." for filename in *.zip do unzip $filename done # echo "Saving summary..." # mkdir /nobackup/$USER/vc_pipeline/docs # cat */summary.txt > /nobackup/$USER/vc_pipeline/docs/fastqc_summaries.txt # + [markdown] id="ebABBE6NZo3g" # Now we can submit the job and wait for it to be completed: # # `qsub /nobackup/$USER/vc_pipeline/scripts/read_qc.sh` # # Remembering that `qstat` will show us the progress of the job. # # **NOTE**: At this stage in real life you would need to do some assessment of the quality of your reads to determine the parameters of the `trimmomatic` command. 
# + [markdown] id="6CXZsUAYc5az" # ## Part 3: Trimming # # Just like in our previous Notebook, we now need to call `trimmomatic` on all of our **untrimmed** data to create the **trimmed** data for the next step. # # Create this file using nano: # # `nano /nobackup/issmcal/vc_pipeline/scripts/run_trims.sh` # + id="Klq8TGIodurs" # %%writefile 3_run_trims.sh # Submission script for trimming job # <NAME> yyyy-mm-dd # Run from the current directory and with current environment #$ -cwd -V # Ask for some time (hh:mm:ss max of 48:00:00) #$ -l h_rt=02:00:00 # Ask for some memory (by default, 1G, without a request) #$ -l h_vmem=4G # Request 4 cores #$ -pe smp 4 # Send emails when job starts and ends #$ -m be # Now run the job # Change to the correct directory # cd /nobackup/$USER/vc_pipeline/data/untrimmed_fastq # Download the Nextera adapter # ! wget https://raw.githubusercontent.com/timflutre/trimmomatic/master/adapters/NexteraPE-PE.fa # Loop over the untrimmed fastq files for infile in *_1.fastq.gz do base=$(basename ${infile} _1.fastq.gz) trimmomatic PE ${infile} ${base}_2.fastq.gz \ ${base}_1.trim.fastq.gz ${base}_1un.trim.fastq.gz \ ${base}_2.trim.fastq.gz ${base}_2un.trim.fastq.gz \ SLIDINGWINDOW:4:20 MINLEN:25 ILLUMINACLIP:NexteraPE-PE.fa:2:40:15 done # move our trimmed FASTQ files to a new subdirectory within our data/ directory # mkdir -p /nobackup/$USER/vc_pipeline/data/trimmed_fastq # mv *.trim* /nobackup/$USER/vc_pipeline/data/trimmed_fastq # + [markdown] id="yj7gwBUaaE4H" # ## Part 4: Automating the rest of the Variant Calling workflow # # We can extend these principles to the entire variant calling workflow. To do this, we will take all of the individual commands that we wrote before, put them into a single file, add variables so that the script knows to iterate through our input files and write to the appropriate output files. # # This is very similar to what we did with our `read_qc.sh` script, but will be a bit more complex. 
# # Again, using `nano`: # # `nano /nobackup/issmcal/vc_pipeline/scripts/vc_script.sh` # # Submit to the queue with: # # `qsub /nobackup/issmcal/vc_pipeline/scripts/vc_script.sh` # # And watch progress with `qstat` # + colab={"base_uri": "https://localhost:8080/"} id="nOmHip_xEKZa" outputId="f87faf1b-721f-433b-8fe7-e1f6fc062221" # %%writefile 4_vc_script.sh # Submission script for vc workflow job # <NAME> yyyy-mm-dd # Run from the current directory and with current environment #$ -cwd -V # Ask for some time (hh:mm:ss max of 48:00:00) #$ -l h_rt=02:00:00 # Ask for some memory (by default, 1G, without a request) #$ -l h_vmem=4G # Request 4 cores #$ -pe smp 4 # Send emails when job starts and ends #$ -m be # Now run the job module load anaconda source activate varcall # Download the reference genome # mkdir -p /nobackup/$USER/vc_pipeline/data/ref_genome curl -L -o /nobackup/$USER/vc_pipeline/data/ref_genome/ecoli_rel606.fasta.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCA/000/017/985/GCA_000017985.1_ASM1798v1/GCA_000017985.1_ASM1798v1_genomic.fna.gz gunzip /nobackup/$USER/vc_pipeline/data/ref_genome/ecoli_rel606.fasta.gz # cd /nobackup/$USER/vc_pipeline/results genome=/nobackup/$USER/vc_pipeline/data/ref_genome/ecoli_rel606.fasta bwa index $genome # mkdir -p sam bam bcf vcf for fq1 in /nobackup/$USER/vc_pipeline/data/trimmed_fastq/*_1.trim.fastq* do echo "working with file $fq1" base=$(basename $fq1 _1.trim.fastq) echo "base name is $base" fq1=/nobackup/$USER/vc_pipeline/data/trimmed_fastq/${base}_1.trim.fastq fq2=/nobackup/$USER/vc_pipeline/data/trimmed_fastq/${base}_2.trim.fastq sam=/nobackup/$USER/vc_pipeline/results/sam/${base}.aligned.sam bam=/nobackup/$USER/vc_pipeline/results/bam/${base}.aligned.bam sorted_bam=/nobackup/$USER/vc_pipeline/results/bam/${base}.aligned.sorted.bam raw_bcf=/nobackup/$USER/vc_pipeline/results/bcf/${base}_raw.bcf variants=/nobackup/$USER/vc_pipeline/results/bcf/${base}_variants.vcf 
final_variants=/nobackup/$USER/vc_pipeline/results/vcf/${base}_final_variants.vcf bwa mem $genome $fq1 $fq2 > $sam samtools view -S -b $sam > $bam samtools sort -o $sorted_bam $bam samtools index $sorted_bam bcftools mpileup -O b -o $raw_bcf -f $genome $sorted_bam bcftools call --ploidy 1 -m -v -o $variants $raw_bcf vcfutils.pl varFilter $variants > $final_variants done # + [markdown] id="9KdVLQgdD-e_" # ## Joining scripts together # # We can, in fact, set things up so that one job only starts when the previous one has finished. This means you can avoid having to hang around waiting for one to finish before we set things off for the subsequent job. # # The `-N` means that we can give each part of the job a temporary name to refer back to. # # `-hold_jid` means only start this current job when the specified job has completed. # # We're going to execute: # # `job1(download) -> job2(readqc) -> job3(trim) -> job4(var_call)` # # So, type in all four commands below one after the other. # # `qsub -N download download_data.sh` # # `qsub -N readqc -hold_jid download read_qc.sh` # # `qsub -N trim -hold_jid readqc run_trims.sh` # # `qsub -N var_call -hold_jid trim vc_script.sh` # # You can then log out and go and have a nice cup of tea and the scheduler will run the whole workflow for you. # # You'll get an email when each part has finished and you only need to log back in again when the final stage has finished. # + id="qMVHRWXivTE9"
HPC_6_2_Automating_the_workflow_on_ARC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Create a general MODFLOW model from the NHDPlus dataset # + slideshow={"slide_type": "fragment"} __author__ = '<NAME>' # %matplotlib inline import os import sys import numpy as np import matplotlib.pyplot as plt import scipy.ndimage as nd import pandas as pd import random import gdal from model_specs import * from gen_mod_dict import * from ipywidgets import interact, Dropdown from IPython.display import display # - for key, value in model_dict.items(): md = key ms = model_dict[md] print('trying {}'.format(md)) try: pass except: pass # Project specific variables are imported in the model_spec.py and gen_mod_dict.py files that must be included in the notebook directory. The first first includes pathnames to data sources that will be different for each user. The second file includes a dictionary of model-specific information such as cell size, default hydraulic parameter values, and scenario defintion (e.g. include bedrock, number of layers, etc.). There are examples in the repository. Run the following cell to get a pull-down menu of models in the model_dict. Then, without re-running that cell, run all the remaining cells. Re-running the following cell would re-set the model to the first one in the list, which you probably don't want. If you use the notebook option to run all cells below, it runs the cell you're in, so if you use that option, move to the next cell (below the pull-down menu of models) first. 
# Pull-down menu of the available models, in alphabetical order.
models = sorted(model_dict)
model_area = Dropdown(
    options=models,
    description='Model:',
    background_color='cyan',
    border_color='black',
    border_width=2)
display(model_area)

# Model chosen in the widget above and its per-model settings dict.
md = model_area.value
ms = model_dict[md]
print('The model being processed is {}'.format(md))

# Read model_grid.csv that was created using first general model notebook;
# hnoflo values in the file are treated as missing data.
model_ws = os.path.join(proj_dir, ms['ws'])
model_file = os.path.join(model_ws, 'model_grid.csv')
model_grid = pd.read_csv(model_file, na_values=[hnoflo])
# Drop any observation-group column left over from an earlier run.
if 'obs_grp' in model_grid.columns:
    model_grid.drop('obs_grp', axis=1, inplace=True)

# Get NROW, NCOL from model_grid.csv (row/col indices are 0-based).
NROW = model_grid.row.max() + 1
NCOL = model_grid.col.max() + 1
num_cells = NROW * NCOL

# This cell makes a new column that contains the percent coarse material (which comes from 'is_coarse' in model_grid.csv') in the local neighborhood of each cell. The user can change the size of the neighborhood, which is a square block of cells centered on each cell as it moves, by changing the variable hood_size.
# + is_coarse = np.zeros(( NROW, NCOL ), dtype=np.float32) qa = model_grid.qu_atlas.reshape(NROW, NCOL) is_coarse[qa == 1] = 1 is_coarse[qa == 9] = 1 is_coarse[qa == 11] = 1 is_coarse[qa == 17] = 1 # use this number to get broader dist of pct_coarse # this might allow quantiles where otherwise none are possible # this variable is not stored for the next step--only used here for quantiles hood_size = 5 footprint = np.ones((hood_size, hood_size)) / hood_size**2 temp = nd.correlate(is_coarse, footprint,) model_grid['pct_coarse'] = temp.ravel() model_grid.pct_coarse.hist() # - # * Select 'hydro' obs from model_grid # * Put the integer that represents unique reaches into the index # * Groupby the reach integer so that all the cells that belong to a reach are grouped together # * Add labels to identify the quantiles of the median elevation of all the cells for each reach # * Groupby by those quantiles so that all the cells that belong to each quantile are grouped together # * Loop through the rows from the original dataframe and select the rows that belong to the elevation quantile group # * Label each group as they're being looped through and append them for each observation # * The commented-out statement could be used to randomly sample from each group # # + # make additional obs using drain observation in MODFLOW (should be > 0) # pull out drain flows from budget package for first order # also summarize flow at gages sel = pd.DataFrame(model_grid[model_grid.obs_type == 'hydro']) sel.set_index(sel.reach_int, drop=False, inplace=True) num_of_samples = 10 num_of_obs = 5 o1 = sel.groupby(['reach_int']).median() o1['top_quant'], rbins = pd.qcut(o1.top, num_of_obs, retbins=True, labels=False) temp = o1.groupby(['top_quant']) stream_obs = pd.DataFrame() for grp, item in temp: obs = pd.DataFrame(sel.loc[item.index]) obs['obs_grp'] = 'strm_el{}'.format(grp) obs['obs_grp_int'] = grp + 1 stream_obs = pd.concat([stream_obs, obs]) # + # note: possible enhancement is to add 
within-cell percent coarse num_of_obs = 3 is_topo = model_grid.obs_type == 'topo' try: model_grid.loc[is_topo, 'top_quant'] = pd.qcut(model_grid.top, num_of_obs, labels=[1, 2, 3]) except: pass try: model_grid.loc[is_topo, 'coarse_grp'] = pd.cut(model_grid.pct_coarse, [0.0, 0.1, 0.9, 1.0], include_lowest=True, labels=[1, 2, 3]) except: pass try: mini_mohp = model_grid.dist2str / model_grid.dist2str.max() model_grid.loc[is_topo, 'hypo_quant'] = pd.cut(mini_mohp, [0.0, 0.3333, 0.6666, 1.0], include_lowest=True, labels=[1, 2, 3]) except: pass # - # Each cell saves one of the individual quantiles or quantile-based observation groups as tiff files # + data = model_grid.pct_coarse.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'pct_coarse.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(-9999) dst = None src = None # + data = model_grid.coarse_grp.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'coarse_grp.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(255) dst = None src = None # + data = model_grid.hypo_quant.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'hypo_quant.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) band.WriteArray(data) band.SetNoDataValue(255) dst = None src = None # + data = model_grid.top_quant.reshape(NROW,NCOL) src_pth = os.path.join(model_ws, 'ibound.tif') src = gdal.Open(src_pth) dst_pth = os.path.join(model_ws, 'top_quant.tif') driver = gdal.GetDriverByName('GTiff') dst = driver.CreateCopy(dst_pth, src, 0) band = dst.GetRasterBand(1) 
band.WriteArray(data)
band.SetNoDataValue(255)
dst = None  # rebinding to None closes the GDAL dataset and flushes it to disk
src = None

# +
# Rasterize the stream observation groups: 0 everywhere except stream cells,
# which carry their 1-based group integer (so 0 doubles as the nodata value).
blank = np.zeros((num_cells))
blank[stream_obs.node_num.values] = stream_obs.obs_grp_int
data = blank.reshape((NROW, NCOL))

# Copy the georeferencing from the existing ibound raster, then overwrite
# the band data with the stream-observation groups.
src_pth = os.path.join(model_ws, 'ibound.tif')
src = gdal.Open(src_pth)

dst_pth = os.path.join(model_ws, 'stream_obs.tif')
driver = gdal.GetDriverByName('GTiff')
dst = driver.CreateCopy(dst_pth, src, 0)
band = dst.GetRasterBand(1)
band.WriteArray(data)
band.SetNoDataValue(0)
dst = None
src = None
general-models/General_Model_2_Generate_Observations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Creating CNN Using Scratch And Transfer Learning
#

# +
# import the libraries as shown below
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import Sequential
import numpy as np
from glob import glob
import matplotlib.pyplot as plt

# +
# re-size all the images to this
IMAGE_SIZE = [224, 224]

train_path = 'Dataset/Train'
valid_path = 'Dataset/Test'

# +
# Import the VGG19 network with imagenet weights and without its classifier
# head, so a new softmax head can be attached for this dataset.
vgg19 = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# -

vgg19.summary()

# don't train existing weights (freeze the convolutional base)
for layer in vgg19.layers:
    layer.trainable = False

# useful for getting number of output classes
folders = glob('Dataset/Train/*')
folders

# our layers - you can add more if you want
x = Flatten()(vgg19.output)

# +
prediction = Dense(len(folders), activation='softmax')(x)

# create a model object
model = Model(inputs=vgg19.input, outputs=prediction)

# +
# view the structure of the model
model.summary()
# -

from tensorflow.keras.layers import MaxPooling2D

### Create Model from scratch using CNN
# NOTE(review): this rebinds `model`, discarding the VGG19 transfer-learning
# model built above. Everything from here on trains the scratch CNN, and it
# is the scratch CNN that is later saved under the (misleading) name
# 'model_vgg19.h5'. Kept as-is to preserve behavior — confirm intent.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=2, padding="same", activation="relu", input_shape=(224, 224, 3)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=2, padding="same", activation="relu"))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(500, activation="relu"))
model.add(Dense(2, activation="softmax"))

model.summary()

# tell the model what cost and optimization method to use
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy']
)

# +
# Use the Image Data Generator to import the images from the dataset
from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)
# -

# Make sure you provide the same target size as initialized for the image size
training_set = train_datagen.flow_from_directory('Dataset/Train',
                                                 target_size=(224, 224),
                                                 batch_size=32,
                                                 class_mode='categorical')

training_set

test_set = test_datagen.flow_from_directory('Dataset/Test',
                                            target_size=(224, 224),
                                            batch_size=32,
                                            class_mode='categorical')

# fit the model
# Run the cell. It will take some time to execute
r = model.fit(
    training_set,
    validation_data=test_set,
    epochs=10,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)

# +
# plot the loss
# BUG FIX: plt.savefig() must be called BEFORE plt.show() — in non-interactive
# (script) mode show() finishes and clears the current figure, so the original
# order wrote blank image files.
plt.plot(r.history['loss'], label='train loss')
plt.plot(r.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('LossVal_loss')
plt.show()

# plot the accuracy
plt.plot(r.history['accuracy'], label='train acc')
plt.plot(r.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc')
plt.show()

# +
# save it as a h5 file
from tensorflow.keras.models import load_model

model.save('model_vgg19.h5')
# -

# +
y_pred = model.predict(test_set)
# -

y_pred

import numpy as np

# Class index with the highest softmax probability for each test image.
y_pred = np.argmax(y_pred, axis=1)

y_pred

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model('model_vgg19.h5')

img = image.load_img('Dataset/Test/Uninfected/2.png', target_size=(224, 224))

# +
# NOTE(review): this immediately replaces the test image loaded above.
img = image.load_img('Dataset/Train/Parasite/2.png', target_size=(224, 224))
# -

x = image.img_to_array(img)
x

x.shape

# Rescale to [0, 1] (matching the generators) and add a batch dimension.
x = x / 255
x = np.expand_dims(x, axis=0)
img_data = preprocess_input(x)
img_data.shape

model.predict(img_data)

a = np.argmax(model.predict(img_data), axis=1)

if (a == 1):
    print("Uninfected")
else:
    print("Infected")
CNN And Transfer LEarning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np
import tensorflow_datasets as tfds

# #

# ## Import the `aiproteomics` python package

import aiproteomics

# ## Load a ready-to-use dataset derived from the DeepDIA model
# It's now possible to load (remote) proteomics datasets defined in the package, with all preprocessing carried out automatically.

# Loads the dataset via the tensorflow_datasets registry; as_supervised=True
# yields (input, label) pairs.
ds, ds_info = tfds.load('aiproteomicshela1', as_supervised=True, with_info=True)

# Let's look at the info for the 'hela1' dataset we have just fetched:

print(ds_info)

# ## Build a transformer model for retention time prediction
# The `aiproteomics` package will also contains quick methods for obtaining common retention time models. Currently we can build a transformer for learning retention time.

model = aiproteomics.rt.models.build_rt_transformer_model(
    num_layers = 6,     # number of layers, paper = 6
    d_model = 512,
    num_heads = 8,      # Number of attention heads, paper = 8
    d_ff = 2048,        # Hidden layer size in feed forward network inside transformer, paper = 2048
    dropout_rate = 0.1,
    vocab_size = 22,    # number of aminoacids
    max_len = 50        # maximal peptide length
)

model.summary()

import tensorflow as tf
tf.keras.utils.plot_model(model, show_shapes=True)

# ## Train the transformer model on our dataset

# Warm-up/decay learning-rate schedule with d_model = 512 and the Adam
# hyperparameters beta_1=0.9, beta_2=0.98, epsilon=1e-9.
learning_rate = aiproteomics.models.CustomSchedule(512)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
                                     epsilon=1e-9)

model.compile(optimizer=optimizer, loss=tf.keras.losses.MeanAbsoluteError())

# + tags=[]
history = model.fit(
    ds['train'],
    batch_size=1024,
    epochs=1,
    validation_data=ds['validate'],
    callbacks = []
)
# -

# ## Check performance of model against validation data

# + tags=[]
score_train = model.evaluate(ds['train'], verbose=0)
score_val = model.evaluate(ds['validate'], verbose=0)
score = model.evaluate(ds['test'], verbose=0)
print(score_train, score_val, "->", score)
# -

# ## Prosit1 fragmentation model

model = aiproteomics.frag.models.build_prosit1_model()

tf.keras.utils.plot_model(model, show_shapes=True)

# ## Fragmentation model

model = aiproteomics.frag.models.build_frag_transformer_model(
    num_layers = 6,     # number of layers
    d_model = 512,
    num_heads = 8,      # Number of attention heads
    d_ff = 2048,        # Hidden layer size in feed forward network inside transformer
    dropout_rate = 0.1,
    vocab_size = 22,    # number of aminoacids
    max_len = 50        # maximal peptide length
)

tf.keras.utils.plot_model(model, show_shapes=True)
demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# #!/usr/bin/python3
# -

# Advent of Code 2021, day 25: sea cucumbers (">" east-facing, "v" south-facing)
# move simultaneously on a wrapping grid until nobody can move.
#from collections import Counter
#import re
#import os
import time
from collections import defaultdict
#from collections import deque

date = 25
dev = 0   # extra prints
part = 1  # 1,2, or 3 for both
# 0 or 1:
samp = 0  # 1 = use sample.txt, 0 = use input.txt

print("https://adventofcode.com/2021/day/{}".format(date))

# + [markdown] tags=[]
# ## Read the input data

# +
#time0 = time.time()

if samp == 1:
    filename = "/sample.txt"
else:
    filename = "/input.txt"

# Input may live either in a "<date>/" subdirectory or next to the notebook.
try:
    with open(str(date) + filename, "r") as f:
        t = f.readlines()
except FileNotFoundError:
    with open("." + filename, "r") as f:
        t = f.readlines()

t = [(x.strip().replace(' ', ' ')) for x in t]  # NOTE(review): replace(' ', ' ') is a no-op as written
#t = [int(x) for x in t]
# -

# ## Part one

def step_east(sea):
    """Move every east-facing cucumber one cell right (wrapping) if the
    destination is free in the CURRENT grid; return (sea, number of changed
    cells). All reads go through `sea` and writes go to `newsea`, which is
    applied only after the full scan — so the moves are simultaneous.
    Note the returned count includes both vacated and occupied cells (2x the
    number of moves), but it is only ever compared against 0."""
    newsea = defaultdict(lambda: ".")
    #check east
    for r in range(rows):
        for c in range(cols):
            if sea[(r, c)] != ">":
                continue
            # next col is +1, unless col is the right-most (wraps to 0)
            nc = c + 1 if c != cols - 1 else 0
            east = sea[(r, nc)]
            if east == ".":
                newsea[(r, nc)] = ">"
                newsea[(r, c)] = "."
    # Apply all moves at once.
    for k in newsea.keys():
        #print(k, newsea[k])
        sea[k] = newsea[k]
    return (sea, len(newsea.keys()))

def step_south(sea):
    """Same as step_east, but for south-facing cucumbers moving down one row
    (wrapping from the bottom row to row 0)."""
    newsea = defaultdict(lambda: ".")
    #check south
    for r in range(rows):
        for c in range(cols):
            if sea[(r, c)] != "v":
                continue
            # next row is +1, unless row is the bottom-most (wraps to 0)
            nr = r + 1 if r != rows - 1 else 0
            south = sea[(nr, c)]
            if south == ".":
                newsea[(nr, c)] = "v"
                newsea[(r, c)] = "."
    # Apply all moves at once.
    for k in newsea.keys():
        #print(k, newsea[k])
        sea[k] = newsea[k]
    return (sea, len(newsea.keys()))

def print_sc(sea):
    """Print the grid row by row (debug helper)."""
    print(" ")
    for r in range(rows):
        pr = ""
        for c in range(cols):
            pr += sea[(r, c)]
        print(pr)
    return 0

# + tags=[]
# Grid dimensions, taken from the stripped input lines (module-level globals
# used by the step functions above).
cols = len(t[0])
rows = len(t)

def day(te):
    """Part 1: step east-then-south herds until no cucumber moves; return the
    1-based step number on which movement first stops."""
    # Load the input characters into a (row, col)-keyed grid.
    sc = defaultdict(lambda: ".")
    for r in range(rows):
        for c in range(cols):
            sc[(r, c)] = te[r][c]
    if samp:
        # NOTE(review): loop variable `t` shadows the global input list here.
        for t in te:
            print(t)
        print("\t-x-x-x-x-")
    s = 0
    while 1:
        s += 1
        # East herd moves first, then south — per the puzzle rules.
        sc, newe = step_east(sc)
        sc, news = step_south(sc)
        if (newe == 0) and (news == 0):
            print(":), s={}".format(s))
            if samp:
                print_sc(sc)
            break
        #print_sc(sc)
        if s > 70000:  # safety cap against an infinite loop
            print(":(, s={}".format(s))
            break
    return s

#day(t)
# -

# ## Part two

# + tags=[]
def day2(te):
    """Day 25 has no real part 2; placeholder."""
    return 2

day2(t)
# -

# ## Run the programs

if 1:
    time0 = time.time()
    if part == 1:
        print("Part 1: ", day(t))
    elif part == 2:
        print("Part 2: ", day2(t))
    elif part == 3:  # run both
        print("Part 1: ", day(t))
        print("Part 2: ", day2(t))
    tdif = time.time() - time0
    print("Elapsed time: {:.4f} s".format(tdif))
25/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import scipy.stats as st import statsmodels.api as sm import pingouin as pg import statsmodels.stats.outliers_influence as sms from functools import reduce import warnings warnings.filterwarnings('ignore') # # Regression # ### i) Simple Regression # # #### Outcome = Model + Error # #### Sum of Squares, SSt, SSm , SSr data = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/07_Regression/Data_Files/Album Sales 1.dat', sep='\t') print(data.head()) _ = sns.lmplot(x='adverts', y='sales', data=data) plt.show() model = sm.OLS.from_formula('sales ~ adverts',data=data) res = model.fit() print(res.summary()) # ### ii) Multiple Regression df = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/07_Regression/Data_Files/Album Sales 2.dat', sep='\t') print(df.head()) # with one predictor variable model_1 = sm.OLS.from_formula("sales~adverts", data=df) res_1 = model_1.fit() print(res_1.summary()) # with all predictor variables model_2 = sm.OLS.from_formula("sales~adverts+airplay+attract", data=df) res_2 = model_2.fit() print(res_2.summary()) # ##### see the inrcrease in R^2 and Adjusted R^2 in model_2 w.r.t model_1 # ### Standarized regression coeffecients (beta coeffecients) # ##### refers to how many standard deviations a dependent variable will change, per standard deviation increase in the predictor variable. 
df_ = pd.DataFrame()
# Standardize each variable with its OWN mean and standard deviation so the
# refitted slopes are beta (standardized) coefficients.
# BUG FIX: the original subtracted df['adverts'].mean() from airplay, attract
# and sales as well, so those z-scores were centred on the wrong mean.
for _col in ('adverts', 'airplay', 'attract', 'sales'):
    df_[_col] = (df[_col] - df[_col].mean()) / df[_col].std()

model = sm.OLS.from_formula("sales~adverts+airplay+attract", data=df_)
res = model.fit()
print(res.summary())

# ### Comparing models using python

# +
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm

m01 = ols('sales~adverts', data=df).fit()
m02 = ols('sales~adverts+airplay+attract', data=df).fit()

# F-test of whether adding airplay + attract significantly improves the fit.
anovaResults = anova_lm(m01, m02)
print(anovaResults)
# -

# ### Outliers and Influential cases

# #### references
#
# https://www.statsmodels.org/stable/generated/statsmodels.stats.outliers_influence.OLSInfluence.html#statsmodels.stats.outliers_influence.OLSInfluence
#
# https://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.OLS.html
#
# https://stackoverflow.com/questions/46304514/access-standardized-residuals-cooks-values-hatvalues-leverage-etc-easily-i
#
# https://www.geeksforgeeks.org/reduce-in-python/

# Per-observation influence diagnostics for the full model.
summary_frame = sms.OLSInfluence(m02).summary_frame()
summary_frame.head()

summary_frame = summary_frame[['cooks_d', 'standard_resid', 'student_resid', 'hat_diag']]
summary_frame.head()

resid = pd.DataFrame(df['sales'] - m02.fittedvalues)
resid.columns = ['residual']

# DFBETA column 0 — presumably the intercept term; verify against params order.
dfbeta = pd.DataFrame(pd.DataFrame(sms.OLSInfluence(m02).dfbeta)[0])
dfbeta.columns = ['dfbeta']

# Merge data, residuals and diagnostics on the shared row index.
df_ = [df, resid, summary_frame, dfbeta]
final_summary = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), df_)
final_summary.head()

cov_ratio = pd.DataFrame(sms.OLSInfluence(m02).cov_ratio)
cov_ratio.columns = ['cov_ratio']

# these cases have somewhat large residuals (|standardized residual| >= 2)
large_resid = final_summary[(final_summary['standard_resid'] >= 2) | (final_summary['standard_resid'] <= -2)]
large_resid = pd.merge(large_resid, cov_ratio, how='left', right_index=True, left_index=True)
large_resid

# +
# now let's look at cooks distance, leverage, covariance Ratio for these cases
k = 3    # number of predictors
n = 200  # number of observations
average_leverage = (k + 1) / n
print(average_leverage)

# +
# Acceptable covariance-ratio band: 1 +/- 3 * average leverage.
cvr_limit_high = 1 + 3 * average_leverage
cvr_limit_low = 1 - 3 * average_leverage
print(cvr_limit_low, cvr_limit_high)
# -

# #### from this large residual model we conclude that
# #### Most of our 12 potential outliers have CVR values within or just outside the boundaries.
# #### none of them has a Cook’s distance greater than 1, so none of the cases is having an undue influence on the model.
#
#
# #### So , Note:
#
# #### i) Look at standardized residuals and check that no more than 5% of cases have absolute values above 2,
# #### and that no more than about 1% have absolute values above 2.5. Any case with a value above about 3 could be an outlier.
#
# #### ii)Look at the values of Cook’s distance: any value above 1 indicates a case that might be influencing the model.
#
# #### iii)Calculate the average leverage (the number of predictors plus 1, divided by the sample size)
# #### and then look for values greater than twice or three times this average value
#
# #### iv)Calculate the upper and lower limit of acceptable values for the covariance ratio, CVR.
# #### The upper limit is 1 plus three times the average leverage, whereas
# #### the lower limit is 1 minus three times the average leverage.
# #### Cases that have a CVR falling outside these limits may be problematic

# ## Testing Various Assumptions

# ### i) Assumptions of Independent Errors

# +
from statsmodels.stats.stattools import durbin_watson
print(durbin_watson(m02.resid))
# The closer to 2 that the value is, the better, and for these data the value is 1.950,
# which is so close to 2 that the assumption has almost certainly been met.
# - # ### ii) Assumption of no multicollinearity # + from statsmodels.tools.tools import add_constant from statsmodels.stats.outliers_influence import variance_inflation_factor df_ = add_constant(df) df_.drop(['sales'], inplace=True,axis=1) # dropping Dependent variable # - vif = pd.Series([variance_inflation_factor(df_.values, i) for i in range(1, df_.shape[1])], index=df_.columns[1:]) print(vif) avg_vif = np.mean(vif) print(avg_vif) tolerance = 1/vif print(tolerance) # ##### the assumption of multicollinearity is followed too # ### iii) Assumption about the Residuals prediction = pd.DataFrame(m02.fittedvalues) prediction.columns = ['predicted'] prediction['standarized_prediction'] = (prediction['predicted']-prediction['predicted'].mean())/prediction['predicted'].std() final_summary.head() _ = sns.scatterplot(x= final_summary['standard_resid'], y = prediction['standarized_prediction'] ) _ = plt.axhline(y=0) plt.show() _ = pg.qqplot(final_summary['standard_resid']) plt.show() fig,ax = plt.subplots(figsize=(6, 4)) ax = plt.hist(final_summary['student_resid'],density=True,bins=30, edgecolor='black', linewidth=1.4) plt.xlabel('student_resid', fontsize=14) plt.show() # ##### this assumption was also met
Python/statistics_with_Python/07_Regression/Markdown_notebook/01_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Ic4_occAAiAT" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="ioaprt5q5US7" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + cellView="form" id="yCl0eTNH5RS3" #@title MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # + [markdown] id="ItXfxkxvosLH" # # Basic text classification # + [markdown] id="hKY4XMc9o8iB" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/text_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/text_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="Eg62Pmz3o83v" # This tutorial demonstrates text classification starting from plain text files stored on disk. You'll train a binary classifier to perform sentiment analysis on an IMDB dataset. At the end of the notebook, there is an exercise for you to try, in which you'll train a multiclass classifier to predict the tag for a programming question on Stack Overflow. 
# # + id="8RZOuS9LWQvv" import matplotlib.pyplot as plt import os import re import shutil import string import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import losses from tensorflow.keras import preprocessing from tensorflow.keras.layers.experimental.preprocessing import TextVectorization # + id="6-tTFS04dChr" print(tf.__version__) # + [markdown] id="NBTI1bi8qdFV" # ## Sentiment analysis # # This notebook trains a sentiment analysis model to classify movie reviews as *positive* or *negative*, based on the text of the review. This is an example of *binary*—or two-class—classification, an important and widely applicable kind of machine learning problem. # # You'll use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. # # + [markdown] id="iAsKG535pHep" # ### Download and explore the IMDB dataset # # Let's download and extract the dataset, then explore the directory structure. # + id="k7ZYnuajVlFN" url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz" dataset = tf.keras.utils.get_file("aclImdb_v1.tar.gz", url, untar=True, cache_dir='.', cache_subdir='') dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb') # + id="355CfOvsV1pl" os.listdir(dataset_dir) # + id="7ASND15oXpF1" train_dir = os.path.join(dataset_dir, 'train') os.listdir(train_dir) # + [markdown] id="ysMNMI1CWDFD" # The `aclImdb/train/pos` and `aclImdb/train/neg` directories contain many text files, each of which is a single movie review. Let's take a look at one of them. 
# + id="R7g8hFvzWLIZ" sample_file = os.path.join(train_dir, 'pos/1181_9.txt') with open(sample_file) as f: print(f.read()) # + [markdown] id="Mk20TEm6ZRFP" # ### Load the dataset # # Next, you will load the data off disk and prepare it into a format suitable for training. To do so, you will use the helpful [text_dataset_from_directory](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/text_dataset_from_directory) utility, which expects a directory structure as follows. # # ``` # main_directory/ # ...class_a/ # ......a_text_1.txt # ......a_text_2.txt # ...class_b/ # ......b_text_1.txt # ......b_text_2.txt # ``` # + [markdown] id="nQauv38Lnok3" # To prepare a dataset for binary classification, you will need two folders on disk, corresponding to `class_a` and `class_b`. These will be the positive and negative movie reviews, which can be found in `aclImdb/train/pos` and `aclImdb/train/neg`. As the IMDB dataset contains additional folders, you will remove them before using this utility. # + id="VhejsClzaWfl" remove_dir = os.path.join(train_dir, 'unsup') shutil.rmtree(remove_dir) # + [markdown] id="95kkUdRoaeMw" # Next, you will use the `text_dataset_from_directory` utility to create a labeled `tf.data.Dataset`. [tf.data](https://www.tensorflow.org/guide/data) is a powerful collection of tools for working with data. # # When running a machine learning experiment, it is a best practice to divide your dataset into three splits: [train](https://developers.google.com/machine-learning/glossary#training_set), [validation](https://developers.google.com/machine-learning/glossary#validation_set), and [test](https://developers.google.com/machine-learning/glossary#test-set). # # The IMDB dataset has already been divided into train and test, but it lacks a validation set. Let's create a validation set using an 80:20 split of the training data by using the `validation_split` argument below. 
# + id="nOrK-MTYaw3C" batch_size = 32 seed = 42 raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='training', seed=seed) # + [markdown] id="5Y33oxOUpYkh" # As you can see above, there are 25,000 examples in the training folder, of which you will use 80% (or 20,000) for training. As you will see in a moment, you can train a model by passing a dataset directly to `model.fit`. If you're new to `tf.data`, you can also iterate over the dataset and print out a few examples as follows. # + id="51wNaPPApk1K" for text_batch, label_batch in raw_train_ds.take(1): for i in range(3): print("Review", text_batch.numpy()[i]) print("Label", label_batch.numpy()[i]) # + [markdown] id="JWq1SUIrp1a-" # Notice the reviews contain raw text (with punctuation and occasional HTML tags like `<br/>`). You will show how to handle these in the following section. # # The labels are 0 or 1. To see which of these correspond to positive and negative movie reviews, you can check the `class_names` property on the dataset. # # + id="MlICTG8spyO2" print("Label 0 corresponds to", raw_train_ds.class_names[0]) print("Label 1 corresponds to", raw_train_ds.class_names[1]) # + [markdown] id="pbdO39vYqdJr" # Next, you will create a validation and test dataset. You will use the remaining 5,000 reviews from the training set for validation. # + [markdown] id="SzxazN8Hq1pF" # Note: When using the `validation_split` and `subset` arguments, make sure to either specify a random seed, or to pass `shuffle=False`, so that the validation and training splits have no overlap. 
# + id="JsMwwhOoqjKF" raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/train', batch_size=batch_size, validation_split=0.2, subset='validation', seed=seed) # + id="rdSr0Nt3q_ns" raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory( 'aclImdb/test', batch_size=batch_size) # + [markdown] id="kDA_Lu2PoGyP" # Note: The Preprocessing APIs used in the following section are experimental in TensorFlow 2.3 and subject to change. # + [markdown] id="qJmTiO0IYAjm" # ### Prepare the dataset for training # # Next, you will standardize, tokenize, and vectorize the data using the helpful `preprocessing.TextVectorization` layer. # # Standardization refers to preprocessing the text, typically to remove punctuation or HTML elements to simplify the dataset. Tokenization refers to splitting strings into tokens (for example, splitting a sentence into individual words, by splitting on whitespace). Vectorization refers to converting tokens into numbers so they can be fed into a neural network. All of these tasks can be accomplished with this layer. # # As you saw above, the reviews contain various HTML tags like `<br />`. These tags will not be removed by the default standardizer in the `TextVectorization` layer (which converts text to lowecase and strips punctuation by default, but doesn't strip HTML). You will write a custom standardization function to remove the HTML. # + [markdown] id="ZVcHl-SLrH-u" # Note: to prevent [train/test skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew) (also know as train/serving skew), it is important to preprocess the data identically at train and test time. To facilitate this, the `TextVectorization` layer can be included directly inside your model, as shown later in this tutorial. 
# + id="SDRI_s_tX1Hk" def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ') return tf.strings.regex_replace(stripped_html, '[%s]' % re.escape(string.punctuation), '') # + [markdown] id="d2d3Aw8dsUux" # Next, you will create a `TextVectorization` layer. you will use this layer to standardize, tokenize, and vectorize our data. You set the `output_mode` to `int` to create unique integer indices for each token. # # Note that you're using the default split function, and the custom standardization function you defined above. You'll also define some constants for the model, like an explicit maximum `sequence_length`, which will cause the layer to pad or truncate sequences to exactly `sequence_length` values. # + id="-c76RvSzsMnX" max_features = 10000 sequence_length = 250 vectorize_layer = TextVectorization( standardize=custom_standardization, max_tokens=max_features, output_mode='int', output_sequence_length=sequence_length) # + [markdown] id="vlFOpfF6scT6" # Next, you will call `adapt` to fit the state of the preprocessing layer to the dataset. This will cause the model to build an index of strings to integers. # + [markdown] id="lAhdjK7AtroA" # Note: it's important to only use your training data when calling adapt (using the test set would leak information). # + id="GH4_2ZGJsa_X" # Make a text-only dataset (without labels), then call adapt train_text = raw_train_ds.map(lambda x, y: x) vectorize_layer.adapt(train_text) # + [markdown] id="SHQVEFzNt-K_" # Let's create a function to see the result of using this layer to preprocess some data. 
# + id="SCIg_T50wOCU" def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label # + id="XULcm6B3xQIO" # retrieve a batch (of 32 reviews and labels) from the dataset text_batch, label_batch = next(iter(raw_train_ds)) first_review, first_label = text_batch[0], label_batch[0] print("Review", first_review) print("Label", raw_train_ds.class_names[first_label]) print("Vectorized review", vectorize_text(first_review, first_label)) # + [markdown] id="6u5EX0hxyNZT" # As you can see above, each token has been replaced by an integer. You can lookup the token (string) that each integer corresponds to by calling `.get_vocabulary()` on the layer. # + id="kRq9hTQzhVhW" print("1287 ---> ",vectorize_layer.get_vocabulary()[1287]) print(" 313 ---> ",vectorize_layer.get_vocabulary()[313]) print('Vocabulary size: {}'.format(len(vectorize_layer.get_vocabulary()))) # + [markdown] id="XD2H6utRydGv" # You are nearly ready to train your model. As a final preprocessing step, you will apply the TextVectorization layer you created earlier to the train, validation, and test dataset. # + id="2zhmpeViI1iG" train_ds = raw_train_ds.map(vectorize_text) val_ds = raw_val_ds.map(vectorize_text) test_ds = raw_test_ds.map(vectorize_text) # + [markdown] id="YsVQyPMizjuO" # ### Configure the dataset for performance # # These are two important methods you should use when loading data to make sure that I/O does not become blocking. # # `.cache()` keeps data in memory after it's loaded off disk. This will ensure the dataset does not become a bottleneck while training your model. If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache, which is more efficient to read than many small files. # # `.prefetch()` overlaps data preprocessing and model execution while training. 
# # You can learn more about both methods, as well as how to cache data to disk in the [data performance guide](https://www.tensorflow.org/guide/data_performance). # + id="wMcs_H7izm5m" AUTOTUNE = tf.data.experimental.AUTOTUNE train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE) test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) # + [markdown] id="LLC02j2g-llC" # ### Create the model # # It's time to create our neural network: # + id="dkQP6in8yUBR" embedding_dim = 16 # + id="xpKOoWgu-llD" model = tf.keras.Sequential([ layers.Embedding(max_features + 1, embedding_dim), layers.Dropout(0.2), layers.GlobalAveragePooling1D(), layers.Dropout(0.2), layers.Dense(1)]) model.summary() # + [markdown] id="6PbKQ6mucuKL" # The layers are stacked sequentially to build the classifier: # # 1. The first layer is an `Embedding` layer. This layer takes the integer-encoded reviews and looks up an embedding vector for each word-index. These vectors are learned as the model trains. The vectors add a dimension to the output array. The resulting dimensions are: `(batch, sequence, embedding)`. To learn more about embeddings, see the [word embedding tutorial](../text/word_embeddings.ipynb). # 2. Next, a `GlobalAveragePooling1D` layer returns a fixed-length output vector for each example by averaging over the sequence dimension. This allows the model to handle input of variable length, in the simplest way possible. # 3. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units. # 4. The last layer is densely connected with a single output node. # + [markdown] id="L4EqVWg4-llM" # ### Loss function and optimizer # # A model needs a loss function and an optimizer for training. Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), you'll use `losses.BinaryCrossentropy` loss function. 
# # Now, configure the model to use an optimizer and a loss function: # + id="Mr0GP-cQ-llN" model.compile(loss=losses.BinaryCrossentropy(from_logits=True), optimizer='adam', metrics=tf.metrics.BinaryAccuracy(threshold=0.0)) # + [markdown] id="35jv_fzP-llU" # ### Train the model # # You will train the model by passing the `dataset` object to the fit method. # + id="tXSGrjWZ-llW" epochs = 10 history = model.fit( train_ds, validation_data=val_ds, epochs=epochs) # + [markdown] id="9EEGuDVuzb5r" # ### Evaluate the model # # Let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy. # + id="zOMKywn4zReN" loss, accuracy = model.evaluate(test_ds) print("Loss: ", loss) print("Accuracy: ", accuracy) # + [markdown] id="z1iEXVTR0Z2t" # This fairly naive approach achieves an accuracy of about 86%. # + [markdown] id="ldbQqCw2Xc1W" # ### Create a plot of accuracy and loss over time # # `model.fit()` returns a `History` object that contains a dictionary with everything that happened during training: # + id="-YcvZsdvWfDf" history_dict = history.history history_dict.keys() # + [markdown] id="1_CH32qJXruI" # There are four entries: one for each monitored metric during training and validation. 
You can use these to plot the training and validation loss for comparison, as well as the training and validation accuracy: # + id="2SEMeQ5YXs8z" acc = history_dict['binary_accuracy'] val_acc = history_dict['val_binary_accuracy'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) # "bo" is for "blue dot" plt.plot(epochs, loss, 'bo', label='Training loss') # b is for "solid blue line" plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() # + id="Z3PJemLPXwz_" plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.show() # + [markdown] id="hFFyCuJoXy7r" # In this plot, the dots represent the training loss and accuracy, and the solid lines are the validation loss and accuracy. # # Notice the training loss *decreases* with each epoch and the training accuracy *increases* with each epoch. This is expected when using a gradient descent optimization—it should minimize the desired quantity on every iteration. # # This isn't the case for the validation loss and accuracy—they seem to peak before the training accuracy. This is an example of overfitting: the model performs better on the training data than it does on data it has never seen before. After this point, the model over-optimizes and learns representations *specific* to the training data that do not *generalize* to test data. # # For this particular case, you could prevent overfitting by simply stopping the training when the validation accuracy is no longer increasing. One way to do so is to use the [EarlyStopping callback](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping?version=nightly). 
# + [markdown] id="-to23J3Vy5d3" # ## Export the model # # In the code above, you applied the `TextVectorization` layer to the dataset before feeding text to the model. If you want to make your model capable of processing raw strings (for example, to simplify deploying it), you can include the `TextVectorization` layer inside your model. To do so, you can create a new model using the weights you just trained. # + id="FWXsMvryuZuq" export_model = tf.keras.Sequential([ vectorize_layer, model, layers.Activation('sigmoid') ]) export_model.compile( loss=losses.BinaryCrossentropy(from_logits=False), optimizer="adam", metrics=['accuracy'] ) # Test it with `raw_test_ds`, which yields raw strings loss, accuracy = export_model.evaluate(raw_test_ds) print(accuracy) # + [markdown] id="TwQgoN88LoEF" # ### Inference on new data # # To get predictions for new examples, you can simply call `model.predict()`. # + id="QW355HH5L49K" examples = [ "The movie was great!", "The movie was okay.", "The movie was terrible..." ] export_model.predict(examples) # + [markdown] id="MaxlpFWpzR6c" # Including the text preprocessing logic inside your model enables you to export a model for production that simplifies deployment, and reduces the potential for [train/test skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew). # # There is a performance difference to keep in mind when choosing where to apply your TextVectorization layer. Using it outside of your model enables you to do asynchronous CPU processing and buffering of your data when training on GPU. So, if you're training your model on the GPU, you probably want to go with this option to get the best performance while developing your model, then switch to including the TextVectorization layer inside your model when you're ready to prepare for deployment. # # Visit this [tutorial](https://www.tensorflow.org/tutorials/keras/save_and_load) to learn more about saving models. 
# + [markdown] id="eSSuci_6nCEG"
# ## Exercise: multiclass classification on Stack Overflow questions
#
# This tutorial showed how to train a binary classifier from scratch on the IMDB dataset. As an exercise, you can modify this notebook to train a multiclass classifier to predict the tag of a programming question on [Stack Overflow](http://stackoverflow.com/).
#
# We have prepared a [dataset](http://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz) for you to use containing the body of several thousand programming questions (for example, "How can I sort a dictionary by value in Python?") posted to Stack Overflow. Each of these is labeled with exactly one tag (either Python, CSharp, JavaScript, or Java). Your task is to take a question as input, and predict the appropriate tag, in this case, Python.
#
# The dataset you will work with contains several thousand questions extracted from the much larger public Stack Overflow dataset on [BigQuery](https://console.cloud.google.com/marketplace/details/stack-exchange/stack-overflow), which contains more than 17 million posts.
#
# After downloading the dataset, you will find it has a similar directory structure to the IMDB dataset you worked with previously:
#
# ```
# train/
# ...python/
# ......0.txt
# ......1.txt
# ...javascript/
# ......0.txt
# ......1.txt
# ...csharp/
# ......0.txt
# ......1.txt
# ...java/
# ......0.txt
# ......1.txt
# ```
#
# Note: to increase the difficulty of the classification problem, we have replaced any occurrences of the words Python, CSharp, JavaScript, or Java in the programming questions with the word *blank* (as many questions contain the language they're about).
#
# To complete this exercise, you should modify this notebook to work with the Stack Overflow dataset by making the following modifications:
#
# 1. At the top of your notebook, update the code that downloads the IMDB dataset with code to download the [Stack Overflow dataset](http://storage.googleapis.com/download.tensorflow.org/data/stack_overflow_16k.tar.gz) we have prepared. As the Stack Overflow dataset has a similar directory structure, you will not need to make many modifications.
#
# 1. Modify the last layer of your model to read `Dense(4)`, as there are now four output classes.
#
# 1. When you compile your model, change the loss to [SparseCategoricalCrossentropy](https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy?version=nightly). This is the correct loss function to use for a multiclass classification problem, when the labels for each class are integers (in our case, they can be *0*, *1*, *2*, or *3*).
#
# 1. Once these changes are complete, you will be able to train a multiclass classifier.
#
# If you get stuck, you can find a solution [here](https://github.com/tensorflow/examples/blob/master/community/en/text_classification_solution.ipynb).
#

# + [markdown] id="F0T5SIwSm7uc"
# ## Learning more
#
# This tutorial introduced text classification from scratch. To learn more about the text classification workflow in general, we recommend reading [this guide](https://developers.google.com/machine-learning/guides/text-classification/) from Google Developers.
#
site/en/tutorials/keras/text_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import set_matplotlib_formats
import os
import numpy as np

set_matplotlib_formats('jpg')
plt.style.use('ggplot')


# +
'''
----------------------------------------------
SUPPORT FUNCTION
----------------------------------------------
'''

def get_log_data(path, n = 1):
    '''
    Read one AP value per epoch from a directory of training logs.

    Parameters:
        path: log directory name under the (machine-specific) log root
        n: which line, counted from the end of each epoch's log file,
           carries the wanted value; default 1 (the last line, i.e. mAP)

    Returns:
        list of 20 floats, one per epoch.  A missing or malformed epoch
        log repeats the previous epoch's value; if the very first log is
        unusable, NaN is recorded instead of crashing (the original bare
        ``except`` raised IndexError on an empty list in that case).
    '''
    # NOTE(review): hard-coded log root; parameterize if this is reused
    # outside the original training machine.
    root_path = '/home/sparrow/mx-rcnn/logs/{dirs}'.format(dirs = path)
    mAP = []
    for epoch in range(20):
        log_path = os.path.join(root_path, str(epoch + 1) + '.txt')
        try:
            with open(log_path, 'r') as f:
                lines = [line.strip() for line in f]
            # The AP value occupies the last 7 characters of the n-th
            # line from the end (e.g. "... mAP = 0.61234").
            mAP.append(float(lines[-n][-7:]))
        except (OSError, ValueError, IndexError):
            # Carry the previous epoch's value forward for missing or
            # malformed logs; NaN when there is nothing to repeat yet.
            mAP.append(mAP[-1] if mAP else float('nan'))
    return mAP


def plot_log(data, ylim_min = 0.2,ylim_max = 0.9,title = None, legend = None, max_epoch = 20):
    '''
    Plot one mAP-vs-epoch curve (line + scatter markers) per entry of ``data``.

    Parameters:
        data: iterable of per-epoch value lists, each of length ``max_epoch``
        ylim_min, ylim_max: y-axis range (ticks every 0.05)
        title: optional figure title (LaTeX-style math text accepted)
        legend: optional list of labels, one per curve in ``data``
        max_epoch: number of epochs on the x-axis
    '''
    fig = plt.figure(figsize=(6,4), dpi=200)
    x = list(range(1, max_epoch + 1))
    for curve in data:
        plt.plot(x, curve)
        plt.scatter(x, curve)
    if legend is not None:
        plt.legend(legend)
    if title is not None:
        plt.title(title)
    plt.xlabel('$EPOCH$')
    plt.ylabel('$mAP$')
    plt.xticks(range(1, max_epoch + 1))
    plt.yticks(np.arange(ylim_min, ylim_max, 0.05))
    plt.ylim(ylim_min, ylim_max)
    plt.xlim(0, max_epoch + 1)
# -

# - data from uav & circle

mAP = get_log_data('train_test_res_uav', 1)
circle = get_log_data('train_test_res_uav', 2)
uav = get_log_data('train_test_res_uav', 3)

plot_log(data= [mAP, circle, uav],
         legend = ['mAP','Circle','UAV'],
         title = '$Faster$ $RCNN$ (Resnet-101)',
         ylim_min = 0.2, ylim_max = 0.9, max_epoch = 20)

vgg_roi = get_log_data('test_vgg_ohem_deformable_roi_pooling')
vgg_ohem = get_log_data('test_vgg_ohem')
vgg_focal = get_log_data('test_vgg_focal')
vgg_std = get_log_data('test_vgg_std')
vgg_std_2048 = get_log_data('test_vgg_std_2048')
plot_log(data= [vgg_std, vgg_ohem, vgg_roi, vgg_focal, vgg_std_2048], legend = ['STD','OHEM','Deformable ROI Pooling','Focal','STD - ROI BATCH 2048'], title = '$Faster$ $RCNN$ $on$ $VOC2007$ (VGG 16)', ylim_min = 0.3, ylim_max = 0.7, max_epoch = 20)
log_analysis.ipynb
# --- # jupyter: # jupytext: # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab # + # default_exp callback.core # - #export from fastai.data.all import * from fastai.optimizer import * #hide from nbdev.showdoc import * #export _all_ = ['CancelFitException', 'CancelEpochException', 'CancelTrainException', 'CancelValidException', 'CancelBatchException'] # # Callback # # > Basic callbacks for Learner # ## Events # Callbacks can occur at any of these times:: *after_create before_fit before_epoch before_train before_batch after_pred after_loss before_backward after_backward after_step after_cancel_batch after_batch after_cancel_train after_train before_validate after_cancel_validate after_validate after_cancel_epoch after_epoch after_cancel_fit after_fit*. 
# + # export _events = L.split('after_create before_fit before_epoch before_train before_batch after_pred after_loss \ before_backward after_backward after_step after_cancel_batch after_batch after_cancel_train \ after_train before_validate after_cancel_validate after_validate after_cancel_epoch \ after_epoch after_cancel_fit after_fit') mk_class('event', **_events.map_dict(), doc="All possible events as attributes to get tab-completion and typo-proofing") # - # export _all_ = ['event'] show_doc(event, name='event', title_level=3) # To ensure that you are referring to an event (that is, the name of one of the times when callbacks are called) that exists, and to get tab completion of event names, use `event`: test_eq(event.after_backward, 'after_backward') # ## Callback - #export _inner_loop = "before_batch after_pred after_loss before_backward after_backward after_step after_cancel_batch after_batch".split() #export @funcs_kwargs(as_method=True) class Callback(GetAttr): "Basic class handling tweaks of the training loop by changing a `Learner` in various events" _default,learn,run,run_train,run_valid = 'learn',None,True,True,True _methods = _events def __init__(self, **kwargs): assert not kwargs, f'Passed unknown events: {kwargs}' def __repr__(self): return type(self).__name__ def __call__(self, event_name): "Call `self.{event_name}` if it's defined" _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or (self.run_valid and not getattr(self, 'training', False))) res = None if self.run and _run: res = getattr(self, event_name, noop)() if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit return res def __setattr__(self, name, value): if hasattr(self.learn,name): warn(f"You are setting an attribute ({name}) that also exists in the learner, so you're not setting it in the learner but in the callback. 
Use `self.learn.{name}` otherwise.") super().__setattr__(name, value) @property def name(self): "Name of the `Callback`, camel-cased and with '*Callback*' removed" return class2attr(self, 'Callback') # The training loop is defined in `Learner` a bit below and consists in a minimal set of instructions: looping through the data we: # - compute the output of the model from the input # - calculate a loss between this output and the desired target # - compute the gradients of this loss with respect to all the model parameters # - update the parameters accordingly # - zero all the gradients # # Any tweak of this training loop is defined in a `Callback` to avoid over-complicating the code of the training loop, and to make it easy to mix and match different techniques (since they'll be defined in different callbacks). A callback can implement actions on the following events: # # - `after_create`: called after the `Learner` is created # - `before_fit`: called before starting training or inference, ideal for initial setup. # - `before_epoch`: called at the beginning of each epoch, useful for any behavior you need to reset at each epoch. # - `before_train`: called at the beginning of the training part of an epoch. # - `before_batch`: called at the beginning of each batch, just after drawing said batch. It can be used to do any setup necessary for the batch (like hyper-parameter scheduling) or to change the input/target before it goes in the model (change of the input with techniques like mixup for instance). # - `after_pred`: called after computing the output of the model on the batch. It can be used to change that output before it's fed to the loss. # - `after_loss`: called after the loss has been computed, but before the backward pass. It can be used to add any penalty to the loss (AR or TAR in RNN training for instance). # - `before_backward`: called after the loss has been computed, but only in training mode (i.e. 
when the backward pass will be used) # - `after_backward`: called after the backward pass, but before the update of the parameters. It can be used to do any change to the gradients before said update (gradient clipping for instance). # - `after_step`: called after the step and before the gradients are zeroed. # - `after_batch`: called at the end of a batch, for any clean-up before the next one. # - `after_train`: called at the end of the training phase of an epoch. # - `before_validate`: called at the beginning of the validation phase of an epoch, useful for any setup needed specifically for validation. # - `after_validate`: called at the end of the validation part of an epoch. # - `after_epoch`: called at the end of an epoch, for any clean-up before the next one. # - `after_fit`: called at the end of training, for final clean-up. show_doc(Callback.__call__) # One way to define callbacks is through subclassing: class _T(Callback): def call_me(self): return "maybe" test_eq(_T()("call_me"), "maybe") # Another way is by passing the callback function to the constructor: def cb(self): return "maybe" _t = Callback(before_fit=cb) test_eq(_t(event.before_fit), "maybe") # `Callback`s provide a shortcut to avoid having to write `self.learn.bla` for any `bla` attribute we seek; instead, just write `self.bla`. This only works for getting attributes, *not* for setting them. # + mk_class('TstLearner', 'a') class TstCallback(Callback): def batch_begin(self): print(self.a) learn,cb = TstLearner(1),TstCallback() cb.learn = learn test_stdout(lambda: cb('batch_begin'), "1") # - # If you want to change the value of an attribute, you have to use `self.learn.bla`, no `self.bla`. In the example below, `self.a += 1` creates an `a` attribute of 2 in the callback instead of setting the `a` of the learner to 2. 
It also issues a warning that something is probably wrong:

learn.a

# +
# Deliberately buggy example: `self.a += 1` reads learn.a via GetAttr
# forwarding but the *write* lands on the callback, not the learner.
class TstCallback(Callback):
    def batch_begin(self): self.a += 1

learn,cb = TstLearner(1),TstCallback()
cb.learn = learn
cb('batch_begin')
test_eq(cb.a, 2)
test_eq(cb.learn.a, 1)
# -

# A proper version needs to write `self.learn.a = self.a + 1`:

# +
# Correct version: write explicitly through self.learn.
class TstCallback(Callback):
    def batch_begin(self): self.learn.a = self.a + 1

learn,cb = TstLearner(1),TstCallback()
cb.learn = learn
cb('batch_begin')
test_eq(cb.learn.a, 2)
# -

show_doc(Callback.name, name='Callback.name')

test_eq(TstCallback().name, 'tst')

class ComplicatedNameCallback(Callback): pass

test_eq(ComplicatedNameCallback().name, 'complicated_name')

# ## TrainEvalCallback -

#export
class TrainEvalCallback(Callback):
    "`Callback` that tracks the number of iterations done and properly sets training/eval mode"
    # Skip the per-batch/valid hooks during validation; counters are
    # training-only state.
    run_valid = False
    # Default epoch count before fit() overwrites it.
    def after_create(self): self.learn.n_epoch = 1

    def before_fit(self):
        "Set the iter and epoch counters to 0, put the model and the right device"
        # NOTE: reads such as self.dls / self.model / self.n_iter resolve
        # through GetAttr to self.learn; writes must go via self.learn.
        self.learn.epoch,self.learn.loss = 0,tensor(0.)
        self.learn.train_iter,self.learn.pct_train = 0,0.
        # hasattr guards: dls may lack a device, and only stateful
        # (e.g. recurrent) models define reset().
        if hasattr(self.dls, 'device'): self.model.to(self.dls.device)
        if hasattr(self.model, 'reset'): self.model.reset()

    def after_batch(self):
        "Update the iter counter (in training mode)"
        # Each batch advances pct_train by its share of the whole fit.
        self.learn.pct_train += 1./(self.n_iter*self.n_epoch)
        self.learn.train_iter += 1

    def before_train(self):
        "Set the model in training mode"
        # Re-anchor pct_train at the epoch boundary to avoid drift from
        # the incremental per-batch updates.
        self.learn.pct_train=self.epoch/self.n_epoch
        self.model.train()
        self.learn.training=True

    def before_validate(self):
        "Set the model in validation mode"
        self.model.eval()
        self.learn.training=False

show_doc(TrainEvalCallback, title_level=3)

# This `Callback` is automatically added in every `Learner` at initialization.
# + #hide #test of the TrainEvalCallback below in Learner.fit # - # export if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback] # ## GatherPredsCallback - #export #TODO: save_targs and save_preds only handle preds/targets that have one tensor, not tuples of tensors. class GatherPredsCallback(Callback): "`Callback` that saves the predictions and targets, optionally `with_loss`" def __init__(self, with_input=False, with_loss=False, save_preds=None, save_targs=None, concat_dim=0): store_attr("with_input,with_loss,save_preds,save_targs,concat_dim") def before_batch(self): if self.with_input: self.inputs.append((self.learn.to_detach(self.xb))) def before_validate(self): "Initialize containers" self.preds,self.targets = [],[] if self.with_input: self.inputs = [] if self.with_loss: self.losses = [] def after_batch(self): "Save predictions, targets and potentially losses" if not hasattr(self, 'pred'): return preds,targs = self.learn.to_detach(self.pred),self.learn.to_detach(self.yb) if self.save_preds is None: self.preds.append(preds) else: (self.save_preds/str(self.iter)).save_array(preds) if self.save_targs is None: self.targets.append(targs) else: (self.save_targs/str(self.iter)).save_array(targs[0]) if self.with_loss: bs = find_bs(self.yb) loss = self.loss if self.loss.numel() == bs else self.loss.view(bs,-1).mean(1) self.losses.append(self.learn.to_detach(loss)) def after_validate(self): "Concatenate all recorded tensors" if not hasattr(self, 'preds'): return if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim)) if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim)) if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim)) if self.with_loss: self.losses = to_concat(self.losses) def all_tensors(self): res = [None if self.save_preds else self.preds, None if self.save_targs else self.targets] if self.with_input: res = [self.inputs] + res 
if self.with_loss: res.append(self.losses) return res show_doc(GatherPredsCallback, title_level=3) #export class FetchPredsCallback(Callback): "A callback to fetch predictions during the training loop" remove_on_fetch = True def __init__(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, cbs=None, reorder=True): self.cbs = L(cbs) store_attr('ds_idx,dl,with_input,with_decoded,reorder') def after_validate(self): to_rm = L(cb for cb in self.learn.cbs if getattr(cb, 'remove_on_fetch', False)) with self.learn.removed_cbs(to_rm + self.cbs) as learn: self.preds = learn.get_preds(ds_idx=self.ds_idx, dl=self.dl, with_input=self.with_input, with_decoded=self.with_decoded, inner=True, reorder=self.reorder) show_doc(FetchPredsCallback, title_level=3) # When writing a callback, the following attributes of `Learner` are available: # - `model`: the model used for training/validation # - `data`: the underlying `DataLoaders` # - `loss_func`: the loss function used # - `opt`: the optimizer used to update the model parameters # - `opt_func`: the function used to create the optimizer # - `cbs`: the list containing all `Callback`s # - `dl`: current `DataLoader` used for iteration # - `x`/`xb`: last input drawn from `self.dl` (potentially modified by callbacks). `xb` is always a tuple (potentially with one element) and `x` is detuplified. You can only assign to `xb`. # - `y`/`yb`: last target drawn from `self.dl` (potentially modified by callbacks). `yb` is always a tuple (potentially with one element) and `y` is detuplified. You can only assign to `yb`. 
# - `pred`: last predictions from `self.model` (potentially modified by callbacks) # - `loss`: last computed loss (potentially modified by callbacks) # - `n_epoch`: the number of epochs in this training # - `n_iter`: the number of iterations in the current `self.dl` # - `epoch`: the current epoch index (from 0 to `n_epoch-1`) # - `iter`: the current iteration index in `self.dl` (from 0 to `n_iter-1`) # # The following attributes are added by `TrainEvalCallback` and should be available unless you went out of your way to remove that callback: # # - `train_iter`: the number of training iterations done since the beginning of this training # - `pct_train`: from 0. to 1., the percentage of training iterations completed # - `training`: flag to indicate if we're in training mode or not # # The following attribute is added by `Recorder` and should be available unless you went out of your way to remove that callback: # # - `smooth_loss`: an exponentially-averaged version of the training loss # ## Callbacks control flow # It happens that we may want to skip some of the steps of the training loop: in gradient accumulation, we don't always want to do the step/zeroing of the grads for instance. During an LR finder test, we don't want to do the validation phase of an epoch. Or if we're training with a strategy of early stopping, we want to be able to completely interrupt the training loop. # # This is made possible by raising specific exceptions the training loop will look for (and properly catch). 
# +
#export
# Exceptions the training loop catches for control flow.
# NOTE(review): the original dict attached each description to the wrong
# exception class (the docstrings were rotated against the keys, e.g.
# CancelFitException carried the "skip this batch" text).  The pairing
# below matches the `after_cancel_*` event semantics documented in the
# markdown underneath.
_ex_docs = dict(
    CancelFitException="Interrupts training and go to `after_fit`",
    CancelEpochException="Skip the rest of this epoch and go to `after_epoch`",
    CancelTrainException="Skip the rest of the training part of the epoch and go to `after_train`",
    CancelValidException="Skip the rest of the validation part of the epoch and go to `after_validate`",
    CancelBatchException="Skip the rest of this batch and go to `after_batch`")

# Dynamically create each exception class with its docstring.
for c,d in _ex_docs.items(): mk_class(c,sup=Exception,doc=d)
# -

show_doc(CancelBatchException, title_level=3)

show_doc(CancelTrainException, title_level=3)

show_doc(CancelValidException, title_level=3)

show_doc(CancelEpochException, title_level=3)

show_doc(CancelFitException, title_level=3)

# You can detect one of those exceptions occurred and add code that executes right after with the following events:
# - `after_cancel_batch`: reached immediately after a `CancelBatchException` before proceeding to `after_batch`
# - `after_cancel_train`: reached immediately after a `CancelTrainException` before proceeding to `after_epoch`
# - `after_cancel_valid`: reached immediately after a `CancelValidException` before proceeding to `after_epoch`
# - `after_cancel_epoch`: reached immediately after a `CancelEpochException` before proceeding to `after_epoch`
# - `after_cancel_fit`: reached immediately after a `CancelFitException` before proceeding to `after_fit`

# ## Export -

#hide
from nbdev.export import notebook2script
notebook2script()
nbs/13_callback.core.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (DL) # language: python # name: dl # --- # # Homework 1: Differentiation # Since it easy to google every task please please please try to undestand what's going on. The "just answer" thing will be not counted, make sure to present derivation of your solution. It is absolutely OK if you found an answer on web then just exercise in $\LaTeX$ copying it into here. # Useful links: # [1](http://www.machinelearning.ru/wiki/images/2/2a/Matrix-Gauss.pdf) # [2](http://www.atmos.washington.edu/~dennis/MatrixCalculus.pdf) # [3](http://cal.cs.illinois.edu/~johannes/research/matrix%20calculus.pdf) # [4](http://research.microsoft.com/en-us/um/people/cmbishop/prml/index.htm) # ## ex. 1 # $$ # y = x^Tx, \quad x \in \mathbb{R}^N # $$ # $$ # \frac{dy}{dx} = # $$ # $$ # y = \sum_i x_i^2 # $$ # $$ # \frac{dy}{dx_i} = 2 x_i # $$ # Значит, # $$ # \frac{dy}{dx} = 2 x # $$ # # __Ответ:__ # $$ # \frac{dy}{dx} = 2 x # $$ # ## ex. 2 # $$ y = tr(AB) \quad A,B \in \mathbb{R}^{N \times N} $$ # $$ # \frac{dy}{dA} = # $$ # Используя нотацию Эйнтштейна можем записать # $$ # y = A_{kl}B_{lk} # $$ # Тогда # $$ # \left(\frac{d\,tr(AB)}{dA}\right)_{ij} = \frac{d(A_{kl}B_{lk})}{d(A_{ij})} = B_{ji} # $$ # Значит, # $$ # \frac{d\,tr(AB)}{dA} = B^T # $$ # # __Ответ:__ # $$ # \frac{d\,tr(AB)}{dA} = B^T # $$ # # ## ex. 3 # $$ # y = x^TAc , \quad A\in \mathbb{R}^{N \times N}, x\in \mathbb{R}^{N}, c\in \mathbb{R}^{N} # $$ # $$ # \frac{dy}{dx} = # $$ # $$ # \frac{dy}{dA} = # $$ # Hint for the latter (one of the ways): use *ex. 
2* result and the fact # $$ # tr(ABC) = tr (CAB) # $$ # $$ # y = x_i (Ac)_i # $$ # Поэтому # $$ # \frac{dy}{dx_i} = (Ac)_i # $$ # Значит, # $$ # \frac{dy}{dx} = Ac # $$ # # Теперь заметим, что # $$ # y = x^TAc = tr\, (x^TAc) = tr\, (Acx^t) # $$ # В силу прошлого упражнения получаем # $$ # \frac{dy}{dA} = \left(cx^T\right)^T = xc^T # $$ # # __Ответ:__ # $$ # \frac{dy}{dx} = Ac # $$ # $$ # \frac{dy}{dA} = xc^T # $$ # ## ex. 4 # Classic matrix factorization example. Given matrix $X$ you need to find $A$, $S$ to approximate $X$. This can be done by simple gradient descent iteratively alternating $A$ and $S$ updates. # $$ # J = || X - AS ||_F^2 , \quad A\in \mathbb{R}^{N \times R} , \quad S\in \mathbb{R}^{R \times M} # $$ # $$ # \frac{dJ}{dS} = ? # $$ # ### First approach # Using ex.2 and the fact: # $$ # || X ||_F^2 = tr(XX^T) # $$ # it is easy to derive gradients (you can find it in one of the refs). # ### Second approach # You can use *slightly different techniques* if they suits you. Take a look at this derivation: # <img src="grad.png"> # (excerpt from [Handbook of blind source separation, Jutten, page 517](https://books.google.ru/books?id=PTbj03bYH6kC&printsec=frontcover&dq=Handbook+of+Blind+Source+Separation&hl=en&sa=X&ved=0ahUKEwi-q_apiJDLAhULvXIKHVXJDWcQ6AEIHDAA#v=onepage&q=Handbook%20of%20Blind%20Source%20Separation&f=false), open for better picture). # ### Third approach # And finally we can use chain rule! **YOUR TURN** to do it. # let $ F = AS $ # # **Find** # $$ # \frac{dJ}{dF} = # $$ # and # $$ # \frac{dF}{dS} = # $$ # (the shape should be $ NM \times RM )$. 
# # Now it is easy do get desired gradients: # $$ # \frac{dJ}{dS} = # $$ # $$ # J = || X - F ||_2^2 = \sum_{i,j} (X_{ij} - F_{ij})^2 # $$ # Поэтому # $$ # \left(\frac{dJ}{dF}\right)_{ij} = \frac{dJ}{dF_{ij}} = -2 (X_{ij} - F_{ij}) # $$ # Значит, # $$ # \frac{dJ}{dF} = 2 (F - X) # $$ # Теперь # $$ # F_{ij} = A_{ir} S_{rj} # $$ # Значит # $$ # \left(\frac{dF}{dS}\right)_{ijkl} = \frac{dF_{ij}}{dS_{kl}} = \frac{d A_{ir} S_{rj}}{dS_{kl}} = A_{ik} \delta_l^j # $$ # В итоге # $$ # \left(\frac{dJ}{dS}\right)_{kl} = \left(\frac{dJ}{dF}\right)_{ij} \left(\frac{dF}{dS}\right)_{ijkl} = 2 (F - X)_{ij} A_{ik} \delta_l^j = 2 (F - X)_{il} A_{ik} = \left(2 A^T (F - X)\right)_{kl} # $$ # То есть # $$ # \frac{dJ}{dS} = 2 A^T (F - X) = 2 A^T (AS - X) # $$ # # __Ответ:__ # # $$ # \frac{dJ}{dS} = 2 A^T (AS - X) # $$
homework01/homework_differentiation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 1

# ### Python code to find Mean Median and Mode without using library functions.

# +
import math
from collections import Counter

l = list(map(int, input("Enter data ").split()))

# Mean: arithmetic average.  Computed once and reused below for the
# variance (the original recomputed sum(l)/len(l) per element).
mean = sum(l) / len(l)
print("The mean of entered data is:", mean)

# Median: middle element of the sorted data, or the average of the two
# middle elements for an even count.  Sort once instead of per branch.
s = sorted(l)
print("The median of entered data is: ")
if len(s) % 2 != 0:
    print(s[len(s) // 2])
else:
    print((s[len(s) // 2] + s[len(s) // 2 - 1]) / 2)

# Mode: every value that attains the maximum frequency.  Counter builds
# the frequency table in one O(n) pass (the original called list.count
# for each distinct value, which is O(n^2)).
print("The mode of entered data is: ")
counts = Counter(l)
v = max(counts.values())
for i in set(l):
    if counts[i] == v:
        print(i, end=" ")
print()

# Population variance (divides by N, not N-1) and standard deviation.
variance = sum((i - mean) ** 2 for i in l) / len(l)
print("The variance of entered data is: ", variance)
print("The standard deviation of entered data is: ", math.sqrt(variance))
# -

# ### Python code to find Mean Median and Mode with using library functions.

# +
import math
import statistics

l = list(map(int, input("Enter data: ").split()))
print("The mean of entered data is: ", statistics.mean(l))
print("The median of entered data is: ", statistics.median(l))
# statistics.mode raises StatisticsError (before Python 3.8) when several
# values tie for most common; the all-unique case is special-cased by
# printing every value.  NOTE(review): data with repeated ties still hits
# that raise on old interpreters.
if len(l) == len(set(l)):
    print("The mode of entered data is: ", *l)
else:
    print("The mode of entered data is: ", statistics.mode(l))
# NOTE(review): statistics.variance/stdev are the *sample* statistics
# (N-1 denominator), unlike the population variance computed manually in
# the first cell, so the two cells can legitimately print different values.
print("The variance of entered data is: ", statistics.variance(l))
print("The standard deviation of entered data is: ", statistics.stdev(l))
# -

# ### Some problems related to List Tuple Dictionary and string

# ## List
l = [9, 7, "sonalika", "panda"]
print(type(l))
l.append("panda")
print(l)

# ## Tuple
t = (1, 2, 5, 6, 9, 7)
print(type(t))
print(t * 3)

# ## Dictionary
d = {1: "Sonalika", 2: "panda", "hi": 3, "bye": 4}
print(d)
for i in d:
    print(i, d[i])

# ## String
s1 = "GIET"
s2 = " University"
print(s1 + s2)
18CSE010-Assignment 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Control Flow Graph # # The code in this notebook helps with obtaining the control flow graph of python functions. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * This notebook needs some understanding on advanced concepts in Python, notably # * classes # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Control Flow Graph # # The class `PyCFG` allows one to obtain the control flow graph. # # ```Python # from ControlFlow import gen_cfg, to_graph # from graphviz import Source # cfg = gen_cfg(inspect.getsource(my_function)) # graph = to_graph(cfg) # Source(graph) # ``` # # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # + slideshow={"slide_type": "skip"} import ast import re import astor from graphviz import Source, Graph, Digraph # + [markdown] slideshow={"slide_type": "subslide"} # ### Registry # + slideshow={"slide_type": "fragment"} REGISTRY_IDX = 0 # + slideshow={"slide_type": "fragment"} REGISTRY = {} # + slideshow={"slide_type": "fragment"} def get_registry_idx(): global REGISTRY_IDX v = REGISTRY_IDX REGISTRY_IDX += 1 return v # + slideshow={"slide_type": "fragment"} def reset_registry(): global REGISTRY_IDX global REGISTRY REGISTRY_IDX = 0 REGISTRY = {} # + slideshow={"slide_type": "subslide"} def register_node(node): node.rid = get_registry_idx() REGISTRY[node.rid] = node # + slideshow={"slide_type": "fragment"} def get_registry(): return dict(REGISTRY) # + [markdown] slideshow={"slide_type": "subslide"} # ### 
CFGNode # We start with the `CFGNode` representing each node in the control flow graph. # \todo{Augmented and annotated assignments (`a += 1`), (`a:int = 1`)}. # + slideshow={"slide_type": "subslide"} class CFGNode(dict): def __init__(self, parents=[], ast=None): assert type(parents) is list register_node(self) self.parents = parents self.ast_node = ast self.update_children(parents) # requires self.rid self.children = [] self.calls = [] def i(self): return str(self.rid) def update_children(self, parents): for p in parents: p.add_child(self) def add_child(self, c): if c not in self.children: self.children.append(c) def lineno(self): return self.ast_node.lineno if hasattr(self.ast_node, 'lineno') else 0 def __str__(self): return "id:%d line[%d] parents: %s : %s" % ( self.rid, self.lineno(), str([p.rid for p in self.parents]), self.source()) def __repr__(self): return str(self) def __eq__(self, other): return self.rid == other.rid def __neq__(self, other): return self.rid != other.rid def set_parents(self, p): self.parents = p def add_parent(self, p): if p not in self.parents: self.parents.append(p) def add_parents(self, ps): for p in ps: self.add_parent(p) def add_calls(self, func): self.calls.append(func) def source(self): return astor.to_source(self.ast_node).strip() def to_json(self): return { 'id': self.rid, 'parents': [p.rid for p in self.parents], 'children': [c.rid for c in self.children], 'calls': self.calls, 'at': self.lineno(), 'ast': self.source() } # + slideshow={"slide_type": "subslide"} REGISTRY_IDX = 0 # + slideshow={"slide_type": "fragment"} REGISTRY = {} # + slideshow={"slide_type": "fragment"} def get_registry_idx(): global REGISTRY_IDX v = REGISTRY_IDX REGISTRY_IDX += 1 return v # + slideshow={"slide_type": "fragment"} def reset_registry(): global REGISTRY_IDX global REGISTRY REGISTRY_IDX = 0 REGISTRY = {} # + slideshow={"slide_type": "fragment"} def register_node(node): node.rid = get_registry_idx() REGISTRY[node.rid] = node # + [markdown] 
slideshow={"slide_type": "subslide"} # ### PyCFG # # Next, the `PyCFG` class which is responsible for parsing, and holding the graph. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG: def __init__(self): self.founder = CFGNode( parents=[], ast=ast.parse('start').body[0]) # sentinel self.founder.ast_node.lineno = 0 self.functions = {} self.functions_node = {} # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG(PyCFG): def parse(self, src): return ast.parse(src) # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def walk(self, node, myparents): fname = "on_%s" % node.__class__.__name__.lower() if hasattr(self, fname): fn = getattr(self, fname) v = fn(node, myparents) return v else: return myparents # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_module(self, node, myparents): """ Module(stmt* body) """ # each time a statement is executed unconditionally, make a link from # the result to next statement p = myparents for n in node.body: p = self.walk(n, p) return p # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_augassign(self, node, myparents): """ AugAssign(expr target, operator op, expr value) """ p = [CFGNode(parents=myparents, ast=node)] p = self.walk(node.value, p) return p # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_annassign(self, node, myparents): """ AnnAssign(expr target, expr annotation, expr? 
value, int simple) """ p = [CFGNode(parents=myparents, ast=node)] p = self.walk(node.value, p) return p # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_assign(self, node, myparents): """ Assign(expr* targets, expr value) """ if len(node.targets) > 1: raise NotImplemented('Parallel assignments') p = [CFGNode(parents=myparents, ast=node)] p = self.walk(node.value, p) return p # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG(PyCFG): def on_pass(self, node, myparents): return [CFGNode(parents=myparents, ast=node)] # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_break(self, node, myparents): parent = myparents[0] while not hasattr(parent, 'exit_nodes'): # we have ordered parents parent = parent.parents[0] assert hasattr(parent, 'exit_nodes') p = CFGNode(parents=myparents, ast=node) # make the break one of the parents of label node. parent.exit_nodes.append(p) # break doesn't have immediate children return [] # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_continue(self, node, myparents): parent = myparents[0] while not hasattr(parent, 'exit_nodes'): # we have ordered parents parent = parent.parents[0] assert hasattr(parent, 'exit_nodes') p = CFGNode(parents=myparents, ast=node) # make continue one of the parents of the original test node. 
parent.add_parent(p) # return the parent because a continue is not the parent # for the just next node return [] # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_for(self, node, myparents): # node.target in node.iter: node.body # The For loop in python (no else) can be translated # as follows: # # for a in iterator: # mystatements # # __iv = iter(iterator) # while __iv.__length_hint() > 0: # a = next(__iv) # mystatements init_node = CFGNode(parents=myparents, ast=ast.parse('__iv = iter(%s)' % astor.to_source(node.iter).strip()).body[0]) ast.copy_location(init_node.ast_node, node.iter) _test_node = CFGNode( parents=[init_node], ast=ast.parse('_for: __iv.__length__hint__() > 0').body[0]) ast.copy_location(_test_node.ast_node, node) # we attach the label node here so that break can find it. _test_node.exit_nodes = [] test_node = self.walk(node.iter, [_test_node]) extract_node = CFGNode(parents=test_node, ast=ast.parse('%s = next(__iv)' % astor.to_source(node.target).strip()).body[0]) ast.copy_location(extract_node.ast_node, node.iter) # now we evaluate the body, one at a time. p1 = [extract_node] for n in node.body: p1 = self.walk(n, p1) # the test node is looped back at the end of processing. _test_node.add_parents(p1) return _test_node.exit_nodes + test_node # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_while(self, node, myparents): # For a while, the earliest parent is the node.test _test_node = CFGNode( parents=myparents, ast=ast.parse( '_while: %s' % astor.to_source(node.test).strip()).body[0]) ast.copy_location(_test_node.ast_node, node.test) _test_node.exit_nodes = [] test_node = self.walk(node.test, [_test_node]) # we attach the label node here so that break can find it. # now we evaluate the body, one at a time. 
assert len(test_node) == 1 p1 = test_node for n in node.body: p1 = self.walk(n, p1) # the test node is looped back at the end of processing. _test_node.add_parents(p1) # link label node back to the condition. return _test_node.exit_nodes + test_node # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_if(self, node, myparents): _test_node = CFGNode( parents=myparents, ast=ast.parse( '_if: %s' % astor.to_source(node.test).strip()).body[0]) ast.copy_location(_test_node.ast_node, node.test) test_node = self.walk(node.test, [ _test_node]) assert len(test_node) == 1 g1 = test_node for n in node.body: g1 = self.walk(n, g1) g2 = test_node for n in node.orelse: g2 = self.walk(n, g2) return g1 + g2 # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_binop(self, node, myparents): left = self.walk(node.left, myparents) right = self.walk(node.right, left) return right # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG(PyCFG): def on_compare(self, node, myparents): left = self.walk(node.left, myparents) right = self.walk(node.comparators[0], left) return right # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG(PyCFG): def on_unaryop(self, node, myparents): return self.walk(node.operand, myparents) # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_call(self, node, myparents): def get_func(node): if type(node.func) is ast.Name: mid = node.func.id elif type(node.func) is ast.Attribute: mid = node.func.attr elif type(node.func) is ast.Call: mid = get_func(node.func) else: raise Exception(str(type(node.func))) return mid #mid = node.func.value.id p = myparents for a in node.args: p = self.walk(a, p) mid = get_func(node) 
myparents[0].add_calls(mid) # these need to be unlinked later if our module actually defines these # functions. Otherwsise we may leave them around. # during a call, the direct child is not the next # statement in text. for c in p: c.calllink = 0 return p # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_expr(self, node, myparents): p = [CFGNode(parents=myparents, ast=node)] return self.walk(node.value, p) # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_return(self, node, myparents): if type(myparents) is tuple: parent = myparents[0][0] else: parent = myparents[0] val_node = self.walk(node.value, myparents) # on return look back to the function definition. while not hasattr(parent, 'return_nodes'): parent = parent.parents[0] assert hasattr(parent, 'return_nodes') p = CFGNode(parents=val_node, ast=node) # make the break one of the parents of label node. 
parent.return_nodes.append(p) # return doesnt have immediate children return [] # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def on_functiondef(self, node, myparents): # a function definition does not actually continue the thread of # control flow # name, args, body, decorator_list, returns fname = node.name args = node.args returns = node.returns enter_node = CFGNode( parents=[], ast=ast.parse('enter: %s(%s)' % (node.name, ', '.join( [a.arg for a in node.args.args]))).body[0]) # sentinel enter_node.calleelink = True ast.copy_location(enter_node.ast_node, node) exit_node = CFGNode( parents=[], ast=ast.parse('exit: %s(%s)' % (node.name, ', '.join( [a.arg for a in node.args.args]))).body[0]) # sentinel exit_node.fn_exit_node = True ast.copy_location(exit_node.ast_node, node) enter_node.return_nodes = [] # sentinel p = [enter_node] for n in node.body: p = self.walk(n, p) for n in p: if n not in enter_node.return_nodes: enter_node.return_nodes.append(n) for n in enter_node.return_nodes: exit_node.add_parent(n) self.functions[fname] = [enter_node, exit_node] self.functions_node[enter_node.lineno()] = fname return myparents # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def get_defining_function(self, node): if node.lineno() in self.functions_node: return self.functions_node[node.lineno()] if not node.parents: self.functions_node[node.lineno()] = '' return '' val = self.get_defining_function(node.parents[0]) self.functions_node[node.lineno()] = val return val # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def link_functions(self): for nid, node in REGISTRY.items(): if node.calls: for calls in node.calls: if calls in self.functions: enter, exit = self.functions[calls] enter.add_parent(node) if node.children: # # until we link the functions up, 
the node # # should only have succeeding node in text as # # children. # assert(len(node.children) == 1) # passn = node.children[0] # # We require a single pass statement after every # # call (which means no complex expressions) # assert(type(passn.ast_node) == ast.Pass) # # unlink the call statement assert node.calllink > -1 node.calllink += 1 for i in node.children: i.add_parent(exit) # passn.set_parents([exit]) # ast.copy_location(exit.ast_node, passn.ast_node) # #for c in passn.children: c.add_parent(exit) # #passn.ast_node = exit.ast_node # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def update_functions(self): for nid, node in REGISTRY.items(): _n = self.get_defining_function(node) # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "fragment"} class PyCFG(PyCFG): def update_children(self): for nid, node in REGISTRY.items(): for p in node.parents: p.add_child(node) # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} class PyCFG(PyCFG): def gen_cfg(self, src): """ >>> i = PyCFG() >>> i.walk("100") 5 """ node = self.parse(src) nodes = self.walk(node, [self.founder]) self.last_node = CFGNode(parents=nodes, ast=ast.parse('stop').body[0]) ast.copy_location(self.last_node.ast_node, self.founder.ast_node) self.update_children() self.update_functions() self.link_functions() # + [markdown] slideshow={"slide_type": "subslide"} # ### Supporting Functions # + slideshow={"slide_type": "subslide"} def compute_dominator(cfg, start=0, key='parents'): dominator = {} dominator[start] = {start} all_nodes = set(cfg.keys()) rem_nodes = all_nodes - {start} for n in rem_nodes: dominator[n] = all_nodes c = True while c: c = False for n in rem_nodes: pred_n = cfg[n][key] doms = [dominator[p] for p in pred_n] i = set.intersection(*doms) if doms else set() v = {n} | i if dominator[n] != v: c = True dominator[n] = v return 
dominator # + slideshow={"slide_type": "subslide"} def compute_flow(pythonfile): cfg, first, last = get_cfg(pythonfile) return cfg, compute_dominator( cfg, start=first), compute_dominator( cfg, start=last, key='children') # + slideshow={"slide_type": "subslide"} def gen_cfg(fnsrc, remove_start_stop=True): reset_registry() cfg = PyCFG() cfg.gen_cfg(fnsrc) cache = dict(REGISTRY) if remove_start_stop: return { k: cache[k] for k in cache if cache[k].source() not in {'start', 'stop'} } else: return cache # + slideshow={"slide_type": "subslide"} def get_cfg(src): reset_registry() cfg = PyCFG() cfg.gen_cfg(src) cache = dict(REGISTRY) g = {} for k, v in cache.items(): j = v.to_json() at = j['at'] parents_at = [cache[p].to_json()['at'] for p in j['parents']] children_at = [cache[c].to_json()['at'] for c in j['children']] if at not in g: g[at] = {'parents': set(), 'children': set()} # remove dummy nodes ps = set([p for p in parents_at if p != at]) cs = set([c for c in children_at if c != at]) g[at]['parents'] |= ps g[at]['children'] |= cs if v.calls: g[at]['calls'] = v.calls g[at]['function'] = cfg.functions_node[v.lineno()] return (g, cfg.founder.ast_node.lineno, cfg.last_node.ast_node.lineno) # + slideshow={"slide_type": "subslide"} def to_graph(cache, arcs=[]): graph = Digraph(comment='Control Flow Graph') colors = {0: 'blue', 1: 'red'} kind = {0: 'T', 1: 'F'} cov_lines = set(i for i, j in arcs) for nid, cnode in cache.items(): lineno = cnode.lineno() shape, peripheries = 'oval', '1' if isinstance(cnode.ast_node, ast.AnnAssign): if cnode.ast_node.target.id in {'_if', '_for', '_while'}: shape = 'diamond' elif cnode.ast_node.target.id in {'enter', 'exit'}: shape, peripheries = 'oval', '2' else: shape = 'rectangle' graph.node(cnode.i(), "%d: %s" % (lineno, unhack(cnode.source())), shape=shape, peripheries=peripheries) for pn in cnode.parents: plineno = pn.lineno() if hasattr(pn, 'calllink') and pn.calllink > 0 and not hasattr( cnode, 'calleelink'): graph.edge(pn.i(), 
cnode.i(), style='dotted', weight=100) continue if arcs: if (plineno, lineno) in arcs: graph.edge(pn.i(), cnode.i(), color='green') elif plineno == lineno and lineno in cov_lines: graph.edge(pn.i(), cnode.i(), color='green') # child is exit and parent is covered elif hasattr(cnode, 'fn_exit_node') and plineno in cov_lines: graph.edge(pn.i(), cnode.i(), color='green') # parent is exit and one of its parents is covered. elif hasattr(pn, 'fn_exit_node') and len( set(n.lineno() for n in pn.parents) | cov_lines) > 0: graph.edge(pn.i(), cnode.i(), color='green') # child is a callee (has calleelink) and one of the parents is covered. elif plineno in cov_lines and hasattr(cnode, 'calleelink'): graph.edge(pn.i(), cnode.i(), color='green') else: graph.edge(pn.i(), cnode.i(), color='red') else: order = {c.i():i for i,c in enumerate(pn.children)} if len(order) < 2: graph.edge(pn.i(), cnode.i()) else: o = order[cnode.i()] graph.edge(pn.i(), cnode.i(), color=colors[o], label=kind[o]) return graph # + slideshow={"slide_type": "subslide"} def unhack(v): for i in ['if', 'while', 'for', 'elif']: v = re.sub(r'^_%s:' % i, '%s:' % i, v) return v # + [markdown] slideshow={"slide_type": "subslide"} # ### Examples # + [markdown] slideshow={"slide_type": "subslide"} # #### check_triangle # + slideshow={"slide_type": "subslide"} def check_triangle(a,b,c): if a == b: if a == c: if b == c: return "Equilateral" else: return "Isosceles" else: return "Isosceles" else: if b != c: if a == c: return "Isosceles" else: return "Scalene" else: return "Isosceles" # + slideshow={"slide_type": "skip"} import inspect # + slideshow={"slide_type": "subslide"} graph = to_graph(gen_cfg(inspect.getsource(check_triangle))) # + slideshow={"slide_type": "fragment"} Source(graph) # + [markdown] slideshow={"slide_type": "subslide"} # #### cgi_decode # # Note that we do not yet support _augmented assignments_: i.e assignments such as `+=` # + slideshow={"slide_type": "subslide"} def cgi_decode(s): hex_values = { '0': 
0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15, 'A': 10, 'B': 11, 'C': 12, 'D': 13, 'E': 14, 'F': 15, } t = "" i = 0 while i < len(s): c = s[i] if c == '+': t += ' ' elif c == '%': digit_high, digit_low = s[i + 1], s[i + 2] i += 2 if digit_high in hex_values and digit_low in hex_values: v = hex_values[digit_high] * 16 + hex_values[digit_low] t += chr(v) else: raise ValueError("Invalid encoding") else: t += c i += 1 return t # + slideshow={"slide_type": "subslide"} graph = to_graph(gen_cfg(inspect.getsource(cgi_decode))) # + slideshow={"slide_type": "fragment"} Source(graph) # + [markdown] slideshow={"slide_type": "subslide"} # #### gcd # + slideshow={"slide_type": "fragment"} def gcd(a, b): if a<b: c: int = a a: int = b b: int = c while b != 0 : c: int = a a: int = b b: int = c % b return a # + slideshow={"slide_type": "fragment"} graph = to_graph(gen_cfg(inspect.getsource(gcd))) # + slideshow={"slide_type": "fragment"} Source(graph) # + slideshow={"slide_type": "subslide"} def compute_gcd(x, y): if x > y: small = y else: small = x for i in range(1, small+1): if((x % i == 0) and (y % i == 0)): gcd = i return gcd # + slideshow={"slide_type": "fragment"} graph = to_graph(gen_cfg(inspect.getsource(compute_gcd))) # + slideshow={"slide_type": "fragment"} Source(graph) # + [markdown] slideshow={"slide_type": "subslide"} # #### fib # # Note that the *for-loop* requires additional massaging. While we show the labels correctly, the *comparison node* needs to be extracted. Hence, the representation is not accurate. 
# + slideshow={"slide_type": "fragment"} def fib(n,): l = [0, 1] for i in range(n-2): l.append(l[-1]+l[-2]) return l # + slideshow={"slide_type": "fragment"} graph = to_graph(gen_cfg(inspect.getsource(fib))) # + slideshow={"slide_type": "fragment"} Source(graph) # + [markdown] slideshow={"slide_type": "subslide"} # #### quad_solver # + slideshow={"slide_type": "subslide"} def quad_solver(a, b, c): discriminant = b^2 - 4*a*c r1, r2 = 0, 0 i1, i2 = 0, 0 if discriminant >= 0: droot = math.sqrt(discriminant) r1 = (-b + droot) / (2*a) r2 = (-b - droot) / (2*a) else: droot = math.sqrt(-1 * discriminant) droot_ = droot/(2*a) r1, i1 = -b/(2*a), droot_ r2, i2 = -b/(2*a), -droot_ if i1 == 0 and i2 == 0: return (r1, r2) return ((r1,i1), (r2,i2)) # + slideshow={"slide_type": "subslide"} graph = to_graph(gen_cfg(inspect.getsource(quad_solver))) # + slideshow={"slide_type": "fragment"} Source(graph) # + [markdown] slideshow={"slide_type": "slide"} # ## Call Graph # ### Install: Pyan Static Call Graph Lifter # + slideshow={"slide_type": "skip"} import os import networkx as nx from graphviz import Source # + [markdown] slideshow={"slide_type": "subslide"} # ### Call Graph Helpers # + slideshow={"slide_type": "subslide"} def construct_callgraph(code, name="callgraph"): file_name = name + ".py" with open(file_name, 'w') as f: f.write(code) cg_file = name + '.dot' os.system(f'pyan {file_name} --uses --defines --colored --grouped --annotated --dot > {cg_file}') def callgraph(code, name="callgraph"): if not os.path.isfile(name + '.dot'): construct_callgraph(code, name) return Source.from_file(name + '.dot') def get_callgraph(code, name="callgraph"): if not os.path.isfile(name + '.dot'): construct_callgraph(code, name) return nx.drawing.nx_pydot.read_dot(name + '.dot') # + [markdown] slideshow={"slide_type": "subslide"} # ### Example: Maze # To provide a meaningful example where you can easily change the code complexity and target location, we generate the maze source code from the maze 
provided as string. This example is loosely based on an old [blog post](https://feliam.wordpress.com/2010/10/07/the-symbolic-maze/) on symbolic execution by <NAME> (Quick shout-out!). # # You simply specify the maze as a string. Like so. # + slideshow={"slide_type": "subslide"} maze_string = """ +-+-----+ |X| | | | --+ | | | | | | +-- | | | |#| +-----+-+ """ # + [markdown] slideshow={"slide_type": "subslide"} # Each character in `maze_string` represents a tile. For each tile, a tile-function is generated. # * If the current tile is "benign" (` `), the tile-function corresponding to the next input character (D, U, L, R) is called. Unexpected input characters are ignored. If no more input characters are left, it returns "VALID" and the current maze state. # * If the current tile is a "trap" (`+`,`|`,`-`), it returns "INVALID" and the current maze state. # * If the current tile is the "target" (`#`), it returns "SOLVED" and the current maze state. # # The code is generated using the function `generate_maze_code`. 
# + slideshow={"slide_type": "subslide"} def generate_print_maze(maze_string): return """ def print_maze(out, row, col): output = out +"\\n" c_row = 0 c_col = 0 for c in list(\"\"\"%s\"\"\"): if c == '\\n': c_row += 1 c_col = 0 output += "\\n" else: if c_row == row and c_col == col: output += "X" elif c == "X": output += " " else: output += c c_col += 1 return output """ % maze_string # + slideshow={"slide_type": "subslide"} def generate_trap_tile(row, col): return """ def tile_%d_%d(input, index): try: HTMLParser().feed(input) except: pass return print_maze("INVALID", %d, %d) """ % (row, col, row, col) # + slideshow={"slide_type": "subslide"} def generate_good_tile(c, row, col): code = """ def tile_%d_%d(input, index): if (index == len(input)): return print_maze("VALID", %d, %d) elif input[index] == 'L': return tile_%d_%d(input, index + 1) elif input[index] == 'R': return tile_%d_%d(input, index + 1) elif input[index] == 'U': return tile_%d_%d(input, index + 1) elif input[index] == 'D': return tile_%d_%d(input, index + 1) else : return tile_%d_%d(input, index + 1) """ % (row, col, row, col, row, col - 1, row, col + 1, row - 1, col, row + 1, col, row, col) if c == "X": code += """ def maze(input): return tile_%d_%d(list(input), 0) """ % (row, col) return code # + slideshow={"slide_type": "subslide"} def generate_target_tile(row, col): return """ def tile_%d_%d(input, index): return print_maze("SOLVED", %d, %d) def target_tile(): return "tile_%d_%d" """ % (row, col, row, col, row, col) # + slideshow={"slide_type": "subslide"} def generate_maze_code(maze, name="maze"): row = 0 col = 0 code = generate_print_maze(maze) for c in list(maze): if c == '\n': row += 1 col = 0 else: if c == "-" or c == "+" or c == "|": code += generate_trap_tile(row, col) elif c == " " or c == "X": code += generate_good_tile(c, row, col) elif c == "#": code += generate_target_tile(row, col) else: print("Invalid maze! 
Try another one.") col += 1 return code # + [markdown] slideshow={"slide_type": "subslide"} # Now you can generate the maze code for an arbitrary maze. # + slideshow={"slide_type": "fragment"} maze_code = generate_maze_code(maze_string) exec(maze_code) # + slideshow={"slide_type": "fragment"} print(maze("DDDDRRRRUULLUURRRRDDD")) # Appending one more 'D', you have reached the target. # + [markdown] slideshow={"slide_type": "subslide"} # This is the corresponding call graph. # + slideshow={"slide_type": "fragment"} callgraph(maze_code) # + [markdown] slideshow={"slide_type": "slide"} # ## Cleanup # # We're done, so we clean up: # + slideshow={"slide_type": "skip"} import shutil # + slideshow={"slide_type": "fragment"} if os.path.exists('callgraph.dot'): os.remove('callgraph.dot') if os.path.exists('callgraph.py'): os.remove('callgraph.py')
ControlFlow.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick

# Porosity / specific-retention curves versus particle size.
# Column pairs (A,B), (C,D), ... hold the x/y data of each curve —
# schema assumed from the plotting calls below; verify against dataset.csv.
df = pd.read_csv("dataset.csv")
df.head(5)

# +
fig = plt.figure(figsize=(10, 6))
ax = plt.subplot()
ax.plot(df["A"], df["B"], "-", linewidth=2, label="Effective porosity - Average Aquifer")
ax.plot(df["C"], df["D"], "--", linewidth=2, label="Effective porosity - Well sorted")
ax.plot(df["E"], df["F"], "-.", linewidth=2, label="Total porosity")
ax.plot(df["G"], df["H"], "+-", linewidth=2, label="Specific retention - Well sorted")
ax.plot(df["I"], df["J"], '*-', linewidth=2, label="specific retention - Average Aquifer")
plt.legend()

# Particle size spans several orders of magnitude, so use a log x-axis.
ax.set_xscale("log")
ax.set_xlim(0.0001, 200)
ax.set_ylim(0, 65)

fmt = '%.0f%%'  # Format you want the ticks, e.g. '40%'
yticks = mtick.FormatStrFormatter(fmt)
ax.yaxis.set_major_formatter(yticks)
#x_fmt = mtick.FormatStrFormatter('%e')
#ax.xaxis.set_major_formatter(x_fmt)

ax.set_ylabel("Porosity")
ax.set_xlabel("Particle size (mm) ")

# Second x-axis drawn below the first one; its (minor) ticks mark the
# grain-size class boundaries (clay / silt / sand / gravel).
ax2 = ax.twiny()  # add second x-axis
ax2.xaxis.set_ticks_position("bottom")
ax2.xaxis.set_label_position("bottom")
ax2.spines["bottom"].set_position(("axes", -0.15))
ax2.set_frame_on(True)
#ax2.patch.set_visible(False)
ax2.set_frame_on(True)
ax2.tick_params(direction='out', length=15, width=2, colors='r',
                grid_color='r', grid_alpha=0.5, axis='x', rotation=90,
                which="minor")
ax2.set_xscale("log")

# Grain-size class boundaries in mm; the labels ride on the minor ticks.
vals = [0.0001, 0.002, 0.06, 2.0, 63, 1000]
ax2.set_xticks(vals, minor=True)
ax2.set_xticklabels(vals, minor=True)
ax2.set_xlim(0.0001, 200)

plt.setp(ax2.get_xmajorticklabels(), visible=False)  # remove the major xaxis label

# Class names placed below the secondary axis.
fig.text(0.15, -0.02, 'Clay', ha='left', va='top', size=12, fontweight='bold')
fig.text(0.35, -0.02, 'Silt', ha='left', va='top', size=12, fontweight='bold')
fig.text(0.52, -0.02, 'Sand', ha='left', va='top', size=12, fontweight='bold')
fig.text(0.7, -0.02, 'Gravel', ha='left', va='top', size=12, fontweight='bold')
fig.text(0.15, 0.87, 'Replotted from Davies & De Wiest (1966)', ha='left', va='top',
         size=12, fontweight='bold')

plt.savefig("eff_por.png")
# -

# BUG FIX: the original cell read ``ax2.sexlabel[0].set_visible(False)``,
# which raises AttributeError — matplotlib Axes has no ``sexlabel``.
# The apparent intent was to hide the first minor tick label ("0.0001")
# on the secondary axis; do that with the real tick-label accessor.
ax2.get_xminorticklabels()[0].set_visible(False)
_build/jupyter_execute/contents/flow/lecture_03/effective porosity.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Cowell's formulation
#
# For cases where we only study the gravitational forces, solving the Kepler's
# equation is enough to propagate the orbit forward in time. However, when we
# want to take perturbations that deviate from Keplerian forces into account,
# we need a more complex method to solve our initial value problem: one of them
# is **Cowell's formulation**.
#
# In this formulation we write the two body differential equation separating
# the Keplerian and the perturbation accelerations:
#
# $$\ddot{\mathbb{r}} = -\frac{\mu}{|\mathbb{r}|^3} \mathbb{r} + \mathbb{a}_d$$

# <div class="alert alert-info">For an in-depth exploration of this topic, still to be integrated in poliastro, check out https://github.com/Juanlu001/pfc-uc3m</div>

# <div class="alert alert-info">An earlier version of this notebook allowed for more flexibility and interactivity, but was considerably more complex. Future versions of poliastro and plotly might bring back part of that functionality, depending on user feedback. You can still download the older version <a href="https://github.com/poliastro/poliastro/blob/0.8.x/docs/source/examples/Propagation%20using%20Cowell's%20formulation.ipynb">here</a>.</div>

# ## First example
#
# Let's setup a very simple example with constant acceleration to visualize
# the effects on the orbit.

# +
import numpy as np
from astropy import units as u

from poliastro.bodies import Earth
from poliastro.twobody import Orbit
from poliastro.examples import iss
from poliastro.twobody.propagation import cowell
from poliastro.plotting import OrbitPlotter3D
from poliastro.util import norm
# -

# To provide an acceleration depending on an extra parameter, we can use
# **closures** like this one:

accel = 2e-5


def constant_accel_factory(accel):
    # Build a perturbation callback: constant-magnitude acceleration
    # directed along the instantaneous velocity vector.
    def constant_accel(t0, state, k):
        velocity = state[3:]
        speed = (velocity[0] ** 2 + velocity[1] ** 2 + velocity[2] ** 2) ** 0.5
        return accel * velocity / speed

    return constant_accel


def custom_propagator(orbit, tof, rtol=1e-11, accel=accel):
    # Use our custom perturbation acceleration
    return cowell(orbit, tof, rtol, ad=constant_accel_factory(accel))


times = np.linspace(0, 10 * iss.period, 500)
times

positions = iss.sample(times, method=custom_propagator)

# And we plot the results:

# +
frame = OrbitPlotter3D()

frame.set_attractor(Earth)
frame.plot_trajectory(positions, label="ISS")
# -

# ## Error checking


def state_to_vector(ss):
    # Flatten an Orbit into a plain [x, y, z, vx, vy, vz] array (km, km/s).
    position, velocity = ss.rv()
    x, y, z = position.to(u.km).value
    vx, vy, vz = velocity.to(u.km / u.s).value
    return np.array([x, y, z, vx, vy, vz])


k = Earth.k.to(u.km ** 3 / u.s ** 2).value

rtol = 1e-13
full_periods = 2

# +
u0 = state_to_vector(iss)
# Propagate for an odd number of half-periods.
tf = ((2 * full_periods + 1) * iss.period / 2).to(u.s).value

u0, tf
# -

iss_f_kep = iss.propagate(tf * u.s, rtol=1e-18)

# +
r, v = cowell(iss, tf, rtol=rtol)

iss_f_num = Orbit.from_vectors(Earth, r * u.km, v * u.km / u.s, iss.epoch + tf * u.s)
# -

iss_f_num.r, iss_f_kep.r

# Numerical (Cowell) and analytical (Kepler) propagation must agree.
assert np.allclose(iss_f_num.r, iss_f_kep.r, rtol=rtol, atol=1e-08 * u.km)
assert np.allclose(iss_f_num.v, iss_f_kep.v, rtol=rtol, atol=1e-08 * u.km / u.s)

assert np.allclose(iss_f_num.a, iss_f_kep.a, rtol=rtol, atol=1e-08 * u.km)
assert np.allclose(iss_f_num.ecc, iss_f_kep.ecc, rtol=rtol)
assert np.allclose(iss_f_num.inc, iss_f_kep.inc, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.raan, iss_f_kep.raan, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.argp, iss_f_kep.argp, rtol=rtol, atol=1e-08 * u.rad)
assert np.allclose(iss_f_num.nu, iss_f_kep.nu, rtol=rtol, atol=1e-08 * u.rad)

# ## Numerical validation
#
# According to [Edelbaum, 1961], a coplanar, semimajor axis change with tangent
# thrust is defined by:
#
# $$\frac{\operatorname{d}\!a}{a_0} = 2 \frac{F}{m V_0}\operatorname{d}\!t, \qquad \frac{\Delta{V}}{V_0} = \frac{1}{2} \frac{\Delta{a}}{a_0}$$
#
# So let's create a new circular orbit and perform the necessary checks,
# assuming constant mass and thrust (i.e. constant acceleration):

# +
ss = Orbit.circular(Earth, 500 * u.km)
tof = 20 * ss.period

ad = constant_accel_factory(1e-7)

r, v = cowell(ss, tof.to(u.s).value, ad=ad)

ss_final = Orbit.from_vectors(Earth, r * u.km, v * u.km / u.s, ss.epoch + tof)
# -

da_a0 = (ss_final.a - ss.a) / ss.a
da_a0

dv_v0 = abs(norm(ss_final.v) - norm(ss.v)) / norm(ss.v)
2 * dv_v0

np.allclose(da_a0, 2 * dv_v0, rtol=1e-2)

# This means **we successfully validated the model against an extremely simple
# orbit transfer with approximate analytical solution**. Notice that the final
# eccentricity, as originally noticed by Edelbaum, is nonzero:

ss_final.ecc

# ## References
#
# * [Edelbaum, 1961] "Propulsion requirements for controllable satellites"
docs/source/examples/Propagation using Cowell's formulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Prune leaf ROIs in an annotation ontology file without voxel-counts

# This notebook deletes nodes in an anatomical ontology (AO) text file if these are leaf nodes without voxel counts (VC) in annotation volume (AV).
#
# - input
#     - 1_VC.json: AO file with VC
# - output
#     - 1_VC_pruned.json: pruned AO file with VC
#

# # Set variables

dir_data = 'data'
fn_input_AO = '1_VC.json'  # filename for anatomical ontology with voxel counts
fn_output_AO = '1_VC_pruned.json'

import os
import json
import copy
import numpy as np
from collections import OrderedDict
from jsonpath_rw import jsonpath, parse

# # Load data

with open(os.path.join(dir_data, fn_input_AO)) as f:
    df_VC = json.load(f, object_pairs_hook=OrderedDict)

# # Delete ROIs in an annotation ontology file if it's a leaf without voxel counts


def Delete_leaf_node_WO_VC(match_id, match_fullpath):
    """Delete the ontology node addressed by ``match_fullpath`` from the
    global ``df_VC_pruned`` if it is a leaf (no children) without a voxel
    count, and return its id (negated when the node was deleted).

    ``match_fullpath`` is a jsonpath_rw full path such as
    ``children.[3].children.[0].id``; it is rewritten into a Python indexing
    expression (e.g. ``['children'][3]['children'][0]``) that is then passed
    to ``eval``/``exec``.

    NOTE(review): the string rewriting is fragile — it assumes no dictionary
    key other than the structural ones contains the substrings 'children'
    or 'id'.
    """
    global df_VC_pruned
    global leaf_count
    # Build the indexing expression once instead of rebuilding it for every
    # eval/exec below (the original repeated this chain five times).
    node_path = (str(match_fullpath).replace('.', '')
                 .replace('children', "['children']")
                 .replace('id', ''))
    node_expr = "df_VC_pruned['msg'][0]" + node_path
    bool1 = eval(node_expr + "['children'] == []")        # node is a leaf
    bool2 = eval(node_expr + "['voxel_count'] is None")   # node has no voxel count
    if bool1:
        leaf_count = leaf_count + 1
    if bool1 and bool2:
        print(("***** deleted ID: " + str(eval(node_expr + "['id']"))))
        exec("del " + node_expr)
        return match_id * (-1)  # deleted ids are assigned as negative
    else:
        print(("remained ID: " + str(eval(node_expr + "['id']"))))
        return match_id  # remained ids in positive


leaf_count = 0
jsonpath_expr = parse('$..id')
df_VC_pruned = copy.deepcopy(df_VC)

# Iterate in reverse document order so that deleting a node does not shift
# the list indices of nodes that are still to be visited.
temp_list = [Delete_leaf_node_WO_VC(match.value, match.full_path)
             for match in reversed(jsonpath_expr.find(df_VC_pruned['msg'][0]))]
DeletedLeafs = np.array(temp_list)

# # Save a pruned annotation ontology file with voxel counts

with open(os.path.join(dir_data, fn_output_AO), mode='w') as fw:
    json.dump(df_VC_pruned, fw, indent=4)

# # Check data

print('# all IDs in AO: '+str(len(DeletedLeafs)))   # 1327
print('# remained IDs: '+str(sum(DeletedLeafs>0)))  # 837
print('# deleted IDs: '+str(sum(DeletedLeafs<0)))   # 490
print('# leaf count: ' +str(leaf_count))
notebooks/Prune_leaf_ROI_wo_VC_in_AO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Processing Multisentence Documents

import pyConTextNLP.pyConTextGraph as pyConText
import pyConTextNLP.itemData as itemData
from textblob import TextBlob
import networkx as nx
import pyConTextNLP.display.html as html
from IPython.display import display, HTML

# Sample radiology impressions used throughout the notebook.
reports = [
    """IMPRESSION: Evaluation limited by lack of IV contrast; however, no evidence of bowel obstruction or mass identified within the abdomen or pelvis. Non-specific interstitial opacities and bronchiectasis seen at the right base, suggestive of post-inflammatory changes.""",
    """IMPRESSION: Evidence of early pulmonary vascular congestion and interstitial edema. Probable scarring at the medial aspect of the right lung base, with no definite consolidation.""",
    """IMPRESSION: 1. 2.0 cm cyst of the right renal lower pole. Otherwise, normal appearance of the right kidney with patent vasculature and no sonographic evidence of renal artery stenosis. 2. Surgically absent left kidney.""",
    """IMPRESSION: No pneumothorax.""",
    # BUG FIX: a missing comma after the next string caused Python's implicit
    # string concatenation to silently merge the last two reports into one.
    """IMPRESSION: No definite pneumothorax""",
    """IMPRESSION: New opacity at the left lower lobe consistent with pneumonia.""",
]

# +
# Load the modifier and target lexicons from the pyConTextNLP knowledge base.
modifiers = itemData.instantiateFromCSVtoitemData(
    "https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/lexical_kb_05042016.tsv")
targets = itemData.instantiateFromCSVtoitemData(
    "https://raw.githubusercontent.com/chapmanbe/pyConTextNLP/master/KB/utah_crit.tsv")
# -

# ## Define ``markup_sentence``
#
# We are putting the functionality we went through in the previous two notebooks ([BasicSentenceMarkup](./BasicSentenceMarkup.ipynb) and [BasicSentenceMarkupPart2](BasicSentenceMarkupPart2.ipynb)) into a function ``markup_sentence``.
# We add one step to the function: ``dropInactiveModifiers`` will delete any modifier node that does not get attached to a target node.


def markup_sentence(s, modifiers, targets, prune_inactive=True):
    """Run the full pyConText pipeline on one sentence and return its markup.

    The ordering of the calls below matters: items are marked, pruned, and
    only then are modifiers applied to the targets within their scope.
    """
    markup = pyConText.ConTextMarkup()
    markup.setRawText(s)
    markup.cleanText()
    markup.markItems(modifiers, mode="modifier")
    markup.markItems(targets, mode="target")
    markup.pruneMarks()
    markup.dropMarks('Exclusion')
    # apply modifiers to any targets within the modifiers scope
    markup.applyModifiers()
    markup.pruneSelfModifyingRelationships()
    if prune_inactive:
        markup.dropInactiveModifiers()
    return markup


report = reports[0]

print(report)

# #### Create a ``ConTextDocument``
#
# ``ConTextDocument`` is a class for organizing the markup of multiple sentences. It has a private attribute that is NetworkX DiGraph that represents the document structure. In this exmaple we only use the ``ConTextDocument`` class to collect multiple sentence markups.

context = pyConText.ConTextDocument()

# #### Split the document into sentences and process each sentence
#
# pyConTextNLP comes with a simple sentence splitter in ``helper.py``. I have not been maintaining this and have recently been using TextBlob to split sentences. A known problem with either sentence splitting solution is enumerated lists that don't use periods.

# +
blob = TextBlob(report.lower())
count = 0
rslts = []
for s in blob.sentences:
    m = markup_sentence(s.raw, modifiers=modifiers, targets=targets)
    rslts.append(m)

for r in rslts:
    context.addMarkup(r)
# -

# ### Displaying pyConTextNLP Markups
#
# The ``display`` subpackage contains some functionality for visualizing the markups. Here I use HTML to color-code identified concepts.
# Map each pyConText category to the CSS color used when rendering the markup.
clrs = {
    "bowel_obstruction": "blue",
    "inflammation": "blue",
    "definite_negated_existence": "red",
    "probable_negated_existence": "indianred",
    "ambivalent_existence": "orange",
    "probable_existence": "forestgreen",
    "definite_existence": "green",
    "historical": "goldenrod",
    "indication": "pink",
    # BUG FIX: "golden" is not a valid CSS color name (browsers would fall
    # back to the default); "gold" is the intended named color.
    "acute": "gold",
}

display(HTML(html.mark_document_with_html(context, colors=clrs, default_color="black")))

# ### There is also a rich XML description of the ``ConTextDocument``

print(context.getXML())
MultiSentenceDocuments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="U0xpls63SQra" colab_type="text"
# # Churn rate

# + [markdown] id="1971HG29SLda" colab_type="text"
# Problem domain: predict behavior to retain customers.
#
# Data source: https://www.kaggle.com/blastchar/telco-customer-churn

# + id="wGISuim9ZNL4" colab_type="code" colab={}
# Import libraries
import json  # access data
import numpy as np  # linear algebra
import pandas as pd  # data processing
import matplotlib.pyplot as plt  # plot graph
import seaborn as sns  # plot interactive graph
import warnings
warnings.filterwarnings("ignore")
from pylab import rcParams
# %matplotlib inline

# + [markdown] id="bJUUDGi2bSPw" colab_type="text"
# ### Access the data

# + id="ph-KP6B9DCxh" colab_type="code" outputId="0d10edb0-6e77-4af2-ba1d-ac709a32281f" colab={"base_uri": "https://localhost:8080/", "height": 230}
# !pip install kaggle
# !mkdir ~/.kaggle
# !touch '/root/.kaggle/kaggle.json'
######################################################################################
# Type Kaggle username and api key
# api_token = {"username":"username","key":"TOKEN_HERE"}
api_token = {"username":" ","key":" "}
######################################################################################
with open('/root/.kaggle/kaggle.json', 'w') as file:
    json.dump(api_token, file)
# !chmod 600 /root/.kaggle/kaggle.json

# + id="h6bXGvdlDLhu" colab_type="code" outputId="b5dfd46f-ca76-420e-b016-44b4d9db8a0c" colab={"base_uri": "https://localhost:8080/", "height": 70}
# Dataset download
# !kaggle datasets download -d blastchar/telco-customer-churn

# + id="3Lhbknp-LR2T" colab_type="code" outputId="d9073eb7-075c-4c7a-c418-2f4c6040b476" colab={"base_uri": "https://localhost:8080/", "height": 52}
# Decompress the files in zip format
# !unzip telco-customer-churn.zip

# + id="h7Xvgb0QE2Gd" colab_type="code" outputId="85c87c7d-d85a-4d19-c61e-57e09717e35f" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Verify the files available
# !ls

# + id="vcjBN4xRZa9E" colab_type="code" colab={}
# read file
data = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv')#, header = 0, index_col = 0)

# + [markdown] id="O5PjLJutfvAj" colab_type="text"
# ### Exploratory Data Analysis (EDA)

# + id="RRlBx7VMah0Y" colab_type="code" outputId="7e9b5ebd-6393-4817-cf66-55179f01c199" colab={"base_uri": "https://localhost:8080/", "height": 282}
# Show the first five lines of dataset
data.head()

# + id="m_7Nr0Qqp2JL" colab_type="code" outputId="72d8fd82-6d74-496b-8c66-8866f7d84531" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Show dimensionality (rows, columns)
data.shape

# + [markdown] id="c0E6XN-5cNi2" colab_type="text"
# 7043 data objects with 21 attributes

# + id="QkAX9rOQqMRj" colab_type="code" outputId="eddebe49-60c1-4295-8e9b-fba3297389cf" colab={"base_uri": "https://localhost:8080/", "height": 122}
# List columns
data.columns

# + id="avNsENYLddOu" colab_type="code" outputId="68e4c532-5adb-45d2-c5be-10d9d9f97e0f" colab={"base_uri": "https://localhost:8080/", "height": 474}
# Concise summary of a dataframe
data.info()

# + id="w8LgSrhvlPxi" colab_type="code" outputId="fcf48351-a9a0-40a3-e82e-af99c000970b" colab={"base_uri": "https://localhost:8080/", "height": 669}
# Missing data: count and percentage of nulls per column, sorted
total = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data

# + [markdown] id="ivhDv-K4dftk" colab_type="text"
# Complete dataframe. No missing data.
# + id="Y2d0aSlnfBdz" colab_type="code" outputId="76288719-e1e0-4a67-b37a-3e8737d560a2" colab={"base_uri": "https://localhost:8080/", "height": 404}
# Return the number of distinct observations
data.nunique()

# + id="9VU0OEh8ehXW" colab_type="code" colab={}
# Drop customerID
# NOTE(review): this creates a *copy* of ``data``; the Churn/gender edits
# below mutate ``df`` only, while later sections go back to using ``data``.
# Verify which frame is intended before relying on downstream results.
df = data.iloc[:,1:]

# + [markdown] id="IojyNs2re7sP" colab_type="text"
# ### Target feature

# + id="rDF07HFqia7W" colab_type="code" outputId="f65ce5f9-c109-4134-9667-efb2c962aeb8" colab={"base_uri": "https://localhost:8080/", "height": 70}
df['Churn'].value_counts()

# + id="3zsLiNoFh-QZ" colab_type="code" outputId="ba3798d9-75b4-4f94-c654-bd924b224e86" colab={"base_uri": "https://localhost:8080/", "height": 319}
# Data to plot
labels = df['Churn'].value_counts(sort = True).index
sizes = df['Churn'].value_counts(sort = True)

colors = ["whitesmoke","red"]
explode = (0.1,0)  # explode 1st slice

rcParams['figure.figsize'] = 5,5

# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=270,)
plt.title('Percent of churn in customer')
plt.show()

# + [markdown] id="5B3WI27njxSz" colab_type="text"
# Churn customer is 26.5%

# + [markdown] id="NJrqG3ubmc90" colab_type="text"
# ### Preprocessing

# + [markdown] id="3vecqywFrYbB" colab_type="text"
# #### Churn

# + id="-hslHIakkV3e" colab_type="code" colab={}
# 'Yes' -> 1 and 'No' -> 0
df['Churn'] = df['Churn'].map(lambda s :1 if s =='Yes' else 0)

# + [markdown] id="76KwjAKNrbSm" colab_type="text"
# #### Gender

# + id="jrCsF71npFFA" colab_type="code" outputId="d690b897-48ff-4226-ab65-25a7dd6942e0" colab={"base_uri": "https://localhost:8080/", "height": 369}
g = sns.catplot(y = "Churn", x = "gender", data = df, kind = "bar", palette = "Pastel1")

# + id="XDvEaP7Bqc-6" colab_type="code" outputId="4ed430b2-e428-4f35-c163-4757a0603583" colab={"base_uri": "https://localhost:8080/", "height": 122}
df['gender'].head()

# + id="EG4jceZ_qzm5" colab_type="code" colab={}
# Create columns: gender_Female and gender_Male
df = pd.get_dummies(data=df, columns=['gender'])

# + id="QYBCFHEpqtiE" colab_type="code" outputId="57460ad3-9831-4b53-c912-027fc57e4547" colab={"base_uri": "https://localhost:8080/", "height": 122}
# Female: 'Yes' -> 1 and 'No' -> 0
df['gender_Female'].head()

# + id="Sr4btMerwTI5" colab_type="code" outputId="d83fa073-b0a7-45e6-eadb-f6846f2a52a6" colab={"base_uri": "https://localhost:8080/", "height": 122}
# Male: 'Yes' -> 1 and 'No' -> 0
df['gender_Male'].head()

# + [markdown] id="JxtVkZKBwqiX" colab_type="text"
# #### SeniorCitizen

# + id="hG3l0fYdwuri" colab_type="code" outputId="8ce57c3e-fa18-4d81-9c98-a0438f1d6617" colab={"base_uri": "https://localhost:8080/", "height": 70}
# The customer is a senior citizen or not (1, 0)
df['SeniorCitizen'].value_counts()

# + id="smmwlHNcyzsh" colab_type="code" colab={}
sc = sns.catplot(y = "Churn", x = "SeniorCitizen", data = df, kind = "bar", palette = "Pastel1")

# + [markdown] id="wv-ZLc2TxMkz" colab_type="text"
# #### Partner

# + id="scUTyS-pxQ4w" colab_type="code" colab={}
# The customer has a partner or not (Yes, No)
# NOTE(review): from here on the notebook edits ``data`` instead of ``df``.
data['Partner'].value_counts()

# + id="jcJ3rAgT0Pnq" colab_type="code" colab={}
# 'Yes' -> 1 and 'No' -> 0
data['Partner'] = data['Partner'].map(lambda s :1 if s =='Yes' else 0)

# + id="mFGj96ZB0bOs" colab_type="code" colab={}
data['Partner'].value_counts()

# + id="xVA-ZxsX0rFp" colab_type="code" colab={}
p = sns.catplot(y="Churn", x="Partner", data=data, kind="bar", palette="muted")

# + [markdown] id="qOZT3ZHD2Y1e" colab_type="text"
# #### Dependents, PhoneService, PaperlessBilling, OnlineSecurity, DeviceProtection, TechSupport, StreamingTV, StreamingMovies

# + id="47JnIqtj2Xqn" colab_type="code" colab={}
# Binary-encode all remaining Yes/No columns (anything that is not exactly
# 'Yes' — including 'No internet service' — becomes 0).
data['Dependents'] = data['Dependents'].map(lambda s :1 if s =='Yes' else 0)
data['PhoneService'] = data['PhoneService'].map(lambda s :1 if s =='Yes' else 0)
data['PaperlessBilling'] = data['PaperlessBilling'].map(lambda s :1 if s =='Yes' else 0)
data['OnlineSecurity'] = data['OnlineSecurity'].map(lambda s :1 if s =='Yes' else 0)
data['OnlineBackup'] = data['OnlineBackup'].map(lambda s :1 if s =='Yes' else 0)
data['DeviceProtection'] = data['DeviceProtection'].map(lambda s :1 if s =='Yes' else 0)
data['TechSupport'] = data['TechSupport'].map(lambda s :1 if s =='Yes' else 0)
data['StreamingTV'] = data['StreamingTV'].map(lambda s :1 if s =='Yes' else 0)
data['StreamingMovies'] = data['StreamingMovies'].map(lambda s :1 if s =='Yes' else 0)

# + [markdown] id="g3a82lvZ36nD" colab_type="text"
# #### Tenure

# + id="Fv6GXn8a38l1" colab_type="code" colab={}
data['tenure'].head()

# + id="A_DBu-07voqp" colab_type="code" colab={}
ax = sns.distplot(data['tenure'], hist=True, kde=False,
                  bins=int(180/5), color = 'darkblue',
                  hist_kws={'edgecolor':'black'},
                  kde_kws={'linewidth': 4})
ax.set_ylabel('# of Customers')
ax.set_xlabel('Tenure (months)')
ax.set_title('# of Customers by their tenure')

# + id="lI5muUdl4HTd" colab_type="code" colab={}
# tenure distibution related to Churn
# NOTE(review): ``data["Churn"]`` was never mapped to 0/1 (only ``df`` was);
# confirm these comparisons behave as intended.
t = sns.kdeplot(data.tenure[(data["Churn"] == 0) ], color="Blue", shade = True)
t = sns.kdeplot(data.tenure[(data["Churn"] == 1) ], ax =t, color="Red", shade= True)
t.set_xlabel("tenure")
t.set_ylabel("Frequency")
plt.title('Distribution of tenure comparing with churn feature')
t = t.legend(["Not Churn","Churn"])

# + id="sh-S6F5_yFBh" colab_type="code" colab={}
# Churn vs tenure
sns.boxplot(x = data['Churn'], y = data['tenure'])

# + [markdown] id="1x2BkXJcBP6d" colab_type="text"
# #### MultipleLines

# + id="xlUmJh-dBVO-" colab_type="code" colab={}
data['MultipleLines'].value_counts()

# + id="4h5PFH1_FbzT" colab_type="code" colab={}
# To be just binary values, 'No phone service' will be treated as 'No'
data['MultipleLines'].replace('No phone service','No', inplace=True)
data['MultipleLines'] = data['MultipleLines'].map(lambda s :1 if s =='Yes' else 0)
data['MultipleLines'].value_counts()

# + [markdown] id="E-kaL3WbL6cO" colab_type="text"
# #### InternetService

# + id="EnNIuZIxFUX_" colab_type="code" colab={}
data['InternetService'].unique()

# + id="x6s2g1mWPEqo" colab_type="code" colab={}
# To create a binary column is necessary:
# 1) divide the InternetService into 3 columns: internetService; internetDSL; internetFiber_optic
# 2) drop InternetService column
data['internetService'] = data['InternetService'].map(lambda s :0 if s =='No' else 1)
data['internetFiber_optic'] = data['InternetService'].map(lambda s :1 if s =='Fiber optic' else 0)
data['internetDSL'] = data['InternetService'].map(lambda s :1 if s =='DSL' else 0)
data.drop(['InternetService'], axis=1, inplace=True)

# + id="mCVW_r44ZvSq" colab_type="code" colab={}
print(data['internetService'].value_counts())
print(data['internetFiber_optic'].value_counts())
print(data['internetDSL'].value_counts())

# + [markdown] id="LpaDBXXhbUCK" colab_type="text"
# #### Contract

# + id="gnKiad4IdhKh" colab_type="code" colab={}
data['Contract'].value_counts()

# + id="PvyxMGHDwS5b" colab_type="code" colab={}
c = data['Contract'].value_counts().plot(kind = 'bar',rot = 0, width = 0.3)
c.set_ylabel('# of Customers')
c.set_title('# of Customers by Contract Type')

# + [markdown] id="2SqOIpjAwx2C" colab_type="text"
# Most of the customers are in the month to month contract.

# + id="DFjzI5XkdoJK" colab_type="code" colab={}
# Use pandas function "get_dummies" for this feature
data = pd.get_dummies(data=data, columns=['Contract'])

# + id="Exq1JCYOfPE3" colab_type="code" colab={}
data[['Contract_Month-to-month','Contract_One year', 'Contract_Two year']].head()

# + [markdown] id="WREQG40CdF4H" colab_type="text"
# #### PaymentMethod

# + id="wNGC0WGIbYVx" colab_type="code" colab={}
data['PaymentMethod'].value_counts()

# + id="but5D3Qmdov5" colab_type="code" colab={}
data = pd.get_dummies(data=data, columns=['PaymentMethod'])

# + id="n3KzF3P6fB4j" colab_type="code" colab={}
data[['PaymentMethod_Bank transfer (automatic)', 'PaymentMethod_Credit card (automatic)',
      'PaymentMethod_Electronic check', 'PaymentMethod_Mailed check']].head()

# + [markdown] id="AOq9xM8oilyz" colab_type="text"
# #### MonthlyCharges

# + id="iCw-wjMripYs" colab_type="code" colab={}
# Numeric feature
data['MonthlyCharges'].head()

# + id="dtTHfgtUmUHU" colab_type="code" colab={}
mc = sns.factorplot(x="Churn", y = "MonthlyCharges",data = data, kind="box", palette = "Pastel1")

# + [markdown] id="IgUV88f_mq3q" colab_type="text"
# #### TotalCharges

# + id="YYQpoUtXmuRC" colab_type="code" colab={}
# Object type
data['TotalCharges'].head()

# + id="ATuxroB4o_pv" colab_type="code" colab={}
# 11 missing data in dataset, turn all the column an object type
len(data[data['TotalCharges'] == " "])

# + id="EhI3FQHmpI0j" colab_type="code" colab={}
# Drop missing data
data = data[data['TotalCharges'] != " "]

# + id="wRiZcXPwp_kt" colab_type="code" colab={}
# Change column type
data['TotalCharges'] = pd.to_numeric(data['TotalCharges'])

# + id="0IILPMl5qbnm" colab_type="code" colab={}
# Check column type
data['TotalCharges'].dtypes

# + id="fqrNoCkByEgM" colab_type="code" colab={}
tc = sns.factorplot(y="TotalCharges",x="Churn",data=data,kind="boxen", palette = "Pastel2")

# + [markdown] id="_h1-0zuRybTN" colab_type="text"
# Most of churn customer has less than 2000 in total charge.
# The loyalty customer is around twice as amount as churn customer.

# + id="4NSxzcknxkPT" colab_type="code" colab={}
# 'MonthlyCharges' x 'TotalCharges'
data[['MonthlyCharges', 'TotalCharges']].plot.scatter(x = 'MonthlyCharges', y='TotalCharges')

# + id="mBFVxW3ZGG79" colab_type="code" colab={}
data.info()

# + id="ffKsS4MlWrZL" colab_type="code" colab={}
# Correlation of "Churn" with other variables:
# NOTE(review): the 0/1 Churn mapping was applied to the ``df`` copy, not to
# ``data`` — confirm ``data['Churn']`` is numeric at this point.
plt.figure(figsize=(15,8))
data.corr()['Churn'].sort_values(ascending = False).plot(kind='bar')

# + [markdown] id="jRdWXk-KbbTb" colab_type="text"
# The 3 biggest correlations (non-causalities) with respect to 'Churn' are 'Contract_Month-to-month', 'internetFiber_optic' and 'PaymentMethod_Eletronic check'

# + [markdown] id="RyN18Lavuv46" colab_type="text"
#

# + [markdown] id="jMwwXOHizNsF" colab_type="text"
# ### Model

# + [markdown] id="XyDsmtP5NiIi" colab_type="text"
# #### Logistic regression

# + id="RP1K1ZMxzQik" colab_type="code" colab={}
y = data['Churn'].values
X = data.drop(columns = ['Churn'])

# + id="TTAsOXSq3lie" colab_type="code" colab={}
# Scaling all the variables to a range of 0 to 1
from sklearn.preprocessing import MinMaxScaler
features = X.columns.values
scaler = MinMaxScaler(feature_range = (0,1))
scaler.fit(X)
X = pd.DataFrame(scaler.transform(X))
X.columns = features

# + [markdown] id="IxfRV2XI30FT" colab_type="text"
# It is important to scale the variables in logistic regression so that all of them are within a range of 0 to 1. This helped me improve the accuracy from 79.7% to 80.7%. Further, you will notice below that the importance of variables is also aligned with what we are seeing in Random Forest algorithm and the EDA we conducted above.

# + id="A3XBjH-P3mkX" colab_type="code" colab={}
# Create Train & Test Data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)

# + id="brtHBvsd4NpF" colab_type="code" colab={}
# Running logistic regression model
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
result = model.fit(X_train, y_train)

# + id="WBM9UQLT4STn" colab_type="code" colab={}
from sklearn import metrics
prediction_test = model.predict(X_test)
# Print the prediction accuracy
print (metrics.accuracy_score(y_test, prediction_test))

# + id="RcsgDOfS4nvy" colab_type="code" colab={}
# To get the weights of all the variables
weights = pd.Series(model.coef_[0], index=X.columns.values)
print (weights.sort_values(ascending = False)[:10].plot(kind='bar'))

# + id="YyDXTyjv5fDD" colab_type="code" colab={}
print(weights.sort_values(ascending = False)[-10:].plot(kind='bar'))

# + [markdown] id="x1c0QUt1557s" colab_type="text"
# We can see that some variables have a negative relation to our predicted variable (Churn), while some have positive relation. Negative relation means that likeliness of churn decreases with that variable. Let us summarize some of the interesting features below:
#
# . As we saw in our EDA, having a 2 month contract reduces chances of churn. 2 month contract along with tenure have the most negative relation with Churn as predicted by logistic regressions
#
# . Having DSL internet service also reduces the proability of Churn
#
# . Lastly, total charges, monthly contracts, fibre optic internet services and seniority can lead to higher churn rates. This is interesting because although fibre optic services are faster, customers are likely to churn because of it. I think we need to explore more to better understad why this is happening.
#
# Any hypothesis on the above would be really helpful!
# + [markdown] id="ljmyolYS2TXB" colab_type="text"
# #### Random forest

# + id="_P_iqktMn_rN" colab_type="code" colab={}
from sklearn.ensemble import RandomForestClassifier
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
model_rf = RandomForestClassifier(n_estimators=1000 , oob_score = True, n_jobs = -1,
                                  random_state =50, max_features = "auto",
                                  max_leaf_nodes = 30)
model_rf.fit(X_train, y_train)

# Make predictions
prediction_test = model_rf.predict(X_test)
print (metrics.accuracy_score(y_test, prediction_test))

# + id="jXhK8Ij25FH_" colab_type="code" colab={}
importances = model_rf.feature_importances_
weights = pd.Series(importances, index=X.columns.values)
weights.sort_values()[-10:].plot(kind = 'barh')

# + [markdown] id="UCZIBhzf6u3B" colab_type="text"
# Observations:
#
# . From random forest algorithm, monthly contract, tenure and total charges are the most important predictor variables to predict churn.
#
# . The results from random forest are very similar to that of the logistic regression and in line to what we had expected from our EDA

# + [markdown] id="F2rOjUSY60Mu" colab_type="text"
# #### Support Vector Machine (SVM)

# + id="8ihQ5K7E6-dE" colab_type="code" colab={}
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=99)

# + id="Fw3RhqBeCGJI" colab_type="code" colab={}
from sklearn.svm import SVC
# BUG FIX: this used to do ``model.svm = SVC(...)``, grafting the SVC onto the
# LogisticRegression object from the previous section as an attribute. The SVM
# is an independent model, so keep it in its own variable.
model_svm = SVC(kernel='linear')
model_svm.fit(X_train,y_train)
preds = model_svm.predict(X_test)
metrics.accuracy_score(y_test, preds)

# + id="-vGrbcQ0CbLD" colab_type="code" colab={}
# Create the Confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test,preds))

# + [markdown] id="4hAl5-MFClQE" colab_type="text"
# Wth SVM I was able to increase the accuracy to upto 82%. However, we need to take a deeper look at the true positive and true negative rates, including the Area Under the Curve (AUC) for a better prediction.
# + id="-g0vQy6LCz4O" colab_type="code" colab={}
# alter dataframe data to make this code work: there is no 'gender' column
ax1 = sns.catplot(x=data["gender"], kind="count", hue="Churn", data=data,
                  estimator=lambda x: sum(x==0)*100.0/len(x))
#ax1.yaxis.set_major_formatter(mtick.PercentFormatter())

# + [markdown] id="hBPZChwzDkGI" colab_type="text"
# #### ADA Boost

# + id="pY1pEGiNDmcV" colab_type="code" colab={}
# AdaBoost Algorithm
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
# n_estimators = 50 (default value)
# base_estimator = DecisionTreeClassifier (default value)
model.fit(X_train,y_train)
preds = model.predict(X_test)
metrics.accuracy_score(y_test, preds)

# + [markdown] id="Gkirol1hDueZ" colab_type="text"
# #### XG Boost

# + id="i8ENmY02Dw9t" colab_type="code" colab={}
from xgboost import XGBClassifier
model = XGBClassifier()
model.fit(X_train, y_train)
preds = model.predict(X_test)
metrics.accuracy_score(y_test, preds)

# + [markdown] id="ytOO4DoKrrqT" colab_type="text"
# TODO: improve this section (original note in Portuguese: "MELHORAR...")
# + id="C0YBhg78ErJQ" colab_type="code" colab={}
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score,GridSearchCV
Rfclf = RandomForestClassifier(random_state=15)
Rfclf.fit(X_train, y_train)

# + id="BU1KVznuE2kM" colab_type="code" colab={}
# 10 Folds Cross Validation
clf_score = cross_val_score(Rfclf, X_train, y_train, cv=10)
print(clf_score)
clf_score.mean()

# + [markdown] id="jnnEolo2FBaY" colab_type="text"
# My default random forest get around 78% accuracy from cross-validation

# + id="VwTJ-D3LFCZ7" colab_type="code" colab={}
# %%time
param_grid = {
    'n_estimators' : [500,1200],
    # 'min_samples_split': [2,5,10,15,100],
    # 'min_samples_leaf': [1,2,5,10],
    'max_depth': range(1,5,2),
    'max_features' : ('log2', 'sqrt'),
    'class_weight':[{1: w} for w in [1,1.5]]
}

GridRF = GridSearchCV(RandomForestClassifier(random_state=15), param_grid)
GridRF.fit(X_train, y_train)

#RF_preds = GridRF.predict_proba(X_test)[:, 1]
#RF_performance = roc_auc_score(Y_test, RF_preds)

print(
    #'DecisionTree: Area under the ROC curve = {}'.format(RF_performance)
    "\nBest parameters \n" + str(GridRF.best_params_))

# + id="LkBgwd3WFdkV" colab_type="code" colab={}
rf = RandomForestClassifier(random_state=15,**GridRF.best_params_)
rf.fit(X_train, y_train)

# + id="SFdqJxXwFju_" colab_type="code" colab={}
# 10 Folds Cross Validation
clf_score = cross_val_score(rf, X_train, y_train, cv=10)
print(clf_score)
clf_score.mean()

# + [markdown] id="HtIYxAyyFycW" colab_type="text"
# My grid-search random forest get around 80% accuracy from cross-validation,
# a little bit improve from default parameter

# + [markdown] id="K_OSIXYmF8mz" colab_type="text"
# Feature importances
#
# another advantage from rule-based model

# + id="pgq85HmiFsrH" colab_type="code" colab={}
Rfclf_fea = pd.DataFrame(rf.feature_importances_)
Rfclf_fea["Feature"] = list(X_train)
Rfclf_fea.sort_values(by=0, ascending=False).head()

# + id="NSEgm4tcGT5C" colab_type="code" colab={}
g = sns.barplot(0,"Feature",data = Rfclf_fea.sort_values(by=0, ascending=False)[0:5],
                palette="Pastel1",orient = "h")
g.set_xlabel("Weight")
g = g.set_title("Random Forest")

# + [markdown] id="a1DAKq1xGhZQ" colab_type="text"
# Confusion Matrix
#
# also known as an error matrix, it is a specific table layout that allows visualization of the performance of an algorithm

# + id="00KUbzEDGjar" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix
y_pred = rf.predict(X_train)
print(confusion_matrix(y_train, y_pred))

# + [markdown] id="odOMvXc9G1pg" colab_type="text"
# TODO: study the confusion matrix and interpret this value
# (original note in Portuguese: "estudar matriz de confusão e interpretar esse valor")

# + id="jkQXwy06G_cI" colab_type="code" colab={}
from sklearn.metrics import classification_report
print(classification_report( y_train, y_pred))

# + [markdown] id="eMTrSnESIQE8" colab_type="text"
#
churn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exp 106 analysis
#
# See `./informercial/Makefile` for experimental
# details.

# +
import os
import numpy as np
from IPython.display import Image

import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

import seaborn as sns
sns.set_style('ticks')

matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)

from infomercial.exp import meta_bandit
from infomercial.exp import epsilon_bandit
from infomercial.exp import beta_bandit
from infomercial.exp import softbeta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint

import gym


# +
def plot_meta(env_name, result):
    """Plots!"""
    # episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
    # NOTE(review): relies on a module-level ``tie_threshold`` defined
    # elsewhere in the notebook.
    episodes = result["episodes"]
    actions = result["actions"]
    bests = result["p_bests"]
    scores_E = result["scores_E"]
    scores_R = result["scores_R"]
    values_R = result["values_R"]
    values_E = result["values_E"]
    ties = result["ties"]
    policies = result["policies"]

    # -
    env = gym.make(env_name)
    best = env.best
    print(f"Best arm: {best}, last arm: {actions[-1]}")

    # Plotz
    fig = plt.figure(figsize=(6, 14))
    grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

    # Arm
    plt.subplot(grid[0, 0])
    plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
    plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
             color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylim(-.1, np.max(actions)+1.1)
    plt.ylabel("Arm choice")
    plt.xlabel("Episode")

    # Policy
    policies = np.asarray(policies)
    episodes = np.asarray(episodes)
    plt.subplot(grid[1, 0])
    m = policies == 0
    plt.scatter(episodes[m], policies[m], alpha=.4, s=2,
                label="$\pi_E$", color="purple")
    m = policies == 1
    plt.scatter(episodes[m], policies[m], alpha=.4, s=2,
                label="$\pi_R$", color="grey")
    plt.ylim(-.1, 1+.1)
    plt.ylabel("Controlling\npolicy")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # score
    plt.subplot(grid[2, 0])
    plt.scatter(episodes, scores_E, color="purple", alpha=0.4, s=2, label="E")
    plt.plot(episodes, scores_E, color="purple", alpha=0.4)
    plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
    plt.plot(episodes, scores_R, color="grey", alpha=0.4)
    plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
             color="violet", alpha=0.8, ls='--', linewidth=2)
    plt.ylabel("Score")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Q
    plt.subplot(grid[3, 0])
    plt.scatter(episodes, values_E, color="purple", alpha=0.4, s=2, label="$Q_E$")
    plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
    plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
             color="violet", alpha=0.8, ls='--', linewidth=2)
    plt.ylabel("Value")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Ties
    plt.subplot(grid[4, 0])
    plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
    plt.ylabel("p(best)")
    plt.xlabel("Episode")
    plt.ylim(0, 1)

    # Ties
    plt.subplot(grid[5, 0])
    plt.scatter(episodes, ties, color="black", alpha=.5, s=2,
                label="$\pi_{tie}$ : 1\n $\pi_\pi$ : 0")
    plt.ylim(-.1, 1+.1)
    plt.ylabel("Ties index")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))


def plot_epsilon(env_name, result):
    """Plots!"""
    # episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
    episodes = result["episodes"]
    actions = result["actions"]
    bests = result["p_bests"]
    scores_R = result["scores_R"]
    values_R = result["values_R"]
    epsilons = result["epsilons"]

    # -
    env = gym.make(env_name)
    best = env.best
    print(f"Best arm: {best}, last arm: {actions[-1]}")

    # Plotz
    fig = plt.figure(figsize=(6, 14))
    grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

    # Arm
    plt.subplot(grid[0, 0])
    plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
    plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
             color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylim(-.1, np.max(actions)+1.1)
    plt.ylabel("Arm choice")
    plt.xlabel("Episode")

    # score
    plt.subplot(grid[1, 0])
    plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
    plt.ylabel("Score")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Q
    plt.subplot(grid[2, 0])
    plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
    plt.ylabel("Value")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # best
    plt.subplot(grid[3, 0])
    plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
    plt.ylabel("p(best)")
    plt.xlabel("Episode")
    plt.ylim(0, 1)

    # Decay
    plt.subplot(grid[4, 0])
    plt.scatter(episodes, epsilons, color="black", alpha=.5, s=2)
    plt.ylabel("$\epsilon_R$")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))


def plot_beta(env_name, result):
    """Plots!"""
    # episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
    # NOTE(review): this function continues beyond this chunk of the file.
    episodes = result["episodes"]
    actions = result["actions"]
    bests = result["p_bests"]
    scores_R = result["scores_R"]
    values_R = result["values_R"]
    beta = result["beta"]

    # -
    env = gym.make(env_name)
    best = env.best
    print(f"Best arm: {best}, last arm: {actions[-1]}")

    # Plotz
    fig = plt.figure(figsize=(6, 14))
    grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

    # Arm
    plt.subplot(grid[0, 0])
    plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
    plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
             color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylim(-.1, np.max(actions)+1.1)
    plt.ylabel("Arm choice")
    plt.xlabel("Episode")

    # score
    plt.subplot(grid[1, 0])
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R") plt.ylabel("Score") plt.xlabel("Episode") # plt.semilogy() plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) _ = sns.despine() # Q plt.subplot(grid[2, 0]) plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$") plt.ylabel("Value") plt.xlabel("Episode") # plt.semilogy() plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) _ = sns.despine() # best plt.subplot(grid[3, 0]) plt.scatter(episodes, bests, color="red", alpha=.5, s=2) plt.ylabel("p(best)") plt.xlabel("Episode") plt.ylim(0, 1) def plot_critic(critic_name, env_name, result): # - env = gym.make(env_name) best = env.best # Data critic = result[critic_name] arms = list(critic.keys()) values = list(critic.values()) # Plotz fig = plt.figure(figsize=(8, 3)) grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8) # Arm plt.subplot(grid[0]) plt.scatter(arms, values, color="black", alpha=.5, s=30) plt.plot([best]*10, np.linspace(min(values), max(values), 10), color="red", alpha=0.8, ls='--', linewidth=2) plt.ylabel("Value") plt.xlabel("Arm") # - # # Load and process data data_path ="/Users/qualia/Code/infomercial/data/" exp_name = "exp106" sorted_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_sorted.pkl")) # print(sorted_params.keys()) best_params = sorted_params[0] beta = best_params['beta'] sorted_params # # Performance # # of best parameters # + env_name = 'BanditUniform121-v0' num_episodes = 60500 # Run w/ best params result = beta_bandit( env_name=env_name, num_episodes=num_episodes, lr_R=best_params["lr_R"], beta=best_params["beta"], seed_value=2, ) print(best_params) plot_beta(env_name, result=result) # - plot_critic('critic', env_name, result) # # Sensitivity # # to parameter choices # + total_Rs = [] betas = [] lrs_R = [] lrs_E = [] trials = list(sorted_params.keys()) for t in trials: total_Rs.append(sorted_params[t]['total_R']) lrs_R.append(sorted_params[t]['lr_R']) 
betas.append(sorted_params[t]['beta']) # Init plot fig = plt.figure(figsize=(5, 18)) grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8) # Do plots: # Arm plt.subplot(grid[0, 0]) plt.scatter(trials, total_Rs, color="black", alpha=.5, s=6, label="total R") plt.xlabel("Sorted params") plt.ylabel("total R") _ = sns.despine() plt.subplot(grid[1, 0]) plt.scatter(trials, lrs_R, color="black", alpha=.5, s=6, label="total R") plt.xlabel("Sorted params") plt.ylabel("lr_R") _ = sns.despine() plt.subplot(grid[2, 0]) plt.scatter(lrs_R, total_Rs, color="black", alpha=.5, s=6, label="total R") plt.xlabel("lrs_R") plt.ylabel("total_Rs") _ = sns.despine() plt.subplot(grid[3, 0]) plt.scatter(betas, total_Rs, color="black", alpha=.5, s=6, label="total R") plt.xlabel("beta") plt.ylabel("total_Rs") _ = sns.despine() # - # # Parameter correlations from scipy.stats import spearmanr spearmanr(total_Rs, lrs_R) spearmanr(betas, total_Rs) spearmanr(betas, lrs_R) # # Distributions # # of parameters # + # Init plot fig = plt.figure(figsize=(5, 6)) grid = plt.GridSpec(3, 1, wspace=0.3, hspace=0.8) plt.subplot(grid[0, 0]) plt.hist(betas, color="black") plt.xlabel("beta") plt.ylabel("Count") _ = sns.despine() plt.subplot(grid[1, 0]) plt.hist(lrs_R, color="black") plt.xlabel("lr_R") plt.ylabel("Count") _ = sns.despine() # - # of total reward # + # Init plot fig = plt.figure(figsize=(5, 2)) grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8) plt.subplot(grid[0, 0]) plt.hist(total_Rs, color="black", bins=50) plt.xlabel("Total reward") plt.ylabel("Count") # plt.xlim(0, 10) _ = sns.despine() # -
notebooks/exp106_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import numpy as np import sympy as sp import scipy as sci import matplotlib.pyplot as plt # %matplotlib widget sp.physics.mechanics.init_vprinting() # # [Lagrangian 1](https://www.youtube.com/watch?v=HYUd9QFcefs) # > # constant # >> R : radius # >> L : line length # >>> $ L = x + x_2 + \pi R $ # >> # >> $ x : m_1 $ # >> $ y : m_2 $ # + R,m1,m2,g,L = sp.symbols('R m_1 m_2 g L',positive=True) t = sp.symbols('t') x = sp.Function('x')(t) y = L-(x + (sp.pi * R)) # - T = (sp.Rational(1,2) * m1 * x.diff(t)**2) + (sp.Rational(1/2) * m2 * y.diff(t)**2) T.simplify() U = m1*g*x + m2*g*y U.simplify() Lagrangian = T - U Lagrangian.simplify() rhs = Lagrangian.diff(x) rhs lhs = Lagrangian.diff(x.diff(t)).diff(t) lhs sp.solve(sp.Eq(rhs,lhs),x.diff(t,2)) sp.solve(sp.Eq(rhs,lhs),x.diff(t,2))[0] # # https://www.youtube.com/watch?v=rJaXxb_piGI&t=1s # import numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint import sympy as smp from matplotlib import animation from matplotlib.animation import PillowWriter t, g, l1, l2, m1, m2, m3, k, L0 = smp.symbols('t g l_1 l_2 m_1 m_2 m_3 k L_0') the1, the2 = smp.symbols(r'\theta_1 \theta_2', cls=smp.Function) the1 = the1(t) the2 = the2(t) the1_d = smp.diff(the1, t) the2_d = smp.diff(the2, t) the1_dd = smp.diff(the1_d, t) the2_dd = smp.diff(the2_d, t)
python/Template/lagrangian.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd df=pd.read_csv("C:/Users/d3us/Desktop/cursoDataScienceLinkedIn/bd/2008.csv", nrows=100000) df.head() df[['Year','Month','DayOfWeek','Cancelled']][:10] #consultas df[df['ArrDelay']>60].head(20) # df.columns df[((df['Year']==2008) & (df['Origin']=='LAX')) & (df['ArrDelay']>100)]
filtrado de datos pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example - Reproject # + import rioxarray # for the extension to load import xarray # %matplotlib inline # - # ## Load in xarray dataset xds = xarray.open_dataset("PLANET_SCOPE_3D.nc") xds xds.green.where(xds.green!=xds.green.rio.nodata).isel(time=1).plot() # ## Reproject xds_lonlat = xds.rio.reproject("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs") xds_lonlat xds_lonlat.green.where(xds_lonlat.green!=xds_lonlat.green.rio.nodata).isel(time=1).plot()
sphinx/examples/reproject.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/daripp/root_area_counter/blob/main/New_Root_Counter.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="aoNrRygVso7F" outputId="3d876996-13fe-4995-b7db-eab7c46e01c5" #Load the Drive helper and mount from google.colab import drive # This will prompt for authorization. drive.mount('/content/drive') # !ls # + id="oVQpHif2syp9" #########!!!!!!!!!!!!!!!!!!!!!CODE For ROOTS!!!!!!!!!!!!!!!!!! import glob import numpy as np import matplotlib.pyplot as plt import skimage as skimage import scipy as scipy import cv2 as cv2 import pandas as pd import os from skimage import filters, io, img_as_ubyte from skimage.color import rgb2grey, label2rgb from skimage import (exposure, feature, filters, io, measure, morphology, restoration, segmentation, transform, util) from skimage.exposure import match_histograms from skimage.filters import sobel, laplace, gaussian from skimage import feature from scipy.ndimage import morphology from skimage.morphology import disk, binary_dilation, diamond from skimage.segmentation import clear_border from skimage.measure import label, regionprops #*********************************************Put folder name here************************************************** directn = 'Folder_name_here' filenames = os.listdir(directn) for filename in os.listdir(directn): #*******************************************makes new directory called "(directory name here) + cropped"******************************************** new_dir_name=directn+'_extension_to_folder_name' if not os.path.exists(new_dir_name): os.makedirs(new_dir_name) #image is read from 
directory to system camera1=io.imread(directn+'/'+filename) image = rgb2grey(camera1) img=camera1 b,g,r = cv2.split(img) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Adjust Dilation and remove small objects values here!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! binary_dilation= disk(3) remove_small_objects= 2000 edges_r=np.array(r, dtype='ubyte') edges_b=np.array(b, dtype='ubyte') edges_g=np.array(g, dtype='ubyte') #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Adjust threshold values here; values are additive!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! edges_r1=np.where((edges_r < 70) & (edges_r > 30), 255, 0) edges_b1=np.where((edges_b < 120) & (edges_b > 80), 255, 0) edges_g1=np.where((edges_g < 15) & (edges_g > 5), 255, 0) edges_1r1=np.where((edges_r < 30) & (edges_r > 70), 255, 0) #!!!!!!!!!!!Images converted to 0-255 scale!!!!!!!!!!!!!!!!!!!!!!!! edges_r2=img_as_ubyte(edges_r1) edges_b2=img_as_ubyte(edges_b1) edges_g2=img_as_ubyte(edges_g1) # #edges2= feature.canny(edges2, sigma=1.5) #!!!!!!!!!!!!!!!!!!!!Mask expansion, adjust values above!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1 edges_r2=skimage.morphology.binary_dilation(edges_r2, binary_dilation) edges_b2=skimage.morphology.binary_dilation(edges_b2, binary_dilation) edges_g2=skimage.morphology.binary_dilation(edges_g2, binary_dilation) ###!!!!!!!!!!!!!!!!!!!!Removes small objects marked by the computer!!!!!!!!!!!!!!!!!!!!!!!!!!!!! edges_r3 = skimage.morphology.remove_small_objects(edges_r2, min_size=remove_small_objects) edges_b3 = skimage.morphology.remove_small_objects(edges_b2, min_size=remove_small_objects) edges_g3 = skimage.morphology.remove_small_objects(edges_g2, min_size=remove_small_objects) ###*************************************************!!!!!!!!!!!!!!!!Combine layers here! You don't have to combine if not helpful, but you have to tell the computer what you want!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
edges_combine=edges_r3+edges_b3 #***************Redefine layers as np arrays for subtraction************************************************* edges_combine=np.array(edges_combine) edges_r3=np.array(edges_r3) edges_b3=np.array(edges_b3) np.subtract(edges_combine, edges_r3, dtype=np.float) edges_bsubtractr=np.subtract(edges_combine, edges_r3, dtype=np.float) edges_rsubtractb=np.subtract(edges_combine, edges_b3, dtype=np.float) #**!!!!!!!!!!!!!!!Fills in holes after layers are combined!!!!!!!!!!!!!!!!!!!!! #edges_rb3=scipy.ndimage.morphology.binary_fill_holes(edges_combine) #!!!!!!!!!!!Generates label from combined images, back ground is a little wonky as the output is being inverted by label_image = label(edges_combine, background=0) image_label_overlay = label2rgb(label_image, image=camera1, bg_label=255, alpha=0.2) d=measure.regionprops_table(label_image, image, properties=['label','area','centroid']) data = pd.DataFrame(d) # data.sort_values('area', inplace=True, ascending=False) # label_images=label_image<1 # label_image[label_images]=255 # print(data) #edges = clear_border(edges) #camera=np.array(camera) #camera=np.array(np.count_nonzero(255)) #print(camera) #inds_x = np.arange(len(camera)) #inds_y = (4 * inds_x) % len(camera) #camera[inds_x, inds_y] = 0 #io.imshow(edges) #io.imshow(edges) for j in range(len(d['centroid-0'])): print(j) position = (d['centroid-1'][j],d['centroid-0'][j]) cv2.putText( image_label_overlay, #numpy array on which text is written str(d['label'][j]), #text position, #position at which writing has to start cv2.FONT_HERSHEY_SIMPLEX, #font family 5, #font size (0, 0, 0, 0),15) plt.figure(dpi=1000) plt.imshow(edges_r3, cmap='binary') plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_red.png') plt.close() # plt.figure(dpi=300) # plt.imshow(edges_g3, cmap='binary') # plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_green.tif') # plt.close() plt.figure(dpi=1000) plt.imshow(edges_b3, cmap='binary') 
plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_blue.png') plt.close() plt.figure(dpi=1000) plt.imshow(edges_combine, cmap='binary') plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_combined.png') plt.close() plt.figure(dpi=1000) plt.imshow(edges_bsubtractr, cmap='binary') plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_b_csubtractr') plt.close() plt.figure(dpi=1000) plt.imshow(edges_rsubtractb, cmap='binary') plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'_r_csubtractb.png') plt.close() plt.figure(dpi=1000) plt.imshow(image_label_overlay) plt.savefig(new_dir_name+'/'+filename.split('.')[0]+'combined_image_label_overlay.png') data.to_excel(new_dir_name+'/'+filename.split('.')[0]+'combined_image_labeloverlaydata.xlsx') plt.close() # io.imsave(new_dir_name+'/'+filename.split('.')[0]+'red.jpeg', edges_r3) # io.imsave(new_dir_name+'/'+filename.split('.')[0]+'blue.jpeg', edges_b3) # io.imsave(new_dir_name+'/'+filename.split('.')[0]+'green.jpeg', edges_g3) # data.to_excel(new_dir_name+'/'+filename.split('.')[0]+'.xlsx')
New_Root_Counter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/xulisong1/test1/blob/master/src/Untitled72.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="tajaVtrB7Q1C" colab_type="code" outputId="60893e84-d6b8-4f2a-b7ba-e7eae095784f" colab={"base_uri": "https://localhost:8080/", "height": 35} 1+1 # + [markdown] id="qmQiwYPC7drK" colab_type="text" # test for colab # $\phi(x) = $ # $$\phi(x) = \frac{1}{\sqrt{2\pi}} e^{-1/x^2}.$$ # + id="-sbvz7V79tP9" colab_type="code" outputId="b9e1bd9e-8691-45d5-c982-7fc5585d7dc0" colab={"base_uri": "https://localhost:8080/", "height": 304} '''=========== This is python code ==============''' print('--->>> hello world! Here is the sin graph <<<-----') import numpy as np import matplotlib.pyplot as plt x = np.linspace(-3, 3, 100) plt.plot(x, np.sin(x)); print('this is the end') # + [markdown] id="2FHBdhT08S8F" colab_type="text" # # + [markdown] id="rK7Xhaec8Qdz" colab_type="text" #
src/Untitled72.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Python常用内置函数典型用法 # Python中有许多功能丰富的内置函数,本文基于Python 2.7,就常用的一些函数的典型用法做一些积累,不断更新中。 # # sorted函数的三种用法 # ```python # # coding:utf-8 # # sorted函数的三种用法 # from operator import itemgetter # # data1 = [{'aa':22,'bb':11},{'aa':12,'cc':23},{'aa':67,'dd':103}] # data2 = [{'age':18,'name':'Tom'},{'age':10,'name':'Tim'},{'age':30,'name':'John'},{'age':18,'name':'Amy'}] # # def sort1(): # # 对data1依据'aa'字段值的大小从小打到排序 # ret = sorted(data1,key = lambda item:item['aa']) # 注:如果这里的key写'bb'或'cc',会报KeyError,因为这两个属性并不是每个元素都有的 # print ret # # 输出: # ''' # [{'aa': 12, 'cc': 23}, {'aa': 22, 'bb': 11}, {'aa': 67, 'dd': 103}] # ''' # # def sort2(): # # 对data1依据'aa'字段值的大小从小打到排序 # ret = sorted(data1,cmp = lambda x,y:cmp(x['aa'],y['aa'])) # print ret # # 输出: # ''' # [{'aa': 12, 'cc': 23}, {'aa': 22, 'bb': 11}, {'aa': 67, 'dd': 103}] # ''' # # def sort3(): # # 使用itemgetter对data1依据'aa'字段值的大小从小打到排序 # ret = sorted(data1,key = itemgetter('aa')) # print ret # # 输出: # ''' # [{'aa': 12, 'cc': 23}, {'aa': 22, 'bb': 11}, {'aa': 67, 'dd': 103}] # ''' # # def sort4(): # # 对data2进行排序,先按照'age'从小到大排序,'age'相同的情况下,再按照'name'排序 # ret = sorted(data2,key = itemgetter('age','name')) # print ret # # 输出: # ''' # [{'age': 10, 'name': 'Tim'}, {'age': 18, 'name': 'Amy'}, {'age': 18, 'name': 'Tom'}, {'age': 30, 'name': 'John'}] # ''' # ``` # # 执行命令行命令的三种方式 # ```python # # coding:utf-8 # # 执行命令行命令的三种方式 # import os # import commands # # command = 'ls -al /root' # # def method1(): # ''' # 方式1 # ''' # os.system(command) # # 执行结果:返回执行状态码 # # def method2(): # ''' # 方式2 # ''' # out1 = os.popen(command) # print out1.read() # # 输出:执行结果字符串 # # def method3(): # ''' # 方式3 # ''' # (status,out) = commands.getstatusoutput(command) # # 输出:status是执行状态码,out是执行结果字符串 # ``` # # zip函数的用法 # ``` # Docstring: # zip(seq1 [, seq2 
[...]]) -> [(seq1[0], seq2[0] ...), (...)] # Return a list of tuples, where each tuple contains the i-th element # from each of the argument sequences. The returned list is truncated # in length to the length of the shortest argument sequence. # ``` # 先来看看zip函数的文档,从文档中可以看出,zip函数接收1个或多个序列作为参数,返回一个由元组组成的列表。 # 结果列表的第i个元素是seq1~seqn的第i个元素组成的元组。 # 结果列表的长度等于seq1~seqn中最短的序列的长度。 # 一段测试代码如下: # ```python # # coding:utf-8 # # def main(): # a = '1234' # b = [4,6,7] # # print zip() # # 输出:[] # # print zip(a) # # 输出:[('1',), ('2',), ('3',), ('4',)] # # print zip(a,a) # # 输出:[('1', '1'), ('2', '2'), ('3', '3'), ('4', '4')] # # print zip(a,[]) # # 输出:[] # # print zip(a,b) # # 输出:[('1', 4), ('2', 6), ('3', 7)] # # if __name__ == '__main__': # main() # ``` # # map函数的用法 # map函数是一个高阶函数,支持传入一个函数作为参数。先来看它的文档是怎么说的: # ``` # Docstring: # map(function, sequence[, sequence, ...]) -> list # Return a list of the results of applying the function to the items of # the argument sequence(s). If more than one sequence is given, the # function is called with an argument list consisting of the corresponding # item of each sequence, substituting None for missing values when not all # sequences have the same length. If the function is None, return a list of # the items of the sequence (or a list of tuples if more than one sequence). # ``` # 从map函数的文档中可以看出,该函数的第一个参数为一个函数对象,后面可以跟一个或多个序列,函数的返回值是一个list. # 对比zip函数的用法,可以发现其实map函数就是一个增强版的zip函数,与zip函数不同的是,map函数支持传入一个函数参数来处理序列。 # 如果第一个函数参数不为None,那么返回的结果list的第i个元素,是将该函数作用于每个序列的第i个元素的结果。如果传入的序列的长度不都是相同的,那么结果list的某些元素将会是None. # 如果第一个函数参数为None,那么返回的的结果list的第i个元素,是每个序列第i个元素组成的n元组(n为序列的个数),如果每个序列的长度不都是相同的,那么结果list的某些元素将是None. 
# 下面通过一段程序来看map函数的实际用法: # ```python # # coding:utf-8 # # def main(): # a = [1,2,3,4] # b = [3,5,9] # c = [8,2,3] # print map(None,a,b,c) # # 输出:[(1, 3, 8), (2, 5, 2), (3, 9, 3), (4, None, None)] # # print map(lambda x : x ** 2,a) # # 输出:[1, 4, 9, 16] # # # print map(lambda x,y : x + y,a) # # 输出:TypeError <lambda>() takes exactly 2 arguments (1 given) # # print map(lambda x,y : x + y,b,c) # # 输出:[11, 7, 12] # # # print map(lambda x,y,z : x + y + z,a,b,c) # # 输出:TypeError: unsupported operand type(s) for +: 'int' and 'NoneType' # # print map(lambda x,y : x + y if x is not None and y is not None else None,a,b) # # 输出:[4, 7, 12, None] # # if __name__ == '__main__': # main() # ``` # # reduce函数的用法 # 先看函数文档: # ``` # Docstring: # reduce(function, sequence[, initial]) -> value # Apply a function of two arguments cumulatively to the items of a sequence, # from left to right, so as to reduce the sequence to a single value. # For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates # ((((1+2)+3)+4)+5). If initial is present, it is placed before the items # of the sequence in the calculation, and serves as a default when the # sequence is empty. # ``` # reduce函数接收三个参数:function,seq,init,其中前两个是必选参数,最后一个为可选参数。 # reduce函数做了这样一件事情:从左到右遍历seq,将seq[0]和seq[1]传入函数function进行运算(function为一个接收两个参数的函数),得到一个结果值,然后将这个结果值再和seq[2]传入fucntion进行运算再得到一个新的结果值...以此类推。最终得到一个值,就是该函数的返回值。 # 如果传入了init,那么init和seq[0]会作为第一次传入funciton的参数,如果seq为空,init也会作为reduce的返回值返回。 # 用法示例如下: # ```python # # coding:utf-8 # # def main(): # lst = [1,2,3] # f = lambda x,y:x*y # print reduce(f,lst) # # 输出:6 # # print reduce(f,lst,-1) # # 输出:-6 # # print reduce(f,[],-2) # # 输出:-2 # # if __name__ == '__main__': # main() # # ``` # # base64编解码 # ```python # # coding:utf-8 # # 测试base64编解码 # import base64 # # def main(): # s = '123abc' # # # 编码 # print base64.b64encode(s) # # 输出:MTIzYWJj # # # 解码 # print base64.b64decode('MTIzYWJj') # # 输出:123abc # # if __name__ == '__main__': # main() # ```
useful-func/func-handout.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jpwhalley/GMS_Stats_Course/blob/master/6_Machine_Learning_Applications/NeuralNetworkTutorial_GMS2019.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="x30sRRVu_hXA" colab_type="text" # ### Neural Network tutorial # # In this tutorial we will be exploring a number of topics discussed in this morning's lecture, using a famous dataset, the MNIST collection of 70000 handwritten and labeled digits. This will hopefully make these topics "come alive" by showing how they work in practice in a simple but realistic setting. # # This tutorial makes use of a bunch of useful tools and frameworks: Keras, TensorFlow, Tensorboard, Numpy, Matplotlib, Python, jupyter, Google colab. You can run this tutorial even if you haven't had previous experience with some or any of these. Hopefully this tutorial gives you a starting point for exploring them further; in my experience they can massively boost your productivity in data exploration and research generally, and machine learning in particular. # + [markdown] id="Tsa9KZXxBAzZ" colab_type="text" # ## Initialization # # This just loads the various packages we'll be using, and the MNIST data set. # # Make sure you run this on a GPU backend (runs faster) - go to Edit > Notebook settings or Runtime > Change runtime type and select GPU as Hardware accelerator. If you want to double-check, uncomment the line with `device_lib.list_local_devices()` and check that the output shows `device_type GPU` somewhere. 
# + id="8S1ayS227InJ" colab_type="code" colab={} # %tensorflow_version 1.x from __future__ import division from __future__ import print_function import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from datetime import datetime from tensorflow.keras import datasets from tensorflow.keras.callbacks import TensorBoard #from tensorflow.python.client import device_lib #print(device_lib.list_local_devices()) dataformat="channels_last" shape=[28,28,1] (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data() x_train = x_train / 255.0 x_test = x_test / 255.0 print("Training: images",x_train.shape, " labels", y_train.shape, "; Test set",x_test.shape, " labels",y_test.shape) # + [markdown] id="70KhZjCNCEHM" colab_type="text" # Always good practice to have a look at your data. Here are the first two data points, plotted as a matrix, and their labels. # # The data are just `numpy` arrays; their dimensions are printed above. Visualisation is done using the `matplotlib` library. # # * Find a way to plot several digits with their labels in a grid. # + id="p934ZSvt7NEk" colab_type="code" colab={} plt.matshow(x_train[0]) plt.matshow(x_train[1]) print(y_train[0:2]) # + [markdown] id="dPg68VzeCi0m" colab_type="text" # ## Model 1: multinomial logistic regression # # Below is the first complete model to predict the label (0 to 9) from the pixel intensities, in the simplest possible way (kindof) - using a multinomial logistic regression model, basically logistic regression but for classification instead of binary outcomes. # # (The `Sequential` refers to the fact that in this model (and all models in this tutorial), data flows sequentially through a number of layers from input to output. This is not true for e.g. the Inception module, but Tensorflow/Keras also easily copy with such models.) 
# # In detail, for an input $x \in R^{28\times 28} = R^{784}$, a vector representing the input image, the model predicts # # $$\hat{y} = A x,$$ # # where $A \in R^{10 \times 784}$ and $\hat{y}\in R^{10}$. These are transformed into probabilities using the softmax function: # # $$p(\mathrm{digit}=i) = { \exp(\hat{y}_i) \over \sum_j \exp(\hat{y}_j) }$$ # # To compare predicted probabilities $p$ for a given input $x_j$ with the actual class $y_j$, we use the `sparse_categorical_entropy` loss function. (Here, `sparse` refers to the class encoding we use; we encode classes as integers 0 to 9, rather than "one-hot encoding", where we would represent e.g. class 2 as the vector $[0,1,0,0,0,0,0,0,0,0,0]$.) # # For this model there is theory about how to obtain optimal parameters $A$ for a given data set; but in the spirit of neural networks, we are using a standard stochastic gradient descend algorithm, `adam`. We will very quickly leave theory behind when we add even a little more complexity to the model, and then numerical optimization is the only option. # # The call to `compile` builds a computational graph of the model. The computation performed by the graph includes: # # 1. evaluation of the model's prediction $F_\theta(x)$ over a batch $\{x_1,\ldots,x_B\}$; # 2. calculation of the loss $L_i := L( F_\theta(x_i), y_i )$; # 3. calculation of the gradient $\nabla_\theta \sum_{i=1}^L L_i$, and # 4. a single step of the optimization algorithm `adam`, resulting in new parameters $\theta$, ready to be executed on a GPU once you feed it data. # # If you've ever any of those steps by hand, you will appreciate how transformative Tensorflow/Keras (and similar frameworks) have been in machine learning research. # # The call to `fit` finally runs the computational graph on successive batches of data. One complete feed-through of all data is called an `epoch` in machine learning parlance. 
The `fit` function is also given test data -- this is not used for training, but to evaluate the model's performance after each epoch. # # * How many parameters does this model have? # * Find out how to print a summary of the model # + id="lvffNZdF764J" colab_type="code" colab={} tf.reset_default_graph() tf.keras.backend.clear_session() ## not entirely sure why this is necessary ## a very simple model - multinomial logistic regression (= softmax activation) on all 28 x 28 pixels model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28,28)), tf.keras.layers.Dense(10,activation=tf.nn.softmax) ]) ## set up logging via tensorboard logdir = "logs_model1/{}".format(datetime.now().strftime("%Y%m%d-%H%M%S")) print("Writing log files to ",logdir) tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1) ## pull the model, loss function, optimizer, and output metrics into a single computational graph model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) ## run the computational graph 50 times on the entire training data, to fit the model parameters model.fit(x_train,y_train,epochs=50,validation_data=(x_test,y_test),callbacks=[tensorboard]) ## evaluate the final model on the test set print("Test set performance:") model.evaluate(x_test,y_test) # + [markdown] id="Wd419VmzKbVz" colab_type="text" # ## Tensorboard # # The `tensorboard` "callback" in the code above is called by the `fit` function at the end of each epoch. The callback writes some logging information in a directory, which we can explore later. # # Tensorboard is a standalone program that visualises the log files produced by tensorflow. Running the cell below starts the tensorboard program inside the notebook. You just have to do this once and use it for all runs in this session. Within tensorboard you can select the log files you want to look at. # # Once the run above has finished, start Tensorboard and have a look at the run. 
Notice that the model has overtrained - the validation loss has reached a minimum at around the 10th epoch, and gradually increased after that, even as the training loss continued to decrease. This behaviour is seen quite often (but more usually after many more epochs). Early Stopping is a heuristic that identifies the validation loss minimum, and stops training there. # # * Click on the 'graph' tab. Can you make sense of the computational graph? Try double-clicking on nodes. # + id="lKEt_1VPA5kp" colab_type="code" colab={} # Load TENSORBOARD # %load_ext tensorboard # Start TENSORBOARD # %tensorboard --logdir logs_model1 # + [markdown] id="OcQqW0aHO97y" colab_type="text" # ## Model 2 - a two-layer neural network # # The accuracy (about 90%) achieved by the first model is not bad, but not hugely impressive either. Part of the reason is that the model is purely a linear model, without nonlinearities or interactions. So let's add a second layer of neurons, with the standard (ReLU) activation. # # * Compare this model's performance with that of model 1 # * Also compare the extent of overtraining that has occurred. # * How many parameters does this model have? # * Try running with fewer or more than 50 nodes in the middle layer. How does this affect the model's performance and degree of overtraining? 
# + id="7KYymOvMRO96" colab_type="code" colab={} tf.reset_default_graph() tf.keras.backend.clear_session() ## a very simple model - multinomial logistic regression (= softmax activation) on all 28 x 28 pixels model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28,28)), tf.keras.layers.Dense(50,activation=tf.nn.relu), tf.keras.layers.Dense(10,activation=tf.nn.softmax) ]) logdir = "logs_twolayers/{}".format(datetime.now().strftime("%Y%m%d-%H%M%S")) tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(x_train,y_train,epochs=25,validation_data=(x_test,y_test),callbacks=[tensorboard]) print("Test set performance:") model.evaluate(x_test,y_test) # + id="_OyXIwx7RsfI" colab_type="code" colab={} ## tensorboard does not seem to find new data in old log directories, so start a new instance # %tensorboard --logdir logs_twolayers # + [markdown] id="1P2Yqj2DAasI" colab_type="text" # ## Convolutional neural network # # Adding a second layer of neurons did improve performance a bit, but we're a long way off state of art. A key shortcoming of the # models so far is that they treat pixels in isolation. In particular the models are not constrained to be translation-symmetric: shifting the input image by one pixel to the right should not make a difference in the output. # # One way to help the model is to use a convolutional layer, which re-uses the same "kernel" and applies it across the (2D) image. # The next model uses two 2D convolutions, each using 'same' padding which ensures the output resolution is the same as the input resolution. Each 2D convolution is followed by a $2\times 2$ max pooling layer, reducing the resolution with a factor 2 in both dimensions. # # The first convolutional layer has 32 kernels, so its output is of dimension $28\times 28\times 32$. After max pooling this becomes $14 \times 14\times 32$. 
# The next convolutional layer has 16 kernels, and after max pooling the output dimension is $7\times 7\times 16$. This is then followed by a $50$-channel fully connected layer, and finally a $10$-channel output layer.
#
# * How many parameters does this model have?

# + id="O1LS7Xc-T1Uc" colab_type="code" colab={}
tf.reset_default_graph()
tf.keras.backend.clear_session()

## Conv2D needs an explicit colour-channel axis; channels_last is TensorFlow's
## default (batch, rows, cols, channels) layout. These two variables were
## previously defined only in the later dropout cell, so running the notebook
## top-to-bottom raised a NameError here -- define them before first use.
dataformat = "channels_last"
shape = [28, 28, 1]

## a simple convolutional model:
## - 2d convolutional layer with 16 5x5 kernels, and ReLU activation (here implemented as a separate layer)
## - 2d max pooling, input size 2x2, and stride 2 in both dimensions
model = tf.keras.models.Sequential([
    tf.keras.layers.Reshape(target_shape=shape, input_shape=[28,28]),
    tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='linear'),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat),
    tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='linear'),
    tf.keras.layers.Activation('relu'),
    tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(50,activation=tf.nn.relu),
    tf.keras.layers.Dense(10,activation=tf.nn.softmax)
])

logdir = "logs_twoconvolutions/{}".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train,y_train,epochs=10,validation_data=(x_test,y_test),callbacks=[tensorboard])
print("Test set performance:")
model.evaluate(x_test,y_test)

# + [markdown] id="llc2_oEpyCyy" colab_type="text"
# ## Dropout
#
# The convolutional model is pretty good, but still suffers from overtraining. The training loss has become very low, while the
# test accuracy stabilises.
#
# Dropout randomly removes neurons from the network (sets the corresponding output to 0).
# The probability that a neuron
# is dropped out is set by the user. It can be shown that the
# effect of this is (approximately) equivalent to putting a
# prior on the parameters, causing them to shrink, which reduces
# overtraining.
#
# * Does this address overtraining? Why is the prediction accuracy higher than the training accuracy? (Prediction is deterministic; how is this achieved with dropout layers?)

# + id="RjzQCOpNFU3O" colab_type="code" colab={}
tf.reset_default_graph()
tf.keras.backend.clear_session()

## Conv2D needs an explicit colour channel; channels_last = (batch, rows, cols, channels)
dataformat="channels_last"
shape=[28,28,1]

## a simple convolutional model:
## - 2d convolutional layer with 16 5x5 kernels, and ReLU activation (inline this time)
## - 2d max pooling, input size 2x2, and stride 2 in both dimensions
## - dropout after each pooling stage and around the dense layers to reduce overtraining
model = tf.keras.models.Sequential([
    tf.keras.layers.Reshape(target_shape=shape, input_shape=[28,28]),
    tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='relu'),
    tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat),
    tf.keras.layers.Dropout(0.1),  # light dropout in the convolutional part
    tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='relu'),
    tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.2),  # heavier dropout in the dense part of the network
    tf.keras.layers.Dense(50,activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10,activation=tf.nn.softmax)
])

logdir = "logs_twoconvolutions_dropout/{}".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train,y_train,epochs=20,validation_data=(x_test,y_test),callbacks=[tensorboard])
print("Test set performance:")
model.evaluate(x_test,y_test)

# + [markdown] id="q65FgZ1HE_yp" colab_type="text"
# ## Data augmentation
#
# The first convolutional layer shares weights across positions of the image.
It detects features like edges and corners, and this # weight sharing encodes our intuition that these features can occur # anywhere in the image. # # More broadly, our model should interpret images of digits in the same may, no matter where the digits occur within the image. In other words it should be "invariant" with respect to translations. # A convolutional layer helps to achieve this - shifting the input by one pixel, causes the output of the convolutional layer to also shift by one pixel. (This is not "invariance", but rather "equivariance" - the output is transformed by an "equivalent" transformation.) However, the other layers (e.g. the max pool layer) break the symmetry again -- shifting the image by one pixel to the left, will give an output at the max pool layer that does not correspond in a simple way to the original output. # # Data augmentation helps to make the model more symmetric, by giving it more "equivalent" input data points, and let the model learn the required symmetry. # # Rotational symmetry is another symmetry of the model - digits should be interpreted the same way if they are rotated by a small angle. 
# + id="sk5CAXJ1GmPw" colab_type="code" colab={}
def augment_data(dataset, dataset_labels, num_augmented_images=1):
    """Return an augmented copy of (dataset, dataset_labels).

    For every source image the result contains the untouched original plus,
    per augmentation round, one randomly shifted and one randomly rotated
    variant -- i.e. 1 + 2*num_augmented_images images (with matching labels)
    per input image, in that order.
    """
    out_images = []
    out_labels = []
    # random_shift/random_rotation expect a trailing colour-channel axis.
    with_channel = np.reshape(dataset, dataset.shape + (1,))

    for idx in range(dataset.shape[0]):
        # keep the original image first
        out_images.append(dataset[idx])
        out_labels.append(dataset_labels[idx])

        for _ in range(num_augmented_images):
            # shift images by up to 0.05*28 (~1) pixels in any direction,
            # then drop the colour channel again
            shifted = tf.contrib.keras.preprocessing.image.random_shift(
                with_channel[idx], 0.05, 0.05, row_axis=0, col_axis=1, channel_axis=2)
            out_images.append(shifted[:, :, 0])
            out_labels.append(dataset_labels[idx])

            # rotate images by up to 20 degrees
            rotated = tf.contrib.keras.preprocessing.image.random_rotation(
                with_channel[idx], 20, row_axis=0, col_axis=1, channel_axis=2)
            out_images.append(rotated[:, :, 0])
            out_labels.append(dataset_labels[idx])

    return np.array(out_images), np.array(out_labels)

## This creates an augmented dataset in memory. This can also be done on the fly
## using ImageDataGenerator, but it turns out that's quite slow in this case.
x_aug_train, y_aug_train = augment_data(x_train, y_train, 1) ## Have a look at the first digit and its transformations (translation and rotation) plt.matshow(x_aug_train[0]) plt.matshow(x_aug_train[1]) plt.matshow(x_aug_train[2]) print(y_aug_train[0:3]) # + id="o1OHg1NwLlNH" colab_type="code" colab={} tf.reset_default_graph() tf.keras.backend.clear_session() ## the same model as above model = tf.keras.models.Sequential([ tf.keras.layers.Reshape(target_shape=shape, input_shape=[28,28]), tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='relu'), tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(16, 5, padding='same', data_format=dataformat, activation='relu'), tf.keras.layers.MaxPooling2D((2,2),(2,2), padding='same', data_format=dataformat), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(50,activation=tf.nn.relu), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(10,activation=tf.nn.softmax) ]) logdir = "logs_data_augmentation/{}".format(datetime.now().strftime("%Y%m%d-%H%M%S")) tensorboard = TensorBoard(log_dir=logdir, histogram_freq=1) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(x_aug_train,y_aug_train,epochs=20,batch_size=3*32,validation_data=(x_test,y_test),callbacks=[tensorboard]) print("Test set performance:") model.evaluate(x_test,y_test) # + [markdown] id="fayiDUEWJAnC" colab_type="text" # ## Next steps # # The model we have now is quite good (I'm getting ~99.5% test accuracy), but there probably still is room for improvement. It contains quite a few parameters we could tweak; the data augmentation could # be refined; we can change the parameters of the optimizer, or the # optimizer itself; and we can add or remove layers or other features # of the model. # # This exercise is called "hyperparameter search". 
Strategies here range from a simple grid search, to empirical modeling of the outcome as a function of the hyperparameters and directing the search in that way. # # Hyperparameter search is best done on a server or cluster, so # we won't try this in this tutorial. # # Other topics not covered are dilated networks and residual networks, which we have found helpful in modeling DNA. # However, hopefully the two tutorials in this section have # given you a number of entry points with which to start building # your own models. # # #
Machine_learning_applications/practicals/NeuralNetworkTutorial_GMS2019.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np

# Show the raw CSV text first, for comparison with the parsed array below.
with open('data/src/sample.csv') as f:
    raw_text = f.read()
print(raw_text)

# Parse the same file into a 2-D float ndarray.
arr = np.loadtxt('data/src/sample.csv', delimiter=',')
print(type(arr))
print(arr)

# Slicing: every row after the first, first two columns.
print(arr[1:, :2])

# Aggregations: overall mean, then per-column sums.
print(arr.mean())
print(arr.sum(axis=0))
notebook/csv_numpy.ipynb