code
stringlengths
2.5k
150k
kind
stringclasses
1 value
## Some exploratory data analysis and the development of vegetation dependent angle model authors: Tuguldur Sukhbold ``` datapath = '../d/' import numpy as np import pandas as pd import matplotlib.pyplot as plt from itertools import product as iterp from math import sin, cos, radians, pi from sklearn.metrics import mean_absolute_error from skimage.transform import warp_polar from skimage.io import imread as rdtif np.random.seed(123) train = pd.read_csv(f'{datapath}train-unique.csv') etrain = pd.read_csv(f'{datapath}extra_train.csv') #train = pd.concat([train, etrain], ignore_index=True) psz = train.PlotSize_acres x, y = train.x, train.y d = np.sqrt(x*x + y*y) plt.hist(d, density=True, color='steelblue', alpha=0.5, bins=30) plt.xlabel('distance [km]') plt.ylabel('PDF') adeg = np.rad2deg(np.arctan2(y, x)) arad = np.arctan2(y, x) plt.figure(figsize=(10,8)) plt.scatter(adeg, d, c=psz, s=psz*150, cmap='Set3', alpha=0.75) plt.xlabel('angle [deg]') plt.ylabel('distance [km]') plt.ylim(0,1.79) plt.xlim(-180, 180) plt.colorbar(label='Area [acres]') fig = plt.figure(figsize=(12,10)) ax = fig.add_subplot(projection='polar') c = ax.scatter(arad, d, c=psz, s=psz*150, cmap='Set3', alpha=0.75) def mkFreqHist(ax, df, rng): x, y = df.x, df.y d = np.sqrt(x*x + y*y) w = np.ones_like(d) / len(d) ax.hist(d, weights=w, color='gray', alpha=0.75, bins=10) ax.set_xlabel('distance [km]') ax.set_ylabel('Frequency') ax.set_title(f'{rng[0]} < Size [acres] <= {rng[1]} (tot={len(d)})') rngs = [(0, 0.2), (0.2, 0.5), (0.5, 1), (1, 100)] fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(10,8), tight_layout=True) k = 0 for i,j in iterp(range(2), range(2)): subdf = train.loc[(train.PlotSize_acres > rngs[k][0]) & (train.PlotSize_acres <= rngs[k][1])] mkFreqHist(axes[i,j], subdf, rngs[k]) k += 1 ``` ## 2. 
Models checked on train set ``` def getSentinelTimeData(path2tif, filterCloudy=True): """ process input Sentinel TIFF file to map its contents into a dictionary with filter keys and monthly images as values note: it won't process images with number of channels other than 192 when filterCloudy flag is True: - excludes cloudy images based on cloud-mask (filter#16) - final dictionary contains 13 real filter data """ # filter and month IDs filters = np.arange(16, dtype=int) + 1 months = np.arange(12) + 1 # read TIFF file into 3D array img = rdtif(path2tif) # stop if its a funky image, proceed only if there are usual 192 channels: if img.shape[-1] != 192: exit(f' cannot process this funky image with {img.shape[-1]} channels') # initialize the dict with empty list for each filter d = {} for f in filters: d[f] = [] # populate with 2D images for i, j in iterp(months, filters): channel = (i - 1) * len(filters) + j - 1 # append normalized image maxFrame = np.amax(img[:, :, channel]) if maxFrame == 0.: d[j].append(img[:, :, channel]) else: d[j].append(img[:, :, channel] / maxFrame) # exclude cloudy images if filterCloudy: for f in filters: for month in months: # max value of cloud mask image maxCloudMask = np.amax(d[16][month-1]) # its cloudy if max is not 0 if maxCloudMask != 0: d[f][month-1] = None # we don't need the last 3 elements del d[16] # cloudmask itself del d[15] # QA20 del d[14] # QA10 return d def getIndex(d, ID): l = [] for m in range(len(d[1])): if d[1][m] is None: indx = None else: if ID == 'NDVI': indx = (d[7][m] - d[4][m]) / (d[7][m] + d[4][m]) if ID == 'TBWI': indx = (d[10][m] - d[12][m]) / d[11][m] l.append(indx) return l def getVegFreq(img): radius = int(len(img) / 2) polar = warp_polar(img, radius=radius).T freq = np.array([sum(polar[:, i]) for i in range(max(polar.shape))]) return freq / np.sum(freq) def getSampleF4(dS): NDVI= getIndex(dS, ID = 'NDVI') if dS[4][6] is not None: sample = dS[4][6] k = 6 elif dS[4][5] is not None: sample = dS[4][5] k=5 
else: for i in range(3,12): print(i, type(dS[4][i])) if dS[4][i] is not None: sample = dS[4][i] k=i break return sample, NDVI[k] def getPszRange(szval, qual, realTest=False): if not realTest: rng = (0, 0.2) if qual == 3: if szval > 0.2 and szval <= 0.5: rng = (0.25, 0.5) if szval > 0.5 and szval <= 1.0: rng = (0.5, 1) if szval > 1: rng = (1, 100) else: # assume always bad quality in realTest rng = (0, 0.2) return rng def getPszRange_Test(): return (0, 0.2) def getDistance(rng): sdf = train.loc[(train.PlotSize_acres > rng[0]) & (train.PlotSize_acres <= rng[1])] x, y = sdf.x, sdf.y d = np.sqrt(x*x + y*y) w = np.ones_like(d) / len(d) return np.random.choice(d, p=w) def getXY(d, adeg): """ (x,y) displacements in [km] based on distance (d[km]) and angle (adeg[degrees]) """ arad = pi / 2 - radians(adeg) return d * cos(arad)/2, d * sin(arad)/2 def getScore(x,y, M_x, M_y): """ MAE score based on test (x,y) and predicted (M_x, M_y) """ tst = np.vstack((x, y)) pred = np.vstack((M_x, M_y)) return mean_absolute_error(tst, pred) ``` ### 2.1 Zeros and Means Here all (x,y) predictions are set to all zeros or all mean values; these scores will be used as our benchmark: ``` M_x, M_y = np.zeros(len(x)), np.zeros(len(y)) zeros_score = getScore(x, y, M_x, M_y) M_x = np.zeros(len(x)) + np.mean(x) M_y = np.zeros(len(y)) + np.mean(y) means_score = getScore(x, y, M_x, M_y) print(f' zeros: {zeros_score:.4f} means: {means_score:.4f}') ``` ### 2.2 Uniform Angle and Exact Distances Here the angles are drawn randomly from uniform, and distances from the exact training distribution: ``` M_adeg = np.random.uniform(low=-180, high=180, size=(len(train), )) M_d = np.zeros(len(train)) for i in range(len(train)): rng = getPszRange(train.PlotSize_acres[i], train.Quality[i]) M_d[i] = getDistance(rng) M_x, M_y = np.zeros(len(M_d)), np.zeros(len(M_d)) for i in range(len(M_adeg)): M_x[i], M_y[i] = getXY(M_d[i], M_adeg[i]) fig, ax = plt.subplots(ncols=3, figsize=(15,5), tight_layout=True) ax[0].hist(M_d, 
bins=30) ax[0].set(ylabel='frequency', xlabel='distance [km]') ax[1].scatter(d, M_d) ax[1].plot(d,d, color='black') ax[1].set(ylabel='model distance [km]', xlabel='test distance [km]') ax[2].scatter(y, M_y) ax[2].plot(y,y, color='black') ax[2].set(ylabel='model y-component [km]', xlabel='test y-component [km]') getScore(x,y, M_x, M_y) ``` ### 2.3 Uniform Angle and Uniform Limited Distances Here the angles are still drawn randomly from uniform, but the distances are now draw from a limited uniform in the range [0, 0.25] km: ``` M_adeg = np.random.uniform(low=-180, high=180, size=(len(train), )) M_d = np.random.uniform(low=0, high=0.25, size=(len(train), )) M_x, M_y = np.zeros(len(M_d)), np.zeros(len(M_d)) for i in range(len(M_adeg)): M_x[i], M_y[i] = getXY(M_d[i], M_adeg[i]) fig, ax = plt.subplots(ncols=3, figsize=(15,5), tight_layout=True) ax[0].hist(M_d, bins=30) ax[0].set(ylabel='frequency', xlabel='distance [km]') ax[1].scatter(d, M_d) ax[1].plot(d,d, color='black') ax[1].set(ylabel='model distance [km]', xlabel='test distance [km]', xlim=[0,1]) ax[2].scatter(y, M_y) ax[2].plot(y,y, color='black') ax[2].set(ylabel='model y-component [km]', xlabel='test y-component [km]') getScore(x,y, M_x, M_y) ``` ### 2.4 Vegetation Dependent Angle and Uniform Limited Distances Now the angles are drawn from a distribution based on the vegetation map of Sentinel-2 images. 
The distances are still draw from a limited uniform in the range [0, 0.25] km: ``` fieldID = 'a5e136b4' img = rdtif(f'{datapath}sentinel/{fieldID}.tif') dS = getSentinelTimeData(f'{datapath}sentinel/{fieldID}.tif') dS[4][6] is None M_adeg = np.array([]) M_d = np.array([]) M_x = np.array([]) M_y = np.array([]) x,y = np.array([]), np.array([]) for i in range(len(train)): fieldID = train.ID[i].split('_')[-1] img = rdtif(f'{datapath}sentinel/{fieldID}.tif') if img.shape[-1] == 192: dS = getSentinelTimeData(f'{datapath}sentinel/{fieldID}.tif') # normalized mask sampleF4, NDVI = getSampleF4(dS) img = sampleF4 * NDVI - NDVI img = img + abs(np.amin(img)) img = img / np.amax(img) img[img > 0.5] = 0 img[img > 0] = 1 # draw angle based on vegetation probability probV = getVegFreq(img) adeg = np.random.choice(np.arange(360)-180, p=probV) M_adeg = np.append(M_adeg, adeg) # random distance dval = np.random.uniform(low=0, high=0.25) M_d = np.append(M_d, dval) # model x,y xval, yval = getXY(dval, adeg) M_x = np.append(M_x, xval) M_y = np.append(M_y, yval) # corresponding test x,y x = np.append(x, train.x[i]) y = np.append(y, train.y[i]) getScore(x,y, M_x, M_y) fig, ax = plt.subplots(tight_layout=True) ax.scatter(y, M_y) ax.plot(y,y, color='black') ax.set(ylabel='model y-component [km]', xlabel='test y-component [km]') ``` ## Test and Submission ``` test = pd.read_csv(f'{datapath}test.csv') np.random.seed(0) M_x = np.array([]) M_y = np.array([]) for i in range(len(test)): fieldID = test.ID[i].split('_')[-1] img = rdtif(f'{datapath}sentinel/{fieldID}.tif') if img.shape[-1] == 192: dS = getSentinelTimeData(f'{datapath}sentinel/{fieldID}.tif') # normalized mask sampleF4, NDVI = getSampleF4(dS) img = sampleF4 * NDVI - NDVI img = img + abs(np.amin(img)) img = img / np.amax(img) img[img > 0.5] = 0 img[img > 0] = 1 # draw angle based on vegetation probability probV = getVegFreq(img) adeg = np.random.choice(np.arange(360)-180, p=probV) # random distance dval = np.random.uniform(low=0.1, 
high=0.25) # model x,y xval, yval = getXY(dval, adeg) M_x = np.append(M_x, xval) M_y = np.append(M_y, yval) else: M_x = np.append(M_x, 0.0) M_y = np.append(M_y, 0.0) submit = pd.read_csv(f'{datapath}sample_submission.csv') submit.x = M_x submit.y = M_y submit.head() submit.to_csv('limRange_SentinelDepAngle_submission.csv', index=False) plt.scatter(M_x, M_y) ``` ### Zeros and Means ``` train = pd.read_csv(f'{datapath}train-unique.csv') submit = pd.read_csv(f'{datapath}sample_submission.csv') submit.x = 0 submit.y = 0 submit.to_csv('zeros_submission.csv', index=False) submit.x = np.mean(train.x) submit.y = np.mean(train.y) submit.to_csv('means_submission.csv', index=False) submit.head() ```
github_jupyter
``` import time import numpy as np import random def write_table2sql(table, engine, sql=None): def select_col_agg(mask): """ select col agg pair :return: """ col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num) * mask) sel_type = table['types'][sel_idx] if sel_type == 'text': sel_agg = random.sample([0, 3], 1) else: sel_agg = random.sample([0,1,2,3,4,5], 1) sel_agg = sel_agg[0] return sel_idx, sel_agg def select_cond_op(type): if type == 'text': return 0 else: flag = random.randint(0, 2) return flag datas = [] for j in range(1): data = {} sql = {} agg = None sel = None conds = [] data['table_id'] = table['id'] mask = np.asarray([1] * len(table['header'])) ret = None # make sure at least one condition cnt = 0 while(1): cnt += 1 col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num)) sel_type = table['types'][sel_idx] cond_op = select_cond_op(sel_type) rows = table['rows'] if len(rows) == 0: return [] row_num = len(rows) select_row = random.randint(0, row_num-1) cond_value = rows[select_row][sel_idx] if len(str(cond_value).split()) > 20 or str(cond_value) == '': continue conds.append([sel_idx, cond_op, cond_value]) start = time.time() ret = engine.execute(table['id'], 0, 0, conds, ret_rows=True) if time.time() - start > 1: mask[sel_idx] = -1 break if len(ret) != 0: mask[sel_idx] = -1 break conds.pop() if len(ret) != 0: for i in range(min(3, len(ret[0])-1)): col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num) * mask) sel_type = table['types'][sel_idx] cond_op = select_cond_op(sel_type) rows = ret row_num = len(rows) select_row = random.randint(0, row_num-1) cond_value = list(rows[select_row])[sel_idx] conds.append([sel_idx, cond_op, cond_value]) ret = engine.execute(table['id'], 0, 0, conds, ret_rows=True) # result doesn't change if len(ret) == row_num: conds.pop() break if len(str(cond_value).split()) > 20 or str(cond_value) == '': conds.pop() break mask[sel_idx] = -1 if len(ret) == 0: break sel_idx, 
sel_agg = select_col_agg(mask) sel = sel_idx agg = sel_agg sql['agg'] = agg sql['sel'] = sel sql['conds'] = conds data['sql'] = sql question = sql2qst(sql, table) data['question'] = question datas.append(data) return datas op_sql_dict = {0: "=", 1: ">", 2: "<", 3: "OP"} agg_sql_dict = {0: "", 1: "MAX", 2: "MIN", 3: "COUNT", 4: "SUM", 5: "AVG"} agg_str_dict = {0: "What is ", 1: "What is the maximum of ", 2: "What is the minimum ", 3: "What is the number of ", 4: "What is the sum of ", 5: "What is the average of "} op_str_dict = {0: "is", 1: "is more than", 2: "is less than", 3: ""} def sql2qst(sql, table): select_index = sql['sel'] aggregation_index = sql['agg'] conditions = sql['conds'] # select part select_part = "" select_str = table['header'][select_index] agg_str = agg_str_dict[aggregation_index] select_part += '{}{}'.format(agg_str, select_str) # where part where_part = [] for col_index, op, val in conditions: cond_col = table['header'][col_index] where_part.append('{} {} {}'.format(cond_col, op_str_dict[op], val)) # print('where part:', where_part) final_question = "{} that {}".format(select_part, ' and '.join(where_part)) # print('final question:', final_question) return final_question import records from sqlalchemy import * import re, time from babel.numbers import parse_decimal, NumberFormatError schema_re = re.compile(r'\((.+)\)') # group (.......) dfdf (.... )group num_re = re.compile(r'[-+]?\d*\.\d+|\d+') # ? zero or one time appear of preceding character, * zero or several time appear of preceding character. 
# Catch something like -34.34, .4543, # | is 'or' agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG'] cond_ops = ['=', '>', '<', 'OP'] class DBEngine: def __init__(self, fdb): self.db = create_engine('sqlite:///{}'.format(fdb)) self.conn = self.db.connect() self.table_id = '' self.schema_str = '' def execute_query(self, table_id, query, *args, **kwargs): return self.execute(table_id, query.sel_index, query.agg_index, query.conditions, *args, **kwargs) def execute(self, table_id, select_index, aggregation_index, conditions, lower=True, ret_rows=False): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) start = time.time() if table_id != self.table_id: self.table_id = table_id table_info = self.conn.execute('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).fetchall()[0].sql.replace('\n','') self.schema_str = schema_re.findall(table_info)[0] schema = {} for tup in self.schema_str.split(', '): c, t = tup.split() schema[c] = t select = 'col{}'.format(select_index) agg = agg_ops[aggregation_index] if agg: select = '{}({})'.format(agg, select) if ret_rows is True: select = '*' where_clause = [] where_map = {} for col_index, op, val in conditions: if lower and (isinstance(val, str) or isinstance(val, str)): val = val.lower() if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)): try: # print('!!!!!!value of val is: ', val, 'type is: ', type(val)) # val = float(parse_decimal(val)) # somehow it generates error. val = float(parse_decimal(val, locale='en_US')) # print('!!!!!!After: val', val) except NumberFormatError as e: try: val = float(num_re.findall(val)[0]) # need to understand and debug this part. except: # Although column is of number, selected one is not number. Do nothing in this case. 
pass where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index)) where_map['col{}'.format(col_index)] = val where_str = '' if where_clause: where_str = 'WHERE ' + ' AND '.join(where_clause) query = 'SELECT {} FROM {} {}'.format(select, table_id, where_str) out = self.conn.execute(query, **where_map) if ret_rows is False: return [o[0] for o in out] return [o for o in out] def execute_return_query(self, table_id, select_index, aggregation_index, conditions, lower=True): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) table_info = self.db.query('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).all()[0].sql.replace('\n','') schema_str = schema_re.findall(table_info)[0] schema = {} for tup in schema_str.split(', '): c, t = tup.split() schema[c] = t select = 'col{}'.format(select_index) agg = agg_ops[aggregation_index] if agg: select = '{}({})'.format(agg, select) where_clause = [] where_map = {} for col_index, op, val in conditions: if lower and (isinstance(val, str) or isinstance(val, str)): val = val.lower() if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)): try: # print('!!!!!!value of val is: ', val, 'type is: ', type(val)) # val = float(parse_decimal(val)) # somehow it generates error. 
val = float(parse_decimal(val, locale='en_US')) # print('!!!!!!After: val', val) except NumberFormatError as e: val = float(num_re.findall(val)[0]) where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index)) where_map['col{}'.format(col_index)] = val where_str = '' if where_clause: where_str = 'WHERE ' + ' AND '.join(where_clause) query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str) #print query out = self.db.query(query, **where_map) # return [o.result for o in out], query return [o[0] for o in out], query def show_table(self, table_id): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) rows = self.db.query('select * from ' +table_id) print(rows.dataset) import json class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) import numpy as np with open('train.tables.jsonl', 'r') as fr: tables = [] for table in fr: table = json.loads(table) if '' in table['header']: continue tables.append(table) engine = DBEngine('train.db') with open("train_augment.jsonl","w") as f: for i in range(1000): probs = np.random.rand(len(tables)) table_i = tables[np.argmax(probs)] data = write_table2sql(table_i, engine) if len(data) == 0: print('couldnt find a valid sql!') for js in data: js["phase"] = js["table_id"][0] agg_str = ['', 'max ', 'min ', 'count ', 'sum ', 'avg '] op_str = ['=', '>', '<'] js1 = {} sql_str = '' sql_str += 'select ' sql_str += agg_str[js['sql']['agg']] sql_str += table_i['header'][js['sql']['sel']].lower() + ' ' sql_str += 'where ' for j in range(len(js['sql']['conds'])): sql_str += table_i['header'][js['sql']['conds'][j][0]].lower() + ' ' sql_str += op_str[js['sql']['conds'][j][1]] + ' ' sql_str += str(js['sql']['conds'][j][2]).lower() if len(js['sql']['conds']) > 1 and j 
!= len(js['sql']['conds']) - 1: sql_str += ' and ' src = sql_str.split(' ') trg = js['question'].lower().split(' ') while (trg[-1] == ''): trg = trg[:-1] if trg[-1][-1] == '?': trg[-1] = trg[-1][:-1] trg += ['?'] js['src'] = src js['trg'] = trg f.write(json.dumps(js, cls=NpEncoder) + '\n') print('finished!') ```
github_jupyter
###### Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Daniel Koehn based on Jupyter notebooks by Marc Spiegelman [Dynamical Systems APMA 4101](https://github.com/mspieg/dynamical-systems) and Kyle Mandli from his course [Introduction to numerical methods](https://github.com/mandli/intro-numerical-methods), notebook style sheet by L.A. Barba, N.C. Clementi [Engineering Computations](https://github.com/engineersCode) ``` # Execute this cell to load the notebook's style sheet, then ignore it from IPython.core.display import HTML css_file = '../style/custom.css' HTML(open(css_file, "r").read()) ``` # Exploring the Lorenz Equations The Lorenz Equations are a 3-D dynamical system that is a simplified model of Rayleigh-Benard thermal convection. They are derived and described in detail in Edward Lorenz' 1963 paper [Deterministic Nonperiodic Flow](http://journals.ametsoc.org/doi/pdf/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2) in the Journal of Atmospheric Science. In their classical form they can be written \begin{equation} \begin{split} \frac{\partial X}{\partial t} &= \sigma( Y - X)\\ \frac{\partial Y}{\partial t} &= rX - Y - XZ \\ \frac{\partial Z}{\partial t} &= XY -b Z \end{split} \tag{1} \end{equation} where $\sigma$ is the "Prandtl number", $r = \mathrm{Ra}/\mathrm{Ra}_c$ is a scaled "Rayleigh number" and $b$ is a parameter that is related to the the aspect ratio of a convecting cell in the original derivation. 
Here, $X(t)$, $Y(t)$ and $Z(t)$ are the time dependent amplitudes of the streamfunction and temperature fields, expanded in a highly truncated Fourier Series where the streamfunction contains one cellular mode $$ \psi(x,z,t) = X(t)\sin(a\pi x)\sin(\pi z) $$ and temperature has two modes $$ \theta(x,z,t) = Y(t)\cos(a\pi x)\sin(\pi z) - Z(t)\sin(2\pi z) $$ This Jupyter notebook, will provide some simple python routines for numerical integration and visualization of the Lorenz Equations. ## Numerical solution of the Lorenz Equations We have to solve the uncoupled ordinary differential equations (1) using the finite difference method introduced in [this lecture](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/1_fd_intro.ipynb). The approach is similar to the one used in [Exercise: How to sail without wind](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/3_fd_ODE_example_sailing_wo_wind.ipynb), except that eqs.(1) are coupled ordinary differential equations, we have an additional differential equation and the RHS are more complex. Approximating the temporal derivatives in eqs. 
(1) using the **backward FD operator** \begin{equation} \frac{df}{dt} = \frac{f(t)-f(t-dt)}{dt} \notag \end{equation} with the time sample interval $dt$ leads to \begin{equation} \begin{split} \frac{X(t)-X(t-dt)}{dt} &= \sigma(Y - X)\\ \frac{Y(t)-Y(t-dt)}{dt} &= rX - Y - XZ\\ \frac{Y(t)-Y(t-dt)}{dt} &= XY -b Z\\ \end{split} \notag \end{equation} After solving for $X(t), Y(t), Z(t)$, we get the **explicit time integration scheme** for the Lorenz equations: \begin{equation} \begin{split} X(t) &= X(t-dt) + dt\; \sigma(Y - X)\\ Y(t) &= Y(t-dt) + dt\; (rX - Y - XZ)\\ Z(t) &= Z(t-dt) + dt\; (XY -b Z)\\ \end{split} \notag \end{equation} and by introducing a temporal dicretization $t^n = n * dt$ with $n \in [0,1,...,nt]$, where $nt$ denotes the maximum time steps, the final FD code becomes: \begin{equation} \begin{split} X^{n} &= X^{n-1} + dt\; \sigma(Y^{n-1} - X^{n-1})\\ Y^{n} &= Y^{n-1} + dt\; (rX^{n-1} - Y^{n-1} - X^{n-1}Z^{n-1})\\ Z^{n} &= Z^{n-1} + dt\; (X^{n-1}Y^{n-1} - b Z^{n-1})\\ \end{split} \tag{2} \end{equation} The Python implementation is quite straightforward, because we can reuse some old codes ... ##### Exercise 1 Finish the function `Lorenz`, which computes and returns the RHS of eqs. (1) for a given $X$, $Y$, $Z$. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def Lorenz(X,Y,Z,sigma,r,b): ''' Returns the RHS of the Lorenz equations ''' # ADD RHS OF LORENZ EQUATIONS (1) HERE! 
X_dot_rhs = Y_dot_rhs = Z_dot_rhs = # return the state derivatives return X_dot_rhs, Y_dot_rhs, Z_dot_rhs ``` Next, we write the function to solve the Lorenz equation `SolveLorenz` based on the `sailing_boring` code from the [Exercise: How to sail without wind](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/3_fd_ODE_example_sailing_wo_wind.ipynb) ##### Exercise 2 Finish the FD-code implementation `SolveLorenz` ``` def SolveLorenz(tmax, dt, X0, Y0, Z0, sigma=10.,r=28.,b=8./3.0): ''' Integrate the Lorenz equations from initial condition (X0,Y0,Z0)^T at t=0 for parameters sigma, r, b Returns: X, Y, Z, time ''' # Compute number of time steps based on tmax and dt nt = (int)(tmax/dt) # vectors for storage of X, Y, Z positions and time t X = np.zeros(nt + 1) Y = np.zeros(nt + 1) Z = np.zeros(nt + 1) t = np.zeros(nt + 1) # define initial position and time X[0] = X0 Y[0] = Y0 Z[0] = Z0 # start time stepping over time samples n for n in range(1,nt + 1): # compute RHS of Lorenz eqs. (1) at current position (X,Y,Z)^T X_dot_rhs, Y_dot_rhs, Z_dot_rhs = Lorenz(X[n-1],Y[n-1],Z[n-1],sigma,r,b) # compute new position using FD approximation of time derivative # ADD FD SCHEME OF THE LORENZ EQS. HERE! X[n] = Y[n] = Z[n] = t[n] = n * dt return X, Y, Z, t ``` Finally, we create a function to plot the solution (X,Y,Z)^T of the Lorenz eqs. ... 
``` def PlotLorenzXvT(X,Y,Z,t,sigma,r,b): ''' Create time series plots of solutions of the Lorenz equations X(t),Y(t),Z(t) ''' plt.figure() ax = plt.subplot(111) ax.plot(t,X,'r',label='X') ax.plot(t,Y,'g',label='Y') ax.plot(t,Z,'b',label='Z') ax.set_xlabel('time t') plt.title('Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma,r,b)) # Shrink current axis's height by 10% on the bottom box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) # Put a legend below current axis ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),ncol=3) plt.show() ``` ... and a function to plot the trajectory in the **phase space portrait**: ``` def PlotLorenz3D(X,Y,Z,sigma,r,b): ''' Show 3-D Phase portrait using mplot3D ''' # do some fancy 3D plotting fig = plt.figure() ax = fig.gca(projection='3d') ax.plot(X,Y,Z) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.title('Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma,r,b)) plt.show() ``` ##### Exercise 3 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=0.5$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! 
r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 4 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=10$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 5 Solve the Lorenz equations again for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=10$. However, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(-2,-3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. How does the solution change compared to exercise 4? ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! 
X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 6 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=28$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. Compare with the previous results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 7 In his 1963 paper Lorenz also investigated the influence of small changes of the initial conditions on the long-term evolution of the thermal convection problem for large Rayleigh numbers. Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=28$, however starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3.001,4)^T$. Plot the temporal evolution and compare with the solution of exercise 6. Describe and interpret the results. Explain why Lorenz introduced the term **Butterfly effect** based on your results. 
``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X1, Y1, Z1, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize differences as a time series PlotLorenzXvT(X-X1,Y-Y1,Z-Z1,t,sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X1,Y1,Z1,t,sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) ``` ##### Exercise 8 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=350$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. Compare with the previous result from exercise 8. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 8. dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ## What we learned: - How to solve the Lorenz equations using a simple finite-difference scheme. - How to visualize the solution of ordinary differential equations using the temporal evolution and phase portrait. - Exporing the dynamic of non-linear differential equations and the sensitivity of small changes of the initial conditions to the long term evolution of the system. 
- Why physicists can only predict the time evolution of complex dynamical systems to some extent.
github_jupyter
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i> <i>Licensed under the MIT License.</i> # Vowpal Wabbit Deep Dive <center> <img src="https://github.com/VowpalWabbit/vowpal_wabbit/blob/master/logo_assets/vowpal-wabbits-github-logo.png?raw=true" height="30%" width="30%" alt="Vowpal Wabbit"> </center> [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) is a fast online machine learning library that implements several algorithms relevant to the recommendation use case. The main advantage of Vowpal Wabbit (VW) is that training is done in an online fashion typically using Stochastic Gradient Descent or similar variants, which allows it to scale well to very large datasets. Additionally, it is optimized to run very quickly and can support distributed training scenarios for extremely large datasets. VW is best applied to problems where the dataset is too large to fit into memory but can be stored on disk in a single node. Though distributed training is possible with additional setup and configuration of the nodes. The kinds of problems that VW handles well mostly fall into the supervised classification domain of machine learning (Linear Regression, Logistic Regression, Multiclass Classification, Support Vector Machines, Simple Neural Nets). It also supports Matrix Factorization approaches and Latent Dirichlet Allocation, as well as a few other algorithms (see the [wiki](https://github.com/VowpalWabbit/vowpal_wabbit/wiki) for more information). A good example of a typical deployment use case is a Real Time Bidding scenario, where an auction to place an ad for a user is being decided in a matter of milliseconds. Feature information about the user and items must be extracted and passed into a model to predict likelihood of click (or other interaction) in short order. And if the user and context features are constantly changing (e.g. user browser and local time of day) it may be infeasible to score every possible input combination before hand. 
This is where VW provides value, as a platform to explore various algorithms offline to train a highly accurate model on a large set of historical data then deploy the model into production so it can generate rapid predictions in real time. Of course this isn't the only manner VW can be deployed, it is also possible to use it entirely online where the model is constantly updating, or use active learning approaches, or work completely offline in a pre-scoring mode. <h3>Vowpal Wabbit for Recommendations</h3> In this notebook we demonstrate how to use the VW library to generate recommendations on the [Movielens](https://grouplens.org/datasets/movielens/) dataset. Several things are worth noting in how VW is being used in this notebook: By leveraging an Azure Data Science Virtual Machine ([DSVM](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/)), VW comes pre-installed and can be used directly from the command line. If you are not using a DSVM you must install vw yourself. There are also python bindings to allow VW use within a python environment and even a wrapper conforming to the SciKit-Learn Estimator API. However, the python bindings must be installed as an additional python package with Boost dependencies, so for simplicity's sake execution of VW is done via a subprocess call mimicking what would happen from the command line execution of the model. VW expects a specific [input format](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format), in this notebook to_vw() is a convenience function that converts the standard movielens dataset into the required data format. Datafiles are then written to disk and passed to VW for training. The examples shown are to demonstrate functional capabilities of VW not to indicate performance advantages of different approaches. There are several hyper-parameters (e.g. 
learning rate and regularization terms) that can greatly impact performance of VW models which can be adjusted using [command line options](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). To properly compare approaches it is helpful to learn about and tune these parameters on the relevant dataset. # 0. Global Setup ``` import sys sys.path.append('../..') import os from subprocess import run from tempfile import TemporaryDirectory from time import process_time import pandas as pd import papermill as pm from reco_utils.common.notebook_utils import is_jupyter from reco_utils.dataset.movielens import load_pandas_df from reco_utils.dataset.python_splitters import python_random_split from reco_utils.evaluation.python_evaluation import (rmse, mae, exp_var, rsquared, get_top_k_items, map_at_k, ndcg_at_k, precision_at_k, recall_at_k) print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) def to_vw(df, output, logistic=False): """Convert Pandas DataFrame to vw input format Args: df (pd.DataFrame): input DataFrame output (str): path to output file logistic (bool): flag to convert label to logistic value """ with open(output, 'w') as f: tmp = df.reset_index() # we need to reset the rating type to an integer to simplify the vw formatting tmp['rating'] = tmp['rating'].astype('int64') # convert rating to binary value if logistic: tmp['rating'] = tmp['rating'].apply(lambda x: 1 if x >= 3 else -1) # convert each row to VW input format (https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format) # [label] [tag]|[user namespace] [user id feature] |[item namespace] [movie id feature] # label is the true rating, tag is a unique id for the example just used to link predictions to truth # user and item namespaces separate the features to support interaction features through command line options for _, row in tmp.iterrows(): f.write('{rating} {index}|user {userID} |item {itemID}\n'.format_map(row)) def 
run_vw(train_params, test_params, test_data, prediction_path, logistic=False): """Convenience function to train, test, and show metrics of interest Args: train_params (str): vw training parameters test_params (str): vw testing parameters test_data (pd.dataFrame): test data prediction_path (str): path to vw prediction output logistic (bool): flag to convert label to logistic value Returns: (dict): metrics and timing information """ # train model train_start = process_time() run(train_params.split(' '), check=True) train_stop = process_time() # test model test_start = process_time() run(test_params.split(' '), check=True) test_stop = process_time() # read in predictions pred_df = pd.read_csv(prediction_path, delim_whitespace=True, names=['prediction'], index_col=1).join(test_data) test_df = test_data.copy() if logistic: # make the true label binary so that the metrics are captured correctly test_df['rating'] = test['rating'].apply(lambda x: 1 if x >= 3 else -1) else: # ensure results are integers in correct range pred_df['prediction'] = pred_df['prediction'].apply(lambda x: int(max(1, min(5, round(x))))) # calculate metrics result = dict() result['RMSE'] = rmse(test_df, pred_df) result['MAE'] = mae(test_df, pred_df) result['R2'] = rsquared(test_df, pred_df) result['Explained Variance'] = exp_var(test_df, pred_df) result['Train Time (ms)'] = (train_stop - train_start) * 1000 result['Test Time (ms)'] = (test_stop - test_start) * 1000 return result # create temp directory to maintain data files tmpdir = TemporaryDirectory() model_path = os.path.join(tmpdir.name, 'vw.model') saved_model_path = os.path.join(tmpdir.name, 'vw_saved.model') train_path = os.path.join(tmpdir.name, 'train.dat') test_path = os.path.join(tmpdir.name, 'test.dat') train_logistic_path = os.path.join(tmpdir.name, 'train_logistic.dat') test_logistic_path = os.path.join(tmpdir.name, 'test_logistic.dat') prediction_path = os.path.join(tmpdir.name, 'prediction.dat') all_test_path = 
os.path.join(tmpdir.name, 'new_test.dat') all_prediction_path = os.path.join(tmpdir.name, 'new_prediction.dat') ``` # 1. Load & Transform Data ``` # Select Movielens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' TOP_K = 10 # load movielens data (use the 1M dataset) df = load_pandas_df(MOVIELENS_DATA_SIZE) # split data to train and test sets, default values take 75% of each users ratings as train, and 25% as test train, test = python_random_split(df, 0.75) # save train and test data in vw format to_vw(df=train, output=train_path) to_vw(df=test, output=test_path) # save data for logistic regression (requires adjusting the label) to_vw(df=train, output=train_logistic_path, logistic=True) to_vw(df=test, output=test_logistic_path, logistic=True) ``` # 2. Regression Based Recommendations When considering different approaches for solving a problem with machine learning it is helpful to generate a baseline approach to understand how more complex solutions perform across dimensions of performance, time, and resource (memory or cpu) usage. Regression based approaches are some of the simplest and fastest baselines to consider for many ML problems. ## 2.1 Linear Regression As the data provides a numerical rating between 1-5, fitting those values with a linear regression model is easy approach. This model is trained on examples of ratings as the target variable and corresponding user ids and movie ids as independent features. By passing each user-item rating in as an example the model will begin to learn weights based on average ratings for each user as well as average ratings per item. This however can generate predicted ratings which are no longer integers, so some additional adjustments should be made at prediction time to convert them back to the integer scale of 1 through 5 if necessary. Here, this is done in the evaluate function. 
``` """ Quick description of command line parameters used Other optional parameters can be found here: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments VW uses linear regression by default, so no extra command line options -f <model_path>: indicates where the final model file will reside after training -d <data_path>: indicates which data file to use for training or testing --quiet: this runs vw in quiet mode silencing stdout (for debugging it's helpful to not use quiet mode) -i <model_path>: indicates where to load the previously model file created during training -t: this executes inference only (no learned updates to the model) -p <prediction_path>: indicates where to store prediction output """ train_params = 'vw -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) # save these results for later use during top-k analysis test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = pd.DataFrame(result, index=['Linear Regression']) comparison ``` ## 2.2 Linear Regression with Interaction Features Previously we treated the user features and item features independently, but taking into account interactions between features can provide a mechanism to learn more fine grained preferences of the users. To generate interaction features use the quadratic command line argument and specify the namespaces that should be combined: '-q ui' combines the user and item namespaces based on the first letter of each. Currently the userIDs and itemIDs used are integers which means the feature ID is used directly, for instance when user ID 123 rates movie 456, the training example puts a 1 in the values for features 123 and 456. 
However when interaction is specified (or if a feature is a string) the resulting interaction feature is hashed into the available feature space. Feature hashing is a way to take a very sparse high dimensional feature space and reduce it into a lower dimensional space. This allows for reduced memory while retaining fast computation of feature and model weights. The caveat with feature hashing, is that it can lead to hash collisions, where separate features are mapped to the same location. In this case it can be beneficial to increase the size of the space to support interactions between features of high cardinality. The available feature space is dictated by the --bit_precision (-b) <N> argument. Where the total available space for all features in the model is 2<sup>N</sup>. See [Feature Hashing and Extraction](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Feature-Hashing-and-Extraction) for more details. ``` """ Quick description of command line parameters used -b <N>: sets the memory size to 2<sup>N</sup> entries -q <ab>: create quadratic feature interactions between features in namespaces starting with 'a' and 'b' """ train_params = 'vw -b 26 -q ui -f {model} -d {data} --quiet'.format(model=saved_model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=saved_model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) saved_result = result comparison = comparison.append(pd.DataFrame(result, index=['Linear Regression w/ Interaction'])) comparison ``` ## 2.3 Multinomial Logistic Regression An alternative to linear regression is to leverage multinomial logistic regression, or multiclass classification, which treats each rating value as a distinct class. 
This avoids any non integer results, but also reduces the training data for each class which could lead to poorer performance if the counts of different rating levels are skewed. Basic multiclass logistic regression can be accomplished using the One Against All approach specified by the '--oaa N' option, where N is the number of classes and proving the logistic option for the loss function to be used. ``` """ Quick description of command line parameters used --loss_function logistic: sets the model loss function for logistic regression --oaa <N>: trains N separate models using One-Against-All approach (all models are captured in the single model file) This expects the labels to be contiguous integers starting at 1 --link logistic: converts the predicted output from logit to probability The predicted output is the model (label) with the largest likelihood """ train_params = 'vw --loss_function logistic --oaa 5 -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw --link logistic -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Multinomial Regression'])) comparison ``` ## 2.4 Logistic Regression Additionally, one might simply be interested in whether the user likes or dislikes an item and we can adjust the input data to represent a binary outcome, where ratings in (1,3] are dislikes (negative results) and (3,5] are likes (positive results). This framing allows for a simple logistic regression model to be applied. To perform logistic regression the loss_function parameter is changed to 'logistic' and the target label is switched to [0, 1]. Also, be sure to set '--link logistic' during prediction to convert the logit output back to a probability value. 
``` train_params = 'vw --loss_function logistic -f {model} -d {data} --quiet'.format(model=model_path, data=train_logistic_path) test_params = 'vw --link logistic -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_logistic_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path, logistic=True) comparison = comparison.append(pd.DataFrame(result, index=['Logistic Regression'])) comparison ``` # 3. Matrix Factorization Based Recommendations All of the above approaches train a regression model, but VW also supports matrix factorization with two different approaches. As opposed to learning direct weights for specific users, items and interactions when training a regression model, matrix factorization attempts to learn latent factors that determine how a user rates an item. An example of how this might work is if you could represent user preference and item categorization by genre. Given a smaller set of genres we can associate how much each item belongs to each genre class, and we can set weights for a user's preference for each genre. Both sets of weights could be represented as vectors where the inner product would be the user-item rating. Matrix factorization approaches learn low rank matrices for latent features of users and items such that those matrices can be combined to approximate the original user item matrix. ## 3.1. Singular Value Decomposition Based Matrix Factorization The first approach performs matrix factorization based on Singular Value Decomposition (SVD) to learn a low rank approximation for the user-item rating matrix. It is called using the '--rank' command line argument. See the [Matrix Factorization Example](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example) for more detail. 
``` """ Quick description of command line parameters used --rank <N>: sets the number of latent factors in the reduced matrix """ train_params = 'vw --rank 5 -q ui -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Matrix Factorization (Rank)'])) comparison ``` ## 3.2. Factorization Machine Based Matrix Factorization An alternative approach based on [Rendel's factorization machines](https://cseweb.ucsd.edu/classes/fa17/cse291-b/reading/Rendle2010FM.pdf) is called using '--lrq' (low rank quadratic). More LRQ details in this [demo](https://github.com/VowpalWabbit/vowpal_wabbit/tree/master/demo/movielens). This learns two lower rank matrices which are multiplied to generate an approximation of the user-item rating matrix. Compressing the matrix in this way leads to learning generalizable factors which avoids some of the limitations of using regression models with extremely sparse interaction features. This can lead to better convergence and smaller on-disk models. An additional term to improve performance is --lrqdropout which will dropout columns during training. This however tends to increase the optimal rank size. Other parameters such as L2 regularization can help avoid overfitting. 
``` """ Quick description of command line parameters used --lrq <abN>: learns approximations of rank N for the quadratic interaction between namespaces starting with 'a' and 'b' --lrqdroupout: performs dropout during training to improve generalization """ train_params = 'vw --lrq ui7 -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Matrix Factorization (LRQ)'])) comparison ``` # 4. Conclusion The table above shows a few of the approaches in the VW library that can be used for recommendation prediction. The relative performance can change when applied to different datasets and properly tuned, but it is useful to note the rapid speed at which all approaches are able to train (75,000 examples) and test (25,000 examples). # 5. Scoring After training a model with any of the above approaches, the model can be used to score potential user-pairs in offline batch mode, or in a real-time scoring mode. The example below shows how to leverage the utilities in the reco_utils directory to generate Top-K recommendations from offline scored output. 
``` # First construct a test set of all items (except those seen during training) for each user users = df[['userID']].drop_duplicates() users['key'] = 1 items = df[['itemID']].drop_duplicates() items['key'] = 1 all_pairs = pd.merge(users, items, on='key').drop(columns=['key']) # now combine with training data and filter only those entries that don't match merged = pd.merge(train, all_pairs, on=["userID", "itemID"], how="outer") all_user_items = merged[merged['rating'].isnull()].copy() all_user_items['rating'] = 0 # save in vw format (this can take a while) to_vw(df=all_user_items, output=all_test_path) # run the saved model (linear regression with interactions) on the new dataset test_start = process_time() test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=saved_model_path, data=all_test_path, pred=prediction_path) run(test_params.split(' '), check=True) test_stop = process_time() test_time = test_stop - test_start # load predictions and get top-k from previous saved results pred_data = pd.read_csv(prediction_path, delim_whitespace=True, names=['prediction'], index_col=1).join(test) pred_data['prediction'] = pred_data['prediction'].apply(lambda x: int(max(1, min(5, round(x))))) top_k = get_top_k_items(pred_data, col_rating='prediction', k=TOP_K)[['prediction', 'userID', 'itemID', 'rating']] # convert dtypes of userID and itemID columns. 
for col in ['userID', 'itemID']: top_k[col] = top_k[col].astype(int) top_k.head() # get ranking metrics args = [test, top_k] kwargs = dict(col_user='userID', col_item='itemID', col_rating='rating', col_prediction='prediction', relevancy_method='top_k', k=TOP_K) rank_metrics = {'MAP': map_at_k(*args, **kwargs), 'NDCG': ndcg_at_k(*args, **kwargs), 'Precision': precision_at_k(*args, **kwargs), 'Recall': recall_at_k(*args, **kwargs)} # final results all_results = ['{k}: {v}'.format(k=k, v=v) for k, v in saved_result.items()] all_results += ['{k}: {v}'.format(k=k, v=v) for k, v in rank_metrics.items()] print('\n'.join(all_results)) ``` # 6. Cleanup ``` # record results for testing if is_jupyter(): pm.record('rmse', saved_result['RMSE']) pm.record('mae', saved_result['MAE']) pm.record('rsquared', saved_result['R2']) pm.record('exp_var', saved_result['Explained Variance']) pm.record("train_time", saved_result['Train Time (ms)']) pm.record("test_time", test_time) pm.record('map', rank_metrics['MAP']) pm.record('ndcg', rank_metrics['NDCG']) pm.record('precision', rank_metrics['Precision']) pm.record('recall', rank_metrics['Recall']) tmpdir.cleanup() ``` ## References 1. John Langford, et. al. Vowpal Wabbit Wiki. URL: https://github.com/VowpalWabbit/vowpal_wabbit/wiki 2. Steffen Rendel. Factorization Machines. 2010 IEEE International Conference on Data Mining. 3. Jake Hoffman. Matrix Factorization Example. URL: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example 4. Paul Minero. Low Rank Quadratic Example. URL: https://github.com/VowpalWabbit/vowpal_wabbit/tree/master/demo/movielens
github_jupyter
# Neural Network In this tutorial, we'll create a simple neural network classifier in TensorFlow. The key advantage of this model over the [Linear Classifier](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/3_Neural_Network/Tutorials/1_Neural_Network.ipynb) trained in the previous tutorial is that it can separate data which is __NOT__ linearly separable. We will implement this model for classifying images of hand-written digits from the so-called MNIST data-set. We assume that you have basic knowledge of the concepts and you are just interested in the __Tensorflow__ implementation of the Neural Nets. If you want to know more about the Neural Nets we suggest you take [this](https://www.coursera.org/learn/machine-learning) amazing course on machine learning or check out the following tutorials: [Neural Networks Part 1: Setting up the Architecture](https://cs231n.github.io/neural-networks-1/) [Neural Networks Part 2: Setting up the Data and the Loss](https://cs231n.github.io/neural-networks-2/) [Neural Networks Part 3: Learning and Evaluation](https://cs231n.github.io/neural-networks-3/) The structure of the neural network that we're going to implement is as follows. Like before, we're using images of hand-written digits of the MNIST data which has 10 classes (i.e. digits from 0 to 9). The implemented network has 2 hidden layers: the first one with 200 hidden units (neurons) and the second one (also known as classifier layer) with 10 (number of classes) neurons. <img src="files/files/nn.png"> ___Fig. 1-___ Sample Neural Network architecture with two layers implemented for classifying MNIST digits ## 0. Import the required libraries: We will start with importing the required Python libraries. ``` # imports import tensorflow as tf import numpy as np import matplotlib.pyplot as plt ``` ## 1. Load the MNIST data For this tutorial we use the MNIST dataset. MNIST is a dataset of handwritten digits. 
If you are into machine learning, you might have heard of this dataset by now. MNIST is kind of benchmark of datasets for deep learning and is easily accesible through Tensorflow The dataset contains $55,000$ examples for training, $5,000$ examples for validation and $10,000$ examples for testing. The digits have been size-normalized and centered in a fixed-size image ($28\times28$ pixels) with values from $0$ to $1$. For simplicity, each image has been flattened and converted to a 1-D numpy array of $784$ features ($28\times28$). <img src="files/files/mnist.png"> If you want to know more about the MNIST dataset you can check __Yann Lecun__'s [website](http://yann.lecun.com/exdb/mnist/). ### 1.1. Data dimension Here, we specify the dimensions of the images which will be used in several places in the code below. Defining these variables makes it easier (compared with using hard-coded number all throughout the code) to modify them later. Ideally these would be inferred from the data that has been read, but here we will just write the numbers. It's important to note that in a linear model, we have to flatten the input images into a vector. Here, each of the $28\times28$ images are flattened into a $1\times784$ vector. ``` img_h = img_w = 28 # MNIST images are 28x28 img_size_flat = img_h * img_w # 28x28=784, the total number of pixels n_classes = 10 # Number of classes, one class per digit ``` ### 1.2. Helper functions to load the MNIST data In this section, we'll write the function which automatically loads the MNIST data and returns it in our desired shape and format. If you wanna learn more about loading your data, you may read our __How to Load Your Data in TensorFlow __ tutorial which explains all the available methods to load your own data; no matter how big it is. 
Here, we'll simply write a function (__`load_data`__) which has two modes: train (which loads the training and validation images and their corresponding labels) and test (which loads the test images and their corresponding labels). You can replace this function to use your own dataset. Other than a function for loading the images and corresponding labels, we define two more functions: 1. __randomize__: which randomizes the order of images and their labels. This is important to make sure that the input images are sorted in a completely random order. Moreover, at the beginning of each __epoch__, we will re-randomize the order of data samples to make sure that the trained model is not sensitive to the order of data. 2. __get_next_batch__: which only selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) ``` def load_data(mode='train'): """ Function to (download and) load the MNIST data :param mode: train or test :return: images and the corresponding labels """ from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) if mode == 'train': x_train, y_train, x_valid, y_valid = mnist.train.images, mnist.train.labels, \ mnist.validation.images, mnist.validation.labels return x_train, y_train, x_valid, y_valid elif mode == 'test': x_test, y_test = mnist.test.images, mnist.test.labels return x_test, y_test def randomize(x, y): """ Randomizes the order of data samples and their corresponding labels""" permutation = np.random.permutation(y.shape[0]) shuffled_x = x[permutation, :] shuffled_y = y[permutation] return shuffled_x, shuffled_y def get_next_batch(x, y, start, end): x_batch = x[start:end] y_batch = y[start:end] return x_batch, y_batch ``` ### 1.3. Load the data and display the sizes Now we can use the defined helper function in __train__ mode which loads the train and validation images and their corresponding labels. 
We'll also display their sizes: ``` # Load MNIST data x_train, y_train, x_valid, y_valid = load_data(mode='train') print("Size of:") print("- Training-set:\t\t{}".format(len(y_train))) print("- Validation-set:\t{}".format(len(y_valid))) ``` To get a better sense of the data, let's checkout the shapes of the loaded arrays. ``` print('x_train:\t{}'.format(x_train.shape)) print('y_train:\t{}'.format(y_train.shape)) print('x_train:\t{}'.format(x_valid.shape)) print('y_valid:\t{}'.format(y_valid.shape)) ``` As you can see, __`x_train`__ and __`x_valid`__ arrays contain $55000$ and $5000$ flattened images ( of size $28\times28=784$ values). __`y_train`__ and __`y_valid`__ contain the corresponding labels of the images in the training and validation set respectively. Based on the dimesnion of the arrays, for each image, we have 10 values as its label. Why? This technique is called __One-Hot Encoding__. This means the labels have been converted from a single number to a vector whose length equals the number of possible classes. All elements of the vector are zero except for the $i^{th}$ element which is one and means the class is $i$. For example, the One-Hot encoded labels for the first 5 images in the validation set are: ``` y_valid[:5, :] ``` where the $10$ values in each row represents the label assigned to that partiular image. ## 2. Hyperparameters Here, we have about $55,000$ images in our training set. It takes a long time to calculate the gradient of the model using all these images. We therefore use __Stochastic Gradient Descent__ which only uses a small batch of images in each iteration of the optimizer. Let's define some of the terms usually used in this context: - __epoch__: one forward pass and one backward pass of __all__ the training examples. - __batch size__: the number of training examples in one forward/backward pass. The higher the batch size, the more memory space you'll need. 
- __iteration__: one forward pass and one backward pass of __one batch of images__ the training examples. ``` # Hyper-parameters epochs = 10 # Total number of training epochs batch_size = 100 # Training batch size display_freq = 100 # Frequency of displaying the training results learning_rate = 0.001 # The optimization initial learning rate h1 = 200 # number of nodes in the 1st hidden layer ``` Given the above definitions, each epoch consists of $55,000/100=550$ iterations. ## 3. Helper functions for creating the network ### 3.1. Helper functions for creating new variables As explained (and also illustrated in Fig. 1), we need to define two variables $\mathbf{W}$ and $\mathbf{b}$ to construt our linear model. These are generally called model parameters and as explained in our [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, we use __Tensorflow Variables__ of proper size and initialization to define them.The following functions are written to be later used for generating the weight and bias variables of the desired shape: ``` # weight and bais wrappers def weight_variable(name, shape): """ Create a weight variable with appropriate initialization :param name: weight name :param shape: weight shape :return: initialized weight variable """ initer = tf.truncated_normal_initializer(stddev=0.01) return tf.get_variable('W_' + name, dtype=tf.float32, shape=shape, initializer=initer) def bias_variable(name, shape): """ Create a bias variable with appropriate initialization :param name: bias variable name :param shape: bias variable shape :return: initialized bias variable """ initial = tf.constant(0., shape=shape, dtype=tf.float32) return tf.get_variable('b_' + name, dtype=tf.float32, initializer=initial) ``` ### 3.2. Helper-function for creating a fully-connected layer Neural network consists of stacks of fully-connected (dense) layers. 
Having the weight ($\mathbf{W}$) and bias ($\mathbf{b}$) variables, a fully-connected layer is defined as $activation(\mathbf{W}\times \mathbf{x} + \mathbf{b})$. We define __`fc_layer`__ function as follows: ``` def fc_layer(x, num_units, name, use_relu=True): """ Create a fully-connected layer :param x: input from previous layer :param num_units: number of hidden units in the fully-connected layer :param name: layer name :param use_relu: boolean to add ReLU non-linearity (or not) :return: The output array """ in_dim = x.get_shape()[1] W = weight_variable(name, shape=[in_dim, num_units]) b = bias_variable(name, [num_units]) layer = tf.matmul(x, W) layer += b if use_relu: layer = tf.nn.relu(layer) return layer ``` ## 4. Create the network graph Now that we have defined all the helped functions to create our model, we can create our network. ### 4.1. Placeholders for the inputs (x) and corresponding labels (y) First we need to define the proper tensors to feed in the input values to our model. As explained in the [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, placeholder variable is the suitable choice for the input images and corresponding labels. This allows us to change the inputs (images and labels) to the TensorFlow graph. ``` # Create the graph for the linear model # Placeholders for inputs (x) and outputs(y) x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='X') y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y') ``` Placeholder __`x`__ is defined for the images; its data-type is set to __`float32`__ and the shape is set to __[None, img_size_flat]__, where __`None`__ means that the tensor may hold an arbitrary number of images with each image being a vector of length __`img_size_flat`__. Next we have __`y`__ which is the placeholder variable for the true labels associated with the images that were input in the placeholder variable __`x`__. 
The shape of this placeholder variable is __[None, num_classes]__ which means it may hold an arbitrary number of labels and each label is a vector of length __`num_classes`__ which is $10$ in this case. ### 4.2. Create the network layers After creating the proper input, we have to pass it to our model. Since we have a neural network, we can stack multiple fully-connected layers using __`fc_layer`__ method. Note that we will not use any activation function (`use_relu=False`) in the last layer. The reason is that we can use `tf.nn.softmax_cross_entropy_with_logits` to calculate the `loss`. ``` # Create a fully-connected layer with h1 nodes as hidden layer fc1 = fc_layer(x, h1, 'FC1', use_relu=True) # Create a fully-connected layer with n_classes nodes as output layer output_logits = fc_layer(fc1, n_classes, 'OUT', use_relu=False) ``` ### 4.3. Define the loss function, optimizer, accuracy, and predicted class After creating the network, we have to calculate the loss and optimize it. Also, to evaluate our model, we have to calculate the `correct_prediction` and `accuracy`. We will also define `cls_prediction` to visualize our results. ``` # Define the loss function, optimizer, and accuracy loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits), name='loss') optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='Adam-op').minimize(loss) correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred') accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy') # Network predictions cls_prediction = tf.argmax(output_logits, axis=1, name='predictions') ``` ### 4.4. Initialize all variables As explained in the [Tensor Types](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/2_Tensor_Types.ipynb) tutorial, we have to invoke a variable initializer operation to initialize all variables. 
``` # Create the op for initializing all variables init = tf.global_variables_initializer() ``` ## 5. Train After creating the graph, it is time to train our model. To train the model, As explained in the [Graph_and_Session](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/1_Graph_and_Session.ipynb) tutorial, we have to create a session and run the graph in our session. ``` # Create an interactive session (to keep the session in the other cells) sess = tf.InteractiveSession() # Initialize all variables sess.run(init) # Number of training iterations in each epoch num_tr_iter = int(len(y_train) / batch_size) for epoch in range(epochs): print('Training epoch: {}'.format(epoch + 1)) # Randomly shuffle the training data at the beginning of each epoch x_train, y_train = randomize(x_train, y_train) for iteration in range(num_tr_iter): start = iteration * batch_size end = (iteration + 1) * batch_size x_batch, y_batch = get_next_batch(x_train, y_train, start, end) # Run optimization op (backprop) feed_dict_batch = {x: x_batch, y: y_batch} sess.run(optimizer, feed_dict=feed_dict_batch) if iteration % display_freq == 0: # Calculate and display the batch loss and accuracy loss_batch, acc_batch = sess.run([loss, accuracy], feed_dict=feed_dict_batch) print("iter {0:3d}:\t Loss={1:.2f},\tTraining Accuracy={2:.01%}". format(iteration, loss_batch, acc_batch)) # Run validation after every epoch feed_dict_valid = {x: x_valid[:1000], y: y_valid[:1000]} loss_valid, acc_valid = sess.run([loss, accuracy], feed_dict=feed_dict_valid) print('---------------------------------------------------------') print("Epoch: {0}, validation loss: {1:.2f}, validation accuracy: {2:.01%}". format(epoch + 1, loss_valid, acc_valid)) print('---------------------------------------------------------') ``` ## 6. Test After the training is done, we have to test our model to see how good it performs on a new dataset. There are multiple approaches to for this purpose. 
We will use two different methods. ## 6.1. Accuracy One way that we can evaluate our model is reporting the accuracy on the test set. ``` # Test the network after training # Accuracy x_test, y_test = load_data(mode='test') feed_dict_test = {x: x_test[:1000], y: y_test[:1000]} loss_test, acc_test = sess.run([loss, accuracy], feed_dict=feed_dict_test) print('---------------------------------------------------------') print("Test loss: {0:.2f}, test accuracy: {1:.01%}".format(loss_test, acc_test)) print('---------------------------------------------------------') ``` ## 6.2. plot some results Another way to evaluate the model is to visualize the input and the model results and compare them with the true label of the input. This is advantages in numerous ways. For example, even if you get a decent accuracy, when you plot the results, you might see all the samples have been classified in one class. Another example is when you plot, you can have a rough idea on which examples your model failed. Let's define the helper functions to plot some correct and missclassified examples. ### 6.2.1 Helper functions for plotting the results ``` def plot_images(images, cls_true, cls_pred=None, title=None): """ Create figure with 3x3 sub-plots. :param images: array of images to be plotted, (9, img_h*img_w) :param cls_true: corresponding true labels (9,) :param cls_pred: corresponding true labels (9,) """ fig, axes = plt.subplots(3, 3, figsize=(9, 9)) fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(axes.flat): # Plot image. ax.imshow(images[i].reshape(28, 28), cmap='binary') # Show true and predicted classes. if cls_pred is None: ax_title = "True: {0}".format(cls_true[i]) else: ax_title = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i]) ax.set_title(ax_title) # Remove ticks from the plot. 
ax.set_xticks([]) ax.set_yticks([]) if title: plt.suptitle(title, size=20) plt.show(block=False) def plot_example_errors(images, cls_true, cls_pred, title=None): """ Function for plotting examples of images that have been mis-classified :param images: array of all images, (#imgs, img_h*img_w) :param cls_true: corresponding true labels, (#imgs,) :param cls_pred: corresponding predicted labels, (#imgs,) """ # Negate the boolean array. incorrect = np.logical_not(np.equal(cls_pred, cls_true)) # Get the images from the test-set that have been # incorrectly classified. incorrect_images = images[incorrect] # Get the true and predicted classes for those images. cls_pred = cls_pred[incorrect] cls_true = cls_true[incorrect] # Plot the first 9 images. plot_images(images=incorrect_images[0:9], cls_true=cls_true[0:9], cls_pred=cls_pred[0:9], title=title) ``` ### 6.2.2 Visualize correct and missclassified examples ``` # Plot some of the correct and misclassified examples cls_pred = sess.run(cls_prediction, feed_dict=feed_dict_test) cls_true = np.argmax(y_test[:1000], axis=1) plot_images(x_test, cls_true, cls_pred, title='Correct Examples') plot_example_errors(x_test[:1000], cls_true, cls_pred, title='Misclassified Examples') plt.show() ``` After we finished, we have to close the __`session`__ to free the memory. We could have also used: ```python with tf.Session as sess: ... ``` Please check our [Graph_and_Session](https://github.com/easy-tensorflow/easy-tensorflow/blob/master/1_TensorFlow_Basics/Tutorials/1_Graph_and_Session.ipynb) tutorial if you do not know the differences between these two implementations. ``` sess.close() ``` Thanks for reading! If you have any question or doubt, feel free to leave a comment in our [website](http://easy-tensorflow.com/).
github_jupyter
``` import numpy as np from scipy.optimize import least_squares from scipy.optimize import basinhopping from pandas import Series, DataFrame import pandas as pd import matplotlib import matplotlib.pyplot as plt matplotlib.use('Qt5Agg') %matplotlib qt5 # # if pade.py is not in the current directory, set this path: # #import sys from rac_aux import * Angs2Bohr=1.8897259886 au2eV=27.211386027 au2cm=219474.63068 # # files in the current directory do not need the path name # #df = pd.read_csv("/home/thomas/Python/StabPlots/Stab_data/1D_a0.2_b0_c0.14/crossing_1.dat", delim_whitespace=True) df = pd.read_csv("sb_rac.csv") #df = pd.read_csv("crossing_1.dat", delim_whitespace=True) plt.cla() plt.plot(df.l.values, df.E1.values, 'o-') plt.plot(df.l.values, df.E2.values, 'o-') plt.plot(df.l.values, df.E3.values, 'o-') plt.show() df[:5] i_neg = np.argmin(abs(df.E1.values)) if df.E1[i_neg] > 0: i_neg += 1 ls = df.l.values[i_neg:] print('N=',len(ls)) Es = df.E1.values[i_neg:] plt.cla() plt.plot(df.l.values, df.E1.values, 'b-') plt.plot(df.l.values, df.E2.values, 'b-') plt.plot(df.l.values, df.E3.values, 'b-') plt.plot(ls, Es, 'o', color="orange") plt.plot([df.l[0],df.l.values[-1]],[0,0],'-', color='black') plt.show() # # kappas, kappa**2, and sigmas (weights = sigma**2) # least_squares() passes parg to each pade_nm function # k2s = -Es ks = np.sqrt(k2s) sigmas = weights(len(Es), 'ones') #sigmas = weights(len(Es), 'energy', E0=Es[11], Es=Es) parg=(ks,k2s,ls,sigmas) # # So far, nm can be in [21, 31, 32, 42, 53] # nm=53 pade_fns = {"21":(pade_21_lsq, pade_21j_lsq), "31":(pade_31_lsq, pade_31j_lsq), "32":(pade_32_lsq, pade_32j_lsq), "42":(pade_42_lsq, pade_42j_lsq), "53":(pade_53_lsq, pade_53j_lsq)} fun=pade_fns[str(nm)][0] jac=pade_fns[str(nm)][1] # start params depend on nm # basin_hopping should be less sensitive to good p0s p31_opt = [2.4022, 0.2713, 1.2813, 0.4543] p42_opt = [2.3919, 0.2964, 1.3187, 1.3736, 0.29655, 0.5078] E0 = linear_extra(ls,Es) G0 = 0.2*E0 if nm == 21: 
p0s=[ls[0]] + guess(E0, G0) elif nm == 31: p0s=[ls[0]] + guess(E0, G0) + [10] elif nm == 32: p0s=[ls[0]] + guess(E0, G0) + [10, 1] #p0s=p31_opt + [0.2] elif nm == 42: p0s=[ls[0]] + guess(E0, G0) + guess(5*E0,10*G0) + [10] elif nm == 53: p0s = p42_opt[0:5] + p31_opt[3:] + p42_opt[5:] + [1] else: print("Warning", nm, "not implemented") print(p0s) print(chi2_gen(p0s, ks, k2s, ls, sigmas, fun)) # # Because basin_hopping calls a minimize()-like function, # calling instead least_squares() requires jumping through some hoops # # We minimize chi2 = 1/M sum_i (rac(k_i) - lambda_i)**2 # # basin_hopping needs bh_chi2() as parameter and will call this function directly # (not just the local minimizer) # # # To call least_squares() a wrapper-function knowing how to call f_lsq(k)=rac(k)-lambda and # a function returning the gradient matrix of f_lsq(k_i) with respect to the parameter p_j # arg_nm = (ks, k2s, ls, sigmas, fun, jac) def bh_chi2(params, args=()): """ at the moment 'args':(ks, k2s, ls, f_lsq, j_lsq) """ (ks, k2s, ls, sigmas, f_lsq, j_lsq) = args diffs = f_lsq(params, ks, k2s, ls, sigmas) return np.sum(np.square(diffs)) def lsq_wrapper(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None): (ks, k2s, ls, sigmas, f_lsq, j_lsq) = args res = least_squares(f_lsq, x0, method='trf', jac=j_lsq, args=(ks, k2s, ls, sigmas)) res.fun = res.cost*2 #print(res.fun, res.x) #print('wrapper:', res.fun) #delattr(res, 'njev') return res def bh_call_back(x, f, accepted): global jbh, chi2s, alphas, betas #nonlocal jbh, chi2s, alphas, betas chi2s[jbh] = f alphas[jbh], betas[jbh] = x[1], x[2] jbh += 1 # check lsq_wrapper and bh_chi res = lsq_wrapper(bh_chi2, p0s, args=arg_nm) print(res.fun) print(bh_chi2(res.x, args=arg_nm)) # # for least_squares: # min_kwargs = {'method':lsq_wrapper, 'args':arg_nm, 'jac':True} jbh=0 n_bh = 1000 chi2s = np.zeros(n_bh) alphas = np.zeros(n_bh) betas = np.zeros(n_bh) res = 
basinhopping(bh_chi2, p0s, minimizer_kwargs=min_kwargs, niter=n_bh, T=1e-2, seed=1, callback=bh_call_back) print(res.fun) print(res.x) print(res_ene(res.x[1],res.x[2])) Ers, Gms = res_ene(alphas, betas) logs = np.log10(chi2s) srt=np.sort(logs) print(srt[0:3], srt[-3:]) dic = {'logs':logs, 'Er':Ers, 'G':Gms} rf = DataFrame(dic) #print(rf[:5]) rf = rf.sort_values('logs') rf[:5] plt.cla() pop, edges, patches = plt.hist(logs, bins=50) pop if 'cb' in vars(): cb.remove() del cb plt.cla() N=100 # needed for [5,3] # 'viridis', 'plasma' plt.scatter(rf.Er.values[:N], rf.G.values[:N], c=rf.logs[:N], s=20, cmap='viridis') #plt.xlim(0,3) #plt.ylim(0,1) cb = plt.colorbar() plt.tick_params(labelsize=12) plt.xlabel('$E_r$ [eV]', fontsize=10) plt.ylabel('$\Gamma$ [eV]', fontsize=10) plt.show() ```
github_jupyter
# Peak Detection Feature detection, also referred to as peak detection, is the process by which local maxima that fulfill certain criteria (such as sufficient signal-to-noise ratio) are located in the signal acquired by a given analytical instrument. This process results in “features” associated with the analysis of molecular analytes from the sample under study or from chemical, instrument, or random noise. Typically, feature detection involves a mass dimension (*m/z*) as well as one or more separation dimensions (e.g. drift and/or retention time), the latter offering distinction among isobaric/isotopic features. DEIMoS implements an N-dimensional maximum filter from [scipy.ndimage](https://docs.scipy.org/doc/scipy/reference/ndimage.html) that convolves the instrument signal with a structuring element, also known as a kernel, and compares the result against the input array to identify local maxima as candidate features or peaks. To demonstrate, we will operate on a subset of 2D data to minimize memory usage and computation time. ``` import deimos import numpy as np import matplotlib.pyplot as plt # load data, excluding scanid column ms1 = deimos.load('example_data.h5', key='ms1', columns=['mz', 'drift_time', 'retention_time', 'intensity']) # sum over retention time ms1_2d = deimos.collapse(ms1, keep=['mz', 'drift_time']) # take a subset in m/z ms1_2d = deimos.slice(ms1_2d, by='mz', low=200, high=400) %%time # perform peak detection ms1_peaks = deimos.peakpick.local_maxima(ms1_2d, dims=['mz', 'drift_time'], bins=[9.5, 4.25]) ``` ## Selecting Kernel Size Key to this process is the selection of kernel size, which can vary by instrument, dataset, and even compound. For example, in LC-IMS-MS/MS data, peak width increases with increasing *m/z* and drift time, and also varies in retention time. Ideally, the kernel would be the same size as the N-dimensional peak (i.e. 
wavelets), though computational efficiency considerations for high-dimensional data currently limit the ability to dynamically adjust kernel size. Thus, the selected kernel size should be representative of likely features of interest. This process is exploratory, and selections can be further refined pending an initial processing of the data. To start, we will get a sense of our data by visualizing a high-intensity feature. ``` # get maximal data point mz_i, dt_i, rt_i, intensity_i = ms1.loc[ms1['intensity'] == ms1['intensity'].max(), :].values[0] # subset the raw data feature = deimos.slice(ms1, by=['mz', 'drift_time', 'retention_time'], low=[mz_i - 0.1, dt_i - 1, rt_i - 1], high=[mz_i + 0.2, dt_i + 1, rt_i + 2]) # visualize deimos.plot.multipanel(feature, dpi=150) plt.tight_layout() plt.show() print('{}:\t\t{}'.format('mz', len(feature['mz'].unique()))) print('{}:\t{}'.format('drift_time', len(feature['drift_time'].unique()))) print('{}:\t{}'.format('retention_time', len(feature['retention_time'].unique()))) ``` The number of sampled data points in each dimension informs selection of suitable peak detection parameters, in this case 38 values in *m/z*, 17 values in drift time, and 74 values in retention time. For the kernel to be centered on each "voxel", however, selections must be odd. Due to the multidimensional nature of the data, kernel size need not be exact: two features need only be separated in one dimension, not all dimensions simultaneously. ## Partitioning This dataset is comprised of almost 200,000 unique *m/z* values, 416 unique drift times, and 568 unique retention times. In order to process the data by N-dimensional filter convolution, the data frame-based coordinate format must be converted into a dense array. In this case, a dense array would comprise 4.7E9 cells and, for 32-bit intensities, requiring approximately 174 GB of memory. 
``` print('{}:\t\t{}'.format('mz', len(ms1['mz'].unique()))) print('{}:\t{}'.format('drift_time', len(ms1['drift_time'].unique()))) print('{}:\t{}'.format('retention_time', len(ms1['retention_time'].unique()))) ``` This is of course not tenable for many workstations, necessitating a partitioning utility by which the input may be split along a given dimension, each partition processed separately. Here, we create a `Partitions` object to divide the *m/z* dimension into chunks of 1000 unique values, with a partition overlap of 0.2 Da to ameliorate artifacts arising from artificial partition "edges". Next, its `map` method is invoked to apply peak detection to each partition. The `processes` flag may also be specified to spread the computational load over multiple cores. Memory footprint scales linearly with number of processes. ``` %%time # partition the data partitions = deimos.partition(ms1_2d, split_on='mz', size=500, overlap=0.2) # map peak detection over partitions ms1_peaks_partitioned = partitions.map(deimos.peakpick.local_maxima, dims=['mz', 'drift_time'], bins=[9.5, 4.25], processes=4) ``` With `overlap` selected appropriately, the partitioned result should be identical to the previous result. ``` all(ms1_peaks_partitioned == ms1_peaks) ``` ## Kernel Scaling Peak width in *m/z* and drift time increase with *m/z*. In the example data used here, the sample inverval in *m/z* also increases with increasing *m/z*. This means that our kernel effectively "grows" as *m/z* increases, as kernel is selected by number of such intervals rather than an *m/z* range. ``` # unique m/z values mz_unq = np.unique(ms1_2d['mz']) # m/z sample intervals mz_diff = np.diff(mz_unq) # visualize plt.figure(dpi=150) plt.plot(mz_unq[1:], mz_diff) plt.xlabel('m/z', fontweight='bold') plt.ylabel('Interval', fontweight='bold') plt.show() ``` However, the drift time sample interval is constant throughout the acquisition. 
To accommodate increasing peak width in drift time, we can scale the kernel in that dimension by the *m/z* per partition, scaled by a reference resolution (i.e. the minimum interval in the above). Thus, the drift time kernel size of the first partition will be scaled by a factor of 1 (no change), the last by a factor of ~1.4. This represents an advanced usage scenario and should only be considered with sufficient justification. That is, knowledge of sample intervals in each dimension, peak widths as a function of these sample intervals, and whether the relationship(s) scale linearly. ``` %%time # partition the data partitions = deimos.partition(ms1_2d, split_on='mz', size=500, overlap=0.2) # map peak detection over partitions ms1_peaks_partitioned = partitions.map(deimos.peakpick.local_maxima, dims=['mz', 'drift_time'], bins=[9.5, 4.25], scale_by='mz', ref_res=mz_diff.min(), scale=['drift_time'], processes=4) ``` Note that, though we have ignored retention time, its sample interval in these data is also constant. However, there is no discernable relationship with *m/z*, thus barring use of this scaling functionality. In such cases, simply determining an average, representative kernel size is typically sufficient.
github_jupyter
# Homework 0 ### Due Tuesday, September 10 (but no submission is required) --- Welcome to CS109 / STAT121 / AC209 / E-109 (http://cs109.org/). In this class, we will be using a variety of tools that will require some initial configuration. To ensure everything goes smoothly moving forward, we will setup the majority of those tools in this homework. While some of this will likely be dull, doing it now will enable us to do more exciting work in the weeks that follow without getting bogged down in further software configuration. This homework will not be graded, however it is essential that you complete it timely since it will enable us to set up your accounts. You do not have to hand anything in, with the exception of filling out the online survey. ## Class Survey, Piazza, and Introduction **Class Survey** Please complete the mandatory course survey located [here](https://docs.google.com/spreadsheet/viewform?formkey=dFg1ZFJwLWJ6ZWhWR1JJb0tES3lGMEE6MA#gid=0). It should only take a few moments of your time. Once you fill in the survey we will sign you up to the course forum on Piazza and the dropbox system that you will use to hand in the homework. It is imperative that you fill out the survey on time as we use the provided information to sign you up for these services. **Piazza** Go to [Piazza](https://piazza.com/harvard/fall2013/cs109/home) and sign up for the class using your Harvard e-mail address. You will use Piazza as a forum for discussion, to find team members, to arrange appointments, and to ask questions. Piazza should be your primary form of communication with the staff. Use the staff e-mail (staff@cs109.org) only for individual requests, e.g., to excuse yourself from a mandatory guest lecture. All readings, homeworks, and project descriptions will be announced on Piazza first. **Introduction** Once you are signed up to the Piazza course forum, introduce yourself to your classmates and course staff with a follow-up post in the introduction thread. 
Include your name/nickname, your affiliation, why you are taking this course, and tell us something interesting about yourself (e.g., an industry job, an unusual hobby, past travels, or a cool project you did, etc.). Also tell us whether you have experience with data science. ## Programming expectations All the assignments and labs for this class will use Python and, for the most part, the browser-based IPython notebook format you are currently viewing. Knowledge of Python is not a prerequisite for this course, **provided you are comfortable learning on your own as needed**. While we have strived to make the programming component of this course straightforward, we will not devote much time to teaching prorgramming or Python syntax. Basically, you should feel comfortable with: * How to look up Python syntax on Google and StackOverflow. * Basic programming concepts like functions, loops, arrays, dictionaries, strings, and if statements. * How to learn new libraries by reading documentation. * Asking questions on StackOverflow or Piazza. There are many online tutorials to introduce you to scientific python programming. [Here is one](https://github.com/jrjohansson/scientific-python-lectures) that is very nice. Lectures 1-4 are most relevant to this class. ## Getting Python You will be using Python throughout the course, including many popular 3rd party Python libraries for scientific computing. [Anaconda](http://continuum.io/downloads) is an easy-to-install bundle of Python and most of these libraries. We recommend that you use Anaconda for this course. Please visit [this page](https://github.com/cs109/content/wiki/Installing-Python) and follow the instructions to set up Python <hline> ## Hello, Python The IPython notebook is an application to build interactive computational notebooks. You'll be using them to complete labs and homework. 
Once you've set up Python, please <a href=https://raw.github.com/cs109/content/master/HW0.ipynb download="HW0.ipynb">download this page</a>, and open it with IPython by typing ``` ipython notebook <name_of_downloaded_file> ``` For the rest of the assignment, use your local copy of this page, running on IPython. Notebooks are composed of many "cells", which can contain text (like this one), or code (like the one below). Double click on the cell below, and evaluate it by clicking the "play" button above, for by hitting shift + enter ``` x = [10, 20, 30, 40, 50] for item in x: print "Item is ", item ``` ## Python Libraries We will be using a several different libraries throughout this course. If you've successfully completed the [installation instructions](https://github.com/cs109/content/wiki/Installing-Python), all of the following statements should run. ``` #IPython is what you are using now to run the notebook import IPython print "IPython version: %6.6s (need at least 1.0)" % IPython.__version__ # Numpy is a library for working with Arrays import numpy as np print "Numpy version: %6.6s (need at least 1.7.1)" % np.__version__ # SciPy implements many different numerical algorithms import scipy as sp print "SciPy version: %6.6s (need at least 0.12.0)" % sp.__version__ # Pandas makes working with data tables easier import pandas as pd print "Pandas version: %6.6s (need at least 0.11.0)" % pd.__version__ # Module for plotting import matplotlib print "Mapltolib version: %6.6s (need at least 1.2.1)" % matplotlib.__version__ # SciKit Learn implements several Machine Learning algorithms import sklearn print "Scikit-Learn version: %6.6s (need at least 0.13.1)" % sklearn.__version__ # Requests is a library for getting data from the Web import requests print "requests version: %6.6s (need at least 1.2.3)" % requests.__version__ # Networkx is a library for working with networks import networkx as nx print "NetworkX version: %6.6s (need at least 1.7)" % nx.__version__ 
#BeautifulSoup is a library to parse HTML and XML documents import BeautifulSoup print "BeautifulSoup version:%6.6s (need at least 3.2)" % BeautifulSoup.__version__ #MrJob is a library to run map reduce jobs on Amazon's computers import mrjob print "Mr Job version: %6.6s (need at least 0.4)" % mrjob.__version__ #Pattern has lots of tools for working with data from the internet import pattern print "Pattern version: %6.6s (need at least 2.6)" % pattern.__version__ ``` If any of these libraries are missing or out of date, you will need to [install them](https://github.com/cs109/content/wiki/Installing-Python#installing-additional-libraries) and restart IPython ## Hello matplotlib The notebook integrates nicely with Matplotlib, the primary plotting package for python. This should embed a figure of a sine wave: ``` #this line prepares IPython for working with matplotlib %matplotlib inline # this actually imports matplotlib import matplotlib.pyplot as plt x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10 y = np.sin(x) z = y + np.random.normal(size=30) * .2 plt.plot(x, y, 'ro-', label='A sine wave') plt.plot(x, z, 'b-', label='Noisy sine') plt.legend(loc = 'lower right') plt.xlabel("X axis") plt.ylabel("Y axis") ``` If that last cell complained about the `%matplotlib` line, you need to update IPython to v1.0, and restart the notebook. See the [installation page](https://github.com/cs109/content/wiki/Installing-Python) ## Hello Numpy The Numpy array processing library is the basis of nearly all numerical computing in Python. Here's a 30 second crash course. 
For more details, consult Chapter 4 of Python for Data Analysis, or the [Numpy User's Guide](http://docs.scipy.org/doc/numpy-dev/user/index.html) ``` print "Make a 3 row x 4 column array of random numbers" x = np.random.random((3, 4)) print x print print "Add 1 to every element" x = x + 1 print x print print "Get the element at row 1, column 2" print x[1, 2] print # The colon syntax is called "slicing" the array. print "Get the first row" print x[0, :] print print "Get every 2nd column of the first row" print x[0, ::2] print ``` Print the maximum, minimum, and mean of the array. This does **not** require writing a loop. In the code cell below, type `x.m<TAB>`, to find built-in operations for common array statistics like this ``` #your code here print "Max is ", x.max() print "Min is ", x.min() print "Mean is ", x.mean() ``` Call the `x.max` function again, but use the `axis` keyword to print the maximum of each row in x. ``` #your code here print x.max(axis=1) ``` Here's a way to quickly simulate 500 coin "fair" coin tosses (where the probabily of getting Heads is 50%, or 0.5) ``` x = np.random.binomial(500, .5) print "number of heads:", x ``` Repeat this simulation 500 times, and use the [plt.hist() function](http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist) to plot a histogram of the number of Heads (1s) in each simulation ``` #your code here # 3 ways to run the simulations # loop heads = [] for i in range(500): heads.append(np.random.binomial(500, .5)) # "list comprehension" heads = [np.random.binomial(500, .5) for i in range(500)] # pure numpy heads = np.random.binomial(500, .5, size=500) histogram = plt.hist(heads, bins=10) heads.shape ``` ## The Monty Hall Problem Here's a fun and perhaps surprising statistical riddle, and a good way to get some practice writing python functions In a gameshow, contestants try to guess which of 3 closed doors contain a cash prize (goats are behind the other two doors). 
Of course, the odds of choosing the correct door are 1 in 3. As a twist, the host of the show occasionally opens a door after a contestant makes his or her choice. This door is always one of the two the contestant did not pick, and is also always one of the goat doors (note that it is always possible to do this, since there are two goat doors). At this point, the contestant has the option of keeping his or her original choice, or swtiching to the other unopened door. The question is: is there any benefit to switching doors? The answer surprises many people who haven't heard the question before. We can answer the problem by running simulations in Python. We'll do it in several parts. First, write a function called `simulate_prizedoor`. This function will simulate the location of the prize in many games -- see the detailed specification below: ``` """ Function -------- simulate_prizedoor Generate a random array of 0s, 1s, and 2s, representing hiding a prize between door 0, door 1, and door 2 Parameters ---------- nsim : int The number of simulations to run Returns ------- sims : array Random array of 0s, 1s, and 2s Example ------- >>> print simulate_prizedoor(3) array([0, 0, 2]) """ def simulate_prizedoor(nsim): #compute here return answer #your code here def simulate_prizedoor(nsim): return np.random.randint(0, 3, (nsim)) ``` Next, write a function that simulates the contestant's guesses for `nsim` simulations. Call this function `simulate_guess`. The specs: ``` """ Function -------- simulate_guess Return any strategy for guessing which door a prize is behind. This could be a random strategy, one that always guesses 2, whatever. Parameters ---------- nsim : int The number of simulations to generate guesses for Returns ------- guesses : array An array of guesses. 
Each guess is a 0, 1, or 2 Example ------- >>> print simulate_guess(5) array([0, 0, 0, 0, 0]) """ #your code here def simulate_guess(nsim): return np.zeros(nsim, dtype=np.int) ``` Next, write a function, `goat_door`, to simulate randomly revealing one of the goat doors that a contestant didn't pick. ``` """ Function -------- goat_door Simulate the opening of a "goat door" that doesn't contain the prize, and is different from the contestants guess Parameters ---------- prizedoors : array The door that the prize is behind in each simulation guesses : array THe door that the contestant guessed in each simulation Returns ------- goats : array The goat door that is opened for each simulation. Each item is 0, 1, or 2, and is different from both prizedoors and guesses Examples -------- >>> print goat_door(np.array([0, 1, 2]), np.array([1, 1, 1])) >>> array([2, 2, 0]) """ #your code here def goat_door(prizedoors, guesses): #strategy: generate random answers, and #keep updating until they satisfy the rule #that they aren't a prizedoor or a guess result = np.random.randint(0, 3, prizedoors.size) while True: bad = (result == prizedoors) | (result == guesses) if not bad.any(): return result result[bad] = np.random.randint(0, 3, bad.sum()) ``` Write a function, `switch_guess`, that represents the strategy of always switching a guess after the goat door is opened. ``` """ Function -------- switch_guess The strategy that always switches a guess after the goat door is opened Parameters ---------- guesses : array Array of original guesses, for each simulation goatdoors : array Array of revealed goat doors for each simulation Returns ------- The new door after switching. 
Should be different from both guesses and goatdoors Examples -------- >>> print switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1])) >>> array([2, 0, 0]) """ #your code here def switch_guess(guesses, goatdoors): result = np.zeros(guesses.size) switch = {(0, 1): 2, (0, 2): 1, (1, 0): 2, (1, 2): 1, (2, 0): 1, (2, 1): 0} for i in [0, 1, 2]: for j in [0, 1, 2]: mask = (guesses == i) & (goatdoors == j) if not mask.any(): continue result = np.where(mask, np.ones_like(result) * switch[(i, j)], result) return result ``` Last function: write a `win_percentage` function that takes an array of `guesses` and `prizedoors`, and returns the percent of correct guesses ``` """ Function -------- win_percentage Calculate the percent of times that a simulation of guesses is correct Parameters ----------- guesses : array Guesses for each simulation prizedoors : array Location of prize for each simulation Returns -------- percentage : number between 0 and 100 The win percentage Examples --------- >>> print win_percentage(np.array([0, 1, 2]), np.array([0, 0, 0])) 33.333 """ #your code here def win_percentage(guesses, prizedoors): return 100 * (guesses == prizedoors).mean() ``` Now, put it together. Simulate 10000 games where contestant keeps his original guess, and 10000 games where the contestant switches his door after a goat door is revealed. Compute the percentage of time the contestant wins under either strategy. Is one strategy better than the other? ``` #your code here nsim = 10000 #keep guesses print "Win percentage when keeping original door" print win_percentage(simulate_prizedoor(nsim), simulate_guess(nsim)) #switch pd = simulate_prizedoor(nsim) guess = simulate_guess(nsim) goats = goat_door(pd, guess) guess = switch_guess(guess, goats) print "Win percentage when switching doors" print win_percentage(pd, guess).mean() ``` Many people find this answer counter-intuitive (famously, PhD mathematicians have incorrectly claimed the result must be wrong. 
Clearly, none of them knew Python). One of the best ways to build intuition about why opening a Goat door affects the odds is to re-run the experiment with 100 doors and one prize. If the game show host opens 98 goat doors after you make your initial selection, would you want to keep your first pick or switch? Can you generalize your simulation code to handle the case of `n` doors?
github_jupyter
``` import os import numpy as np from glob import glob from deformation_functions import * from menpo_functions import * from logging_functions import * from data_loading_functions import * from time import time from scipy.misc import imsave %matplotlib inline dataset='training' img_dir='/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/' train_crop_dir = 'crop_gt_margin_0.25' img_dir_ns=os.path.join(img_dir,train_crop_dir+'_ns') bb_dir = os.path.join(img_dir, 'Bounding_Boxes') bb_type='gt' gt = bb_type=='gt' margin = 0.25 image_size = 256 mode='TRAIN' augment_basic=True augment_texture=True augment_geom=True bb_dictionary = load_bb_dictionary(bb_dir, mode=mode, test_data=dataset) def augment_menpo_img_ns(img, img_dir_ns, p_ns=0, ns_ind=None): """texture style image augmentation using stylized copies in *img_dir_ns*""" img = img.copy() if p_ns > 0.5: ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*')) num_augs = len(ns_augs) if num_augs > 0: if ns_ind is None or ns_ind >= num_augs: ns_ind = np.random.randint(0, num_augs) ns_aug = mio.import_image(ns_augs[ns_ind]) ns_pixels = ns_aug.pixels img.pixels = ns_pixels return img def augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=0, ns_ind=None): """texture style image augmentation using stylized copies in *img_dir_ns*""" img = img.copy() if p_ns > 0.5: ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*')) num_augs = len(ns_augs) if num_augs > 0: if ns_ind is None or ns_ind >= num_augs: ns_ind = np.random.randint(0, num_augs) ns_aug = mio.import_image(ns_augs[ns_ind]) ns_pixels = ns_aug.pixels return img def augment_menpo_img_geom_dont_apply(img, p_geom=0): """geometric style image augmentation using random face deformations""" img = img.copy() if p_geom > 0.5: lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom) return img def load_menpo_image_list( img_dir, 
train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_geom( img_dir, train_crop_dir, img_dir_ns, mode, 
bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_texture( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, 
image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom(img, p_geom=1. * (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_artistic( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, 
bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom_dont_apply(img, p_geom=1. * (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list plt.figure(figsize=[10,10]) num_augs=9 ns_inds = np.arange(num_augs) for i in range(16): if i % num_augs == 0: np.random.shuffle(ns_inds) print 
ns_inds img_list = load_menpo_image_list( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) plt.subplot(4,4,i +1) img_list[0].view() # plt.savefig('g.png',bbox='tight') outdir = os.path.join('/Users/arik/Desktop/test_art_data3') if not os.path.exists(outdir): os.mkdir(outdir) aug_geom_dir = os.path.join(outdir,'aug_geom') aug_texture_dir = os.path.join(outdir,'aug_texture') aug_geom_texture_dir = os.path.join(outdir,'aug_geom_texture') aug_basic_dir = os.path.join(outdir,'aug_basic') if not os.path.exists(aug_texture_dir): os.mkdir(aug_texture_dir) if not os.path.exists(aug_geom_dir): os.mkdir(aug_geom_dir) if not os.path.exists(aug_geom_texture_dir): os.mkdir(aug_geom_texture_dir) if not os.path.exists(aug_basic_dir): os.mkdir(aug_basic_dir) num_train_images = 3148. train_iter=100000 batch_size = 6 num_epochs = int(np.ceil((1. * train_iter) / (1. 
* num_train_images / batch_size)))+1 num_augs=9 num_epochs = 10 debug_data_size =5 debug=True aug_geom = True aug_texture = True np.random.seed(1234) ns_inds = np.arange(num_augs) if not aug_geom and aug_texture: save_aug_path = aug_texture_dir elif aug_geom and not aug_texture: save_aug_path = aug_geom_dir elif aug_geom and aug_texture: save_aug_path = aug_geom_texture_dir else: save_aug_path = aug_basic_dir print ('saving augmented images: aug_geom='+str(aug_geom)+' aug_texture='+str(aug_texture)+' : '+str(save_aug_path)) for i in range(num_epochs): print ('saving augmented images of epoch %d/%d'%(i+1,num_epochs)) if not os.path.exists(os.path.join(save_aug_path,str(i))): os.mkdir(os.path.join(save_aug_path,str(i))) if i % num_augs == 0: np.random.shuffle(ns_inds) if not aug_geom and aug_texture: img_list = load_menpo_image_list_no_geom( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) elif aug_geom and not aug_texture: img_list = load_menpo_image_list_no_texture( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) elif aug_geom and aug_texture: img_list = load_menpo_image_list( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) else: img_list = load_menpo_image_list_no_artistic( img_dir=img_dir, 
train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) if debug: img_list=img_list[:debug_data_size] for im in img_list: if im.pixels.shape[0] == 1: im_pixels = gray2rgb(np.squeeze(im.pixels)) else: im_pixels = np.rollaxis(im.pixels,0,3) imsave( os.path.join(os.path.join(save_aug_path,str(i)),im.path.name.split('.')[0]+'.png'),im_pixels) mio.export_landmark_file(im.landmarks['PTS'],os.path.join(os.path.join(save_aug_path,str(i)),im.path.name.split('.')[0]+'.pts'),overwrite=True) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import cython import timeit import math %load_ext cython ``` # Native code compilation We will see how to convert Python code to native compiled code. We will use the example of calculating the pairwise distance between a set of vectors, a $O(n^2)$ operation. For native code compilation, it is usually preferable to use explicit for loops and minimize the use of `numpy` vectorization and broadcasting because - It makes it easier for the `numba` JIT to optimize - It is easier to "cythonize" - It is easier to port to C++ However, use of vectors and matrices is fine especially if you will be porting to use a C++ library such as Eigen. ## Timing code ### Manual ``` import time def f(n=1): start = time.time() time.sleep(n) elapsed = time.time() - start return elapsed f(1) ``` ### Clock time ``` %%time time.sleep(1) ``` ### Using `timeit` The `-r` argument says how many runs to average over, and `-n` says how many times to run the function in a loop per run. ``` %timeit time.sleep(0.01) %timeit -r3 time.sleep(0.01) %timeit -n10 time.sleep(0.01) %timeit -r3 -n10 time.sleep(0.01) ``` ### Time unit conversions ``` 1 s = 1,000 ms 1 ms = 1,000 µs 1 µs = 1,000 ns ``` ## Profiling If you want to identify bottlenecks in a Python script, do the following: - First make sure that the script is modular - i.e. 
it consists mainly of function calls - Each function should be fairly small and only do one thing - Then run a profiler to identify the bottleneck function(s) and optimize them See the Python docs on [profiling Python code](https://docs.python.org/3/library/profile.html) Profiling can be done in a notebook with %prun, with the following readouts as column headers: - ncalls - for the number of calls, - tottime - for the total time spent in the given function (and excluding time made in calls to sub-functions), - percall - is the quotient of tottime divided by ncalls - cumtime - is the total time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions. - percall - is the quotient of cumtime divided by primitive calls - filename:lineno(function) - provides the respective data of each function ``` def foo1(n): return np.sum(np.square(np.arange(n))) def foo2(n): return sum(i*i for i in range(n)) def foo3(n): [foo1(n) for i in range(10)] foo2(n) def foo4(n): return [foo2(n) for i in range(100)] def work(n): foo1(n) foo2(n) foo3(n) foo4(n) %%time work(int(1e5)) %prun -q -D work.prof work(int(1e5)) import pstats p = pstats.Stats('work.prof') p.print_stats() pass p.sort_stats('time', 'cumulative').print_stats('foo') pass p.sort_stats('ncalls').print_stats(5) pass ``` ## Optimizing a function Our example will be to optimize a function that calculates the pairwise distance between a set of vectors. We first use a built-in function from`scipy` to check that our answers are right and also to benchmark how our code compares in speed to an optimized compiled routine. 
``` from scipy.spatial.distance import squareform, pdist n = 100 p = 100 xs = np.random.random((n, p)) sol = squareform(pdist(xs)) %timeit -r3 -n10 squareform(pdist(xs)) ``` ## Python ### Simple version ``` def pdist_py(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) return A ``` Note that we - first check that the output is **right** - then check how fast the code is ``` func = pdist_py print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Exploiting symmetry ``` def pdist_sym(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) A += A.T return A func = pdist_sym print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Vectorizing inner loop ``` def pdist_vec(xs): """Vectorize inner loop.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): A[i,j] = np.sqrt(np.sum((xs[i] - xs[j])**2)) A += A.T return A func = pdist_vec print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Broadcasting and vectorizing Note that the broadcast version does twice as much work as it does not exploit symmetry. ``` def pdist_numpy(xs): """Fully vectroized version.""" return np.sqrt(np.square(xs[:, None] - xs[None, :]).sum(axis=-1)) func = pdist_numpy print(np.allclose(func(xs), sol)) %timeit -r3 -n10 squareform(func(xs)) ``` ## JIT with `numba` We use the `numba.jit` decorator which will trigger generation and execution of compiled code when the function is first called. 
``` from numba import jit ``` ### Using `jit` as a function ``` pdist_numba_py = jit(pdist_py, nopython=True, cache=True) func = pdist_numba_py print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Using `jit` as a decorator ``` @jit(nopython=True, cache=True) def pdist_numba_py_1(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) return A func = pdist_numba_py_1 print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Can we make the code faster? Note that in the inner loop, we are updating a matrix when we only need to update a scalar. Let's fix this. ``` @jit(nopython=True, cache=True) def pdist_numba_py_2(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): d = 0.0 for k in range(p): d += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(d) return A func = pdist_numba_py_2 print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Can we make the code even faster? We can also try to exploit symmetry. ``` @jit(nopython=True, cache=True) def pdist_numba_py_sym(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(d) A += A.T return A func = pdist_numba_py_sym print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Does `jit` work with vectorized code? ``` pdist_numba_vec = jit(pdist_vec, nopython=True, cache=True) %timeit -r3 -n10 pdist_vec(xs) func = pdist_numba_vec print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Does `jit` work with broadcasting? 
``` pdist_numba_numpy = jit(pdist_numpy, nopython=True, cache=True) %timeit -r3 -n10 pdist_numpy(xs) func = pdist_numba_numpy try: print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) except Exception as e: print(e) ``` #### We need to use `reshape` to broadcast ``` def pdist_numpy_(xs): """Fully vectroized version.""" return np.sqrt(np.square(xs.reshape(n,1,p) - xs.reshape(1,n,p)).sum(axis=-1)) pdist_numba_numpy_ = jit(pdist_numpy_, nopython=True, cache=True) %timeit -r3 -n10 pdist_numpy_(xs) func = pdist_numba_numpy_ print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Summary - `numba` appears to work best with converting fairly explicit Python code - This might change in the future as the `numba` JIT compiler becomes more sophisticated - Always check optimized code for correctness - We can use `timeit` magic as a simple way to benchmark functions ## Cython Cython is an Ahead Of Time (AOT) compiler. It compiles the code and replaces the function invoked with the compiled version. In the notebook, calling `%cython -a` magic shows code colored by how many Python C API calls are being made. You want to reduce the yellow as much as possible. ``` %%cython -a import numpy as np def pdist_cython_1(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A def pdist_base(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A %timeit -r3 -n1 pdist_base(xs) func = pdist_cython_1 print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ``` ## Cython with static types - We provide types for all variables so that Cython can optimize their compilation to C code. - Note `numpy` functions are optimized for working with `ndarrays` and have unnecessary overhead for scalars. 
We therefor replace them with math functions from the C `math` library. ``` %%cython -a import cython import numpy as np cimport numpy as np from libc.math cimport sqrt, pow @cython.boundscheck(False) @cython.wraparound(False) def pdist_cython_2(double[:, :] xs): cdef int n, p cdef int i, j, k cdef double[:, :] A cdef double d n = xs.shape[0] p = xs.shape[1] A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += pow(xs[i,k] - xs[j,k],2) A[i,j] = sqrt(d) for i in range(1, n): for j in range(i): A[i, j] = A[j, i] return A func = pdist_cython_2 print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ``` ## Wrapping C++ cdoe ### Function to port ```python def pdist_base(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A ``` ### First check that the function works as expected ``` %%file main.cpp #include <iostream> #include <Eigen/Dense> #include <cmath> using std::cout; // takes numpy array as input and returns another numpy array Eigen::MatrixXd pdist(Eigen::MatrixXd xs) { int n = xs.rows() ; int p = xs.cols(); Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n); for (int i=0; i<n; i++) { for (int j=i+1; j<n; j++) { double d = 0; for (int k=0; k<p; k++) { d += std::pow(xs(i,k) - xs(j,k), 2); } A(i, j) = std::sqrt(d); } } A += A.transpose().eval(); return A; } int main() { using namespace Eigen; MatrixXd A(3,2); A << 0, 0, 3, 4, 5, 12; std::cout << pdist(A) << "\n"; } %%bash g++ -o main.exe main.cpp -I./eigen3 %%bash ./main.exe A = np.array([ [0, 0], [3, 4], [5, 12] ]) squareform(pdist(A)) ``` ### Now use the boiler plate for wrapping ``` %%file wrap.cpp <% cfg['compiler_args'] = ['-std=c++11'] cfg['include_dirs'] = ['./eigen3'] setup_pybind11(cfg) %> #include <pybind11/pybind11.h> #include <pybind11/eigen.h> // takes numpy array as input and returns another numpy array Eigen::MatrixXd 
pdist(Eigen::MatrixXd xs) { int n = xs.rows() ; int p = xs.cols(); Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n); for (int i=0; i<n; i++) { for (int j=i+1; j<n; j++) { double d = 0; for (int k=0; k<p; k++) { d += std::pow(xs(i,k) - xs(j,k), 2); } A(i, j) = std::sqrt(d); } } A += A.transpose().eval(); return A; } PYBIND11_PLUGIN(wrap) { pybind11::module m("wrap", "auto-compiled c++ extension"); m.def("pdist", &pdist); return m.ptr(); } import cppimport import numpy as np code = cppimport.imp("wrap") print(code.pdist(A)) func = code.pdist print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ```
github_jupyter
# MSOA Mapping - England ``` import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import numpy as np from shapely.geometry import Point from sklearn.neighbors import KNeighborsRegressor import rasterio as rst from rasterstats import zonal_stats %matplotlib inline path = r"[CHANGE THIS PATH]\England\\" data = pd.read_csv(path + "final_data.csv", index_col = 0) ``` # Convert to GeoDataFrame ``` geo_data = gpd.GeoDataFrame(data = data, crs = {'init':'epsg:27700'}, geometry = data.apply(lambda geom: Point(geom['oseast1m'],geom['osnrth1m']),axis=1)) geo_data.head() f, (ax1, ax2, ax3) = plt.subplots(1,3, figsize = (16,6), sharex = True, sharey = True) geo_data[geo_data['Year'] == 2016].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax1); geo_data[geo_data['Year'] == 2017].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax2); geo_data[geo_data['Year'] == 2018].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax3); ``` ## k-nearest neighbour interpolation Non-parametric interpolation of loneliness based on local set of _k_ nearest neighbours for each cell in our evaluation grid. Effectively becomes an inverse distance weighted (idw) interpolation when weights are set to be distance based. ``` def idw_model(k, p): def _inv_distance_index(weights, index=p): return (test==0).astype(int) if np.any(weights == 0) else 1. 
/ weights**index return KNeighborsRegressor(k, weights=_inv_distance_index) def grid(xmin, xmax, ymin, ymax, cellsize): # Set x and y ranges to accommodate cellsize xmin = (xmin // cellsize) * cellsize xmax = -(-xmax // cellsize) * cellsize # ceiling division ymin = (ymin // cellsize) * cellsize ymax = -(-ymax // cellsize) * cellsize # Make meshgrid x = np.linspace(xmin,xmax,(xmax-xmin)/cellsize) y = np.linspace(ymin,ymax,(ymax-ymin)/cellsize) return np.meshgrid(x,y) def reshape_grid(xx,yy): return np.append(xx.ravel()[:,np.newaxis],yy.ravel()[:,np.newaxis],1) def reshape_image(z, xx): return np.flip(z.reshape(np.shape(xx)),0) def idw_surface(locations, values, xmin, xmax, ymin, ymax, cellsize, k=5, p=2): # Make and fit the idw model idw = idw_model(k,p).fit(locations, values) # Make the grid to estimate over xx, yy = grid(xmin, xmax, ymin, ymax, cellsize) # reshape the grid for estimation xy = reshape_grid(xx,yy) # Predict the grid values z = idw.predict(xy) # reshape to image array z = reshape_image(z, xx) return z ``` ## 2016 data ``` # Get point locations and values from data points = geo_data[geo_data['Year'] == 2016][['oseast1m','osnrth1m']].values vals = geo_data[geo_data['Year'] == 2016]['loneills'].values surface2016 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2) # Look at surface f, ax = plt.subplots(figsize = (8,10)) ax.imshow(surface2016, cmap='Reds') ax.set_aspect('equal') ``` ## 2017 Data ``` # Get point locations and values from data points = geo_data[geo_data['Year'] == 2017][['oseast1m','osnrth1m']].values vals = geo_data[geo_data['Year'] == 2017]['loneills'].values surface2017 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2) # Look at surface f, ax = plt.subplots(figsize = (8,10)) ax.imshow(surface2017, cmap='Reds') ax.set_aspect('equal') ``` ## 2018 Data Get minimum and maximum bounds from the data. 
Round these down (in case of the 'min's) and up (in case of the 'max's) to get the values for `idw_surface()` ``` print("xmin = ", geo_data['oseast1m'].min(), "\n\r", "xmax = ", geo_data['oseast1m'].max(), "\n\r", "ymin = ", geo_data['osnrth1m'].min(), "\n\r", "ymax = ", geo_data['osnrth1m'].max()) xmin = 90000 xmax = 656000 ymin = 10000 ymax = 654000 # Get point locations and values from data points = geo_data[geo_data['Year'] == 2018][['oseast1m','osnrth1m']].values vals = geo_data[geo_data['Year'] == 2018]['loneills'].values surface2018 = idw_surface(points, vals, xmin,xmax,ymin,ymax,250,7,2) # Look at surface f, ax = plt.subplots(figsize = (8,10)) ax.imshow(surface2018, cmap='Reds') ax.set_aspect('equal') ``` # Extract Values to MSOAs Get 2011 MSOAs from the Open Geography Portal: http://geoportal.statistics.gov.uk/ ``` # Get MSOAs which we use to aggregate the loneills variable. #filestring = './Data/MSOAs/Middle_Layer_Super_Output_Areas_December_2011_Full_Clipped_Boundaries_in_England_and_Wales.shp' filestring = r'[CHANGE THIS PATH]\Data\Boundaries\England and Wales\Middle_Layer_Super_Output_Areas_December_2011_Super_Generalised_Clipped_Boundaries_in_England_and_Wales.shp' msoas = gpd.read_file(filestring) msoas.to_crs({'init':'epsg:27700'}) # drop the Wales MSOAs msoas = msoas[msoas['msoa11cd'].str[:1] == 'E'].copy() # Get GB countries data to use for representation #gb = gpd.read_file('./Data/GB/Countries_December_2017_Generalised_Clipped_Boundaries_in_UK_WGS84.shp') #gb = gb.to_crs({'init':'epsg:27700'}) # get England #eng = gb[gb['ctry17nm'] == 'England'].copy() # Make affine transform for raster trans = rst.Affine.from_gdal(xmin-125,250,0,ymax+125,0,-250) # NB This process is slooow - write bespoke method? 
# 2016 #msoa_zones = zonal_stats(msoas['geometry'], surface2016, affine = trans, stats = 'mean', nodata = np.nan) #msoas['loneills_2016'] = list(map(lambda x: x['mean'] , msoa_zones)) # 2017 #msoa_zones = zonal_stats(msoas['geometry'], surface2017, affine = trans, stats = 'mean', nodata = np.nan) #msoas['loneills_2017'] = list(map(lambda x: x['mean'] , msoa_zones)) # 2018 msoa_zones = zonal_stats(msoas['geometry'], surface2018, affine = trans, stats = 'mean', nodata = np.nan) msoas['loneills_2018'] = list(map(lambda x: x['mean'] , msoa_zones)) # Check out the distributions of loneills by MSOA f, [ax1, ax2, ax3] = plt.subplots(1,3, figsize=(14,5), sharex = True, sharey=True) #ax1.hist(msoas['loneills_2016'], bins = 30) #ax2.hist(msoas['loneills_2017'], bins = 30) ax3.hist(msoas['loneills_2018'], bins = 30) ax1.set_title("2016") ax2.set_title("2017") ax3.set_title("2018"); bins = [-10, -5, -3, -2, -1, 1, 2, 3, 5, 10, 22] labels = ['#01665e','#35978f', '#80cdc1','#c7eae5','#f5f5f5','#f6e8c3','#dfc27d','#bf812d','#8c510a','#543005'] #msoas['loneills_2016_class'] = pd.cut(msoas['loneills_2016'], bins, labels = labels) #msoas['loneills_2017_class'] = pd.cut(msoas['loneills_2017'], bins, labels = labels) msoas['loneills_2018_class'] = pd.cut(msoas['loneills_2018'], bins, labels = labels) msoas['loneills_2018_class'] = msoas.loneills_2018_class.astype(str) # convert categorical to string f, (ax1, ax2, ax3) = plt.subplots(1,3,figsize = (16,10)) #msoas.plot(color = msoas['loneills_2016_class'], ax=ax1) #msoas.plot(color = msoas['loneills_2017_class'], ax=ax2) msoas.plot(color = msoas['loneills_2018_class'], ax=ax3) #gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax1) #gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax2) #gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax3) # restrict to England #ax1.set_xlim([82672,656000]) #ax1.set_ylim([5342,658000]) #ax2.set_xlim([82672,656000]) #ax2.set_ylim([5342,658000]) 
#ax3.set_xlim([82672,656000]) #ax3.set_ylim([5342,658000]) # Make a legend # make bespoke legend from matplotlib.patches import Patch handles = [] ranges = ["-10, -5","-5, -3","-3, -2","-2, -1","-1, 1","1, 2","3, 3","3, 5","5, 10","10, 22"] for color, label in zip(labels,ranges): handles.append(Patch(facecolor = color, label = label)) ax1.legend(handles = handles, loc = 2); # Save out msoa data as shapefile and geojson msoas.to_file(path + "msoa_loneliness.shp", driver = 'ESRI Shapefile') #msoas.to_file(path + "msoa_loneliness.geojson", driver = 'GeoJSON') # save out msoa data as csv msoas.to_csv(path + "msoa_loneliness.csv") ```
github_jupyter
# Intro to Object Detection Colab Welcome to the object detection colab! This demo will take you through the steps of running an "out-of-the-box" detection model in SavedModel format on a collection of images. Imports ``` !pip install -U --pre tensorflow=="2.2.0" import os import pathlib # Clone the tensorflow models repository if it doesn't already exist if "models" in pathlib.Path.cwd().parts: while "models" in pathlib.Path.cwd().parts: os.chdir('..') elif not pathlib.Path('models').exists(): !git clone --depth 1 https://github.com/tensorflow/models # Install the Object Detection API %%bash cd models/research/ protoc object_detection/protos/*.proto --python_out=. cp object_detection/packages/tf2/setup.py . python -m pip install . import io import os import scipy.misc import numpy as np import six import time from six import BytesIO import matplotlib import matplotlib.pyplot as plt from PIL import Image, ImageDraw, ImageFont import tensorflow as tf from object_detection.utils import visualization_utils as viz_utils %matplotlib inline def load_image_into_numpy_array(path): """Load an image from file into a numpy array. Puts image into numpy array to feed into tensorflow graph. Note that by convention we put it into a numpy array with shape (height, width, channels), where channels=3 for RGB. 
Args: path: a file path (this can be local or on colossus) Returns: uint8 numpy array with shape (img_height, img_width, 3) """ img_data = tf.io.gfile.GFile(path, 'rb').read() image = Image.open(BytesIO(img_data)) (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 3)).astype(np.uint8) # Load the COCO Label Map category_index = { 1: {'id': 1, 'name': 'person'}, 2: {'id': 2, 'name': 'bicycle'}, 3: {'id': 3, 'name': 'car'}, 4: {'id': 4, 'name': 'motorcycle'}, 5: {'id': 5, 'name': 'airplane'}, 6: {'id': 6, 'name': 'bus'}, 7: {'id': 7, 'name': 'train'}, 8: {'id': 8, 'name': 'truck'}, 9: {'id': 9, 'name': 'boat'}, 10: {'id': 10, 'name': 'traffic light'}, 11: {'id': 11, 'name': 'fire hydrant'}, 13: {'id': 13, 'name': 'stop sign'}, 14: {'id': 14, 'name': 'parking meter'}, 15: {'id': 15, 'name': 'bench'}, 16: {'id': 16, 'name': 'bird'}, 17: {'id': 17, 'name': 'cat'}, 18: {'id': 18, 'name': 'dog'}, 19: {'id': 19, 'name': 'horse'}, 20: {'id': 20, 'name': 'sheep'}, 21: {'id': 21, 'name': 'cow'}, 22: {'id': 22, 'name': 'elephant'}, 23: {'id': 23, 'name': 'bear'}, 24: {'id': 24, 'name': 'zebra'}, 25: {'id': 25, 'name': 'giraffe'}, 27: {'id': 27, 'name': 'backpack'}, 28: {'id': 28, 'name': 'umbrella'}, 31: {'id': 31, 'name': 'handbag'}, 32: {'id': 32, 'name': 'tie'}, 33: {'id': 33, 'name': 'suitcase'}, 34: {'id': 34, 'name': 'frisbee'}, 35: {'id': 35, 'name': 'skis'}, 36: {'id': 36, 'name': 'snowboard'}, 37: {'id': 37, 'name': 'sports ball'}, 38: {'id': 38, 'name': 'kite'}, 39: {'id': 39, 'name': 'baseball bat'}, 40: {'id': 40, 'name': 'baseball glove'}, 41: {'id': 41, 'name': 'skateboard'}, 42: {'id': 42, 'name': 'surfboard'}, 43: {'id': 43, 'name': 'tennis racket'}, 44: {'id': 44, 'name': 'bottle'}, 46: {'id': 46, 'name': 'wine glass'}, 47: {'id': 47, 'name': 'cup'}, 48: {'id': 48, 'name': 'fork'}, 49: {'id': 49, 'name': 'knife'}, 50: {'id': 50, 'name': 'spoon'}, 51: {'id': 51, 'name': 'bowl'}, 52: {'id': 52, 'name': 'banana'}, 
53: {'id': 53, 'name': 'apple'}, 54: {'id': 54, 'name': 'sandwich'}, 55: {'id': 55, 'name': 'orange'}, 56: {'id': 56, 'name': 'broccoli'}, 57: {'id': 57, 'name': 'carrot'}, 58: {'id': 58, 'name': 'hot dog'}, 59: {'id': 59, 'name': 'pizza'}, 60: {'id': 60, 'name': 'donut'}, 61: {'id': 61, 'name': 'cake'}, 62: {'id': 62, 'name': 'chair'}, 63: {'id': 63, 'name': 'couch'}, 64: {'id': 64, 'name': 'potted plant'}, 65: {'id': 65, 'name': 'bed'}, 67: {'id': 67, 'name': 'dining table'}, 70: {'id': 70, 'name': 'toilet'}, 72: {'id': 72, 'name': 'tv'}, 73: {'id': 73, 'name': 'laptop'}, 74: {'id': 74, 'name': 'mouse'}, 75: {'id': 75, 'name': 'remote'}, 76: {'id': 76, 'name': 'keyboard'}, 77: {'id': 77, 'name': 'cell phone'}, 78: {'id': 78, 'name': 'microwave'}, 79: {'id': 79, 'name': 'oven'}, 80: {'id': 80, 'name': 'toaster'}, 81: {'id': 81, 'name': 'sink'}, 82: {'id': 82, 'name': 'refrigerator'}, 84: {'id': 84, 'name': 'book'}, 85: {'id': 85, 'name': 'clock'}, 86: {'id': 86, 'name': 'vase'}, 87: {'id': 87, 'name': 'scissors'}, 88: {'id': 88, 'name': 'teddy bear'}, 89: {'id': 89, 'name': 'hair drier'}, 90: {'id': 90, 'name': 'toothbrush'}, } # Download the saved model and put it into models/research/object_detection/test_data/ !wget http://download.tensorflow.org/models/object_detection/tf2/20200711/efficientdet_d5_coco17_tpu-32.tar.gz !tar -xf efficientdet_d5_coco17_tpu-32.tar.gz !mv efficientdet_d5_coco17_tpu-32/ models/research/object_detection/test_data/ start_time = time.time() tf.keras.backend.clear_session() detect_fn = tf.saved_model.load('models/research/object_detection/test_data/efficientdet_d5_coco17_tpu-32/saved_model/') end_time = time.time() elapsed_time = end_time - start_time print('Elapsed time: ' + str(elapsed_time) + 's') import time image_dir = 'models/research/object_detection/test_images' elapsed = [] for i in range(2): image_path = os.path.join(image_dir, 'image' + str(i + 1) + '.jpg') image_np = load_image_into_numpy_array(image_path) input_tensor = 
np.expand_dims(image_np, 0) start_time = time.time() detections = detect_fn(input_tensor) end_time = time.time() elapsed.append(end_time - start_time) plt.rcParams['figure.figsize'] = [42, 21] label_id_offset = 1 image_np_with_detections = image_np.copy() viz_utils.visualize_boxes_and_labels_on_image_array( image_np_with_detections, detections['detection_boxes'][0].numpy(), detections['detection_classes'][0].numpy().astype(np.int32), detections['detection_scores'][0].numpy(), category_index, use_normalized_coordinates=True, max_boxes_to_draw=200, min_score_thresh=.40, agnostic_mode=False) plt.subplot(2, 1, i+1) plt.imshow(image_np_with_detections) mean_elapsed = sum(elapsed) / float(len(elapsed)) print('Elapsed time: ' + str(mean_elapsed) + ' second per image') ```
github_jupyter
# Multi-Agent Reinforcement Learning ### Where do we see it? Multi agent reinforcement learning is a type of Reinforcement Learning which involves more than one agent interacting with an environment. There are many examples of Multi Agent systems around us Be it early morning with all the cars going to work <img src="image/cars_on_the_street.png" alt="Cars on the Street" align="left"/> Or your soccer players on the field <img src="image/soccer.png" alt="Soccer Players" align="left"/> Or Bees working inside a honeycomb <img src="image/bees.png" alt="Honeybees in a honeycomb" align="left" /> Let's consider a case where an autonomous car is driving you to office. And its goal is to safely take you to the office in time <img src="image/autonomous_car.png" alt="Autonomous Cars" align="left" /> Anytime it has to accelerate, break or change lanes.... <img src="image/car_learning.png" alt="car_learning" align="left"/> It does so by considering the other cars in it vicinity <img src="image/car_overtaking.png" alt="Car Overtaking" align="left"/> Other cars are trying to do the same as they get more and more driving experience If you contrast this to a scenario where the car is alone on the street and driving around, it can go as fast as possible so the task of driving becomes relatively simpler and so the agent does not ever learn the complications that come with driving. This is nothing but a Multi Agent system where multiple agents interact with one another. In a multi-agent system each agent may or may not know about the other agents that are present in the system ## Markov Game Framework Consider a single RL Drone Agent and its task is to pickup a package <img src="image/drone1.png" align="left"/> It has a set of possible actions that it can take. 
It can go Right <img src="image/drone2.png" align="left"/> Left <img src="image/drone3.png"/> Up <img src="image/drone4.png"/> Down <img src="image/drone5.png"/> And Grasping <img src="image/drone6.png"/> And we decide that we give it a reward of +50 if it picks up the package <img src="image/drone7.png"/> And -1 if it drops the package <img src="image/drone8.png"/> Now the difference in Multi Agent RL is that we have another agent, in this case another drone in our environment. And now both the drones will collectively try to grasp the package. They're both trying to observe the package from their own perspective. <img src="image/drone9.png"/> They both have their own policies that returned an action for their observations <img src="image/drone10.png"/> Both also have their own set of actions <img src="image/drone12.png" /> The main thing about Multi-Agent RL is that they have a joint set of actions that they can take to interact in their environment. Both the left drone and the right drone must begin the action. <img src="image/drone13.png"/> For example, the pair DL is when the left drone moves down and the right drone moves to the left <img src="image/drone15.png"/> This example illustrates the Markov Game Framework A Markov Game is a tuple written as this <img src="image/drone16.png"/> n: Number of Agents S: Set of states in the environment Ai: A is the set of actions in the environment by agent i Oi: O is the set of observations in the environment by agent i Ri: R is the set of rewards for the actions taken in the environment by agent i $\pi$i: $\pi$ is the set of policy of agent i T: T is Transition Function, given the current state and joint action it gives a probability distribution over the set of possible states <img src="image/markov_eqn.png"/> Even here, the State transitions are Markovian, that is the MDP only depends upon the current state and the action take in this state. 
However, when it comes to the Transition function, it depends upon the Joint action taken in the current state ## Multi Agent Environment Types: * **Cooperative Environments**: An environment where multiple agents have to accomplish a goal by working together is called a cooperative environment. In this kind of environment, when both agents are able to successfully complete a task together, they are rewarded and so both of them learn to cooperate in the environment. A good example for this kind of environment is the example we covered above where two drones have to work together to pick a package and deliver it. * **Competitive Environments**: An environment where multiple agents have to compete with each other to reach their goal is called a Competitive Environment. In this kind of environment, each agent is tasked with a similar goal which can only be achieved by one of them and the agent that is able to achieve the goal is rewarded. This reinforces the idea of competing with the other agent in the environment. A good example for this kind of environment is one you will see an implementation of very soon, which is the Tennis Environment in which we work with two agents present on opposite sides of the net and both are tasked with not letting the ball drop on their side. And if the ball drops on the opponent's side then they are rewarded. * **Mixed Environments**: An environment where multiple agents have to both cooperate and compete with each other by interacting with their environment is called a Mixed Environment. In this kind of environment, each agent is tasked with achieving a goal for which they not only have to cooperate with other agents in the environment but also compete with them. Here depending upon which has more preference, you will assign higher rewards for the kind of action you prefer your agent to fulfill. Say, giving higher positive reward to cooperation and lower positive reward to competition. 
In this kind of setting the agents would cooperate more and compete less, but they will do both the actions. A good example for Mixed Environment is a Traffic Control setting, where each agent is tasked with reaching their goal as fast as possible. For a Traffic Control environment, each agent has to adhere to the traffic rules and make sure that it does not crash into other agents while driving, but at the same time they have to overtake the other agents in order to reach their goal faster while driving within the speed limits. #### Copyright 2020 Sonali Vinodkumar Singh Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
github_jupyter
``` %matplotlib notebook from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt ``` # Reflect Tables into SQLAlchemy ORM ``` # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func engine = create_engine("sqlite:///hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station # Create our session (link) from Python to the DB session = Session(engine) ``` # Exploratory Climate Analysis ``` # Design a query to retrieve the last 12 months of precipitation data and plot the results # Calculate the date 1 year ago from today prev_year = dt.date.today() - dt.timedelta(days=365) # Perform a query to retrieve the data and precipitation scores results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all() # Save the query results as a Pandas DataFrame and set the index to the date column df = pd.DataFrame(results, columns=['date', 'precipitation']) df.set_index(df['date'], inplace=True) # Sort the dataframe by date df.sort_values("date") # Use Pandas Plotting with Matplotlib to plot the data df.plot(x_compat=True) # Rotate the xticks for the dates plt.xticks(rotation='45') plt.tight_layout() # Use Pandas to calcualte the summary statistics for the precipitation data df.describe() # How many stations are available in this dataset? session.query(func.count(Station.station)).all() # What are the most active stations? # List the stations and the counts in descending order. 
session.query(Measurement.station, func.count(Measurement.station)).\ group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all() # Using the station id from the previous query, calculate the lowest temperature recorded, # highest temperature recorded, and average temperature most active station? session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\ filter(Measurement.station == 'USC00519281').all() # Choose the station with the highest number of temperature observations. # Query the last 12 months of temperature observation data for this station and plot the results as a histogram import datetime as dt from pandas.plotting import table prev_year = dt.date.today() - dt.timedelta(days=365) results = session.query(Measurement.tobs).\ filter(Measurement.station == 'USC00519281').\ filter(Measurement.date >= prev_year).all() df = pd.DataFrame(results, columns=['tobs']) df.plot.hist(bins=12) plt.tight_layout() # Write a function called `calc_temps` that will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() print(calc_temps('2012-02-28', '2012-03-05')) # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. 
import datetime as dt prev_year_start = dt.date(2018, 1, 1) - dt.timedelta(days=365) prev_year_end = dt.date(2018, 1, 7) - dt.timedelta(days=365) tmin, tavg, tmax = calc_temps(prev_year_start.strftime("%Y-%m-%d"), prev_year_end.strftime("%Y-%m-%d"))[0] print(tmin, tavg, tmax) # Plot the results from your previous query as a bar chart. # Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) fig, ax = plt.subplots(figsize=plt.figaspect(2.)) xpos = 1 yerr = tmax-tmin bar = ax.bar(xpos, tmax, yerr=yerr, alpha=0.5, color='coral', align="center") ax.set(xticks=range(xpos), xticklabels="a", title="Trip Avg Temp", ylabel="Temp (F)") ax.margins(.2, .2) # fig.autofmt_xdate() fig.tight_layout() fig.show() # Calculate the rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation start_date = '2012-01-01' end_date = '2012-01-07' sel = [Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation, func.sum(Measurement.prcp)] results = session.query(*sel).\ filter(Measurement.station == Station.station).\ filter(Measurement.date >= start_date).\ filter(Measurement.date <= end_date).\ group_by(Station.name).order_by(func.sum(Measurement.prcp).desc()).all() print(results) ``` ## Optional Challenge Assignment ``` # Create a query that will calculate the daily normals # (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. 
Args: date (str): A date string in the format '%m-%d' Returns: A list of tuples containing the daily normals, tmin, tavg, and tmax """ sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)] return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all() daily_normals("01-01") # calculate the daily normals for your trip # push each tuple of calculations into a list called `normals` # Set the start and end date of the trip trip_start = '2018-01-01' trip_end = '2018-01-07' # Use the start and end date to create a range of dates trip_dates = pd.date_range(trip_start, trip_end, freq='D') # Stip off the year and save a list of %m-%d strings trip_month_day = trip_dates.strftime('%m-%d') # Loop through the list of %m-%d strings and calculate the normals for each date normals = [] for date in trip_month_day: normals.append(*daily_normals(date)) normals # Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index df = pd.DataFrame(normals, columns=['tmin', 'tavg', 'tmax']) df['date'] = trip_dates df.set_index(['date'],inplace=True) df.head() # Plot the daily normals as an area plot with `stacked=False` df.plot(kind='area', stacked=False, x_compat=True, alpha=.2) plt.tight_layout() ```
github_jupyter
<font size = "5"> **Chapter 4: [Spectroscopy](CH4-Spectroscopy.ipynb)** </font> <hr style="height:1px;border-top:4px solid #FF8200" /> # Analysis of Core-Loss Spectra <font size = "5"> **This notebook does not work in Google Colab** </font> [Download](https://raw.githubusercontent.com/gduscher/MSE672-Introduction-to-TEM/main/Spectroscopy/CH4_09-Analyse_Core_Loss.ipynb) part of <font size = "5"> **[MSE672: Introduction to Transmission Electron Microscopy](../_MSE672_Intro_TEM.ipynb)**</font> by Gerd Duscher, Spring 2021 Microscopy Facilities<br> Joint Institute of Advanced Materials<br> Materials Science & Engineering<br> The University of Tennessee, Knoxville Background and methods to analysis and quantification of data acquired with transmission electron microscopes. ## Content Quantitative determination of chemical composition from a core-loss EELS spectrum Please cite: [M. Tian et al. *Measuring the areal density of nanomaterials by electron energy-loss spectroscopy* Ultramicroscopy Volume 196, 2019, pages 154-160](https://doi.org/10.1016/j.ultramic.2018.10.009) as a reference of this quantification method. ## Load important packages ### Check Installed Packages ``` import sys from pkg_resources import get_distribution, DistributionNotFound def test_package(package_name): """Test if package exists and returns version or -1""" try: version = (get_distribution(package_name).version) except (DistributionNotFound, ImportError) as err: version = '-1' return version # pyTEMlib setup ------------------ if test_package('sidpy') < '0.0.5': print('installing sidpy') !{sys.executable} -m pip install --upgrade sidpy -q if test_package('pyTEMlib') < '0.2021.4.20': print('installing pyTEMlib') !{sys.executable} -m pip install --upgrade pyTEMlib -q # ------------------------------ print('done') ``` ### Import all relevant libraries Please note that the EELS_tools package from pyTEMlib is essential. 
``` %pylab --no-import-all notebook %gui qt # Import libraries from pyTEMlib import pyTEMlib import pyTEMlib.file_tools as ft # File input/ output library import pyTEMlib.image_tools as it import pyTEMlib.eels_tools as eels # EELS methods import pyTEMlib.interactive_eels as ieels # Dialogs for EELS input and quantification # For archiving reasons it is a good idea to print the version numbers out at this point print('pyTEM version: ',pyTEMlib.__version__) __notebook__ = 'analyse_core_loss' __notebook_version__ = '2021_04_22' ``` ## Load and plot a spectrum As an example we load the spectrum **1EELS Acquire (high-loss).dm3** from the *example data* folder. Please see [Loading an EELS Spectrum](LoadEELS.ipynb) for details on storage and plotting. First a dialog to select a file will apear. Then the spectrum plot and ``Spectrum Info`` dialog will appear, in which we set the experimental parameters. Please use the ``Set Energy Scale`` button to change the energy scale. When pressed a new dialog and a cursor will appear in which one is able to set the energy scale based on known features in the spectrum. ``` # -----Input -------# load_example = True try: main_dataset.h5_dataset.file.close() except: pass if load_example: main_dataset = ft.open_file('../example_data/EELS_STO.dm3') else: main_dataset = ft.open_file() current_channel = main_dataset.h5_dataset.parent if 'experiment' not in main_dataset.metadata: main_dataset.metadata['experiment']= eels.read_dm3_eels_info(main_dataset.original_metadata) eels.set_previous_quantification(main_dataset) # US 200 does not set acceleration voltage correctly. # comment out next line for other microscopes # current_dataset.metadata['experiment']['acceleration_voltage'] = 200000 info = ieels.InfoDialog(main_dataset) ``` ## Chemical Composition The fit of the cross-section and background to the spectrum results in the chemical composition. 
If the calibration is correct this composition is given as areal density in atoms/nm$^2$ ### Fit of Data A dialog window will open, enter the elements first (0 will open a periodic table) and press ``Fit Composition`` button (bottom right). Adjust parameters as needed and check fit by pressing the ``Fit Composition`` button again. Select the ``Region`` checkbox to see which parts of the spectrum you choose to fit. Changing the multiplier value will make a simulation of your spectrum. The ``InfoDialog``, if open, still works to change experimental parameters and the energy scale. ``` # current_dataset.metadata['edges'] = {'0': {}, 'model': {}} composition = ieels.CompositionDialog(main_dataset) ``` ### Output of Results ``` edges = main_dataset.metadata['edges'] element = [] areal_density = [] for key, edge in edges.items(): if key.isdigit(): element.append(edge['element']) areal_density.append(edge['areal_density']) print('Relative chemical composition of ', main_dataset.title) for i in range(len(element)): print(f'{element[i]}: {areal_density[i]/np.sum(areal_density)*100:.1f} %') saved_edges_metadata = edges ``` ### Log Data We write all the data to the hdf5 file associated with our dataset. In our case that is only the ``metadata``, in which we stored the ``experimental parameters`` and the ``fitting parameters and result``. 
``` current_group = main_dataset.h5_dataset.parent.parent if 'Log_000' in current_group: del current_group['Log_000'] log_group = current_group.create_group('Log_000') log_group['analysis'] = 'EELS_quantification' log_group['EELS_quantification'] = '' flat_dict = ft.flatten_dict(main_dataset.metadata) if 'peak_fit-peak_out_list' in flat_dict: del flat_dict['peak_fit-peak_out_list'] for key, item in flat_dict.items(): if not key == 'peak_fit-peak_out_list': log_group.attrs[key]= item current_group.file.flush() ft.h5_tree(main_dataset.h5_dataset.file) ``` ## ELNES The electron energy-loss near edge structure is determined by fititng the spectrum after quantification model subtraction. First smooth the spectrum (2 iterations are ususally sufficient) and then find the number of peaks you want (Can be repeated as oftern as one wants). ``` peak_dialog = ieels.PeakFitDialog(main_dataset) ``` ### Output ``` areas = [] for p, peak in peak_dialog.peaks['peaks'].items(): area = np.sqrt(2* np.pi)* peak['amplitude'] * np.abs(peak['width'] / np.sqrt(2 *np.log(2))) areas.append(area) if 'associated_edge' not in peak: peak['associated_edge']= '' print(f"peak {p}: position: {peak['position']:7.1f}, area: {area:12.3f} associated edge: {peak['associated_edge']}") #print(f'\n M4/M5 peak 2 to peak 1 ratio: {(areas[1])/areas[0]:.2f}') ``` ### Log Data ``` current_group = main_dataset.h5_dataset.parent.parent if 'Log_001' in current_group: del current_group['Log_001'] log_group = current_group.create_group('Log_001') log_group['analysis'] = 'ELNES_fit' log_group['ELNES_fit'] = '' metadata = ft.flatten_dict(main_dataset.metadata) if 'peak_fit-peak_out_list' in flat_dict: del flat_dict['peak_fit-peak_out_list'] for key, item in metadata.items(): if not key == 'peak_fit-peak_out_list': log_group.attrs[key]= item current_group.file.flush() print('Logged Data of ', main_dataset.title) for key in current_group: if 'Log_' in key: if 'analysis' in current_group[key]: print(f" {key}: 
{current_group[key]['analysis'][()]}") ``` ## Close File File needs to be closed to be used with other notebooks ``` main_dataset.h5_dataset.file.close() ``` ## Navigation
github_jupyter
``` # Método para resolver las energías y eigenfunciones de un sistema cuántico numéricamente por Teoría de Pertubaciones # Modelado Molecular 2 # By: José Manuel Casillas Martín 22-oct-2017 import numpy as np from sympy import * from sympy.physics.qho_1d import E_n, psi_n from sympy.physics.hydrogen import E_nl, R_nl from sympy import init_printing; init_printing(use_latex = 'mathjax') from scipy import integrate from scipy.constants import hbar, m_e, m_p, e from mpmath import spherharm from numpy import inf, array import numpy as np import matplotlib.pyplot as plt import traitlets from IPython.display import display from ipywidgets import Layout, Box, Text, Dropdown, Label, IntRangeSlider, IntSlider, RadioButtons ``` <h1><center>Teoría de Perturbaciones</center></h1> Consiste en resolver un sistema perturbado(se conoce la solución al no perturbado), y donde el interés es conocer la contribución de la parte perturbada $H'$ al nuevo sistema total. $$ H = H^{0}+H'$$ La resolución adecuada del problema, depende en gran parte, de una correcta elección de $H'$. 
``` form_item_layout = Layout(display='flex',flex_flow='row',justify_content='space-between') PType=Dropdown(options=['Particle in a one-dimensional box', 'Harmonic oscilator', 'Hydrogen atom (Helium correction)']) Pert=Text() Rang=IntRangeSlider(min=0, max=20, step=1, disabled=False, continuous_update=False, orientation='horizontal',\ readout=True, readout_format='d') M=Text() Correc=Dropdown(options=['1', '2']) hbarra=Dropdown(options=[1, 1.0545718e-34]) form_items = [ Box([Label(value='Problem'),PType], layout=form_item_layout), Box([Label(value='Perturbation'),Pert], layout=form_item_layout), Box([Label(value='Correction order'),Correc], layout=form_item_layout), Box([Label(value='n Range'),Rang], layout=form_item_layout), Box([Label(value='Mass'),M], layout=form_item_layout), Box([Label(value='Hbar'),hbarra], layout=form_item_layout),] form = Box(form_items, layout=Layout(display='flex',flex_flow='column',border='solid 2px',align_items='stretch',width='40%')) form ``` En esta caja interactiva llena los datos del problema que deseas resolver. # Nota 1: Es recomendable usar unidades atómicas de Hartree para eficientar los cálculos. 1 u.a. (energía)= 27.211eV. # Nota 2: Para la partícula en una caja unidimensional es recomendable que n sea mayor o igual a 1. ## Nota 3: Para la correción a la energía del átomo de Helio sólo es necesario seleccionar el problema, automáticamente se calcula la correción a primer orden y no se corrigen las funciones de onda. 
``` Problem=PType.value form_item_layout = Layout(display='flex',flex_flow='row',justify_content='space-between') L=Text() W=Text() atomic_number=RadioButtons(options=['1 (Show Hydrogen energies)','2 (Correct Helium first energy)'],disabled=False) if Problem=='Particle in a one-dimensional box': form_items = [Box([Label(value='Large of box'),L], layout=form_item_layout)] if Problem=='Harmonic oscilator': form_items = [Box([Label(value='Angular Fr'),W], layout=form_item_layout)] if Problem=='Hydrogen atom (Helium correction)': form_items = [Box([Label(value='Atomic number'),atomic_number], layout=form_item_layout)] form = Box(form_items, layout=Layout(display='flex',flex_flow='column',border='solid 2px',align_items='stretch',width='40%')) form # Variables que se utilizarán # x=variable de integracion, l=largo del pozo, m=masa del electrón, w=frecuencia angular # n=número cuántico principal, Z=Número atómico, q=número cuántico angular(l) var('x theta phi') var('r1 r2', real=True) var('l m hbar w n Z', positive=True, real=True) # Perturbación if Pert.value!='': H_p=sympify(Pert.value) h_p=eval(Pert.value) else: H_p=0 h_p=0 # Constantes h=hbarra.value a0=5.2917721067e-11 if M.value!='': mass=float(eval(M.value)) else: mass=1 # Energías y funciones que se desea corregir n_inf=min(Rang.value) n_sup=max(Rang.value) if Problem=='Particle in a one-dimensional box': if L.value=='': large=1 else: large=float(eval(L.value)) omega=0 # Energías del pozo de potencial infinito k=n*pi/l En=hbar**2*k**2/(2*m) # Funciones de onda del pozo de potencial infinito Psin=sqrt(2/l)*sin(n*pi*x/l) # Límites del pozo definido de 0 a l para sympy li_sympy=0 ls_sympy=l # Mismo limites para scipy li_scipy=0 ls_scipy=large if Problem=='Harmonic oscilator': large=0 if W.value=='': omega=1 else: omega=float(eval(W.value)) # Energías del oscilador armónico cuántico En=E_n(n,w) # Funciones de onda del oscilador armónico cuántico Psin=psi_n(n,x,m,w) # Límites del pozo definido de -oo a oo para sympy 
li_sympy=-oo ls_sympy=oo # Límites del pozo definido de -oo a oo para scipy li_scipy=-inf ls_scipy=inf if Problem=='Hydrogen atom (Helium correction)': if atomic_number.value=='1 (Show Hydrogen energies)': z=1 if atomic_number.value=='2 (Correct Helium first energy)': z=2 large=0 omega=0 # Energías del átomo hidrogenoide En=z*E_nl(n,z) # Funciones de onda del átomo de hidrógeno # Número cuántico l=0 q=0 # La variable l ya esta siendo utilizada para el largo de la caja por ello se sustituyo por q Psin=(R_nl(n,q,r1,z)*R_nl(n,q,r2,z)) # Límites del átomo de hidrógeno de 0 a oo para sympy li_sympy=0 ls_sympy=oo # Límites del átomo de hidrógeno de 0 a oo para scipy li_scipy=0 ls_scipy=inf ``` Para sistemas no degenerados, la corrección a la energía a primer orden se calcula como $$ E_{n}^{(1)} = \int\psi_{n}^{(0)*} H' \psi_{n}^{(0)}d\tau$$ ** Tarea 1 : Programar esta ecuación si conoces $H^{0}$ y sus soluciones. ** ``` def correcion_1st_order_Energy(E_n,Psi_n,H_p,li,ls): E1_n=Integral(Psi_n*(H_p)*Psi_n,(x,li,ls)).doit() return(E_n+E1_n) # Correción de la energía a primer orden E=[] Eev=[] Ec1=[] if Problem=='Particle in a one-dimensional box' or Problem=='Harmonic oscilator': for i in range(n_inf,n_sup+1): E.append(En.subs({n:i})) Eev.append(E[i-n_inf].subs({m:mass, l:large, hbar:h}).evalf()) Ec1.append(correcion_1st_order_Energy(En.subs({n:i}),Psin.subs({n:i}),H_p,li_sympy,ls_sympy)) if Problem=='Hydrogen atom (Helium correction)': for i in range(n_inf,n_sup+1): E.append(En.subs({n:i})) Eev.append(E[i-n_inf]) if z==2: integral_1=Integral(Integral((16*z**6*r1*r2**2*exp(-2*z*(r1+r2))),(r2,0,r1)),(r1,0,oo)).doit() integral_2=Integral(Integral((16*z**6*r1**2*r2*exp(-2*z*(r1+r2))),(r2,r1,oo)),(r1,0,oo)).doit() integral_total=(integral_1+integral_2) Ec1.append(E[0]+integral_total) ``` Y la corrección a la función de onda, también a primer orden, se obtiene como: $$ \psi_{n}^{(1)} = \sum_{m\neq n} \frac{\langle\psi_{m}^{(0)} | H' | \psi_{n}^{(0)} \rangle}{E_{n}^{(0)} - 
E_{m}^{(0)}} \psi_{m}^{(0)}$$ **Tarea 2: Programar esta ecuación si conoces $H^{0}$ y sus soluciones. ** ``` # Correción de las funciones a primer orden if Pert.value!='': if Problem=='Particle in a one-dimensional box' or Problem=='Harmonic oscilator': Psi_c=[] integrals=np.zeros((n_sup+1,n_sup+1)) for i in range(n_inf,n_sup+1): a=0 for j in range(n_inf,n_sup+1): if i!=j: integ= lambda x: eval(str(Psin.subs({n:j})*(h_p)*Psin.subs({n:i}))).subs({m:mass,l:large,w:omega,hbar:h}) integrals[i,j]=integrate.quad(integ,li_scipy,ls_scipy)[0] cte=integrals[i,j]/(En.subs({n:i,m:mass,l:large})-En.subs({n:j,m:mass,l:large})).evalf() a=a+cte*Psin.subs({n:j}) Psi_c.append(Psin.subs({n:i})+a) ``` **Tarea 3: Investigue las soluciones a segundo orden y también programe las soluciones. ** Y la corrección a la energía a segundo orden, se obtiene como: $$ E_{n}^{(2)} = \sum_{m\neq n} \frac{|\langle\psi_{m}^{(0)} | H' | \psi_{n}^{(0)} \rangle|^{2}}{E_{n}^{(0)} - E_{m}^{(0)}} $$ ``` # Correción a la energía a segundo orden if Pert.value!='': if Problem=='Particle in a one-dimensional box' or Problem=='Harmonic oscilator': if Correc.value=='2': Ec2=[] for i in range(n_inf,n_sup+1): a=0 for j in range(n_inf,n_sup+1): if i!=j: cte=((integrals[i,j])**2)/(En.subs({n:i,m:mass,l:large,hbar:h})-En.subs({n:j,m:mass,l:large,hbar:h})).evalf() a=a+cte Ec2.append(Ec1[i-n_inf]+a) ``` **A continuación se muestran algunos de los resultados al problema resuelto** Las energías sin perturbación son: ``` E ``` La correción a primer orden de las energías son: ``` Ec1 ``` Si seleccionaste en los parámetros iniciales una correción a segundo orden entonces... 
Las correciones a la energía a segundo orden son: ``` Ec2 ``` Ahora vamos con la función de onda $(\psi)$ ``` form_item_layout = Layout( display='flex', flex_flow='row', justify_content='space-between') Graph=IntSlider(min=n_inf, max=n_sup, step=1, disabled=False, continuous_update=False, orientation='horizontal',\ readout=True, readout_format='d') form_items = [ Box([Label(value='What function do you want to see?'), Graph], layout=form_item_layout)] form = Box(form_items, layout=Layout( display='flex', flex_flow='column', border='solid 2px', align_items='stretch', width='40%')) form ``` La función de onda original es: ``` Psin.subs({n:Graph.value}) ``` La correción a primer orden a la función de onda (utilizando todas las funciones en el rango seleccionado) es: ``` Psi_c[Graph.value-n_inf] ``` Vamos a graficarlas para verlas mejor... La función de onda original es: ``` if Problem=='Particle in a one-dimensional box': plot(eval(str(Psin)).subs({n:Graph.value,m:mass,l:large,w:omega,hbar:h}),xlim=(li_scipy,ls_scipy),\ title='$\psi_{%d}$'%Graph.value) if Problem=='Harmonic oscilator': plot(eval(str(Psin)).subs({n:Graph.value,m:mass,l:large,w:omega,hbar:h}),xlim=(-10*h/(mass*omega),10*h/(mass*omega)),\ title='$\psi_{%d}$'%Graph.value) if Problem=='Hydrogen atom (Helium correction)': print('Densidad de probabilidad para un electrón') plot(eval(str((4*pi*x**2*R_nl(Graph.value,q,x,z)**2))),xlim=(0,10),ylim=(0,20/Graph.value), title='$\psi_{%ds}$'%Graph.value) print('Tome en cuenta que debido a la dificultad para seleccionar los límites de la gráfica se muestran bien los primeros\n\ 3 estados. 
A partir de ahí visualizar la gráfica se complica.') ``` La corrección a la función de onda es: ``` if Problem=='Particle in a one-dimensional box': if Pert.value!='': plot(eval(str(Psi_c[Graph.value-n_inf])).subs({n:Graph.value,m:mass,l:large,w:omega,hbar:h}),\ xlim=(li_scipy,ls_scipy),title='$\psi_{%d}$'%Graph.value) if Pert.value=='': print('No se ingreso ninguna perturbación') if Problem=='Harmonic oscilator': if Pert.value!='': plot(eval(str(Psi_c[Graph.value-n_inf])).subs({n:Graph.value,m:mass,l:large,w:omega,hbar:h}),\ xlim=(-10*h/(mass*omega),10*h/(mass*omega)),title='$\psi_{%d}$' %Graph.value) if Pert.value=='': print('No se ingreso ninguna perturbación') if Problem=='Hydrogen atom (Helium correction)': print('Este programa no corrige las fucniones de un átomo hidrogenoide') ``` **Tarea 4. Resolver el átomo de helio aplicando los programas anteriores.** Para el cálculo a las correciones del átomo de Helio se tomó en cuenta lo siguiente... La función de onda del átomo de Helio puede ser representada como: $$ \psi_{nlm} = \psi(r1)_{nlm} \psi(r2)_{nlm}$$ Donde, para el estado fundamental: $$ \psi(r_{1}.r_{2})_{100} = \frac{Z^{3}}{\pi a_{0}^{3}} e^{\frac{-Z}{a_{0}}(r_{1}+r_{2})}$$ Y la perturbación sería el término de repulsión entre los dos electrones, es decir: $$ H'= \frac{e^{2}}{r_{12}}=\frac{e^{2}}{|r_{1}-r_{2}|}$$ Finalmente la correción a primer orden de la energía sería: $$ E^{(1)}= \langle\psi_{n}^{(0)} | H' | \psi_{n}^{(0)} \rangle =\frac{Z^{6}e^{2}}{\pi^{2} a_{0}^{6}} \int_{0}^{2\pi}\int_{0}^{2\pi}\int_{0}^{\pi}\int_{0}^{\pi}\int_{0}^{\infty}\int_{0}^{\infty} \frac{e^{\frac{-2Z}{a_{0}}(r_{1}+r_{2})}}{r_{12}} r_{1}^{2}r_{2}^{2}sen{\theta_{1}}sen{\theta_{2}} dr_{2} dr_{1} d\theta_{2} d\theta_{1} d\phi_{2} d\phi_{1}$$ Se utiliza una expansión del termino de repulsión con los armónicos esféricos y se integra la parte angular. 
Una vez hecho eso, la integral queda expresada de la siguiente manera: $$ E^{(1)}= \frac{16Z^{6}e^{2}}{a_{0}^{6}} \left[\int_{0}^{\infty} r_{1}^{2} e^{\frac{-2Z}{a_{0}}r_{1}} \left(\int_{0}^{r_{1}} \frac{r_{2}^{2}}{r_{1}} e^{\frac{-2Z}{a_{0}}r_{2}} dr_{2}+\int_{r_{1}}^{\infty}r_{2} e^{\frac{-2Z}{a_{0}}r_{2}}dr_{2}\right) dr_{1} \right]$$ Esta fue la integral que se programó para hacer la correción a la energía del Helio **Tarea 5: Método variacional-perturbativo. ** Este método nos permite estimar de forma precisa $E^{(2)}$ y correcciones perturbativas de la energía de órdenes más elevados para el estado fundamental del sistema, sin evaluar sumas infinitas. Ver ecuación 9.38 del libro. $$ \langle u | H^{0} - E_{g}^{(0)} | u \rangle + \langle u | H' - E_{g}^{(1)} | \psi_{g}^{(0)} \rangle +\langle\psi_{g}^{(0)} | H' - E_{g}^{(1)} | u \rangle \geq E_{g}^{(2)} $$ Donde: u, es cualquier función que se comporte bien y satisfaga condiciones de frontera. Por lo general, u tiene un parámetro variacional que permite minimizar el lado izquierdo de la ecuación para estimar $E_{g}^{(2)} $. La función u resulta ser una aproximación a $\psi_{g}^{(1)}$, por lo que se puede utilizar está misma función para estimar $E_{g}^{(3)}$ y seguir haciendo iteraciones para hacer correciones de orden superior tanto a la energía como a la función de onda. Es necesario meter parámetros de optimización a las funciones de prueba para que tenga sentido intentar programar esto. Esto nos limita a usar Sympy para resolver la integral simbólicamente. Entonces, estamos limitados a las capacidades de esta librería para resolver las integrales y a la capacidad de nuestro procesador. A continuación se muestra un código que, aprovechando los datos introducidos anteriormente, encuentra por medio del método variacional-perturbativo la correción a segundo orden de la energía y a primer orden de la función de onda. 
``` form_item_layout = Layout(display='flex',flex_flow='row',justify_content='space-between') F_prueba=Text() Ran_n=IntSlider(min=n_inf, max=n_sup, step=1, disabled=False, continuous_update=False, orientation='horizontal',\ readout=True, readout_format='d') Correcc=RadioButtons(options=[2],disabled=False) form_items = [ Box([Label(value='Test function'),F_prueba], layout=form_item_layout), Box([Label(value='Correction order'),Correcc], layout=form_item_layout), Box([Label(value='Function to correct'),Ran_n], layout=form_item_layout),] form = Box(form_items, layout=Layout(display='flex',flex_flow='column',border='solid 2px',align_items='stretch',width='40%')) form ``` Para la función de prueba se espera que el usuario introduzca una función que sea cuadrado-integrable y que sea compatible en las fronteras con el tipo de problema que se esta solucionando. Además, puede intruducir una constante de optimización (utilice "c") ``` # Variables que se utilizarán # c y d=constantes de optimización var('c')#, real=True) u=eval(F_prueba.value) order=Correcc.value n_n=Ran_n.value if Problem=='Particle in a one-dimensional box': V=0 if Problem=='Harmonic oscilator': V=(m/2)*w**2*x**2 if Problem=='Particle in a one-dimensional box' or Problem=='Harmonic oscilator': integrando_a=(u)*((-hbar**2)/(2*m)*diff(u,x,2))+(u)*V*(u)-(u)*En.subs({n:n_n})*(u) integrando_b=(u)*h_p*(Psin.subs({n:n_n}))-(u)*(Ec1[n_n-n_inf]-En.subs({n:n_n}))*(Psin.subs({n:n_n})) integral_a=Integral(eval(str(integrando_a)),(x,li_sympy,ls_sympy)).doit() integral_b=Integral(eval(str(integrando_b)),(x,li_sympy,ls_sympy)).doit() integral_T=integral_a+2*integral_b f_opt=diff(integral_T,c,1) c2=solve(f_opt,c) E_c2=Ec1[n_n-n_inf]+integral_T.subs({c:c2[0]}) Psi_c1=(Psin.subs({n:n_n})+u.subs({c:c2[0]})) ``` Se considero que para fines de esta tarea que: $$\langle u | H' - E_{g}^{(1)} | \psi_{g}^{(0)} \rangle =\langle\psi_{g}^{(0)} | H' - E_{g}^{(1)} | u \rangle$$ Lo cual se cumple cuando $H'$ es una función sin operadores 
diferenciales, y además, u y $\psi_{g}^{(0)}$ son funciones reales. Las correciones a la energía son: ``` E[n_n-n_inf] Ec1[n_n-n_inf] E_c2 ``` Las correción a la función de onda: ``` Psin.subs({n:n_n}) Psi_c1 ``` Veamos ahora las gráficas de estas funciones... La función de onda original es: ``` if Problem=='Particle in a one-dimensional box': plot(eval(str(Psin)).subs({n:n_n,m:mass,l:large,w:omega,hbar:h}),xlim=(li_scipy,ls_scipy),\ title='$\psi_{%d}$'%n_n) if Problem=='Harmonic oscilator': plot(eval(str(Psin)).subs({n:n_n,m:mass,l:large,w:omega,hbar:h}),xlim=(-10*h/(mass*omega),10*h/(mass*omega)),\ title='$\psi_{%d}$'%n_n) ``` Su correción por medio del método variacional-perturbativo es: ``` if Problem=='Particle in a one-dimensional box': if Pert.value!='': plot(Psi_c1.subs({m:mass,l:large,w:omega,hbar:h}),\ xlim=(li_scipy,ls_scipy),ylim=(-1.5,1.5),title='$\psi_{%d}$'%n_n) if Pert.value=='': print('No se ingreso ninguna perturbación') if Problem=='Harmonic oscilator': if Pert.value!='': plot(Psi_c1.subs({m:mass,l:large,w:omega,hbar:h}),\ xlim=(-10*h/(mass*omega),10*h/(mass*omega)),title='$\psi_{%d}$' %n_n) ``` **Tarea 6. Revisar sección 9.7. ** Inicialmente a mano, y en segunda instancia favor de intentar programar sección del problema, i.e. integral de Coulomb e integral de intercambio. Para comenzar a solucionar este problema, es necesario ser capaz de escribir el termino de repulsión que aparece en la fuerza coulombiana ($\frac {1} {r_{12} }$) en términos de los armónicos esféricos de la siguiente manera: $$ \frac {1} {r_{12} } = \sum_{l=0}^{\infty} \sum_{m=-l}^{l} \frac{4\pi}{2l+1} \frac{r_{<}^{l}}{r_{>}^{l+1}} \left[Y_{l}^{m}(\theta_{1},\phi_{1}) \right]^{*} Y_{l}^{m}(\theta_{2},\phi_{2}) $$ Se tiene el problema de que para programar esta ecuación los armónicos esféricos no están programados de forma simbólica, así como tampoco estan programados los polinomios asociados de Legendre. 
Se podría intentar programar las integrales de intercambio y de Coulomb de forma numérica
Integral_Coulumb1=Integral_Coulumb1+Integral(Integral(r1**2*r2**2*Psi1**2*Psi2**2*\ a[n1-1][l1][l1].subs({rmayor:r1,rmenor:r2,lq:0}),(r2,0,r1)),(r1,0,oo)).doit() Integral_Coulumb2=Integral_Coulumb2+Integral(Integral(r1**2*r2**2*Psi1**2*Psi2**2*\ a[n1-1][l1][l1].subs({rmayor:r2,rmenor:r1,lq:0}),(r2,r1,oo)),(r1,0,oo)).doit() Integral_CoulumbT=Integral_Coulumb1+Integral_Coulumb2 else: Integral_CoulumbT=0 if l1!=l2: Integral_Intercambio1=Integral(Integral(r1**2*r2**2*Psi1*Psi2*Psi1.subs({r1:r2})*Psi2.subs({r2:r1})*\ a[n1-1][l1][l1].subs({rmayor:r1,rmenor:r2,lq:0}),(r2,0,r1)),(r1,0,oo)).doit() Integral_Intercambio2=Integral(Integral(r1**2*r2**2*Psi1*Psi2*Psi1.subs({r1:r2})*Psi2.subs({r2:r1})*\ a[n1-1][l1][l1].subs({rmayor:r2,rmenor:r1,lq:0}),(r2,r1,oo)),(r1,0,oo)).doit() Integral_IntercambioT=Integral_Intercambio1+Integral_Intercambio2 else: Integral_IntercambioT=0 # Programa funciona para 1s # Encontrar la manera de hacer matrices nxlxm que puedan ser llenadas con variables para posteriormente revisar # como encontrar las integrales aprovechando ortogonalidad de los armónicos esféricos. # Una opción para la matriz puedes hacer una lista y hacer reshape cada ciertas iteraciones en el ciclo for ``` Falta verificar la ortogonalidad de los armónicos esféricos y como aprovecharla para hacer únicamente las integrales radiales pero aún no esta listo.
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
``` # Film yorumları ile metin sınıflandırma <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/tr/r1/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Google Colab’da Çalıştır</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/tr/r1/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />GitHub'da Kaynağı Görüntüle</a> </td> </table> Note: Bu dökümanlar TensorFlow gönüllü kullanıcıları tarafından çevirilmiştir. Topluluk tarafından sağlananan çeviriler gönüllülerin ellerinden geldiğince güncellendiği için [Resmi İngilizce dökümanlar](https://www.tensorflow.org/?hl=en) ile bire bir aynı olmasını garantileyemeyiz. Eğer bu tercümeleri iyileştirmek için önerileriniz var ise lütfen [tensorflow/docs](https://github.com/tensorflow/docs) havuzuna pull request gönderin. Gönüllü olarak çevirilere katkıda bulunmak için [docs-tr@tensorflow.org](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-tr) listesi ile iletişime geçebilirsiniz. Bu yardımcı döküman, yorum metinlerini kullanarak film yorumlarını *olumlu* veya *olumsuz* olarak sınıflandırmaktadır. Bu örnek, yoğun olarak kullanılan ve önemli bir makina öğrenmesi uygulaması olan *ikili* veya *iki kategorili sınıflandırma*' yı kapsamaktadır. Bu örnekte, [Internet Film Veritabanı](https://www.imdb.com/) sitesinde yer alan 50,000 film değerlendirme metnini içeren [IMDB veri seti](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) 'ni kullancağız. Bu veri seti içerisindeki 25,000 yorum modelin eğitimi için, 25,000 yorum ise modelin testi için ayrılmıştır. Eğitim ve test veri setleri eşit miktarda olumlu ve olumsuz yorum içerecek şekilde dengelenmiştir. 
Bu yardımcı döküman, Tensorflow'da modellerin oluşturulması ve eğitilmesinde kullanına yüksek-seviye API [tf.keras](https://www.tensorflow.org/r1/guide/keras) 'ı kullanır. `tf.keras` ile ileri seviye metin sınıflandımayı öğrenmek için [MLCC Metin Sınıflandırma ](https://developers.google.com/machine-learning/guides/text-classification/)'a göz atabilirsiniz. ``` # keras.datasets.imdb is broken in 1.13 and 1.14, by np 1.16.3 !pip install tf_nightly from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf from tensorflow import keras import numpy as np print(tf.__version__) ``` ## IMDB veri setini indirelim IMDB veri seti TensorFlow ile birlikte bütünleşik olarak gelmektedir. Yorumların kelime diziliş sıraları, her bir sayının bir kelimeyi temsil ettiği sıralı bir tam sayı dizisine çevrilerek veri seti ön işlemden geçirilmiştir. Aşağıdaki kodlar, IMDB veri setini bilgisayarınıza indirir (eğer daha önceden indirme yapmışsanız, önbellekteki veri kullanılır) : ``` imdb = keras.datasets.imdb (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000) ``` `num_words=10000` değişkeni eğitim veri setinde en sık kullanılan 10,000 kelimeyi tutar, az kullanılan kelimeleri veri boyutunun yönetilebilir olması için ihmal eder. ## Veriyi inceleyelim Veri formatını aşağıdaki kodlar yardımı ile birlikte inceleyelim. Veri seti, ön işlem uygulanmış şekilde gelmektedir: tüm film yorum örnekleri, her bir sayının yorumundaki bir kelimeye denk geldiği tam sayı dizisi olarak gelmektedir. Tüm etiketler 0 veya 1 değerine sahiptir (0 olumsuz değerlendirme, 1 olumlu değerlendirme). ``` print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels))) ``` Yorum metinleri, her bir sayının sözlükte yer alan bir kelimeye denk geldiği sayı dizisine çevrilmiştir. İlk yorum metni, aşağıdaki gibidir: ``` print(train_data[0]) ``` Farklı film yorumlarının uzunlukları farklı olabilir. 
Aşağıdaki kod, ilk ve ikinci yorumda yer alan kelime sayılarını göstermektedir. Sinir ağlarında girdi boyutlarının aynı olması gerekmektedir, bu problemi daha sonra çözeceğiz. ``` len(train_data[0]), len(train_data[1]) ``` ### Tam sayıları kelimelere geri çevirelerim Tam sayıları metin'e çevirme işlemini bilmemiz, bazı durumlarda işimize yarayabilir. Bunun için bir yardımcı fonksiyon oluşturacağız. Bu fonksiyon, tam sayı-karakter eşleştirmesi içeren bir sözlük nesnesini sorguyabilmemizi sağlayacak: ``` # A dictionary mapping words to an integer index word_index = imdb.get_word_index() # İlk indisler rezervedir word_index = {k:(v+3) for k,v in word_index.items()} word_index["<PAD>"] = 0 word_index["<START>"] = 1 word_index["<UNK>"] = 2 # unknown word_index["<UNUSED>"] = 3 reverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) def decode_review(text): return ' '.join([reverse_word_index.get(i, '?') for i in text]) ``` 'decode_review' fonksiyonunu kullanarak ilk yorum metnini şimdi ekranda gösterebiliriz: ``` decode_review(train_data[0]) ``` ## Veriyi hazırlayalım Yorumlar -tam sayı dizileri- sinir ağına beslenmeden önce ilk olarak tensor yapısına çevrilmelidir. Bu çevirme işlemi birkaç farklı şekilde yapabilir: * Bu ilk yöntemde, one-hot encoding işlemine benzer şekilde, tam sayı dizileri kelimelerin mevcut olup olmamasına göre 0 ve 1 ler içeren, vektörlere çevrilir. Örnek olarak, [3, 5] dizisini vektör'e dönüştürdüğümüzde, bu dizi 3üncü ve 5inci indeksleri dışında tüm değerleri 0 olan 10,000 boyutlu bir vektor'e dönüşür. Sonrasında, ağımızın ilk katmanını floating point vektor verisini işleyebilen yoğun katman (dense layer) olarak oluşturabiliriz. Bu yöntem, 'num_words * num_reviews' boyutlu bir matris oluşturduğumuz için, yoğun hafıza kullanımına ihtiyaç duyar. * Alternatif olarak, tüm dizileri aynı boyutta olacak şekilde doldurabiliriz. Sonrasında 'max_length * max_review' boyutlu bir tam sayı vektorü oluşturabiliriz. 
Son olarak, bu boyuttaki vektörleri işleyebilen gömülü katmanı, ağımızın ilk katmanı olarak oluşturabiliriz. Bu örnekte ikinci yöntem ile ilerleyeceğiz. Film yorumlarımızın aynı boyutta olması gerektiği için, yorum boyutlarını standart uzunluğa dönüştüren [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) fonksiyonunu kullanacağız: ``` train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding='post', maxlen=256) test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=256) ``` Şimdi, ilk yorum örneklerinin uzunluklarına birlikte bakalım: ``` len(train_data[0]), len(train_data[1]) ``` Ve ilk yorumu (doldurulmuş şekliyle) inceleyelim: ``` print(train_data[0]) ``` ## Modeli oluşturalım Sinir ağları, katmanların birleştirilmesiyle oluşturulur. Bu noktada, modelin yapısıyla ilgili iki temel karar vermemiz gerekmektedir: * Modeli oluşturuken kaç adet katman kullanacağız? * Her bir katmanda kaç adet *gizli birim* (hidden units) kullanacağız? Bu örnekte modelimizin girdi verisi, kelime indekslerini kapsayan bir tam sayı dizisidir. Tahmin edilecek etiket değerleri 0 ve 1'dir. Problemimiz için modelimizi oluşturalım: ``` # Girdiler film yorumları için kullanılan kelime sayısıdır (10,000 kelime) vocab_size = 10000 model = keras.Sequential() model.add(keras.layers.Embedding(vocab_size, 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.summary() ``` Sınıflandırıcı modelimizi oluşturmak için katmanlar sıralı bir şekilde birleştirilmiştir: 1. İlk katmanımız 'gömülü-embedding' katmandır. Bu katman tam sayı olarak şifrelenmiş sözcük grubu içerisinden kelime değerlerini alıp, her bir kelime indeksi için bu değeri gömülü vektör içerisinde arar. 
Bu vektörler modelin eğitimi sırasında öğrenilirler ve çıktı dizisine bir boyut eklerler. Sonuç olarak boyutlar '(batch, sequence, embedding)' şeklinde oluşur: 2. Sonrasında, `GlobalAveragePooling1D` katmanı, her bir yorum örneği için, ardaşık boyutların ortalamasını alarak sabit uzunlukta bir çıktı vektörü oluştur. Bu işlem, en basit şekliyle, modelimizin faklı boyutlardaki girdileri işleyebilmesini sağlar. 3. Bu sabit boyutlu çıktı vektörü, 16 gizli birim (hidden units) içeren tam-bağlı (fully-connected) yoğun katman'a beslenir. 4. Son katman, tek bir çıktı düğümü içeren yoğun bağlı bir katmandır. 'sigmoid' aktivasyon fonksiyonunu kullanarak, bu düğümün çıktısı 0 ile 1 arasında, olasılık veya güven değerini temsil eden bir değer alır. ### Gizli birimler (Hidden units) Yukarıdaki model, girdi ve çıktı arasında iki adet ara veya "gizli" katman içerir. Çıktıların sayısı (birimler, düğümler veya neronlar), mevcut katman içerisinde yapılan çıkarımların boyutudur. Başka bir ifade ile, ağın öğrenirken yapabileceği ara çıkarım miktarını, katmanın çıktı boyutu belirler. Eğer model fazla gizli birim (daha fazla boyutta çıkarım alanı) veya fazla katmana sahipse, model daha kompleks çıkarımlar yapabilir. Bu durumda daha yoğun hesaplama gücüne ihtiyaç duyulur. Bununla birlikte, modelimiz problemin çözümü için gerekli olmayacak derecede çıkarımlar yaparak eğitim verisi ile çok iyi sonuçlar verse de, test verisinde aynı oranda başarılı olmayabilir. Buna *aşırı uyum - overfitting* denir, bu kavramı daha sonra tekrar inceleyeceğiz. ### Kayıp fonksiyonu ve optimize edici Modelimizin eğitilmesi için bir kayıp fonksiyonuna ve optimize ediciye ihitiyacımız vardır. Problemimiz, film yorumlarını olumlu ve olumsuz olarak sınıflandırmak (yani ikili siniflandirma problemi) olduğu için, 'binary_crossentropy' kayıp fonksiyonunu kullanacağız. 
Bu kayıp fonksiyonu tek seçeneğimiz olmasa da, örneğin 'mean_squared_error' kayıp fonksiyonunu da kullanabilirdik, 'binary_crossentropy' kayıp fonksiyonu, olasılık dağılımları (kesin referans ile tahmin edilen olaralık dağılımı) arasındaki farkı ölçerek, olasılık hesaplamaları için daha iyi sonuç verir. Daha sonra, regrasyon problemlerini incelediğimizde (yani bir evin fiyatını tahmin etmek için), 'mean squared error' gibi diğer kayıp fonksiyonlarını nasıl kullanabileceğimizi göreceğiz. Şimdi, kayıp fonksiyonu ve optimize ediciyi kullanarak modelimizi yapılandıralım: ``` model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc']) ``` ## Doğrulama veri setini oluşturalım Eğitim sürecinde, daha önce görmediği veriler ile modelin doğrulunu kontrol etmek isteriz. *Doğrulama veri seti* oluşturmak için eğitim veri seti içerisinden 10,000 yorum ayıralım. (Doğrulama için neden test veri setini şimdi kullanmıyoruz? Bunun nedeni modeli oluşturulması ve düzenlenmesi için sadece eğitim veri setini kullanmak istememizdir. Modelimiz oluşup, eğitildikten sonra, test verisini modelimizin doğruluğunu değerlendirmek için kullanacağız). ``` x_val = train_data[:10000] partial_x_train = train_data[10000:] y_val = train_labels[:10000] partial_y_train = train_labels[10000:] ``` ## Modelin eğitilmesi Modeli, her bir mini-batch te 512 yorum örneği olacak şekilde 40 epoch döngüsü ile eğitelim. 'x_train' ve 'y_train' tensorlarını kullanarak tüm yorumları bu 40 iterasyon ile kapsıyoruz. Eğitim süresince, doğrulama veri setini kullanarak modelin kayıp fonksiyon değerini ve doğruluğunu gözlemleyelim: ``` history = model.fit(partial_x_train, partial_y_train, epochs=40, batch_size=512, validation_data=(x_val, y_val), verbose=1) ``` ## Modeli değerlendirelim Ve modelin nasıl performans gösterdiğini görelim. Bunun için iki değer kullanacağız. Kayıp (hatayı temsil eden sayı, düşük değerler daha iyi anlamına gelmektedir) ve doğruluk değeri. 
``` results = model.evaluate(test_data, test_labels) print(results) ``` Bu oldukça basit yöntem ile %87 gibi bir doğruluk değeri elde ediyoruz. Daha ileri yöntemler ile modelimiz %95'e kadar çıkan doğruluk sonuçları verebilir. ## Doğruluk ve kayıp değerlerinin zamana göre değişimini veren bir grafik oluşturalım `model.fit()` methodu eğitim sürecinde olan biten herşeyi görebileceğimiz 'History' sözlük nesnesi oluşturur: ``` history_dict = history.history history_dict.keys() ``` Grafiğimiz için 4 adet girdimiz mevcut: eğitim ve doğrulama olmak üzere, gözlemlenen metrikler (kayıp ve doğruluk değeri) için birer değer mevcuttur. Bu değerleri, eğitim ve doğrulama kayıplarını, aynı şekilde doğruluk değerlerini karşılaştırmak için grafik üzerine çizdireceğiz: ``` import matplotlib.pyplot as plt acc = history_dict['acc'] val_acc = history_dict['val_acc'] loss = history_dict['loss'] val_loss = history_dict['val_loss'] epochs = range(1, len(acc) + 1) # "bo", "mavi nokta"'nın kısaltmasıdır plt.plot(epochs, loss, 'bo', label='Training loss') # b, "düz mavi çizgi"'nin kısaltmasıdır plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() plt.clf() # grafiğin görüntüsünü temizleyelim plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() plt.show() ``` Grafikte noktalı çizgiler eğitim kayıp ve doğruluk değerlerini temsil etmektedir. Aynı şekilde, düz çizgiler doğrulama kayıp ve doğruluk değerlerini temsil etmektedir. Eğitim kayıp değerleri her bir epoch iterasyonuyla *düşerken*, eğitim doğruluk değerlerinin *arttığını* görebilirsiniz. Gradient descent optimizasyonu, her bir iterasyonda belirli bir oranda değerleri minimize ettiği için, bu beklenen bir durumdur. 
Aynı durum doğrulama kayıp ve doğruluk değerleri için geçerli değildir. Görüldüğü gibi doğrulama değerleri, 20nci epoch iterasyonunda en iyi değerlere ulaşmaktadır. Bu durum aşırı uyuma bir örnektir: modelin eğitim veri kümesiyle, daha önceden hiç görmediği verilere göre daha iyi sonuç vermesi durumu. Bu noktadan sonra model gereğinden fazla optimize edilir ve eğitim veri setine özgü, test verisine genellenemeyen çıkarımları öğrenir. Örneğimizdeki bu özel durum nedeniyle, gözlemlemiş olduğumuz fazla uyumu giderebilmek için, eğitim işlemini 20nci epoch iterasyonu sonrası durdurabiliriz. Bunu otomatik olarak nasıl yapabileceğimizi daha sonra göreceğiz.
github_jupyter
## Problem Given a sorted list of integers of length N, determine if an element x is in the list without performing any multiplication, division, or bit-shift operations. Do this in `O(log N)` time. ## Solution We can't use binary search to locate the element because involves dividing by two to get the middle element. We can use Fibonacci search to get around this limitation. The idea is that fibonacci numbers are used to locate indices to check in the array, and by cleverly updating these indices, we can efficiently locate our element. Let `p` and `q` be consequtive Fibonacci numbers. `q` is the smallest Fibonacci number that is **greater than or equal to** the size of the array. We compare `x` with `array[p]` and perform the following logic: 1. If `x == array[p]`, we have found the element. Return true. 2. If `x < array[p]` move p and q down two indices each, cutting down the largest two elements from the search. 3. If `x > array[p]` move p and q down index each, and add an offset of p to the next search value. If we have exhausted our list of Fibonacci numbers, we can be assured that the element is not in our array. Let's go through an example. First, we need a helper function to generate the Fibonacci numbers, given the length of the array => N. ``` def get_fib_sequence(n): a, b = 0, 1 sequence = [a] while a < n: a, b = b, a + b sequence.append(a) return sequence ``` Suppose we have array ``` [2, 4, 10, 16, 25, 45, 55, 65, 80, 100] ``` Since there are 10 elements in the array, the generated sequence of Fibonacci numbers will be ``` [0, 1, 1, 2, 3, 5, 8, 13] ``` So the values of p and q are: `p == 6, q == 7` (The second last and last indices in the sequence) Now suppose we are searching for `45`, we'll carry out the following steps: - Compare 45 with `array[fib[p]] => array[8]`. Since 45 < 80, we move p and q down two indices. p = 4, q = 5. - Next, compare 45 with `array[fib[p]] => array[3]`. Since 45 > 16, we set p = 3 and create an offset of 2. 
So p = 5, q = 4. - Finally, we compare 45 with `array[fib[p]]`. Since array[5] == 45, we have found x. ``` def fibo_search(array, x): n = len(array) fibs = get_fib_sequence(n) p, q = len(fibs) - 2, len(fibs) - 1 offset = 0 while q > 0: index = min(offset + fibs[p], n - 1) if x == array[index]: return True elif x < array[index]: p -= 2 q -= 2 else: p -= 1 q -= 1 offset = index return False fibo_search([2, 4, 10, 16, 25, 45, 55, 65, 80, 100], 45) ```
github_jupyter
# Feature processing with Spark, training with BlazingText and deploying as Inference Pipeline

Typically a Machine Learning (ML) process consists of a few steps: gathering data with various ETL jobs, pre-processing the data, featurizing the dataset by incorporating standard techniques or prior knowledge, and finally training an ML model using an algorithm.

In many cases, when the trained model is used for processing real time or batch prediction requests, the model receives data in a format which needs to be pre-processed (e.g. featurized) before it can be passed to the algorithm.

In the following notebook, we will demonstrate how you can build your ML Pipeline leveraging Spark Feature Transformers and the SageMaker BlazingText algorithm, and, after the model is trained, deploy the Pipeline (Feature Transformer and BlazingText) as an Inference Pipeline behind a single Endpoint for real-time inference and for batch inferences using Amazon SageMaker Batch Transform.

In this notebook, we use Amazon Glue to run serverless Spark. Though the notebook demonstrates the end-to-end flow on a small dataset, the setup can be seamlessly used to scale to larger datasets.

## Objective: Text Classification on DBPedia dataset

In this example, we will train the text classification model using the SageMaker `BlazingText` algorithm on the [DBPedia Ontology Dataset](https://wiki.dbpedia.org/services-resources/dbpedia-data-set-2014#2) as done by [Zhang et al](https://arxiv.org/pdf/1509.01626.pdf). The DBpedia ontology dataset is constructed by picking 14 nonoverlapping classes from DBpedia 2014. It has 560,000 training samples and 70,000 testing samples. The fields we used for this dataset contain the title and abstract of each Wikipedia article.

Before passing the input data to `BlazingText`, we need to process this dataset into white-space separated tokens, have the label field in every line prefixed with `__label__`, and all input data should be in a single file.
## Methodologies The Notebook consists of a few high-level steps: * Using AWS Glue for executing the SparkML feature processing job. * Using SageMaker BlazingText to train on the processed dataset produced by SparkML job. * Building an Inference Pipeline consisting of SparkML & BlazingText models for a realtime inference endpoint. * Building an Inference Pipeline consisting of SparkML & BlazingText models for a single Batch Transform job. ## Using AWS Glue for executing Spark jobs We'll be running the SparkML job using [AWS Glue](https://aws.amazon.com/glue). AWS Glue is a serverless ETL service which can be used to execute standard Spark/PySpark jobs. Glue currently only supports `Python 2.7`, hence we'll write the script in `Python 2.7`. ## Permission setup for invoking AWS Glue from this Notebook In order to enable this Notebook to run AWS Glue jobs, we need to add one additional permission to the default execution role of this notebook. We will be using SageMaker Python SDK to retrieve the default execution role and then you have to go to [IAM Dashboard](https://console.aws.amazon.com/iam/home) to edit the Role to add AWS Glue specific permission. ### Finding out the current execution role of the Notebook We are using SageMaker Python SDK to retrieve the current role for this Notebook which needs to be enhanced. ``` # Import SageMaker Python SDK to get the Session and execution_role import sagemaker from sagemaker import get_execution_role sess = sagemaker.Session() role = get_execution_role() print(role[role.rfind('/') + 1:]) ``` ### Adding AWS Glue as an additional trusted entity to this role This step is needed if you want to pass the execution role of this Notebook while calling Glue APIs as well without creating an additional **Role**. If you have not used AWS Glue before, then this step is mandatory. If you have used AWS Glue previously, then you should have an already existing role that can be used to invoke Glue APIs. 
In that case, you can pass that role while calling Glue (later in this notebook) and skip this next step. On the IAM dashboard, please click on **Roles** on the left sidenav and search for this Role. Once the Role appears, click on the Role to go to its **Summary** page. Click on the **Trust relationships** tab on the **Summary** page to add AWS Glue as an additional trusted entity. Click on **Edit trust relationship** and replace the JSON with this JSON. ``` { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": [ "sagemaker.amazonaws.com", "glue.amazonaws.com" ] }, "Action": "sts:AssumeRole" } ] } ``` Once this is complete, click on **Update Trust Policy** and you are done. ## Downloading dataset and uploading to S3 SageMaker team has downloaded the dataset and uploaded to one of the S3 buckets in our account. In this notebook, we will download from that bucket and upload to your bucket so that AWS Glue can access the data. The default AWS Glue permissions we just added expects the data to be present in a bucket with the string `aws-glue`. Hence, after we download the dataset, we will create an S3 bucket in your account with a valid name and then upload the data to S3. ``` !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/dbpedia/train.csv !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/dbpedia/test.csv ``` ### Creating an S3 bucket and uploading this dataset Next we will create an S3 bucket with the `aws-glue` string in the name and upload this data to the S3 bucket. In case you want to use some existing bucket to run your Spark job via AWS Glue, you can use that bucket to upload your data provided the `Role` has access permission to upload and download from that bucket. Once the bucket is created, the following cell would also update the `train.csv` and `test.csv` files downloaded locally to this bucket under the `input/dbpedia` prefix. 
``` import boto3 import botocore from botocore.exceptions import ClientError boto_session = sess.boto_session s3 = boto_session.resource('s3') account = boto_session.client('sts').get_caller_identity()['Account'] region = boto_session.region_name default_bucket = 'aws-glue-{}-{}'.format(account, region) try: if region == 'us-east-1': s3.create_bucket(Bucket=default_bucket) else: s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region}) except ClientError as e: error_code = e.response['Error']['Code'] message = e.response['Error']['Message'] if error_code == 'BucketAlreadyOwnedByYou': print ('A bucket with the same name already exists in your account - using the same bucket.') pass # Uploading the training data to S3 sess.upload_data(path='train.csv', bucket=default_bucket, key_prefix='input/dbpedia') sess.upload_data(path='test.csv', bucket=default_bucket, key_prefix='input/dbpedia') ``` ## Writing the feature processing script using SparkML The code for feature transformation using SparkML can be found in `dbpedia_processing.py` file written in the same directory. You can go through the code itself to see how it is using standard SparkML feature transformers to define the Pipeline for featurizing and processing the data. Once the Spark ML Pipeline `fit` and `transform` is done, we are tranforming the `train` and `test` file and writing it in the format `BlazingText` expects before uploading to S3. ### Serializing the trained Spark ML Model with [MLeap](https://github.com/combust/mleap) Apache Spark is best suited batch processing workloads. In order to use the Spark ML model we trained for low latency inference, we need to use the MLeap library to serialize it to an MLeap bundle and later use the [SageMaker SparkML Serving](https://github.com/aws/sagemaker-sparkml-serving-container) to perform realtime and batch inference. 
By using the `SerializeToBundle()` method from MLeap in the script, we are serializing the ML Pipeline into an MLeap bundle and uploading to S3 in `tar.gz` format as SageMaker expects. ## Uploading the code and other dependencies to S3 for AWS Glue Unlike SageMaker, in order to run your code in AWS Glue, we do not need to prepare a Docker image. We can upload your code and dependencies directly to S3 and pass those locations while invoking the Glue job. ### Upload the featurizer script to S3 We will be uploading the `dbpedia_processing.py` script to S3 now so that Glue can use it to run the PySpark job. You can replace it with your own script if needed. If your code has multiple files, you need to zip those files and upload to S3 instead of uploading a single file like it's being done here. ``` script_location = sess.upload_data(path='dbpedia_processing.py', bucket=default_bucket, key_prefix='codes') ``` ### Upload MLeap dependencies to S3 For our job, we will also have to pass MLeap dependencies to Glue.MLeap is an additional library we are using which does not come bundled with default Spark. Similar to most of the packages in the Spark ecosystem, MLeap is also implemented as a Scala package with a front-end wrapper written in Python so that it can be used from PySpark. We need to make sure that the MLeap Python library as well as the JAR is available within the Glue job environment. In the following cell, we will download the MLeap Python dependency & JAR from a SageMaker hosted bucket and upload to the S3 bucket we created above in your account. If you are using some other Python libraries like `nltk` in your code, you need to download the wheel file from PyPI and upload to S3 in the same way. At this point, Glue only supports passing pure Python libraries in this way (e.g. you can not pass `Pandas` or `OpenCV`). However you can use `NumPy` & `SciPy` without having to pass these as packages because these are pre-installed in the Glue environment. 
``` !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/0.9.6/python/python.zip !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/0.9.6/jar/mleap_spark_assembly.jar python_dep_location = sess.upload_data(path='python.zip', bucket=default_bucket, key_prefix='dependencies/python') jar_dep_location = sess.upload_data(path='mleap_spark_assembly.jar', bucket=default_bucket, key_prefix='dependencies/jar') ``` ## Defining output locations for the data and model Next we define the output location where the transformed dataset should be uploaded. We are also specifying a model location where the MLeap serialized model would be updated. This locations should be consumed as part of the Spark script using `getResolvedOptions` method of AWS Glue library (see `dbpedia_processing.py` for details). By designing our code in this way, we can re-use these variables as part of the SageMaker training job (details below). ``` from time import gmtime, strftime import time timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime()) # Input location of the data, We uploaded our train.csv file to input key previously s3_input_bucket = default_bucket s3_input_key_prefix = 'input/dbpedia' # Output location of the data. The input data will be split, transformed, and # uploaded to output/train and output/validation s3_output_bucket = default_bucket s3_output_key_prefix = timestamp_prefix + '/dbpedia' # the MLeap serialized SparkML model will be uploaded to output/mleap s3_model_bucket = default_bucket s3_model_key_prefix = s3_output_key_prefix + '/mleap' ``` ### Calling Glue APIs Next we'll be creating Glue client via Boto so that we can invoke the `create_job` API of Glue. `create_job` API will create a job definition which can be used to execute your jobs in Glue. The job definition created here is mutable. While creating the job, we are also passing the code location as well as the dependencies location to Glue. 
`AllocatedCapacity` parameter controls the hardware resources that Glue will use to execute this job. It is measures in units of `DPU`. For more information on `DPU`, please see [here](https://docs.aws.amazon.com/glue/latest/dg/add-job.html). ``` glue_client = boto_session.client('glue') job_name = 'sparkml-dbpedia-' + timestamp_prefix response = glue_client.create_job( Name=job_name, Description='PySpark job to featurize the DBPedia dataset', Role=role, # you can pass your existing AWS Glue role here if you have used Glue before ExecutionProperty={ 'MaxConcurrentRuns': 1 }, Command={ 'Name': 'glueetl', 'ScriptLocation': script_location }, DefaultArguments={ '--job-language': 'python', '--extra-jars' : jar_dep_location, '--extra-py-files': python_dep_location }, AllocatedCapacity=10, Timeout=60, ) glue_job_name = response['Name'] print(glue_job_name) ``` The aforementioned job will be executed now by calling `start_job_run` API. This API creates an immutable run/execution corresponding to the job definition created above. We will require the `job_run_id` for the particular job execution to check for status. We'll pass the data and model locations as part of the job execution parameters. ``` job_run_id = glue_client.start_job_run(JobName=job_name, Arguments = { '--S3_INPUT_BUCKET': s3_input_bucket, '--S3_INPUT_KEY_PREFIX': s3_input_key_prefix, '--S3_OUTPUT_BUCKET': s3_output_bucket, '--S3_OUTPUT_KEY_PREFIX': s3_output_key_prefix, '--S3_MODEL_BUCKET': s3_model_bucket, '--S3_MODEL_KEY_PREFIX': s3_model_key_prefix })['JobRunId'] print(job_run_id) ``` ### Checking Glue job status Now we will check for the job status to see if it has `succeeded`, `failed` or `stopped`. Once the job is succeeded, we have the transformed data into S3 in CSV format which we can use with `BlazingText` for training. 
If the job fails, you can go to [AWS Glue console](https://us-west-2.console.aws.amazon.com/glue/home), click on **Jobs** tab on the left, and from the page, click on this particular job and you will be able to find the CloudWatch logs (the link under **Logs**) link for these jobs which can help you to see what exactly went wrong in the `spark-submit` call. ``` job_run_status = glue_client.get_job_run(JobName=job_name,RunId=job_run_id)['JobRun']['JobRunState'] while job_run_status not in ('FAILED', 'SUCCEEDED', 'STOPPED'): job_run_status = glue_client.get_job_run(JobName=job_name,RunId=job_run_id)['JobRun']['JobRunState'] print (job_run_status) time.sleep(30) ``` ## Using SageMaker BlazingText to train on the processed dataset produced by SparkML job Now we will use SageMaker `BlazingText` algorithm to train a text classification model this dataset. We already know the S3 location where the preprocessed training data was uploaded as part of the Glue job. ### We need to retrieve the BlazingText algorithm image ``` from sagemaker.amazon.amazon_estimator import get_image_uri training_image = get_image_uri(sess.boto_region_name, 'blazingtext', repo_version="latest") print (training_image) ``` ### Next BlazingText model parameters and dataset details will be set properly We have parameterized the notebook so that the same data location which was used in the PySpark script can now be passed to `BlazingText` Estimator as well. 
``` s3_train_data = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'train') s3_validation_data = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'validation') s3_output_location = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'bt_model') bt_model = sagemaker.estimator.Estimator(training_image, role, train_instance_count=1, train_instance_type='ml.c4.xlarge', train_volume_size = 20, train_max_run = 3600, input_mode= 'File', output_path=s3_output_location, sagemaker_session=sess) bt_model.set_hyperparameters(mode="supervised", epochs=10, min_count=2, learning_rate=0.05, vector_dim=10, early_stopping=True, patience=4, min_epochs=5, word_ngrams=2) train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') data_channels = {'train': train_data, 'validation': validation_data} ``` ### Finally BlazingText training will be performed ``` bt_model.fit(inputs=data_channels, logs=True) ``` # Building an Inference Pipeline consisting of SparkML & BlazingText models for a realtime inference endpoint Next we will proceed with deploying the models in SageMaker to create an Inference Pipeline. You can create an Inference Pipeline with upto five containers. Deploying a model in SageMaker requires two components: * Docker image residing in ECR. * Model artifacts residing in S3. **SparkML** For SparkML, Docker image for MLeap based SparkML serving is provided by SageMaker team. For more information on this, please see [SageMaker SparkML Serving](https://github.com/aws/sagemaker-sparkml-serving-container). MLeap serialized SparkML model was uploaded to S3 as part of the SparkML job we executed in AWS Glue. **BlazingText** For BlazingText, we will use the same Docker image we used for training. 
The model artifacts for BlazingText was uploaded as part of the training job we just ran. ### Creating the Endpoint with both containers Next we'll create a SageMaker inference endpoint with both the `sagemaker-sparkml-serving` & `BlazingText` containers. For this, we will first create a `PipelineModel` which will consist of both the `SparkML` model as well as `BlazingText` model in the right sequence. ### Passing the schema of the payload via environment variable SparkML serving container needs to know the schema of the request that'll be passed to it while calling the `predict` method. In order to alleviate the pain of not having to pass the schema with every request, `sagemaker-sparkml-serving` allows you to pass it via an environment variable while creating the model definitions. This schema definition will be required in our next step for creating a model. We will see later that you can overwrite this schema on a per request basis by passing it as part of the individual request payload as well. ``` import json schema = { "input": [ { "name": "abstract", "type": "string" } ], "output": { "name": "tokenized_abstract", "type": "string", "struct": "array" } } schema_json = json.dumps(schema) print(schema_json) ``` ### Creating a `PipelineModel` which comprises of the SparkML and BlazingText model in the right order Next we'll create a SageMaker `PipelineModel` with SparkML and BlazingText.The `PipelineModel` will ensure that both the containers get deployed behind a single API endpoint in the correct order. The same model would later be used for Batch Transform as well to ensure that a single job is sufficient to do prediction against the Pipeline. Here, during the `Model` creation for SparkML, we will pass the schema definition that we built in the previous cell. ### Controlling the output format from `sagemaker-sparkml-serving` to the next container By default, `sagemaker-sparkml-serving` returns an output in `CSV` format. 
However, BlazingText does not understand CSV format and it supports a different format. In order for the `sagemaker-sparkml-serving` to emit the output with the right format, we need to pass a second environment variable `SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT` with the value `application/jsonlines;data=text` to ensure that `sagemaker-sparkml-serving` container emits response in the proper format which BlazingText can parse. For more information on different output formats `sagemaker-sparkml-serving` supports, please check the documentation pointed above. ``` from sagemaker.model import Model from sagemaker.pipeline import PipelineModel from sagemaker.sparkml.model import SparkMLModel sparkml_data = 's3://{}/{}/{}'.format(s3_model_bucket, s3_model_key_prefix, 'model.tar.gz') # passing the schema defined above by using an environment variable that sagemaker-sparkml-serving understands sparkml_model = SparkMLModel(model_data=sparkml_data, env={'SAGEMAKER_SPARKML_SCHEMA' : schema_json, 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': "application/jsonlines;data=text"}) bt_model = Model(model_data=bt_model.model_data, image=training_image) model_name = 'inference-pipeline-' + timestamp_prefix sm_model = PipelineModel(name=model_name, role=role, models=[sparkml_model, bt_model]) ``` ### Deploying the `PipelineModel` to an endpoint for realtime inference Next we will deploy the model we just created with the `deploy()` method to start an inference endpoint and we will send some requests to the endpoint to verify that it works as expected. ``` endpoint_name = 'inference-pipeline-ep-' + timestamp_prefix sm_model.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge', endpoint_name=endpoint_name) ``` ### Invoking the newly created inference endpoint with a payload to transform the data Now we will invoke the endpoint with a valid payload that `sagemaker-sparkml-serving` can recognize. 
There are three ways in which input payload can be passed to the request: * Pass it as a valid CSV string. In this case, the schema passed via the environment variable will be used to determine the schema. For CSV format, every column in the input has to be a basic datatype (e.g. int, double, string) and it can not be a Spark `Array` or `Vector`. * Pass it as a valid JSON string. In this case as well, the schema passed via the environment variable will be used to infer the schema. With JSON format, every column in the input can be a basic datatype or a Spark `Vector` or `Array` provided that the corresponding entry in the schema mentions the correct value. * Pass the request in JSON format along with the schema and the data. In this case, the schema passed in the payload will take precedence over the one passed via the environment variable (if any). #### Passing the payload in CSV format We will first see how the payload can be passed to the endpoint in CSV format. ``` from sagemaker.predictor import json_serializer, csv_serializer, json_deserializer, RealTimePredictor from sagemaker.content_types import CONTENT_TYPE_CSV, CONTENT_TYPE_JSON payload = "Convair was an american aircraft manufacturing company which later expanded into rockets and spacecraft." predictor = RealTimePredictor(endpoint=endpoint_name, sagemaker_session=sess, serializer=csv_serializer, content_type=CONTENT_TYPE_CSV, accept='application/jsonlines') print(predictor.predict(payload)) ``` #### Passing the payload in JSON format We will now pass a different payload in JSON format. 
``` payload = {"data": ["Berwick secondary college is situated in the outer melbourne metropolitan suburb of berwick ."]} predictor = RealTimePredictor(endpoint=endpoint_name, sagemaker_session=sess, serializer=json_serializer, content_type=CONTENT_TYPE_JSON) print(predictor.predict(payload)) ``` ### [Optional] Deleting the Endpoint If you do not plan to use this endpoint, then it is a good practice to delete the endpoint so that you do not incur the cost of running it. ``` sm_client = boto_session.client('sagemaker') sm_client.delete_endpoint(EndpointName=endpoint_name) ``` # Building an Inference Pipeline consisting of SparkML & BlazingText models for a single Batch Transform job SageMaker Batch Transform also supports chaining multiple containers together when deploying an Inference Pipeline and performing a single Batch Transform job to transform your data for a batch use-case similar to the real-time use-case we have seen above. ### Preparing data for Batch Transform Batch Transform requires data in the same format described above, with one CSV or JSON being per line. For this notebook, SageMaker team has created a sample input in CSV format which Batch Transform can process. The input is a simple CSV file with one input string per line. Next we will download a sample of this data from one of the SageMaker buckets (named `batch_input_dbpedia.csv`) and upload to your S3 bucket. We will also inspect first five rows of the data post downloading. 
``` !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/batch_input_dbpedia.csv !printf "\n\nShowing first three lines\n\n" !head -n 3 batch_input_dbpedia.csv !printf "\n\nAs we can see, it is just one input string per line.\n\n" batch_input_loc = sess.upload_data(path='batch_input_dbpedia.csv', bucket=default_bucket, key_prefix='batch') ``` ### Invoking the Transform API to create a Batch Transform job Next we will create a Batch Transform job using the `Transformer` class from the Python SDK. ``` input_data_path = 's3://{}/{}/{}'.format(default_bucket, 'batch', 'batch_input_dbpedia.csv') output_data_path = 's3://{}/{}/{}'.format(default_bucket, 'batch_output/dbpedia', timestamp_prefix) transformer = sagemaker.transformer.Transformer( model_name = model_name, instance_count = 1, instance_type = 'ml.m4.xlarge', strategy = 'SingleRecord', assemble_with = 'Line', output_path = output_data_path, base_transform_job_name='serial-inference-batch', sagemaker_session=sess, accept = CONTENT_TYPE_CSV ) transformer.transform(data = input_data_path, content_type = CONTENT_TYPE_CSV, split_type = 'Line') transformer.wait() ```
github_jupyter
# Polynomial Regression ``` import numpy as np import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['axes.titlesize'] = 14 plt.rcParams['legend.fontsize'] = 12 plt.rcParams['figure.figsize'] = (8, 5) %config InlineBackend.figure_format = 'retina' ``` ### Linear models $y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \dots + \beta_n x_n + \epsilon$ $\begin{bmatrix} \vdots \\ y \\ \vdots \end{bmatrix} = \beta_0 + \beta_1 \begin{bmatrix} \vdots \\ x_1 \\ \vdots \end{bmatrix} + \beta_2 \begin{bmatrix} \vdots \\ x_2 \\ \vdots \end{bmatrix} + \dots + \beta_n \begin{bmatrix} \vdots \\ x_n \\ \vdots \end{bmatrix} + \begin{bmatrix} \vdots \\ \epsilon \\ \vdots \end{bmatrix}$ $X = \begin{bmatrix} \vdots & \vdots & & \vdots \\ x_1 & x_2 & \dots & x_n \\ \vdots & \vdots & & \vdots \end{bmatrix}$ ### A simple linear model $y = \beta_1 x_1 + \beta_2 x_2 + \epsilon$ ### Extending this to a $2^{nd}$ degree polynomial model $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_1^2 + \beta_4 x_1 x_2 + \beta_5 x_2^2 + \epsilon$ $x_1 x_2$ is an interaction term between $x_1$ and $x_2$ ### Reparameterize the model $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_1^2 + \beta_4 x_1 x_2 + \beta_5 x_2^2 + \epsilon$ $\begin{matrix} x_3 & \rightarrow & x_1^2 \\ x_4 & \rightarrow & x_1 x_2 \\ x_5 & \rightarrow & x_2^2 \end{matrix}$ $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_3 + \beta_4 x_4 + \beta_5 x_5 + \epsilon$ ### !!! But that's just a linear model ### Given the matrix of measured features $X$: $X = \begin{bmatrix} \vdots & \vdots \\ x_1 & x_2 \\ \vdots & \vdots \end{bmatrix}$ ### All we need to do is fit a linear model using the following feature matrix $X_{poly}$: $X_{poly} = \begin{bmatrix} \vdots & \vdots & \vdots & \vdots & \vdots \\ x_1 & x_2 & x_1^2 & x_1 x_2 & x_2^2 \\ \vdots & \vdots & \vdots & \vdots & \vdots \end{bmatrix}$ ## Some experimental data: Temperature vs. 
Yield ``` temperature = np.array([50, 50, 50, 70, 70, 70, 80, 80, 80, 90, 90, 90, 100, 100, 100]) experimental_yield = np.array([3.3, 2.8, 2.9, 2.3, 2.6, 2.1, 2.5, 2.9, 2.4, 3, 3.1, 2.8, 3.3, 3.5, 3]) plt.plot(temperature, experimental_yield, 'o') plt.xlabel('Temperature') plt.ylabel('Experimental Yield'); ``` ### Rearranging the data for use with sklearn ``` X = temperature.reshape([-1,1]) y = experimental_yield X ``` # Fit yield vs. temperature data with a linear model ``` from sklearn.linear_model import LinearRegression ols_model = LinearRegression() ols_model.fit(X, y) plt.plot(temperature, experimental_yield, 'o') plt.plot(temperature, ols_model.predict(X), '-', label='OLS') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` # Fit yield vs. temperature data with a $2^{nd}$ degree polynomial model ``` from sklearn.preprocessing import PolynomialFeatures poly2 = PolynomialFeatures(degree=2) X_poly2 = poly2.fit_transform(X) X.shape, X_poly2.shape poly2_model = LinearRegression() poly2_model.fit(X_poly2, y) plt.plot(temperature, experimental_yield, 'o') plt.plot(temperature, ols_model.predict(X), '-', label='OLS') plt.plot(temperature, poly2_model.predict(X_poly2), '-', label='Poly2') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` Note that you could very well use a regularization model such as Ridge or Lasso instead of the simple ordinary least squares LinearRegression model. In this case, it doesn't matter too much because we have only one feature (Temperature). # Smoothing the plot of the model fit ``` X_fit = np.arange(50, 101).reshape([-1, 1]) X_fit_poly2 = poly2.fit_transform(X_fit) plt.plot(temperature, experimental_yield, 'o') plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS') plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` # Fit yield vs. 
temperature data with a $3^{rd}$ degree polynomial model ``` poly3 = PolynomialFeatures(degree=3) X_poly3 = poly3.fit_transform(X) X.shape, X_poly3.shape poly3_model = LinearRegression() poly3_model.fit(X_poly3, y) X_fit_poly3 = poly3.fit_transform(X_fit) plt.plot(temperature, experimental_yield, 'o') plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS') plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2') plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` ### Polynomial fit is clearly better than a linear fit, but which degree polynomial should we use? ### Why not try a range of polynomial degrees, and see which one is best? ### But how do we determine which degree is best? ### We could use cross validation to determine the degree of polynomial that is most likely to best explain new data. ### Ideally, we would: 1. Split the data into training and testing sets 2. Perform cross validation on the training set to determine the best choice of polynomial degree 3. Fit the chosen model to the training set 4. Evaluate it on the withheld testing set However, we have such little data that doing all of these splits is likely to leave individual partitions with subsets of data that are no longer representative of the relationship between temperature and yield. ``` plt.plot(temperature, experimental_yield, 'o') plt.xlabel('Temperature') plt.ylabel('Experimental Yield'); ``` Thus, I'll forgo splitting the data into training and testing sets, and we'll train our model on the entire dataset. This is not ideal of course, and it means we'll have to simply hope that our model generalizes to new data. I will use 5-fold cross validation to tune the polynomial degree hyperparameter. You might also want to explore 10-fold or leave one out cross validation. 
``` from sklearn.model_selection import cross_validate cv_mse = [] for degree in [2, 3]: poly = PolynomialFeatures(degree=degree) X_poly = poly.fit_transform(X) model = LinearRegression() results = cross_validate(model, X_poly, y, cv=5, scoring='neg_mean_squared_error') cv_mse.append(-results['test_score']) cv_mse np.mean(cv_mse[0]), np.mean(cv_mse[1]) ``` Slightly better mean validation error for $3^{rd}$ degree polynomial. ``` plt.plot(temperature, experimental_yield, 'o') plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS') plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2') plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` Despite the lower validation error for the $3^{rd}$ degree polynomial, we might still opt to stick with a $2^{nd}$ degree polynomial model. Why might we want to do that? Less flexible models are more likely to generalize to new data because they are less likely to overfit noise. Another important question to ask is whether the slight difference in mean validation error between $2^{nd}$ and $3^{rd}$ degree polynomial models is enough to really distinguish between the models? One thing we can do is look at how variable the validation errors are across the various validation partitions. ``` cv_mse binedges = np.linspace(0, np.max(cv_mse[0]), 11) plt.hist(cv_mse[0], binedges, alpha=0.5, label='Poly2') plt.hist(cv_mse[1], binedges, alpha=0.5, label='Poly3') plt.xlabel('Validation MSE') plt.ylabel('Counts') plt.legend(); ``` Is the extra flexibility of the $3^{rd}$ degree polynomial model worth it, or is it more likely to overfit noise in our data and less likely to generalize to new measurements? How dependent are our results on how we partitioned the data? Repeat the above using 10-fold cross validation. Of course, more measurements, including measures at 60 degrees, would help you to better distinguish between these models. 
``` plt.plot(temperature, experimental_yield, 'o') plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS') plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2') plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ```
github_jupyter
# Exercise 4: Optimizing Redshift Table Design ``` %load_ext sql from time import time import configparser import matplotlib.pyplot as plt import pandas as pd config = configparser.ConfigParser() config.read_file(open('dwh.cfg')) KEY=config.get('AWS','key') SECRET= config.get('AWS','secret') DWH_DB= config.get("DWH","DWH_DB") DWH_DB_USER= config.get("DWH","DWH_DB_USER") DWH_DB_PASSWORD= config.get("DWH","DWH_DB_PASSWORD") DWH_PORT = config.get("DWH","DWH_PORT") ``` # STEP 1: Get the params of the created redshift cluster - We need: - The redshift cluster <font color='red'>endpoint</font> - The <font color='red'>IAM role ARN</font> that give access to Redshift to read from S3 ``` # FILL IN THE REDSHIFT ENDPOINT HERE # e.g. DWH_ENDPOINT="redshift-cluster-1.csmamz5zxmle.us-west-2.redshift.amazonaws.com" DWH_ENDPOINT="dwhcluster.csmamz5zxmle.us-west-2.redshift.amazonaws.com" #FILL IN THE IAM ROLE ARN you got in step 2.2 of the previous exercise #e.g DWH_ROLE_ARN="arn:aws:iam::988332130976:role/dwhRole" DWH_ROLE_ARN="arn:aws:iam::988332130976:role/dwhRole" ``` # STEP 2: Connect to the Redshift Cluster ``` import os conn_string="postgresql://{}:{}@{}:{}/{}".format(DWH_DB_USER, DWH_DB_PASSWORD, DWH_ENDPOINT, DWH_PORT,DWH_DB) print(conn_string) %sql $conn_string ``` # STEP 3: Create Tables - We are going to use a benchmarking data set common for benchmarking star schemas in data warehouses. - The data is pre-loaded in a public bucket on the `us-west-2` region - Our examples will be based on the Amazon Redshfit tutorial but in a scripted environment in our workspace. 
![afa](https://docs.aws.amazon.com/redshift/latest/dg/images/tutorial-optimize-tables-ssb-data-model.png) ## 3.1 Create tables (no distribution strategy) in the `nodist` schema ``` %%sql CREATE SCHEMA IF NOT EXISTS nodist; SET search_path TO nodist; DROP TABLE IF EXISTS part cascade; DROP TABLE IF EXISTS supplier; DROP TABLE IF EXISTS supplier; DROP TABLE IF EXISTS customer; DROP TABLE IF EXISTS dwdate; DROP TABLE IF EXISTS lineorder; CREATE TABLE part ( p_partkey INTEGER NOT NULL, p_name VARCHAR(22) NOT NULL, p_mfgr VARCHAR(6) NOT NULL, p_category VARCHAR(7) NOT NULL, p_brand1 VARCHAR(9) NOT NULL, p_color VARCHAR(11) NOT NULL, p_type VARCHAR(25) NOT NULL, p_size INTEGER NOT NULL, p_container VARCHAR(10) NOT NULL ); CREATE TABLE supplier ( s_suppkey INTEGER NOT NULL, s_name VARCHAR(25) NOT NULL, s_address VARCHAR(25) NOT NULL, s_city VARCHAR(10) NOT NULL, s_nation VARCHAR(15) NOT NULL, s_region VARCHAR(12) NOT NULL, s_phone VARCHAR(15) NOT NULL ); CREATE TABLE customer ( c_custkey INTEGER NOT NULL, c_name VARCHAR(25) NOT NULL, c_address VARCHAR(25) NOT NULL, c_city VARCHAR(10) NOT NULL, c_nation VARCHAR(15) NOT NULL, c_region VARCHAR(12) NOT NULL, c_phone VARCHAR(15) NOT NULL, c_mktsegment VARCHAR(10) NOT NULL ); CREATE TABLE dwdate ( d_datekey INTEGER NOT NULL, d_date VARCHAR(19) NOT NULL, d_dayofweek VARCHAR(10) NOT NULL, d_month VARCHAR(10) NOT NULL, d_year INTEGER NOT NULL, d_yearmonthnum INTEGER NOT NULL, d_yearmonth VARCHAR(8) NOT NULL, d_daynuminweek INTEGER NOT NULL, d_daynuminmonth INTEGER NOT NULL, d_daynuminyear INTEGER NOT NULL, d_monthnuminyear INTEGER NOT NULL, d_weeknuminyear INTEGER NOT NULL, d_sellingseason VARCHAR(13) NOT NULL, d_lastdayinweekfl VARCHAR(1) NOT NULL, d_lastdayinmonthfl VARCHAR(1) NOT NULL, d_holidayfl VARCHAR(1) NOT NULL, d_weekdayfl VARCHAR(1) NOT NULL ); CREATE TABLE lineorder ( lo_orderkey INTEGER NOT NULL, lo_linenumber INTEGER NOT NULL, lo_custkey INTEGER NOT NULL, lo_partkey INTEGER NOT NULL, lo_suppkey INTEGER NOT NULL, 
lo_orderdate INTEGER NOT NULL, lo_orderpriority VARCHAR(15) NOT NULL, lo_shippriority VARCHAR(1) NOT NULL, lo_quantity INTEGER NOT NULL, lo_extendedprice INTEGER NOT NULL, lo_ordertotalprice INTEGER NOT NULL, lo_discount INTEGER NOT NULL, lo_revenue INTEGER NOT NULL, lo_supplycost INTEGER NOT NULL, lo_tax INTEGER NOT NULL, lo_commitdate INTEGER NOT NULL, lo_shipmode VARCHAR(10) NOT NULL ); ``` ## 3.1 Create tables (with a distribution strategy) in the `dist` schema ``` %%sql CREATE SCHEMA IF NOT EXISTS dist; SET search_path TO dist; DROP TABLE IF EXISTS part cascade; DROP TABLE IF EXISTS supplier; DROP TABLE IF EXISTS supplier; DROP TABLE IF EXISTS customer; DROP TABLE IF EXISTS dwdate; DROP TABLE IF EXISTS lineorder; CREATE TABLE part ( p_partkey integer not null sortkey distkey, p_name varchar(22) not null, p_mfgr varchar(6) not null, p_category varchar(7) not null, p_brand1 varchar(9) not null, p_color varchar(11) not null, p_type varchar(25) not null, p_size integer not null, p_container varchar(10) not null ); CREATE TABLE supplier ( s_suppkey integer not null sortkey, s_name varchar(25) not null, s_address varchar(25) not null, s_city varchar(10) not null, s_nation varchar(15) not null, s_region varchar(12) not null, s_phone varchar(15) not null) diststyle all; CREATE TABLE customer ( c_custkey integer not null sortkey, c_name varchar(25) not null, c_address varchar(25) not null, c_city varchar(10) not null, c_nation varchar(15) not null, c_region varchar(12) not null, c_phone varchar(15) not null, c_mktsegment varchar(10) not null) diststyle all; CREATE TABLE dwdate ( d_datekey integer not null sortkey, d_date varchar(19) not null, d_dayofweek varchar(10) not null, d_month varchar(10) not null, d_year integer not null, d_yearmonthnum integer not null, d_yearmonth varchar(8) not null, d_daynuminweek integer not null, d_daynuminmonth integer not null, d_daynuminyear integer not null, d_monthnuminyear integer not null, d_weeknuminyear integer not null, 
d_sellingseason varchar(13) not null, d_lastdayinweekfl varchar(1) not null, d_lastdayinmonthfl varchar(1) not null, d_holidayfl varchar(1) not null, d_weekdayfl varchar(1) not null) diststyle all; CREATE TABLE lineorder ( lo_orderkey integer not null, lo_linenumber integer not null, lo_custkey integer not null, lo_partkey integer not null distkey, lo_suppkey integer not null, lo_orderdate integer not null sortkey, lo_orderpriority varchar(15) not null, lo_shippriority varchar(1) not null, lo_quantity integer not null, lo_extendedprice integer not null, lo_ordertotalprice integer not null, lo_discount integer not null, lo_revenue integer not null, lo_supplycost integer not null, lo_tax integer not null, lo_commitdate integer not null, lo_shipmode varchar(10) not null ); ``` # STEP 4: Copying tables Our intent here is to run 5 COPY operations for the 5 tables respectively as show below. However, we want to do accomplish the following: - Make sure that the `DWH_ROLE_ARN` is substituted with the correct value in each query - Perform the data loading twice once for each schema (dist and nodist) - Collect timing statistics to compare the insertion times Thus, we have scripted the insertion as found below in the function `loadTables` which returns a pandas dataframe containing timing statistics for the copy operations ```sql copy customer from 's3://awssampledbuswest2/ssbgz/customer' credentials 'aws_iam_role=<DWH_ROLE_ARN>' gzip region 'us-west-2'; copy dwdate from 's3://awssampledbuswest2/ssbgz/dwdate' credentials 'aws_iam_role=<DWH_ROLE_ARN>' gzip region 'us-west-2'; copy lineorder from 's3://awssampledbuswest2/ssbgz/lineorder' credentials 'aws_iam_role=<DWH_ROLE_ARN>' gzip region 'us-west-2'; copy part from 's3://awssampledbuswest2/ssbgz/part' credentials 'aws_iam_role=<DWH_ROLE_ARN>' gzip region 'us-west-2'; copy supplier from 's3://awssampledbuswest2/ssbgz/supplier' credentials 'aws_iam_role=<DWH_ROLE_ARN>' gzip region 'us-west-2'; ``` ## 4.1 Automate the copying 
``` def loadTables(schema, tables): loadTimes = [] SQL_SET_SCEMA = "SET search_path TO {};".format(schema) %sql $SQL_SET_SCEMA for table in tables: SQL_COPY = """ copy {} from 's3://awssampledbuswest2/ssbgz/{}' credentials 'aws_iam_role={}' gzip region 'us-west-2'; """.format(table,table, DWH_ROLE_ARN) print("======= LOADING TABLE: ** {} ** IN SCHEMA ==> {} =======".format(table, schema)) print(SQL_COPY) t0 = time() %sql $SQL_COPY loadTime = time()-t0 loadTimes.append(loadTime) print("=== DONE IN: {0:.2f} sec\n".format(loadTime)) return pd.DataFrame({"table":tables, "loadtime_"+schema:loadTimes}).set_index('table') #-- List of the tables to be loaded tables = ["customer","dwdate","supplier", "part", "lineorder"] #-- Insertion twice for each schema (WARNING!! EACH CAN TAKE MORE THAN 10 MINUTES!!!) nodistStats = loadTables("nodist", tables) distStats = loadTables("dist", tables) ``` ## 4.1 Compare the load performance results ``` #-- Plotting of the timing results stats = distStats.join(nodistStats) stats.plot.bar() plt.show() ``` # STEP 5: Compare Query Performance ``` oneDim_SQL =""" set enable_result_cache_for_session to off; SET search_path TO {}; select sum(lo_extendedprice*lo_discount) as revenue from lineorder, dwdate where lo_orderdate = d_datekey and d_year = 1997 and lo_discount between 1 and 3 and lo_quantity < 24; """ twoDim_SQL=""" set enable_result_cache_for_session to off; SET search_path TO {}; select sum(lo_revenue), d_year, p_brand1 from lineorder, dwdate, part, supplier where lo_orderdate = d_datekey and lo_partkey = p_partkey and lo_suppkey = s_suppkey and p_category = 'MFGR#12' and s_region = 'AMERICA' group by d_year, p_brand1 """ drill_SQL = """ set enable_result_cache_for_session to off; SET search_path TO {}; select c_city, s_city, d_year, sum(lo_revenue) as revenue from customer, lineorder, supplier, dwdate where lo_custkey = c_custkey and lo_suppkey = s_suppkey and lo_orderdate = d_datekey and (c_city='UNITED KI1' or c_city='UNITED KI5') 
and (s_city='UNITED KI1' or s_city='UNITED KI5') and d_yearmonth = 'Dec1997' group by c_city, s_city, d_year order by d_year asc, revenue desc; """ oneDimSameDist_SQL =""" set enable_result_cache_for_session to off; SET search_path TO {}; select lo_orderdate, sum(lo_extendedprice*lo_discount) as revenue from lineorder, part where lo_partkey = p_partkey group by lo_orderdate order by lo_orderdate """ def compareQueryTimes(schema): queryTimes =[] for i,query in enumerate([oneDim_SQL, twoDim_SQL, drill_SQL, oneDimSameDist_SQL]): t0 = time() q = query.format(schema) %sql $q queryTime = time()-t0 queryTimes.append(queryTime) return pd.DataFrame({"query":["oneDim","twoDim", "drill", "oneDimSameDist"], "queryTime_"+schema:queryTimes}).set_index('query') noDistQueryTimes = compareQueryTimes("nodist") distQueryTimes = compareQueryTimes("dist") queryTimeDF =noDistQueryTimes.join(distQueryTimes) queryTimeDF.plot.bar() plt.show() improvementDF = queryTimeDF["distImprovement"] =100.0*(queryTimeDF['queryTime_nodist']-queryTimeDF['queryTime_dist'])/queryTimeDF['queryTime_nodist'] improvementDF.plot.bar(title="% dist Improvement by query") plt.show() ```
github_jupyter
# Naive Bayes on Abalone Dataset ``` # importing all necessary packages and functions import pandas as pd import numpy as np import math as m from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') # kernel option to see output of multiple code lines from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" def df_split(dataframe, training_samples): df_training, df_validation = train_test_split(dataframe, train_size=training_samples) return df_training, df_validation def convert_to_sex_prob_1(x): if x == 'F': return sex_prob.loc['F'][1] elif x == 'M': return sex_prob.loc['M'][1] elif x == 'I': return sex_prob.loc['I'][1] else: return 'N/A' def convert_to_sex_prob_2(x): if x == 'F': return sex_prob.loc['F'][2] elif x == 'M': return sex_prob.loc['M'][2] elif x == 'I': return sex_prob.loc['I'][2] else: return 'N/A' def convert_to_sex_prob_3(x): if x == 'F': return sex_prob.loc['F'][3] elif x == 'M': return sex_prob.loc['M'][3] elif x == 'I': return sex_prob.loc['I'][3] else: return 'N/A' df = pd.read_table("abalone_dataset.txt", sep="\t", header=None) df.columns = ["sex", "length", "diameter", "height", "whole_weight", "shucked_weight", "viscera_weight", "shell_weight", "age_class"] ``` ### We decide the training samples for different cases (100, 1000, 2000): ``` # only input before clicking Run All training_samples = 2000 df_train = df_split(df, training_samples)[0] df_test = df_split(df, training_samples)[1] df_train1 = df_train[df_train.age_class==1] df_train2 = df_train[df_train.age_class==2] df_train3 = df_train[df_train.age_class==3] stat_table_1 = df_train1.describe()[1:3].transpose() stat_table_2 = df_train2.describe()[1:3].transpose() stat_table_3 = df_train3.describe()[1:3].transpose() stat_table_1 = stat_table_1.add_prefix('class1_') stat_table_2 = stat_table_2.add_prefix('class2_') stat_table_3 = stat_table_3.add_prefix('class3_') frames = [stat_table_1, 
stat_table_2, stat_table_3] stat_table = pd.concat(frames, axis=1, join_axes=[stat_table_1.index]) stat_table = stat_table.transpose() age_prob = pd.DataFrame([len(df_train[df_train.age_class==1])/len(df_train), len(df_train[df_train.age_class==2])/len(df_train), len(df_train[df_train.age_class==3])/len(df_train)], index=['1','2','3'], columns=['probability']) sex_prob = pd.crosstab(df_train.sex, df_train.age_class, normalize='columns') df_test['age_prob_1'],df_test['age_prob_2'],df_test['age_prob_3'] = [0,0,0] df_test['sex_1'],df_test['sex_2'],df_test['sex_3'] = df_test.sex,df_test.sex,df_test.sex df_test['length_1'],df_test['length_2'],df_test['length_3'] = df_test.length,df_test.length,df_test.length df_test['diameter_1'],df_test['diameter_2'],df_test['diameter_3'] = df_test.diameter,df_test.diameter,df_test.diameter df_test['height_1'],df_test['height_2'],df_test['height_3'] = df_test.height,df_test.height,df_test.height df_test['whole_weight_1'],df_test['whole_weight_2'],df_test['whole_weight_3'] = df_test.whole_weight,df_test.whole_weight,df_test.whole_weight df_test['shucked_weight_1'],df_test['shucked_weight_2'],df_test['shucked_weight_3'] = df_test.shucked_weight,df_test.shucked_weight,df_test.shucked_weight df_test['viscera_weight_1'],df_test['viscera_weight_2'],df_test['viscera_weight_3'] = df_test.viscera_weight,df_test.viscera_weight,df_test.viscera_weight df_test['shell_weight_1'],df_test['shell_weight_2'],df_test['shell_weight_3'] = df_test.shell_weight,df_test.shell_weight,df_test.shell_weight df_test.age_prob_1 = df_test.age_prob_1.apply(lambda x: age_prob.loc['1']) df_test.age_prob_2 = df_test.age_prob_2.apply(lambda x: age_prob.loc['2']) df_test.age_prob_3 = df_test.age_prob_3.apply(lambda x: age_prob.loc['3']) df_test.sex_1 = df_test.sex_1.apply(convert_to_sex_prob_1) df_test.sex_2 = df_test.sex_2.apply(convert_to_sex_prob_2) df_test.sex_3 = df_test.sex_3.apply(convert_to_sex_prob_3) df_test.length_1 = df_test.length_1.apply(lambda x: (1 / 
(m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['length'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['length'],2)/(2*m.pow(stat_table.loc['class1_std']['length'],2))))) df_test.length_2 = df_test.length_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['length'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['length'],2)/(2*m.pow(stat_table.loc['class2_std']['length'],2))))) df_test.length_3 = df_test.length_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['length'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['length'],2)/(2*m.pow(stat_table.loc['class3_std']['length'],2))))) df_test.diameter_1 = df_test.diameter_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['diameter'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['diameter'],2)/(2*m.pow(stat_table.loc['class1_std']['diameter'],2))))) df_test.diameter_2 = df_test.diameter_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['diameter'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['diameter'],2)/(2*m.pow(stat_table.loc['class2_std']['diameter'],2))))) df_test.diameter_3 = df_test.diameter_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['diameter'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['diameter'],2)/(2*m.pow(stat_table.loc['class3_std']['diameter'],2))))) df_test.height_1 = df_test.height_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['height'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['height'],2)/(2*m.pow(stat_table.loc['class1_std']['height'],2))))) df_test.height_2 = df_test.height_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['height'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['height'],2)/(2*m.pow(stat_table.loc['class2_std']['height'],2))))) df_test.height_3 = df_test.height_3.apply(lambda x: (1 / 
(m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['height'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['height'],2)/(2*m.pow(stat_table.loc['class3_std']['height'],2))))) df_test.whole_weight_1 = df_test.whole_weight_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['whole_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['whole_weight'],2)/(2*m.pow(stat_table.loc['class1_std']['whole_weight'],2))))) df_test.whole_weight_2 = df_test.whole_weight_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['whole_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['whole_weight'],2)/(2*m.pow(stat_table.loc['class2_std']['whole_weight'],2))))) df_test.whole_weight_3 = df_test.whole_weight_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['whole_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['whole_weight'],2)/(2*m.pow(stat_table.loc['class3_std']['whole_weight'],2))))) df_test.shucked_weight_1 = df_test.shucked_weight_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['shucked_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['shucked_weight'],2)/(2*m.pow(stat_table.loc['class1_std']['shucked_weight'],2))))) df_test.shucked_weight_2 = df_test.shucked_weight_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['shucked_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['shucked_weight'],2)/(2*m.pow(stat_table.loc['class2_std']['shucked_weight'],2))))) df_test.shucked_weight_3 = df_test.shucked_weight_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['shucked_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['shucked_weight'],2)/(2*m.pow(stat_table.loc['class3_std']['shucked_weight'],2))))) df_test.viscera_weight_1 = df_test.viscera_weight_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['viscera_weight'],2)))) * 
m.exp(-(m.pow(x-stat_table.loc['class1_mean']['viscera_weight'],2)/(2*m.pow(stat_table.loc['class1_std']['viscera_weight'],2))))) df_test.viscera_weight_2 = df_test.viscera_weight_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['viscera_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['viscera_weight'],2)/(2*m.pow(stat_table.loc['class2_std']['viscera_weight'],2))))) df_test.viscera_weight_3 = df_test.viscera_weight_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['viscera_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['viscera_weight'],2)/(2*m.pow(stat_table.loc['class3_std']['viscera_weight'],2))))) df_test.shell_weight_1 = df_test.shell_weight_1.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class1_std']['shell_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class1_mean']['shell_weight'],2)/(2*m.pow(stat_table.loc['class1_std']['shell_weight'],2))))) df_test.shell_weight_2 = df_test.shell_weight_2.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class2_std']['shell_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class2_mean']['shell_weight'],2)/(2*m.pow(stat_table.loc['class2_std']['shell_weight'],2))))) df_test.shell_weight_3 = df_test.shell_weight_3.apply(lambda x: (1 / (m.sqrt(2*m.pi*m.pow(stat_table.loc['class3_std']['shell_weight'],2)))) * m.exp(-(m.pow(x-stat_table.loc['class3_mean']['shell_weight'],2)/(2*m.pow(stat_table.loc['class3_std']['shell_weight'],2))))) df_test['class_prob_1_vars_3'] = df_test.age_prob_1 * df_test.sex_1 * df_test.length_1 * df_test.diameter_1 df_test['class_prob_1_vars_8'] = df_test.age_prob_1 * df_test.sex_1 * df_test.length_1 * df_test.diameter_1 * df_test.height_1 * df_test.whole_weight_1 * df_test.shucked_weight_1 * df_test.viscera_weight_1 * df_test.shell_weight_1 df_test['class_prob_2_vars_3'] = df_test.age_prob_2 * df_test.sex_2 * df_test.length_2 * df_test.diameter_2 df_test['class_prob_2_vars_8'] = df_test.age_prob_2 * 
df_test.sex_2 * df_test.length_2 * df_test.diameter_2 * df_test.height_2 * df_test.whole_weight_2 * df_test.shucked_weight_2 * df_test.viscera_weight_2 * df_test.shell_weight_2 df_test['class_prob_3_vars_3'] = df_test.age_prob_3 * df_test.sex_3 * df_test.length_3 * df_test.diameter_3 df_test['class_prob_3_vars_8'] = df_test.age_prob_3 * df_test.sex_3 * df_test.length_3 * df_test.diameter_3 * df_test.height_3 * df_test.whole_weight_3 * df_test.shucked_weight_3 * df_test.viscera_weight_3 * df_test.shell_weight_3 conditions_3 = [ (df_test['class_prob_1_vars_3'] >= df_test['class_prob_2_vars_3']) & (df_test['class_prob_1_vars_3'] >= df_test['class_prob_3_vars_3']), (df_test['class_prob_2_vars_3'] >= df_test['class_prob_1_vars_3']) & (df_test['class_prob_2_vars_3'] >= df_test['class_prob_3_vars_3'])] choices_3 = [1, 2] df_test['vars_3_guess'] = np.select(conditions_3, choices_3, default=3) conditions_8 = [ (df_test['class_prob_1_vars_8'] >= df_test['class_prob_2_vars_8']) & (df_test['class_prob_1_vars_8'] >= df_test['class_prob_3_vars_8']), (df_test['class_prob_2_vars_8'] >= df_test['class_prob_3_vars_8'])] choices_8 = [1, 2] df_test['vars_8_guess'] = np.select(conditions_8, choices_8, default=3) print ('Accuracy with {} samples and 3 variables'.format(training_samples)) len(df_test[df_test.age_class == df_test.vars_3_guess])/len(df_test) print ('Accuracy with {} samples and 8 variables'.format(training_samples)) len(df_test[df_test.age_class == df_test.vars_8_guess])/len(df_test) print ('Confusion matrix with {} samples and 3 variables'.format(training_samples)) cm3 = {'Matrix': ['Guessed 1', 'Guessed 2', 'Guessed 3'], 'Actual Value 1': [len(df_test[(df_test.age_class==1) & (df_test.vars_3_guess==1)]), len(df_test[(df_test.age_class==1) & (df_test.vars_3_guess==2)]), len(df_test[(df_test.age_class==1) & (df_test.vars_3_guess==3)])], 'Actual Value 2': [len(df_test[(df_test.age_class==2) & (df_test.vars_3_guess==1)]), len(df_test[(df_test.age_class==2) & 
(df_test.vars_3_guess==2)]), len(df_test[(df_test.age_class==2) & (df_test.vars_3_guess==3)])], 'Actual Value 3': [len(df_test[(df_test.age_class==3) & (df_test.vars_3_guess==1)]), len(df_test[(df_test.age_class==3) & (df_test.vars_3_guess==2)]), len(df_test[(df_test.age_class==3) & (df_test.vars_3_guess==3)])]} cm3 = pd.DataFrame.from_dict(cm3) cm3 print ('Total misclassification errors: {}'.format(len(df_test)-cm3.iloc[0][1]-cm3.iloc[1][2]-cm3.iloc[2][3])) print ('Confusion matrix with {} samples and 8 variables'.format(training_samples)) cm8 = {'Matrix': ['Guessed 1', 'Guessed 2', 'Guessed 3'], 'Actual Value 1': [len(df_test[(df_test.age_class==1) & (df_test.vars_8_guess==1)]), len(df_test[(df_test.age_class==1) & (df_test.vars_8_guess==2)]), len(df_test[(df_test.age_class==1) & (df_test.vars_8_guess==3)])], 'Actual Value 2': [len(df_test[(df_test.age_class==2) & (df_test.vars_8_guess==1)]), len(df_test[(df_test.age_class==2) & (df_test.vars_8_guess==2)]), len(df_test[(df_test.age_class==2) & (df_test.vars_8_guess==3)])], 'Actual Value 3': [len(df_test[(df_test.age_class==3) & (df_test.vars_8_guess==1)]), len(df_test[(df_test.age_class==3) & (df_test.vars_8_guess==2)]), len(df_test[(df_test.age_class==3) & (df_test.vars_8_guess==3)])]} cm8 = pd.DataFrame.from_dict(cm8) cm8 print ('Total misclassification errors: {}'.format(len(df_test)-cm8.iloc[0][1]-cm8.iloc[1][2]-cm8.iloc[2][3])) ``` ## References https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_table.html<br> https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html<br> https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html<br> http://www.inf.ed.ac.uk/teaching/courses/inf2b/learnnotes/inf2b-learn-note09-2up.pdf<br> http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.add_prefix.html<br>
github_jupyter
```
import pandas as pd

data = pd.read_csv('./data.csv')
X, y = data.drop('target', axis=1), data['target']

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

import torch
import torch.nn as nn
import numpy as np

# Convert the pandas splits to float32 tensors for PyTorch.
X_train = torch.from_numpy(np.array(X_train).astype(np.float32))
y_train = torch.from_numpy(np.array(y_train).astype(np.float32))
X_test = torch.from_numpy(np.array(X_test).astype(np.float32))
y_test = torch.from_numpy(np.array(y_test).astype(np.float32))

X_train.shape
X_test.shape
y_train.shape
y_test.shape

import torch.nn.functional as F


class Test_Model(nn.Module):
    """Fully connected binary classifier: 13 input features -> 1 sigmoid output."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(13, 64)
        self.fc2 = nn.Linear(64, 128)
        self.fc3 = nn.Linear(128, 256)
        self.fc4 = nn.Linear(256, 512)
        self.fc5 = nn.Linear(512, 1024)
        self.fc6 = nn.Linear(1024, 512)
        self.fc7 = nn.Linear(512, 1)

    def forward(self, X):
        preds = F.relu(self.fc1(X))
        preds = F.relu(self.fc2(preds))
        preds = F.relu(self.fc3(preds))
        preds = F.relu(self.fc4(preds))
        preds = F.relu(self.fc5(preds))
        preds = F.relu(self.fc6(preds))
        preds = self.fc7(preds)
        # NOTE(review): F.sigmoid is deprecated in newer PyTorch releases;
        # torch.sigmoid is the replacement. Kept as-is to match logged runs.
        return F.sigmoid(preds)


device = torch.device('cuda')

X_train = X_train.to(device)
y_train = y_train.to(device)
X_test = X_test.to(device)
y_test = y_test.to(device)

PROJECT_NAME = 'Heart-Disease-UCI'


def get_loss(criterion, X, y, model):
    """Return criterion(model(X), y) as a float, without tracking gradients.

    Temporarily switches `model` to eval mode and restores train mode after.
    """
    model.eval()
    with torch.no_grad():
        preds = model(X.float().to(device))
        preds = preds.view(len(preds),).to(device)
        y = y.view(len(y),).to(device)
        loss = criterion(preds, y)
    model.train()
    return loss.item()


def get_accuracy(preds, y):
    """Return the fraction of `preds` equal to the targets `y`, rounded to 3 dp.

    BUG FIX: the original iterated the *global* y_train instead of the `y`
    argument, and only incremented `total` when the prediction was correct
    (which makes the ratio degenerate). Every sample now counts in `total`.
    """
    correct = 0
    total = 0
    for real, pred in zip(y, preds):
        if real == pred:
            correct += 1
        total += 1
    return round(correct / total, 3)


import wandb
from tqdm import tqdm

EPOCHS = 212

# Baseline run, kept commented out for reference:
# EPOCHS = 100
# model = Test_Model().to(device)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.25)
# criterion = nn.L1Loss()
# wandb.init(project=PROJECT_NAME, name='baseline')
# for _ in tqdm(range(EPOCHS)):
#     preds = model(X_train.float().to(device))
#     preds = preds.view(len(preds),)
#     preds.to(device)
#     loss = criterion(preds, y_train)
#     optimizer.zero_grad()
#     loss.backward()
#     optimizer.step()
#     wandb.log({'loss': loss.item(),
#                'val_loss': get_loss(criterion, X_test, y_test, model),
#                'accuracy': get_accuracy(X_train, y_train, model),
#                'val_accuracy': get_accuracy(X_test, y_test, model)})
# wandb.finish()
# preds[:10]
# preds = torch.round(preds)
# correct = 0
# total = 0
# for real, pred in zip(y_train, preds):
#     if real == pred:
#         correct += 1
#     total += 1
# round(correct/total, 3)

## Testing Modelling

import torch
import torch.nn as nn


class Test_Model(nn.Module):
    """Configurable MLP used for the hyper-parameter sweeps below.

    `num_of_layers` repeats fc6 (the same weights are reused each pass);
    `activation` is the non-linearity applied between layers when the
    forward pass is called with activation=True.
    """

    def __init__(self, num_of_layers=1, activation=F.relu, input_shape=13,
                 fc1_output=32, fc2_output=64, fc3_output=128, fc4_output=256,
                 output_shape=1):
        super().__init__()
        self.num_of_layers = num_of_layers
        self.activation = activation
        self.fc1 = nn.Linear(input_shape, fc1_output)
        self.fc2 = nn.Linear(fc1_output, fc2_output)
        self.fc3 = nn.Linear(fc2_output, fc3_output)
        self.fc4 = nn.Linear(fc3_output, fc4_output)
        self.fc5 = nn.Linear(fc4_output, fc3_output)
        self.fc6 = nn.Linear(fc3_output, fc3_output)
        self.fc7 = nn.Linear(fc3_output, output_shape)

    def forward(self, X, activation=False):
        # `activation` toggles the non-linearity so runs with and without
        # it can be compared in the sweep.
        preds = self.fc1(X)
        if activation:
            preds = self.activation(preds)
        preds = self.fc2(preds)
        if activation:
            preds = self.activation(preds)
        preds = self.fc3(preds)
        if activation:
            preds = self.activation(preds)
        preds = self.fc4(preds)
        if activation:
            preds = self.activation(preds)
        preds = self.fc5(preds)
        if activation:
            preds = self.activation(preds)
        for _ in range(self.num_of_layers):
            preds = self.fc6(preds)
            if activation:
                preds = self.activation(preds)
        preds = self.fc7(preds)
        preds = F.sigmoid(preds)
        return preds


device = torch.device('cuda')

# Sweep candidates (to be explored one at a time):
# preds = torch.round(preds)
# num_of_layers
# activation
# input_shape
# fc1_output
# fc2_output
# fc3_output
# fc4_output
# output_shape
# optimizer
# criterion
# lr

activations = [nn.ELU(), nn.LeakyReLU(), nn.PReLU(), nn.ReLU(), nn.ReLU6(),
               nn.RReLU(), nn.SELU(), nn.CELU(), nn.GELU(), nn.SiLU(), nn.Tanh()]

for activation in activations:
    model = Test_Model(num_of_layers=1, activation=activation).to(device)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.25)
    criterion = nn.BCELoss()
    wandb.init(project=PROJECT_NAME, name=f'activation-{activation}')
    for _ in tqdm(range(212)):
        preds = model(X_train.float().to(device), True)
        preds = preds.view(len(preds),)
        preds.to(device)
        loss = criterion(preds, y_train)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        wandb.log({'loss': loss.item(),
                   'val_loss': get_loss(criterion, X_test, y_test, model),
                   'accuracy': get_accuracy(preds, y_train)})
    wandb.finish()
```
github_jupyter
# All Models Test

Load the right file and run all

```
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, BayesianRidge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.svm import SVR
import keras
import keras.backend as kb
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from sklearn.metrics import r2_score


def printScores(reg, X_test, y_test):
    """Print the R2 score and RMSE of a fitted regressor on the test split."""
    # type(reg).__name__ gives the same class name the original obtained by
    # instantiating a throwaway estimator and parsing its str() output.
    regName = type(reg).__name__
    print("\nR2 score : {} = {}".format(regName, reg.score(X_test, y_test)))
    print("RMSE : {} = {}".format(regName, sqrt(mean_squared_error(y_test, reg.predict(X_test)))))


def run_models(X, y, nn_input):
    """Fit the classical regressors plus a small NN on (X, y) and report scores.

    nn_input: number of input features for the neural network's first layer.
    Side effects: prints metrics, saves the NN to ./ipl_model_tf and shows a
    comparison plot of all model predictions.
    """
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
                                                        shuffle=True, random_state=42)

    lReg = LinearRegression()
    lReg.fit(X_train, y_train)
    printScores(lReg, X_test, y_test)

    # from sklearn.preprocessing import PolynomialFeatures
    # pReg = PolynomialFeatures(degree = 4)
    # X_poly = pReg.fit_transform(X_train)
    # lin_reg = LinearRegression()
    # lin_reg.fit(X_poly, y_train)

    brReg = BayesianRidge()
    brReg.fit(X_train, y_train)
    printScores(brReg, X_test, y_test)

    dtReg = DecisionTreeRegressor()
    dtReg.fit(X_train, y_train)
    printScores(dtReg, X_test, y_test)

    rfReg = RandomForestRegressor(n_estimators = 10, random_state = 0)
    rfReg.fit(X_train, y_train)
    printScores(rfReg, X_test, y_test)

    svReg = SVR(kernel = 'rbf', gamma='scale')
    svReg.fit(X_train, y_train)
    printScores(svReg, X_test, y_test)

    # NN
    model = keras.Sequential([
        keras.layers.Dense(134, activation=tf.nn.relu, input_shape=[nn_input]),
        keras.layers.Dense(134, activation=tf.nn.relu),
        keras.layers.Dense(134, activation=tf.nn.relu),
        keras.layers.Dense(1)
    ])
    optimizer = tf.keras.optimizers.RMSprop(0.0099)
    # model = tf.keras.models.load_model('./ipl_model_tf')
    model.compile(loss="mean_squared_error", optimizer=optimizer)
    model.fit(X_train, y_train, epochs=500, validation_split=0.2, shuffle=True, verbose=0)
    model.save('./ipl_model_tf', save_format='tf')
    regName = 'Neural Network'
    # BUG FIX: r2_score is not symmetric; the ground truth must come first.
    # The original called r2_score(model.predict(X_test), y_test).
    print("\nR2 score : {} = {}".format(regName, r2_score(y_test, model.predict(X_test))))
    print("RMSE : {} = {}".format(regName, sqrt(mean_squared_error(y_test, model.predict(X_test)))))

    # Visualising the Decision Tree Regression Results
    plt.figure(figsize=(15,10))
    X_grid = np.arange(0, len(X))  #.reshape(-1, 1)
    plt.scatter(X_grid, y, color = 'black')
    plt.plot(X_grid, lReg.predict(X), color = 'red')
    # plt.plot(X_grid, dtReg.predict(X), color = 'green')
    plt.plot(X_grid, rfReg.predict(X), color = 'blue')
    plt.plot(X_grid, brReg.predict(X), color = 'pink')
    plt.plot(X_grid, svReg.predict(X), color = 'yellow')
    # BUG FIX: matplotlib's keyword is `linewidth` (or `lw`); `lineWidth`
    # raises an AttributeError when the artist properties are applied.
    plt.plot(X_grid, model.predict(X), color = 'cyan', marker='x', linewidth=0)
    plt.title('All Models')
    plt.xlabel('index number')
    plt.ylabel('Revenue')
    plt.show()


def do_traning(data=None, filename=None):
    """Load the dataset (from `data` or `filename`) and run all models on it.

    Exactly one of `data` (a DataFrame with a 'target' column) or `filename`
    (a CSV path) should be provided; `data` wins when both are given.
    """
    if data is None and filename is None:
        raise Exception("At least provide data or filename")
    if data is None:
        # BUG FIX: honour the `filename` argument; the original always read
        # the hard-coded './dt/feature_engg_1.csv' and ignored `filename`.
        data = pd.read_csv(filename, header=0)
    df = data.copy()
    # df.sample(5)
    print("Data has shape ", df.shape)
    y = df['target']
    print('y_shape is ', y.shape)
    X = df.drop('target', axis=1)
    print('x_shape is ', X.shape)
    # NOTE: the original also split here, but run_models() performs its own
    # identical split (random_state=42), so the duplicate was dead code.
    run_models(X, y, X.shape[1])


# Correctly spelled alias; the original misspelled name is kept for callers.
do_training = do_traning

# saving the model in tensorflow format
# model.save('./MyModel_tf',save_format='tf')
# loading the saved model
# loaded_model = tf.keras.models.load_model('./MyModel_tf')
# retraining the model
# loaded_model.fit(X_train,y_train,epochs=500)
```
github_jupyter
```
import pickle
import pandas as pd
import numpy as np
import os, sys, gc
from plotnine import *
import plotnine
from tqdm import tqdm_notebook
import seaborn as sns
import warnings
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib as mpl
from matplotlib import rc
import re
from matplotlib.ticker import PercentFormatter
import datetime
from math import log  # for computing IDF

path = 'C:/Users/User/Documents/T아카데미/T 아카데미/input/'

# pd.read_json with lines=True reads the file as one JSON object per line.
magazine = pd.read_json(path + 'magazine.json', lines=True)
metadata = pd.read_json(path + 'metadata.json', lines=True)
users = pd.read_json(path + 'users.json', lines=True)

%%time
import itertools
from itertools import chain
import glob
import os

input_read_path = path + 'read/read/'
# os.listdir returns every file in the directory.
file_list = os.listdir(input_read_path)
exclude_file_lst = ['read.tar', '.2019010120_2019010121.un~']

read_df_list = []
for file in tqdm_notebook(file_list):
    # Skip non-data files.
    if file in exclude_file_lst:
        continue
    else:
        file_path = input_read_path + file
        df_temp = pd.read_csv(file_path, header=None, names=['raw'])
        # The file name encodes the read window (from, to).
        df_temp['from'] = file.split('_')[0]
        df_temp['to'] = file.split('_')[1]
        read_df_list.append(df_temp)

read_df = pd.concat(read_df_list)

# Reshape the raw reads so each row is a single user - article pair.
read_df['user_id'] = read_df['raw'].apply(lambda x: x.split(' ')[0])
read_df['article_id'] = read_df['raw'].apply(lambda x: x.split(' ')[1:])

def chainer(s):
    """Flatten a sequence of lists into one list."""
    return list(itertools.chain.from_iterable(s))

read_cnt_by_user = read_df['article_id'].map(len)
read_rowwise = pd.DataFrame({'from': np.repeat(read_df['from'], read_cnt_by_user),
                             'to': np.repeat(read_df['to'], read_cnt_by_user),
                             'user_id': np.repeat(read_df['user_id'], read_cnt_by_user),
                             'article_id': chainer(read_df['article_id'])})
read_rowwise.reset_index(drop=True, inplace=True)

from datetime import datetime

metadata['reg_datetime'] = metadata['reg_ts'].apply(lambda x: datetime.fromtimestamp(x/1000.0))
# Sentinel date for the minimum (invalid) registration timestamp.
metadata.loc[metadata['reg_datetime'] == metadata['reg_datetime'].min(), 'reg_datetime'] = datetime(2090, 12, 31)
metadata['reg_dt'] = metadata['reg_datetime'].dt.date
metadata['type'] = metadata['magazine_id'].apply(lambda x: '개인' if x == 0.0 else '매거진')
metadata['reg_dt'] = pd.to_datetime(metadata['reg_dt'])

read_rowwise = read_rowwise.merge(metadata[['id', 'reg_dt']], how='left', left_on='article_id', right_on='id')
read_rowwise = read_rowwise[read_rowwise['article_id'] != '']

# Store the list of articles each user has read.
read_total = pd.DataFrame(read_rowwise.groupby(['user_id'])['article_id'].unique()).reset_index()
read_total.columns = ['user_id', 'article_list']
```

## 콘텐츠 기반의 추천시스템

- Model의 단어를 이용한 방식
- TF-IDF 형식
    - index : 문서의 아이디
    - column : 단어

하지만, 문서가 총 64만개로 너무 많고 data.0의 파일을 읽어보면 단어 또한 너무 많아서 사용하기가 어려운 상황

### 해결방식

위와 같은 문제를 해결하기 위해서 해당 대회의 1등팀인 NAFMA팀은 글의 키워드를 활용해서 Embedding을 구성

- 참고자료 : https://github.com/JungoKim/brunch_nafma

```
from sklearn.feature_extraction.text import TfidfVectorizer

metadata = metadata[metadata['keyword_list'].notnull()].reset_index()
metadata = metadata[metadata['reg_dt'] >= '2019-01-01']

article2idx = {}
for i, l in enumerate(metadata['id'].unique()):
    article2idx[l] = i

idx2article = {i: item for item, i in article2idx.items()}
articleidx = metadata['articleidx'] = metadata['id'].apply(lambda x: article2idx[x]).values

import scipy

docs = metadata['keyword_list'].apply(lambda x: ' '.join(x)).values
tfidv = TfidfVectorizer(use_idf=True, smooth_idf=False, norm=None).fit(docs)
tfidv_df = scipy.sparse.csr_matrix(tfidv.transform(docs))
tfidv_df = tfidv_df.astype(np.float32)
print(tfidv_df.shape)
```

데이터가 Sparse 형태인 것을 확인할 수 있음

```
from sklearn.metrics.pairwise import cosine_similarity

# NOTE: this materializes the full similarity matrix and can exhaust memory.
cos_sim = cosine_similarity(tfidv_df, tfidv_df)

valid = pd.read_csv(path + '/predict/predict/dev.users', header=None)

%%time
popular_rec_model = read_rowwise['article_id'].value_counts().index[0:100]

top_n = 100
with open('./recommend.txt', 'w') as f:
    for user in tqdm_notebook(valid[0].values):
        seen = chainer(read_total[read_total['user_id'] == user]['article_list'])
        # BUG FIX: accumulate similarities over *all* seen articles; the
        # transcribed version re-zeroed cos_sim_sum inside the loop, which
        # would keep only the last article's similarities despite the `+=`.
        cos_sim_sum = np.zeros(len(cos_sim))
        for seen_id in seen:
            # Articles read before 2019 (or missing from metadata) have no
            # similarity row; skip them.
            try:
                cos_sim_sum += cos_sim[article2idx[seen_id]]
            except KeyError:
                pass
        recs = []
        for rec in cos_sim_sum.argsort()[-(top_n+100):][::-1]:
            if (idx2article[rec] not in seen) & (len(recs) < 100):
                recs.append(idx2article[rec])
        f.write('%s %s\n' % (user, ' '.join(recs[0:100])))
```

![](https://github.com/choco9966/T-academy-Recommendation/blob/master/figure/Contents_Based_Score.PNG?raw=true)
github_jupyter
# Python Functions

```
import numpy as np
```

## Custom functions

### Anatomy

name, arguments, docstring, body, return statement

```
def func_name(arg1, arg2):
    """Docstring starts with a short description.

    May have more information here.
    arg1 = something
    arg2 = something

    Returns something

    Example usage:

    func_name(1, 2)
    """
    result = arg1 + arg2
    return result

help(func_name)
```

### Function arguments

place, keyword, keyword-only, defaults, mutable and immutable arguments

```
def f(a, b, c, *args, **kwargs):
    return a, b, c, args, kwargs

f(1, 2, 3, 4, 5, 6, x=7, y=8, z=9)

def g(a, b, c, *, x, y, z):
    return a, b, c, x, y, z

try:
    g(1,2,3,4,5,6)
except TypeError as e:
    print(e)

g(1,2,3,x=4,y=5,z=6)

def h(a=1, b=2, c=3):
    return a, b, c

h()
h(b=9)
h(7,8,9)
```

### Default mutable argument

binding is fixed at function definition, the default=None idiom

```
def f(a, x=[]):
    x.append(a)
    return x

f(1)
f(2)

def f(a, x=None):
    if x is None:
        x = []
    x.append(a)
    return x

f(1)
f(2)
```

## Pure functions

deterministic, no side effects

```
def f1(x):
    """Pure."""
    return x**2

def f2(x):
    """Pure if we ignore local state change.

    The x in the function behaves like a copy.
    """
    x = x**2
    return x

def f3(x):
    """Impure if x is mutable.

    Augmented assignment is an in-place operation for mutable structures."""
    x **= 2
    return x

a = 2
b = np.array([1,2,3])

f1(a), a
f1(b), b
f2(a), a
f2(b), b
f3(a), a
f3(b), b

def f4():
    """Stochastic functions are technically impure since a global seed is
    changed between function calls."""
    import random
    return random.randint(0,10)

f4(), f4(), f4()
```

## Recursive functions

Euclidean GCD algorithm

```
gcd(a, 0) = a
gcd(a, b) = gcd(b, a mod b)
```

```
def factorial(n):
    """Simple recursive function."""
    if n == 0:
        return 1
    else:
        return n * factorial(n-1)

factorial(4)

def factorial1(n):
    """Non-recursive version."""
    s = 1
    for i in range(1, n+1):
        s *= i
    return s

factorial1(4)

def gcd(a, b):
    if b == 0:
        return a
    else:
        return gcd(b, a % b)

gcd(16, 24)
```

## Generators

yield and laziness, infinite streams

```
def count(n=0):
    while True:
        yield n
        n += 1

for i in count(10):
    print(i)
    if i >= 15:
        break

from itertools import islice

list(islice(count(), 10, 15))

def updown(n):
    yield from range(n)
    yield from range(n, 0, -1)

updown(5)
list(updown(5))
```

## First class functions

functions as arguments, functions as return values

```
def double(x):
    return x*2

def twice(x, func):
    return func(func(x))

twice(3, double)
```

Example from standard library

```
xs = 'banana apple guava'.split()
xs
sorted(xs)
sorted(xs, key=lambda s: s.count('a'))

def f(n):
    def g():
        print("hello")
    def h():
        print("goodbye")
    if n == 0:
        return g
    else:
        return h

g = f(0)
g()
h = f(1)
h()
```

## Function dispatch

Poor man's switch statement

```
def add(x, y):
    return x + y

def mul(x, y):
    return x * y

ops = {
    'a': add,
    'm': mul
}

items = zip('aammaammam', range(10), range(10))

for item in items:
    key, x, y = item
    op = ops[key]
    print(key, x, y, op(x, y))
```

## Closure

Capture of argument in enclosing scope

```
def f(x):
    def g(y):
        return x + y
    return g

f1 = f(0)
f2 = f(10)

f1(5), f2(5)
```

## Decorators

A timing decorator

```
def timer(f):
    import time
    def g(*args, **kwargs):
        tic = time.time()
        res = f(*args, **kwargs)
        toc = time.time()
        return res, toc-tic
    return g

def f(n):
    s = 0
    for i in range(n):
        s += i
    return s

timed_f = timer(f)
timed_f(100000)
```

Decorator syntax

```
@timer
def g(n):
    s = 0
    for i in range(n):
        s += i
    return s

g(100000)
```

## Anonymous functions

Short, one-use lambdas

```
f = lambda x: x**2
f(3)

g = lambda x, y: x+y
g(3,4)
```

## Map, filter and reduce

Functional building blocks

```
xs = range(10)

list(map(lambda x: x**2, xs))
list(filter(lambda x: x%2 == 0, xs))

from functools import reduce

reduce(lambda x, y: x+y, xs)
reduce(lambda x, y: x+y, xs, 100)
```

## Functional modules in the standard library

itertools, functools and operator

```
import operator as op

reduce(op.add, range(10))

import itertools as it

list(it.islice(it.cycle([1,2,3]), 1, 10))
list(it.permutations('abc', 2))
list(it.combinations('abc', 2))

from functools import partial, lru_cache

def f(a, b, c):
    return a + b + c

g = partial(f, b = 2, c=3)
g(1)

def fib(n, trace=False):
    if trace:
        print("fib(%d)" % n, end=',')
    if n <= 2:
        return 1
    else:
        return fib(n-1, trace) + fib(n-2, trace)

fib(10, True)

%timeit -r1 -n100 fib(20)

@lru_cache(3)
def fib1(n, trace=False):
    if trace:
        print("fib(%d)" % n, end=',')
    if n <= 2:
        return 1
    else:
        return fib1(n-1, trace) + fib1(n-2, trace)

fib1(10, True)

%timeit -r1 -n100 fib1(20)
```

## Using `toolz`

functional power tools

```
import toolz as tz
import toolz.curried as c
```

Find the 5 most common sequences of length 3 in the dna variable.

```
dna = np.random.choice(list('ACTG'), (10,80), p=[.1,.2,.3,.4])
dna

tz.pipe(
    dna,
    c.map(lambda s: ''.join(s)),
    list
)

res = tz.pipe(
    dna,
    c.map(lambda s: ''.join(s)),
    lambda s: ''.join(s),
    c.sliding_window(3),
    c.map(lambda s: ''.join(s)),
    tz.frequencies
)

[(k,v) for i, (k, v) in enumerate(sorted(res.items(), key=lambda x: -x[1])) if i < 5]
```

## Function annotations and type hints

Function annotations and type hints are optional and meant for 3rd party libraries (e.g. a static type checker or JIT compiler). They are NOT enforced at runtime.

Notice the type annotation, default value and return type.

```
def f(a: str = "hello") -> bool:
    return a.islower()

f()
f("hello")
f("Hello")
```

Function annotations can be accessed through a special attribute.

```
f.__annotations__
```

Type and function annotations are NOT enforced. In fact, the Python interpreter essentially ignores them.

```
def f(x: int) -> int:
    return x + x

f("hello")
```

For more types, import from the `typing` module

```
from typing import Sequence, TypeVar
from functools import reduce
import operator as op

T = TypeVar('T')

def f(xs: Sequence[T]) -> T:
    return reduce(op.add, xs)

f([1,2,3])
f({1., 2., 3.})
f(('a', 'b', 'c'))
```
github_jupyter
<a href="https://colab.research.google.com/github/dribnet/clipit/blob/master/demos/PixelDrawer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Pixray PixelArt demo Using pixray to draw pixel art. ![beruit.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAfQAAAEYCAIAAAAlMpMkAAEAAElEQVR4nOz9d5wkyX0fiH7TZ5a37d302J11sxaLBbC7MARAEI6eFD1PeiL1dCRF8Z7uZO7E0zudeIaiTk86iQSNRO9EB9DC7AKL9WZmzfhpb6q6y1d6+/6o7K7K6CIy3mxuc/Huvp/5xEbHRmZlRkb84ufi92P+0dMbiOK9aZZoaSLbYjKjLSm7kXIaRDe9c45o8U5qRItimoppjbZokqxLMtGtorWJlj1VtlxutCWfN/MFk+y2Pkm05IsS0aKi5cIebZE0yGpAdAum5ogWJ7hFtICdCZjIwwf9ftDvj7bwEtITDHGd2Z8hWjo75HhmJpj0ZORCS/P3Nlyi27mzFaLllRr5nBOpXDWVHW2xba/VJL9OaaZMtFgNzbcjv+gEvhv4RLepRfLCWo18Hc4C60VeRy6nlHJqtMU1XHWnS1xoSA7RMrewQLSYjY7V7Iy2MCWFKStEN+0G+cqV+Qmipd/v9tXIrVKZjJLOEN12V1WiBVNAdCIXDb5ocESv6yx54WQuRbTUaoEZndqlsl+sRIbd1aCukx/CuatAtEz0IEenTMfud+zeaIssyhOFaeLC3dYm0bKkkMPe8jpNrzPaklOUrEIO++4eubiqVU2OPlazm2l1I1M0xVkL6SZx4ev7BaLlxBSbjg57f99VG+QykU+Sy0TZWiNaGjNVoiUX8FmfH20xWGOT2ya6zVuniRY2vQXeGG0xzbJpHnkGm5yQQpX8pnotT7QEGTXIRC4MPNGxiuQz4Ai6PvkljsLhyOk4FsyRmX0Uokt+hrEgKPtYOKZIcysaBCK5JYzvxpDb0tsKS4//NACyIrm6bhsEZR8LURaS+rl3Jhzbju+UKEySaRkDPv32P0cUpm/Fd6IDQdnHQveo1uAxQ2eM+E504D2qecWKVBRyzIVHm/IsyWb+/xkItn086Ig7E8SvQj65KZqrxu9w71gQbPtYuAbJpI+FZcUPe9BKbBEmSNx1xqPpJlPwDC7J8/01t7pNynCbsByqL3jM4ItUzKhgJ/fwfPz085m3dzmPIe40oOTcAyV+Kts8H9sHgMRTrYrEYCfGpJi9+D4AeCme/7U0UmR7u8GK8V8nQc5dLlHJHJZJ8XUUqqdyzPj1LIh0EmFyUhwVcacT42hg2hSSAmB6VN2obmXGz6sUR7UG08kNu2DF7+KpgE4sdhOTnmnAcGOefAxx33DiJ03KJjWq438yObVMguCRnPYmObUML1IQ9+TWc4KwKegjJVyDajJI8rEK7LSce2KkD51OfB8+lZiEnU+T6tq3GzRqmRRPNexacsPuSPGUQfGPlWq/FYxTy1DICrpIWgbGw4iXDKg593im1U5O504JGrWMnKO6lUshEkp06zmTnM6dBgkSd0pQce50EI7XYGAgObVMcju95SSmwpKE4zbA0HDublunuRUN526wiY2Vzyamlgm8MaSP+dylL78vt75uFQ6brm9If3VjEkA5H5Jd1mAn2jpb7/iTYTfB1yt/58Tojby+s/7iTl9/DoAkzAGwnK37Hv4J4vd6ejPQeywTEn0/8NPZjJRWAJgBzIApsIEZYOfpqwDMriHnFTkvm10zVS0q81OKxBqWPyidltuvWWLWE7K+tiOIWU8K9pl+aF73xBwAzu7x8sjH0HaD6n19/n5X6
EPLAYBowZbErCVOI8gM+UHGhvP8ZV/tsZncoGQzuSBlAoAHcAAPuHA6LnjF3lkHwGXzXr+bOXs2M5V3VEvISI5qpaaygWPvvtK2VCY3FfRqjJQJLJWZq64OfohzWgBs5bQn5/iJKQBaXRfTAoAea+p2OCl110/xbAU+V9etgAcgMe6gYk20daebEvKDMi3kK3bRD8KPXeBSAK7p67t+C0DP6s/npze7uwAemnmXKEv9bh+AJIkA9rYuW1oPQG5iztJ6UjonpRSBT3EpDoCne1JF9Hd9U3MkLpwbIivs9+uu05SzObPfk7PhVsaV8gBEOZy+tukpEESZt013UAJY761xKalrtfNSsWu1AczzJzK1tMv6HutxPjco5UcKlmbamiWmJQBSWtZe33R2e06zJ5Rzg1L1d82CBkBkMnagDipz9z/OHbAOnutyPC9edizfASCxIQ26qWytB5vz4kLP6wLoet0lbnGRWwRguYbpmACYhtnd3OULKbej84VQIdn5sMT02CDnD0oAd26V84bY44bke8NQr7dDv6+qKANoB+Z750KHrqzA9R0PwMa+bemupXm5qtTbt6Q0x/vsnBBumV2fB8AV7Gaek1nW9H3L9ySWKwUZXhUBHI4nAF69oWi99sSsrIUuW+vdPoBSfqbV3QGgSNnZidlyPgugb4V7pNd3Lz6zdvjY6VIGQOmJUKnPK7ZriAC8ixmoltnrFuYWAHS2NgpnFwpnQxeaVq+tSDKAmzde1bQugHQ6D0DTujN3PyJUAk9nuFTg6QwAfksT9kwAfIEB4HYCHbpeLPBBDYCPDAuVU07PFhgAQaoAgHHMQJCvrL8MIHCKDGeANeHLU3sCALEg252Q08qUjOKJwNQkOW0Nyo3ruZ4e2KomZkIztJhNT5z1ofsAkGKDfZdJsyqT6WkZhvGCgGMYDwCnd829tXS6UN9fBZBJFQHcOfOgyukap6e9VMZLqZxuS2zARbbtlZcaDGOpTQZAphwAUFJNPl02G9cB8KkygEy2sDg/7zgMAEEINIMReTT26m5X4/NptxuOf12RRBRaaq2UmTJsFUDWaS+8eQWAcfdJvt4W9lr6+TvuuicHoOun8qy+6ZZzrMG/L7cOYFHqHD7WdYSTr9kNZ8xEW2fbOgC2HnZLfWiCy0W2aC4n9J96blC3nC38NZDAmYA/4kvnByEDIjOQmWBQMbshRTa7xqCeqhYViQVwWA7mvt3n7D43qHjIphESd+7Q2UvbHX0AZv9VIcW4yv3h37YEIMhIQdTVzd3Y8tUegMPSV3vc4oG84mHAgdl7e4eXeP0uADYwhExVyIgABiUjiJbKAOjVGACD+oCmH0I0briVRwYDkZ48sGfopu4OB0p3/YbjLcCVmPC7DCrrTheAflDqTrfCFwtHjCI9K1ztA8oOQJQlANn80P9sQNkB9Pa2Bn9Onz8n5cJ9gk9xAHiFy9oRYSsrKLVWz+r3AAxKAAvLEa8yUeZYm4UPUeaBsBQcfq17C0DXDEeji9YjeIz3Wd5nAQxKAFJalkY4NP2NcII5zV5YHugVBpQdQHZ2lhsRCg/rh2Q9fFOvCxabNukQDEDiFYlXAFj5PjbhdnQgLAf/f0DTB+Uhct6QHbNMp+GEFGdQuVAtZoVhh0G9tx8S2UHF0rx78kMWMsd5AHbAWL5n+SEFsXyvExgzcviVB+MJQKr3ABT3hr56KYbRAwwoOwDD6htWD8gCyEohN9PpR5QkWksFMKEMBTJesQFYzr7bCwB0tsYMVylXBNDrNQeUHcBhRagEALjUsATgdnwAbufg+gIGlB0ACxWAZ9wIZh4+vH8gDCcAIxy4SrOm3fUA2N0RGboEAHLaOixLk/3G6wwAWw0ppq1qE2crSIUTjKnyAIyO4vs8MBhMHoDe71l6W9XDnzusZLxUxksd1pviGMPagLIfVlwDinE9fHe9CUAuZAEIQjggaSUAYG7sATik7ABc21U5E0BLDcenuB+uF+X10Ol5lu0DO
QB5Vgcwzzdx2wZVSqj9v5bKH4JGHQFAKWVj+/CgEr5owJ0indxvG2YnMa2FLiQmx+XkQlK3ShA5r0DTTZigU3W981DTqIT6vpuYPj1BMw1fiX+qXI488XDbkJT4JU+JTD6xUZgtLdN001rxGh6136G5lezdphvF20vcM9nESKTR6sf2cUHlw+Pw5HmNowhadD4uFJALiWkhUw6VxjbHFpL6RbOX2DjQIOeR5zXGwtmLfyqjR2Xzp4HVPXJe6XZR05Mz/9EhObMr3Max2vMtI37JA3BT8U+1u5HYKPQM8nzlWKRL8fa/TLZAcyuTu00qfZuXWSt0TrbJgYZz90B1rkNwd+M7HTsYnTyMdxQpJzHPorxCRUblXDyPbFpUG0DAxi/CHkceT71tKDk6mz8FcvPx3ACAXirezDuVonKvyvLHSkYLlcQORPV68dMYANOkcB+g49wdJTH/YMt+Jx6booExjt0ZQ9xXWol9aRq1TIInRDgktuUwJSrBn5Pj9+fOKtVTBanE5Nme34nt0zUSI6PvTFBy7m02fmeyelQsJA0S5NwNJrGF02kcN68GGuJO6WdGgSwVJ0OFnHLsbqMUahlzHHHnf/4vq8uTQ45jpS55tuu3HAC+7rMpFkBqSq7enU4vprX1cBJs71qv/l5tck4GUN8yB5V3v+fbms2QmpfLc83mVppJN597SSwX7WYbgFgu7u8ZgTIUkfSuk8oL2sEe0Gv2cuVcr9mb/MDd1l6E+vS6Rrtp7t7Ymz4dBgPxddOLHtN2PHXmffc57c5oY7tNRgWx0+c4rguAlUzfklnJhOpZz26x5dJhH0bX+TzDyvBHfmHpzKzWj+hMdzp6T3N5ZoTPshnBjMiAAWc5hfCRPNnkTBlAqfiIbYc8TiZzxrabda29q68BkKVw9nCaP6WHepisafVlKeV4TpWMhSIzJLtho9NAB4CBroI8AM7VH2xXdpShTSKdLemMC8CBLxzs8b48AwB8Fm4ffBZAs6mk+pFBluCLvO+xFgDOlwAwaUjC4uD/8umCq3XkiaVbN/fSGQ6ApnqDCuv6RtcGkM4ImuqkM4LE5k4rZwYXlvhyy21aas02roRj5exzQhVA33vXoMUIMJg7Imf5QWh+YBnPD7jcwmkjbwBI5St6NyTr5hXS7ldTQ0tjig8HeUrLWCKn2JIhWoOyImSkYoT09BWvWQgXmCEHiskAOGMXBi0FV+7wZsGVb+23fSNCJbnAPJ1yAZQFv+mwAFgho3kCx/hewHJMeE9R2QTge0OlojWxRDy5wHrFgFTKtepDw6mh9ZV0Nji/LPeGz2Dm0qmWLjsMgFQmq6t9AF46v8PyAHYYPhv4Wfj9hp4+MN2LfmAPzqj35gAE3TkmHy7PVN6XT+TNVlcu5c1WF0CQzrR7FgDD8hSJMywv6GtzzDwAHcOVEtwymAwHIFC9QYWfTAsH/p5CXnG6BiervBwx1HOi31sNX9Dq9KVCFkAlmFaCrMEMt1tplkmfzAKwWuGLC4vpjq127CmZU2VOBaA7XnbGE7OyPTKZzVs9AF7b5Iqy1zYB8AoLgNVUP50ZlClFFE+dzJdK3VZow1Rbxl47rGuGkVYUAH+08eYdk1ORbxO4M9XowqxWUu9/wHhjGPepB9x67auVfAVA42DSlpTTADw+w7mhMnDytLdYVTotu1ASOy0bgJ1nVvJDstYulfyuvXeN3C/5lbq0Uo88hKexnh6K/57uA0jdnZl4rAAgvRgy9RPA059Zr2+FIzWofPTvPFSuzB/ep1yZv/a5F+1my26GY2E3W7andP2IZKr3DewPp2O32QOwWM3J1ci+/fpvvjKo7N4IHVRkwVCECKlNVYpCsSgUI1vr1hopU8tSmwt9bcBKFgBvf99rtrzm0INFLCp8QQHAjoxNJqNkMhEX8o3L214ALxhS88BhRCOq4ONNuzDcqDzZAiCK50RxyKqLYtl3uobdATAoAUxo0oQ6/PmMZTnZbK96JPZQj2xpupcQh
DfR0QEgeWkgNWMMyYfXcfTJ8Cs7CBd2IB+EM+NDidhsa6YXoSmZvJ3Nh6M3IPFMVpG5YRw0Pl0AoKqeqoYXDiqs53GeC0DTnEE5WyrMlYexqMpCGcqZje1fHj6kuw+g7b/rsMUMAKDK+CwzZGdYxmcZvzIfxq1L5SoAnP1u7/nXiZHRJ0MGR3dDzWkquCNlA4BsS4OSL/kSF1knEuccEncAuhwAWBqJ01TwFAAwe2taRDtfkqwzKffg7QZ36LjBkhtwANyD/YnhDAAcN5zM3fSSnIlMY0E3i1pE7dOu11v1vdEWQ+3LF+4ychFuRpJyZSvcvAfhz9qMuB86hKDPsH2wXDWfcXcOLxH9AEDQDQ1mh5X8qQCAVModlh3N7vZDAcK0PQCSLSlIAVBGDGCW6gUHk2FQyZ0dfncAfCHFg3eCyMJxelZvbajgtjp9ADPzDwFIBUONDSeBKwGAVApfXDWh2hkAppcxvQwANuVmMxoAKTtcUNYzoXrWbYUDywv1QYXVtEFZPDNZPr0AIH/A+akt/eYLw5BquqEDgIAr9drow3+wMjkRPcFjLhVMQLnr5GGLWttvvtRsdiOKrMzEewcVTwiJbXbCSBf9QkkEMCiv5jMrheheUsbzT+0gijFqGd+Jd8nY3qWSLrPTxyrCpKul+E6JojQZL+/lBCrpsuPFG2qcbGL+A1wvsbMYxw+nUkjqVgGFh1VFTkxRyaWm4jvRQc6QIun4bhQhfTgKAzUAl8Lty6U4aP11jUyJymtjgk/s7Lravk0fubHEPd7KSkncqcBRGQnTE/F0TdtvxfYBcMi2fw14dOfgadBzEnM4Se2Qm/Ntw8vRnWLl+NguTEC1T7A+hepQpbJ125VjZRoaJpVKuq0mF3pFpXLJSAreO9K1VMjFG7QAMBTE1g+ojsEzR2JZH4XaovK33nPjJ0NmiowwPBaTy/H0ylkbYxYaQ8dZIf4NZ6ePNdQtAG0v3qh1/Jw7DSg5dxrQcu4HOpmvgQQ594A57mgbYiMx2udTcO6UxL2YiV8UvkPlVUnoZMbCVBNz0HxnEnend9yRlmlAybnvefGuU7sXL7/lxwnBFcb4+dymK2SSnHtyoOTcPf/rVXIU+om5bdDCi5dgKKOn0bBFCcLdT8wdKEG1TIKgVMuYXLxXJaVahgYeRXDTZMFSsBYsQyWIJ8i5T3DxXpVZOs6dBl5nzF7CzxQMANN5Y7er7HQUAKdPmlv76UyFVRvhq6bLTKceUvPCpLz6WldBUIZvgAGgIDDAKAheelKdWRJ31uxBCaArM0wqldP1rXIZwFyz2Uspd981U9/qAdjb6k/MZScXJy+/sB+wBuMrAWsAYHzF7e0B8Hp1LjfJ5ybc3p5QyQrVkG/Vr+yk7pjhO3uB4TK2E2RTsBzGdiAJOxs6gGxe6HedQUVJsYbuD8pyhdd1n9dZvQb5BNw2+CLMVTAa/Oosq/UZvRekcgA0Ma9A07207qdTrKb7aQBvXhuugYmKBGC/YWWnUv2aPnuh0qvpAPS23nT3B32UUspo6eWT1ZwlWLwnuRwAi/cArI6E3iywACAJ+TyXsZyeJOQspwdAEqR2tgxAtvummB1UAKS0VUcsABAGBtjdUKnqZwqs2vEzBc8teFFy5GSrYrPj59JsTxuU2lTV79gABiVbEP2O/bCys+2WZvnhHtnIZHfMnNW2paIIQCyKbEd3ey4Ar8tw+cDrMuKCaXoAYPmBxDKWHwB4b3F1wygAWFA6T7dPvLe4+rx6gslIgXowCzVLZxlNtzTdApBOSZpucb4dLN4BAN195KuDUnYAQLYBwBRhCrAnZoVGx0tnvXSW0/peOhuoJjTD0EwlLQMwNNOH6E0uAGDVLgA/kwcgFXakQhmA1WkCsDqt3PQUb4Sbk2XakizaAdc3vJ7uA8ilWADNrjXPZTe9/jyX7fkWgBwrNQ/IhXLAICl213RMlw3v5nCSIItMdQ5AoPUAMOkcAKOvKal0s1lPKRkASiqt5
IduCACM7ibLBwBssyXKJc81PNfQ+12920nLRQBppagZbc1ulxbmABjdntHtKfmcks/JLb9VYkutIZGSelrb0orTocXbVPvIlrn6apApMGonyBQAIOgwExIzKQEI6tagIvGQOACwPEgcLA8SD7U/FGKMvubB4jgRAOerHpsBoEhMaj5cp4est88HPM8BcN0DG3vt5qAiZkq22gIgVEp+4AHwXZ/lWQApJe3lXK3XT+eyWq8/OTcDoK/cEJ0SAMkt95UbALibOZ2rZ4uT2cIkgJ3V13xHhDzh9dsAuGwRAMt49lS46wxSX4gOwyihvBIYPa486+v9VJ7jRe1goBgA3U5PX3HzxRyAbruXL+a2V+o2TzLvj0xPPbdbe2R6aquvApjLZnxd8HmRdWxfEAGwjm3VXJyA8WwDgDCXcrZ0txIUq3cM7tDev6KkK3Kqml8Szc7QhUEucC4cYzN8quy80N90vBWx7ITPoGedVF8ol9UzIvO7NwIA3346JAjMpSc/QzzoRe90Oxppxdj3jejhtMAP3CM5emrb5MFc94OkPPvocvHRkxGRc3/LeOp3ybRVM2eeJVrM09/gZSPGKL++49UjKlo/kC2PTLP3wIdI//H9V3v7r0bYOnZWZGdIBd9+k2QJ7DYpGTz4hFiMptBrvqQ3XybfOn2GHKvN1J1Ey3KFFIZ0U9CNCB9UYleL7CrRrfcmaW/RJxaNyaXRFlGBEGWv7bWOvdYhLvzh+14lWq6Wz10t3zHaYr7asS5GLnSycvcsyYP8N5nfIFp+X/jAFhvx4+RrGl+LjJU4K0qz5IfI90irdVvX7agPjxfobhBhqQSuwHOkcqN4nmiAtGtxZoRfa7TtRpvUci6kSS2WPT9PtPRefZpoSZ9YSC+TqelWv/w80VI5fQ/RwrINMBFerLVfbzfqkZtnyidOvpe4UKqRCqtWb6/V24/cnAvYI+x86u4zRMsJZ5ZouXlzRVUj30vOmEqa5BknzjxEtBg1UoPUrV1zzIgYyhXyXKEQuTmXmjmS2O+S8adES3+l56iR9SUFGdEnhS3vYw8QLVPXSC1uMXtd4COPutPwthuR6eHxWUMh84l+01lS9FlrHcnuudJUVyOOMUxe4e4hz/DP3kXuHM1n91vPRr6gkCoL6YiP3GJB+94La8SFY9QyBSZelxdQGMcAMBR23v3N43bb0GrxOiWbwqpMCaFMNVZmcr/oDNixrwmuQKVLISj7+J/LxkugAAjK/lYg8BQ5F6O5Pf86+Epiw85n4l2njA7dad4gsaOShpXYASWCso+FkKIyCxGU/a2ATx+rIohzqZ5cprBcMnkqY5WxeZshs8bM7FU/MW+toBgfC6U6T/WGnFqP75QcHIqUrQCKRzJfv60wggJNNyGa33ksvA6V1aSi78d3osOcvxfbx96msqE5bvy8ooTQjtfGShzV9uyqiWn5GSbeHKepVAf9FSneYMCmqYyEmUxitodUgUwQfxSmR0XUXC2xwHwE2z4WthT/5EiUV1Pmqb7OUYx5giIF554gKDl3L0PqW95WCHRZ/dpH0rofha9TbQAGxWxQmA7NrWg4d0o0UvE2n9TOcUcyoOHcOTozrycfK+euFKj8Umg493SGKl4FoZN5K6Dh3B2dirE9Zs49OE1FkR2XykZNgwJFhtGgS0X6aDj3hcKYT/P2RoVkV+LH/fg5d303ni06fs5doZDjKDl3GlCqZZLk3IN4zp3LUg27RpExJ0FYHtUyoeHcKdUyDEXEG0qUcvHbs69R8chTU4kp1mhQpEv3liDnTgPOo9qWOhQZRinVMrcNdsucBdBzIwyFdmA+3bsaAOjyXJdjuxy7LgnrkgCgKwwPBWTyodQwP2cdloOKUu0DmNzpDFrSI4Ed7GZz8I94INkLAEjZSSkbYdXt6Xvtg1QhAEbrAMqiC6DCmbyv8r4KYFAehb7Xr96XR8ZBxgEwqMzlWwDm+MhnU/yO4ndGKxnPnXRMAIMy47m3ngp/pXUlX
B5uKuWmwgGxKpVBnS2mD8vDSl4MKdRhZQCmVRuUkhCOpCR+rd2IO2AGhxXRGfwb/ik4sCKbR0EyR8tB5RAeP+b8iH+zAYCZkzErY1YGcFiWtK+19UqTJQCv1Cridg3AYcnmJABsdvhbXt8DwDSMw/IQgbZ9WEk5kS1HYm2HlQ7dVAC4rOxw4Z9KVBJ1693R0ldYn2EG/wAMyoDvA/Dk7UEl4PucLA3+DW4iHGHA7Z09PpP3M6KfEQEMKpw+nn3zM+ngIOn2YWUmww7+AcA4zl0QBUEUUplwdtl2TwwYAOLIqf1ee/2wYhkdHOjc/XQegFtddKthFKBspnD4DwDHhDc5rKiaVq/vqZo2qKuaVqvtBUIQpP1ACAJhKLMKUl6QQqmlMHNHvztm63VHMs67tvXXce5qK+SdbWPMDNytr/FBHsCgHGDAuacLk6NlfnIZgJQpDv6h1c/UOqJqAhBVUzw4bma6mulqhxW3V2L1yO8qQmiTl/KpQelxoVEhJQ1fwWobh+XwXXRd1fXDOoBcMT34N6inYKcDOx2Ew3VYYaKRNZX5lDDPABDmmcG/ktICUFJah/8GSbUu90MuZFBh/uMfXCJGcGfF216NTMrWDN+aifDg0ynhE3OkrfmeaTJO0/N/3m/VIjrNiUl3Yipy895OsbdTIC6c+lHSHr1+cW390tpoy4WT/oWTEYJ1o8HeaJCstK+RikJ/nocS6faetPaeVIR52dWDP9sgWWmuQTpI9HPk/mwqFTOqypCLmCEN9SivkEcxm3/yx0QLe+YMcybiw6D2hJUrJFl513tJvlJjbS0aMjBoC2iTUtSD5gtES/UUuaKe+41Ocz3CGXmfPON+8izR7eH/+MtEy7t/hPyCn/251Z3rEeHRf8/93qORoWEbff4r14gLpQrpXqIsPcylI9NvKyhs+YXRlgK0e7FCXLj6MikIZ+85R1Dqpnqzqd4iut0z9zGiZfP5N4gW9UFSNzqhZSa0iIFRhX2DJX2u/vljJ4iWX/qrW6v1qF9KzpRykQ04I+SXs/cSF77x0u8TLdbUHeZUxEloUcRidC5YsPcZUuzYeYHcsL2cN0rTAZTzU5VcxETXV81rN0hpby5HRuicnDglyZGR2d0zd/YjTAwLVcJN4sLS8v1Ei1xUODFyADXQEES1FI6+6+jkituwyB30HidTiJ5lbc9J7bloN6MBg3wd4XmSDlxT/H607Xx15nw1oh3yPEM/8lS6cZVoad85becicvbiRntpMzKLNu3U77VJx5sx8iZB2cdiRqE6GUxQ9reCTpRVH4vTFTofHj2x0zScm1jc7dRZkmIeRSZHJYHadFmYaUBQ9rHI7VDFDCAo+1uBp8efVuvQBfenQUaiOvnM9RMb9qXJxJS/biaxkzIEZR+Lvhqv86TEINleMrcSqMazQBelgAZ9Kv0iFYRuvO/Dlj1GwzOGuOeKiSniS1Pxg5WboTpKXjjIzf010KQzXQblxAbe4+N/0aQ7Ku80EsscJCL+BbM+lVa3vJiYn9kD3xSvsfUrVI50XCqe2hbogvsfVbDcNjw6gwEN1urH6tRggYppYJxj9Q3zQUWRPTt+T6WM+tChO8hKgxk7sbFy8vHmsTlxjG12DB3vteMZ2x3juMM+0HDutDDiGZBpuuxkCXLuQiXefKT2qEitllwaBxrOvTdDlaho90Y8tWUbVNYqGs6dEg6dhZMG4k5ilj0azj3NF2hulSDnToNshspD37TiqS0l5y7S/SINqDh3gcoxsU8R9eHtxtvLuZenEuP7aDh3WijxhHs3wbzCdKDh3CnVMkIQz0L2WSqOlYZzp1TLTJ9OTE8iVk8ldatEOff4haMmt+8mCAmJrVNKtYwsJaZ38o4clT8KSrVMgpx7liLSMiUIhTs9bpNz39WpqEyzFt/N6lM9OmFNfUtIjnOn84anQoKcu8PEPxalWoYGCXLuDCXnrsVz7gU6vo+Gc09LiWVApMSXXot3/NXcDs2teDXemfX41
TI0nDulWoawpr5DkCDnLvbide7z4hhnVuaH/+2L211vu+cCmM3x2z33Bx7ED0a9OzauuRtXw8+/xYsAJNf7MhuRhkrZ1NxSE0BJ4VsHwdDzzez65pBm8Z6JSsadrOT9DoAuWwDg86ab3gWQaoZTRy8HuMwCKJRSnYMYbEJhj2VbbM4B4PcENucIu1NTZmHwf5vtcIf46CM1AJOpcNOq6/4f1wqs6/kjx16sXXgmw+cCt8fwuYDP+u1+YORdAGfSznVNOJN2AHz/OaGh2ZW0CODannZ2Iv0Lm3kARoYDoKierPqeYHqCxzkcAE/wAMiqwdkSAMFuA3DEouA5lZTB7uz5M6HSmd3Ze2Xq0XyGzaeZrhZ0VR/AyYcW2P0dAP6BSV31AtWFDNuEKMMGwDZb5iubgtMB4AgFwenoyuL3/HBkd1RN8ZWfv2zP5QBgq43tNt61XM7w5Q+dPuyjd5lUPjC/ckMIPAAOEw7O/Ut9AD4YFoEPBsBLz2pfeDFch33BA3D6wYVvKIR7TLeUzbf6O+A6pqHJ2bTZ1w5cIL75ZJibzVZDXvXiVf7y664zEuOmMJeRJzNctwPAyxcA+GLeEUpBIQuA6fQHldT0FoBgcuhp0PhCn1NNXQ9XdSrl+mnu9NQ+gJ6fyrE6AN2R1nplABnOVD05w5kA0qojFHJCPg/A6XaFfL73+gtOt8Xni263DYDPF4VMGZNFACm+rLtNAJ5pCQYPwNUsPh1O+xtTDUENnAwDIF0L7Azu8aZTKbnP2VlPBNDn7Gar3dI0AF1XzfMZAIzEnstVVd/LsMMJWZjPAfjS1pDnTdu3YJgAOMn1LB5ANTdTlBbb1npRWgRgep2WaWqu2jmgkgUp07HUit0LpPAJGcsKJKl04uGUAAC6g5SAfQMVdGv7b2bEvGp3AWTEvO7pLMerzS0AopKzjR4ATnoQgK8a7EH2sXI2SEnhysrKMoB6DcbVPAAxD7sLMQ8bDePEJSkz4iaoZgvNb/CDPstk/SBsb3mrlh/650wVFmqdjdnUdIYLHW/yMgNgz3eNGdvtaXwuDWBQ2ahdkpicFQy3ZKEu83VJLmXNVn9QKudzEBkbfRFZADb6Mp+u8BOcnALgmfqgUmo3AHRst3CwPWykqjovAUhpFgA9LRkNa+sgjWVadjWTzwc1pfay3Y8IKPfd+Q1iPqOu1zKL4SswTvNavVlOi5WDCbO5Kfr1CoDRWP1Pnhx6GE4LAYAlP3S+lNmu6ecBVF+OeIt5c9L16embTmmJ7QJY8/MA7ivsls61+q2Inwv/wubwKQck/sLX5MPmXBvA/hGTXauvDzxxWiNpLraj+UtdTgYn44CsD8C64WrXy+Re1xmJrsnKGisf5MY70E4c0vRDHJL10T/9cQca3R4zKN0eh1z4zNc1YVCeyALAgLIDODuRxgFZH8DIcEaGk3WLc0OyfogBWT+si57DdnoA2J3IKZ6u6ndH2BejbyrVMefoTIiHJctIA8oOYFBJGetAxM0mI9v2dgPbI0qe51fwwYhLZSof+F0DI2R9FCyCw/LSntGPvl2h1UchtGfmW+FaHdB0bcS57ZCmH6LeYJ1o9DJfN7luOPIDEu9X8wOCDmBYmRwzLIeUfVCfzIZiwYCyA9Cc8NupnnxYVheHfnsHJL4FYEDZBxUhU07xIas+qNhu19ZCauKO5LobUHYA2hQDIOXIAAaUfVBpAt2DTJiDSo5VAIxSdkTJejgavooD3omThgtqQNkByFwhK6rb2jC124DKH1L2w7rb20Z5FsCAxFcVqN0egAFlH1Rcy3DtcNwGlP0Q7EheSZYRsnJEdpRMaWDIG9zM7gJ5jFJ2AFKmjyZYJguEJQCJyVsIH6DWCQncgKYfIiezBjCg7BipjFJ2AKzGATBb/dGSnWIA2Bg+yYCgj1YGKIww/gPKDkBPj1QOiJBm8gA0k+P65PcS8xkAmZHZhcA+G80vJNnS/tf0rdh1GABLB1NjQNkBONmc0B++Mrdlc
a6FqZCsD7Dm50poZUuRRTdGLXORQoNapXO24zOJBQXzzXiN7VSRSqHpmfHS5Srd6WjOjVfSCRQh0QEo2Xj1FNOk8rxZ/hDp8XrbWD4SLPMoskjMtZTVO0ndKi1QTQYhf6wJXnIiXVAENjGVtKQklpoxJVG4A3WpjpVaQfxpXtmmMv6xamJqmZQbbzBwlcQM1B2FKr6TnblNs9DbG37g6xcn6FaEx8dPLIciU12yaK/E65H9DtW+u7ITTyL7dLNokuIUgp8q0NzK2kvM8EWDw4OpXxtd5p2YwcYy4vkUNsEpmqfy6JWY+Dg8x49Dzv14UDCodnpRvU3z2G0S96NqmbFw1fjgCT5PtSRYObEjMJwcb+ug5NxpYHNUVlCjHz8O3mkyYv5tg19MjGOdoRPj6o34yZYg506JgVrma4OTE0sq2bPfiRsAJXSLIhgWHXGngSlSSYR+Jn6nl/nEPLWU5hWqfky8yEvJud82xqy3r61z/5sCK8eb1ycLiTkaf4BuEGjUMqJH9VTmEUXemJ+7QZ6nv234dBHpaNQyfRzr2RYA0sSxCkOeSbUI88FxJxamAY1ahuUTc4Vk6NQyPY8MVXIUlGoZGnTM+KB1oFPLOCmqFwxYqjP8NLhttQz/iUL9rKxeM0Md31lZ9c3Z31/J/ZdV6VtOWFc6/JU29/GUtusLJcZrBdygnJClnJKXxIMIWbYuialvKG7UbXfywEBRt92d+WCnKYtpHoCtuWKaV9tde2OHeIjHL/QXmQituc52m0EeQJnpNoP8GWZzMvvwhtNbEHIbTiikbFak1aoGgO/ybt4F8FyXv/4CU80GAAYpf/d6EDoeAFb1OS1wJjlW9e++y506sM3udAQASgVfaYoKN5xMN7YDdt9cq/kAlqbYL13y3n8vN1Nb688WxJ5p5+RB+Z0XugsZZ0MVNjVhPu0AeP4NbnUlxSuea3C84vGyb0F4911D8lcpy42mmdt8fZ/Pnzc2LisL542NfSG/Xl/obDNCMee0ewCEYg4sCloPgHMQS1YxtZTHmcxIij6fYRzrwL0lfP78cnZn58BQ6XkA+g1N+6srqcmcXu+lJnMA+MCrzIcfXcyLAOyuvbKjtftMMRsAaPcZADdvMPYth02zvuazaRaAeC6YfIg1W4FcCl9qoYTV37c52/dElrNDbkubzXGB4zECAJcVJE9fX7MGwQcNzQegpNk7TzOL56SpKgOgth9MVZndnf5LF18F4BcKANhOxy8U5Bs1b74kPnvTfvcpbrPlzZdK1SnLD5QUjANjFyMotT1elhzTEmQp/LgnZbftsgobADB8RmGD5374BwGUH3qo+eKLg/Luf/+zHge/1eJOnQLg3bzpyrKBHgfegwvAg8OBlZaLADzN5tLioDLNA0BmxGfshTW7pLAtwwcwqBTLyrkLSwD6HS1bSPc7WoktNdZ9XUPqgJXUNeTntwXf0PmS4BsAHFbJ+krJuc9iO4M+kl+oe/UrrRqAsiw3TbMsy021XeGzUiABsBgLgBRIlZmcZfXyuTnT6gGwrF5gor2/ZXb6ciHbXt8uLs72aj1OnvHNkOizct/VlMLCWQC200ynzjpOw3aas15ZhZ2BCECFDQCMdaXfvdLv3ZHNNWyrIkoaWxAUBYB1sNtWy2uLxm7BVQHkHbUrZAB8dYTQaRw0Dncqp2U5027vKgd7j6FaL/b2S0I4ni3HyuZ4p+MDsCwXgCTxluVWpVMAJDbbc3cBSGymUhBQQCGbLeSynV4fwLbf8oSUG1iub/GsBIBnuY360Lu0o2oAukoWwAyHF208JGLHQ23/1YIoAegancXyUkfvnGe9GS/o8kUAebcNQLO94vzZQq7S6TUOy2vc8OZNVmsy2lK98NXX0g/NuQC2e+xszl9hVqfvjHAJupN6rFR8X4X9ykiyJ1OXADCeH3DsoGLns4EfOtrZmbSoapPT3P3ndjd68ntnO09vFwDcO9d67kuGOMEBsPc8APa+x3+yUANwdoQv/
tIt40m2CuC/rIYD/XRbKsJtBTyAQWmIhVxmuINJYhrApMBPjkSLnBT4plFIVw8C6aU4AMJ+19yMsJ+PnePfx5JKniZ6ZSYk4oPKPJ+b53ODyqD9KVG77ukAnHwoLbKcgyb2+wyA/QO9ilAfypKD+v3TQyecmaID4M1+qiRGmJd2z/zSdnjhat0D8KVL3t2TyG53AEh9c1AuvNcFsJBxFjIhQdnksArONTgArsG5BrcwjTvORnR51Ypsr6xWnS6A88YGgKrTXd3bsyHbrdDQZLe6KZEZTPVDWzlv+XIAOYhwyozvIzRphlPEYznvYEgHlX5DF1xbr/cBDMq5uyuZhUgWOjEvXr7YANAekZEaNzRf9X3VBzAoBTGQSpBKw2do1wKp7wDgR8R2lxXckaMxFpcCrFZ9KMQYqgdIF+4IN6SpCgMgsBmm0wbAdULTMddpc4HBbbUAiM/eBMBttYT3lwfnLg9JZL8PwxIMSwAwKCUeSg5FPhwTmQ0ANF96gSjbt27mH3mYK4VKKu7UKUbXPcPwRly/PdkX0wIA9iB0OJsWpnxy0mbywY2DYEpNwweQkeRcgQWQK6QOy82XAgD6cA4i7RsAUm6oIBJ8Q/KnAUgjodD6htQwuwAahnVQsifZ8P3lICSfspSTpdygMihbO1vtnW0ARrcHoL2+zQTTgTmUTH0zB0AQRACCUBmUglBJ95E+cNkZVJ7s7+1DA3Cl3wOwb1kpW0lFjwRaenVRHvrw5B0VwN4RLtbl84BXLA4fo236bbfXdofkr9dl5BFO2jIdAGU5JDs5Prx2aXaoYCzksgCsgKsHGs9I/EGWak0312vk0YFtMQtgsMpftAGA94yuEXKZ6801AF3Wm2G9nDvU3VVSxeLMOQD5XOWwvM69Sdz8pS0ewItbIT3c6bHlSdi5iA5wWrDflysDeF9lyFb+1RYLIGDDloBlrZRsS8PQHU4mvTTTX8hqC1kTwHtnO4N+9r5n70f0Zrcp+BjRoJRvBesNKs1aX413FLEyyWU/yVEJ/i+9cawWaVei+rn9jXg1Qq9+m7m7bhubq8cad1sUqJRF+Uceju0jSImJ2M0rVLNdCOK9ZWaTs4IKdEf2jhks3drScKzWdVEhE/OOhVSM1/BUhMQsAdcvJ5esQ8lRndnbs+K1kIuVxOijpFKtnL1GchHbKDA/TbVySoinyLxF9YJfv5ieoZsMFMdK+xTnkCnh0OUG6arxv1i+I7HZ3g+oJoPejx+rBLFnk+GLxyJDcbabLk8zFdIKlTnEF+O9DNR2YnavhpOYk8iZ82P2iTFT7UmWzDh+FI6VmAM7JeeezcRvmAly7jId5z4zEb+eN3ePO0xNdeFYLXvtGtUXnD8Rb7V75aXkgntQRBACsP5z/y62T4Kcu96gmgzO8aa6FFJ0EiFFoM0JcfUtP04ISs49jfilWm913uLDHEKUqTh3GtBy7hTm7ub+GLH47dUqTEjx3GiCnHsuufDx7a3jdllrIZ4iU6plaNDbo1LLNHfjd/HlC4lJQvc/SKdqoIj2lSDn7tPl485njlW50UuOs3WSy3BAybmrFGksHSexL5ih49xpYNOF8Lba8S7zCXLuY8H3s8V+pgAgq3YGlR9w1taiRwwUwZRFkthZTU3jKmmvAWBQefIm+TFSjFndCy/U83Kqa6aniieKEWfta7X24SHhRclZtwQALY/MeZRp7Pa41KwsbZvWoOw4Tlr00trIvOQQBAP6zg6ti1kegJfzuR7r5XwAmz67th7ZDHXXzXhtW5RF27RFGUCVVd05GUB/y8/OhSSVP5JI/pUVZzeaVKxhe5mpCNvFlsTPXxMBLFcYACuNAEBTmKj4WoMd3jBVSmU8b37kpM9+m2/02cDlADC8F7icH+j14nCLTpucJns33zAAlCb41sHRnouXVGTDF+Rt3xVZXvAEzwTgMzwbuAByk6kMa6fZyMOvm6T+Qczynu1zYjgCnu13ava1L4dW38qi3Fg3+7rI50QAv
uWzEjuoBJ5L0MR9k+OzAZ8Zkm8+w6/cCvsUS2y75Zsm/MD1AxcAy/CDSuGMbLZcucQDGFS29h0hzTmaB0BIcwAyjDYxYbS1dDEdLhhJYNb9Jdl1zAPGR3YdNzvNWn1fGnoH5h5/P8EYBa0OVkOHrqBaZPbbmKqovYjrnjhRYX3b2WsBECZKg4poB5N2ZAmk3XL7r1T5ZMG81QlvmMvzxTxXjJA24ciJHidw95lVACkUdHQAKJJwVlSallOWwse1Hd/Lk/yatqMCELOi3bcHFT4zxQUpAIGtMmIGAKyMJJtybqgU1jRBt8ldfNsIfygrpPqODuChhewNQy0ciEQdI1CaO9PtrdGrlube4HMkrcg8RzSgdWfLOQhrIQsKAI73p0qRPZvj4GhmGrwG95A93zqaw8/rih5TTklNPXwjN+Vy3joAX8yzg8AIbrZanAWgmb20nNPMHoD79RyAXcGadqRBZZnhpOIwOMG64czl055l+iMxGGRPXtkjHSvZjMi2htTcL1lLnltdGJ7C3eimz5QlOXVy8GfTaZWFUlXIXNrdnMzm6/2w52Q2X7XC39J4AYDOiSen3wDQ7hSLhfagVP3iVi+71RuJ9mFZ0vlCOB77Ha5a8PY7/NVT9xEPesbcXPQ6oy1Mqc+UIgSrs8J3rEgfAE/WyECs39ptE8GPM8Vi9sLJ0ZbX9va/vEcGrit4pE5/q9FlhIj2MGvjhBVZS64fdAOSeTfOHUzigwglL++JTjTCzqTYmxSjx5ZYqNMpAIeUHQCvk6Gc66q53Y48gyBrmakIU2xC+8K1CoAvjOSPU/KTRDa5xyf0x09FjDAvXWHq9YMZY/MAbEFqFUYHwQHAv2kCuDVirrezQwI6WAppxhX9yJIzd82TIsn/pk2SWMzOyo3eaCPX3nWufuVg1n6lC0DIybk7Btb8Q5rFuUcCNyrTEhBhZzb3GDk4cEm65QHQDcf2Dz7EwbgWTpG0T601t9cPGhsugKWF/onJDhA5QXNRPU9c6GWnvGwkLZwgg4sqXYJWO7gc0as6V1ZU0jx7LTX87GFOPqnsT0RvZfzZhrktAuujjal/9mHiqYRN8gXrzC0zmiluWppdFgtEt60j6eu0F1VgqD3RAGuqwGUi8zY318vmI+ynbLm1fVKSaxsHVx2Q/WnBv1CJbEtya1u5Fdn2ZF7l7iD3icyRcz92yWiVDx+zDaCQLVYy5DiIKqmReKl75DltOx0NV1mWtivSFkamY16sKqVB3sfZw3L+JgDcd/Cw9xmoLqrSyHd9XxG7irATzQzT7Xo76yRxl67MRv6+lT27sHd+foSszcPjFj3uUOMd0sC/rD2D2uawW23zQ/IBw3ewi808QGr5n1295/OXybl9/yOkCXeMmJ/x4gX2wnJiCpDJNFXwexo4XmLSZTtLpQ7zKQ4o6XZiJ0RsId4ED0Bk4rU3k9PHfeJmshgvZetGYh41Ta9A1e2l+AOHLpuYvkV4/GR8JzpY/LF6HyULvxyvkWCScx/wRaqAMNaRfMtHYXYTe6o3dsl0qWNhdeK1/NNzYwjybepwzTadBYaLH4jJdHIngwUq5a9nxHcr9qnUYb4fv8mlRKpFuFiK7yY6xxr7ghIORbxpSqQUqo1wezc+MkmZ67zVpzmA4iWm/PXWqHJIyckFDntngm3Gr/qAzsKUpogyz6uXaW5FAzlP9VTnF+LD6N81TSaRHwu7U6DpdhRjHlTl4llpWuLOxxP3147oZMbDjV/2hkNl+PIpiDsl584L8cFzKDn3pdKxuutOzlBx7lGdzHik5qiCQNXb8YuQknPPZuK3k+v2Is2tzvzIt8b2ocxRESSXNtP0E/OWEdTEtl6Z4jsrE8nFgOpTrQiHghS5GVKJMRa5KlWUAhrsd+MJyJ5KFYcnuxTvgKT2xozCmKY+G0+w5CKVbFJ1KfQDlGoZCiGUknMXSvFuy7ScO4VahpJzf+pm/LBTcu42hQd0fYdqzd8xH/919K348K2gU8tUS
onp6M6I6/GdgOZL8TydQMe4M2JiDH6CnLuTSUz/1t6M372MPSquiG3Ev6CXpXKdEihIEWtTcZCWfqxqmb0+FXGnUcuMBd9tcfmS121xh5WT6Q5yWN8WF2ftbo8DwDt5xmEB9Ov57GQXAC+qClcY3MLwOmUp4v/0ngdSX31ZBwCLAx9cNhkA5+Xgssk8DDR3dADlmdSg0tRCFX9KlfWMKdg8gPlia7Ndmi+2AGy2SwBcRhQOToQ7YcQ122yVecUAwCs6AEOTwdqVh040XlytPHQCgL7dLlZ4AGuN7lIlZDw2Vd6BlZ0MCWV2Ukq1LWuXk+SO68o8bwJoyGW/BzaHw2x0bA5B22aKIgCYXmB4YR148A4RwEtXbADTVX6/j6lKBUCt0ZiqVBr91LJ7DUCbDR+gzeYqZlPnI9R8ezX4apcDMD99MHsCB9FsqDZvAXhC49YEf+mAY7kOTKe8XZ2bTnkAdnVuMRPUt1xX5gDwpgfAKbIaeAG+A1aAD0ADHzgsXAZ8AIA5WCI1j5kayRC2tm8CKOX5VtdVZFaRWACm4xmOV0yFr1+9s+AD6QKjdYJBCcB1GZ4PXDckB67LSG0TULiRXC6cwaZzwiANRjoHAOtrOgBfEFnHDjguYDnWsfttTm1zAKaX7X6byxa9O07tbu/mtuvl2cnmdj20vV973rz6vPmpHytce94E0NTF8n07TX6m7IauLyVv92XACSAw0H2kWBz62vG7a+70Er+7xtfWVb7KAt6BBZVzAoNjAMwvpQBsrunzS6m5JWX3pra3aQKYmJcB7G2agc1U5lLNPa08kR6Udhob26ieHopu4mOT7RsHjNjpEwDwuS/ingcB2HyK8x3Od2w+ZTrqpDhTt3cyXBaA6vX7rp5h9Syf6rt63zWyvGIFrmhLACRH6qd7g7oHuKLD24J7wFJkXc3NyLLstzuCInuy7DdVy+ctAB3NWpzIdTXLtFyZTZu+JrNpAIMKm4XRD0rTTOvgoEZuIuUecU9s7K6mMoXDP+WFPYHPCHzacTUAjqsCYHADAMvcHM7kymlJSVmGnitVLUMDIMkp3zNYTvE9I3ANTioNOHcrCCSGGVQAnKt0rzby5ypdAFcb+W++Y3NjJ7+t5wAEJhOYYOShKZ6fnPI11VdV1t5PN1YAaJWIq16XGWrJ8kFphx86cn5BUz6YNlYc3t/z9f0gVWUApCZYSR4uybQbLuFS1tvtc9PZUHOw2+f2bLmuCROCOSGae7YMgLGRL6HVRauDUgEA7po+t9vcKomZm+puScwCaNl9SxIAWJIgWQfJ77bLJwvtre3K3Gxja/sg+oK71+Mncu6BqNECkGd3m8xuK5gOLcDMt//3EU8mAKfF9QwbsalKQVoKIixVXhHOz5O+FoWPkpbYX/+MsbEaUZWUWLfIRloYhWfnSH7tJz/8RaLlv7v4jURLbkfM7kQ42dQJYe5vk7vcR49wG092OvtOhHk3XtX1VyOsuuX5+zppvVxKkzz43/77R9htcRJSxB+jvbnx8u/8JtGrJZGeRXWtYnqR15EkS5IjzzDD4BNHhZN7SPHo3/4RKZr4BcaPDsxcVf62JyaIbj/4n0kG50KrT7QYlmVEPSanTqY/+nfJWMTbXyRDebzypN7ZjwjaD71fefgDJF/5a39IPrziThItP/LjLxAtv/A/cp/5FySj94tPkd/rx77lS0TLI++uPPJIJM5fl612GXJk/v5Pkd+r1SWnhxOkg+hm7PGWy5Pd/uRbf59o8f75zxAtlUCXooGUd41WzYjo6zNs+rRIDvsVm7TRnZgup6Px6Ffq9Vt18us8NvEeosWfJp00esa840WWqvuV3/O+/HtEtx/6Dx8gWv7z//sNokV65MPSuz4y2hKoe4FKakX6nSmi5Se/8Smi5edfP7/ajdCias6byJEaWvFJMg7ls2myz544sSdFPv1E3ZzYi8i4GU4+lSIn5AMXSFPKH2nBTlSrJLRzYiei2MrJ5h2z5IdYz5ABa
d94wem2IuLCXObWbPZWpJPF4MjB+3E6dz8xCycNAoNKs7acibdE6XTRSwjK/lbwyguJeSwQlH0sdujk/tlyvPi8tU+lllEpNF21W2/vWYyj2NmKP8R09j6qwSIo+1jMzsdrzACwFEFOmm/SWZgooPqJDftimjxWMhYCF+9Hd+HjVIeYuDlyszwKy6TSTS3n4+MrcGtUmkONiyd9GY5KzTV7rCFOxmMMcZ/iE5t/iyfiX5FRksuSRXHAHUBVSOw0+f0PJ+bmKHPxbo4zyR2BfOQ8lRU0Q2GjvvANJJ9723jtKpVJfGYufj2fvUBF3Le2Egug5gXx23PzjcQW1zRP8o9joR05mHbb0O34jbB2vZPUz/E81ZOvdON3er9AZaxKe/H7pepRcUUvUjgtZxUqz+Z8iWLZW2P6jCHuNTfeJzSXoiJq66tUa5UGy5nE4lDSwKI7cU7Dubc3N2P7gI5zpwtBhu1mYpa9mhK/EdZWqFhIQiczFpN0sShoOPdrF6kGa+6ISvAoKDl3homfM2e+k8ptQ6JIbrXrkhL9WEwUsrF91jWqKZoS4w2AtetUp/P52cT8/RMEDedOiYcodpO+QZe+sUWxnHNjzLzjDjGx8Wu1p9P5blNw7pRYUaniUCaFnES1e01TSF/Lj5LazLEoSPHc6Mt0hvqHz8Y/1XOXqQTVKSOee6JUy5w4f6xO+tdepSLuNJz7C89QOacTCve3gh7i99QMS0WJNDOePSwIVGIcDec+dYbKtcPdvhXfiQ6rFJx7UEjMZShBzr1nJneQkJJzp0GXzh/5K1+MJw3UOvd4zp1S554gdrfjOaz2ZnxGMQCmG0/7KNUy2xSBNueqVBOLhnOfOklFZVYvx8/3yQrVG+5sxXOjn/yhxFzWHn6UKt8sDed+/bepTtPQcO7ZYz/oRKNzp0SCnPsJCp0706GiyBpFqlVKnfsMhbI5J7+90QmZa5//BaLppYmPtKP+HrP9N+fUSKqRNVv4lRa5RX9sg9z/509ac8uRJX3L9W45kVXHNbNc88g0Vcldobl/iWg59/j95564f7Rlehg/Zojnf+3vEy3Zez6Wu/tjoy1f+sMbX/qDG6MtZx6aOPMgqUq+trtGtEg9iYseoljgri/y10dbdpnzNZYUxr/1e+aIls/8xzdWVyLT9GOngm86FRkry2WaR2byzx0xgGe2Ai4aE7Eg2Xkx8iEMVtqWSKXtlZ0O0fKAqRe8CKHpBGhHU0F98BHhg+8mBZ3ff4Ek0/V1o7Yemc1uLuPkIw8vqb6kkUR5d458QbayyIgRdUru8z+T/fy/ilwVTP6J+w3EhRe6NaLlI//g2z78k9822iIae6IRcdvotc03XyZ1IPk8OXqmt+cFUc8KSUzLkZERMnzhFPk6v/a3XiRa0j/5LfyZSLiSgubntciH0H1sHuHXlsrkrrB5M6WrEUozWXInoyfmbMFv50ge64ufIz/ElHFZ8iPzL7P2TGb9mdGW0gPl0v2kkP38Z0jRp3nqI63THx1t8X3fDyKTNpuRslmS47n6Kmm0OPsE2eLvLQR7ERMxqzDiEc6ovLhNtMz1PCXKHxpZyYw+Q89R3+hGFjiA1Z0zREteYAp8hDKIypagRH6xzqZqR8SvpYvksH8iIDeK3fsWdy5EzuidaG0vN8nXuU3Ofc2mskkSlP2tQEmRTlFHQXmqj6DsY9HcTswVIRtQ2dBOLFOJxu9AfOE5KoFJ7caLaJQR+QMrnoWcZqhU0iffTaUEpwHLJKZ38q6TC/XrBa2XqWxjRjneW8a2qexezbXk4pckJ/kTlH0sMgHV7zURr3NfLc0ebRzzBATbPhZPZKhoX68dr4X0yqQn9VgYOslzHQXVjYDe639K1zEeHM3ZZzp88fPxRi2JT8xSqviJ7bvLc4kNAm9TvSCbjVf+7gZUziS3nk0s6ggNHJVKCSl9PD75H12CDSiZeBIpJjeNlRkq47PSvBnbx7KpxkopxJNIny64f4si3WBOoNKGd
dx4rWCajriXcZtOcmM+atGKJ6NryUU6ZHUqIUCgGNN4RSwAOs79+HFimSYBBdVn5iimcotPTFA4MXfcPr2BnZjyl9DJvBX4Qfx+KWSoHH+tz5KntI6iScdmNmvx8oRNc4QfMLn4KUoJGs5dEqnGyujE0yKexpsQUCjSg2zquzS3kikiidZZqmAb15n4r3OiNUbUu03O/esaVv1GbJ/yLJWs51Gsij5DFW6UULiPhUi3pVoUU1nxqYw5hMJ9LFa3EnN4pQShcB+LneQ49247MSlHyFBthDScu0K3pZan4h/eoYuek3fI0+xHoUwnFh1IFKnekIZzTxB5gYqHNP34IaVUy5SD+OXcUcY81Rjivty9SPOTSYGhc/Y8Zlx/MbH4cNMBleBPw7lTgk8swS06XGJcudpJLOwlDec+Q6dzp8F8cuYQRz3ujZCGcxfo4l52BdIL4Cgode40apm+SrWnpgrxLnkujas4HSjVMjScOyWazG0+PPO9P/VLi6XIxfvZE0a61Gz0AJQrOQCp9Mw90iaAbbOybVZm5QaAHLcJoMLwjYPkR4Nvr/RNIxt6C6X7ftDFdEUD8MrVyemKqlT32ozW35LFnCvlXKvH2z3+535cBrBVx1adeeSeYKuOP74qAmBzvN8Lb37W71vNLoByVgHQ7BsaG7itevbEYn91vb+yll1eKp1YvO9DTwzmsgVIgAWs/pd/DgD8kKH4y9VPAFi72X/iG0MrxHN//GYhCGmB6YkAXNbaZvTJqRKAeq01OVWamCqfe9fEyRPFW6vtUlEBUCzI7bWuU2+rOpdJedmUv9sQTj8wo0g+N3XCq60C4KZOOPXt3c8/BUBHvooNDXkAK9l7AWztmHMz8qDiOV66kAPQ22vnJooALEOfZXurKn8i4w7KW01mytI2e+G8mc8Fc7ngTS7Lm4wrDz/iqYdnzE1t6kxVbepqU5s6U73y9CavtjWH1x0BQEpw0oL78Q/kASwWQuHjy2v8F142NT+YFFgAqhcAMBikXDflOA1FSbmuzvMTe4ZvhJdwLuPxAYBP/+3qm/V8NW1OZCwAe6qUn+uaqg9AzrCm6ssZ9uIz7qvPeDJvuH4obkslQS6xHBMA8A7Yk/enOwAWS+ZSyQCw1lIublWnAgdAjREGlWe4CUZFRu3Xpmcy/T4A58wi+9y/Ez79zwY3cf7wX3DnHvvVv/9PzXSZd3QAvG0AeNd3fnLq0+9Xr66qV9cy55YA3PvQ3RefDTfyW9c7AE6fUXxFOTvHXNsKAFzbxCNn2Dmu13FC9zCZNUxf8ff+j0x6WtV2pybvA1Crv/oPfvq3mnuhzN7c3wVw5tRKf//JfrcyM38VQL9X6WuPlU58EIAslk27adqtQub0P/ruHwPgShZvSYPK7tT3Tzy6dPOKXqoIAEpVobtpKnq4MUyVBQAXr2vyLN/p2ydmMwDaPRvA8kRQa5r1pjlZlutN894zhW4z3Qn6FaEIoOG0AeQltmfp8/n0sxt7c/k0gAxSUreYrgCA1gCAiXPw8yqAXNEbWM5yRU/dl62GLp05Z12/ypcrXLki/e7/PHnl9xyPFTh/UAK46598DIChGUpaMTSjPFn+jb/3Cu+G2lef9QAwv/drWt0CoNXN9KQMoH+jW055APbrLoB0hvV8DwHLyWbg8gzvBi4PwAtSuYoIoNewAfSa9mI1xVuC7BUAmFwHQLvcNoKIsjdoS0xzD4DVvA6AS5UBzJ0952t8oAtc1fA1HsDlTuuOHNOwUJHQsLBvBQa4THZ4q2qO264Fpwo+AMXJG0J4WGQu39m3uaroAdi3OQAtkzMs9zCrJeu7IjvLpWu8XQXA2aEn3mSmNefkt4TuoARQnnmm3eUAFPPeoGKzJ7PFE3wnlOz5Tg+7a05jaKLL3lgH8M/Tj5ya5k9Nc3/+igXg1DTPA1hvRTaZvu84RnijAYk3jcwL6WFc+W2zkue0ZT5cohUmrOxmZQB6dugH2r3IAtg9CO+528iwfZuVP
AB2j7d7PIBTB+a4uUnMTQaDCrsT3pPNhRVVE6ayQ1tNOau462vttbX+ylr42CtrUycWLeBwuw8rCml8KzNXX74xC+DJPwsVVQXOUoJw/1c4G4Aa+PBRr4UuXPVaq15rfdN3nQVw8sTQBzSr+E7Kz6ZCYjddcQBwUycOSwBufSeNLoDRcusg4u5hRcmFiqABZR9gVeWjpXdI2QFs9pjNHlOaZADw5rCdN5mpM1UAmXIqU04ByJTTmyPxj3RHePgO6ZCsD/DYkvv8RSbDhfcZVJgg0Hle53kAg1IT+Ex/KE5yLlM8V3qzngewr8n7Wvj18+jKB64vg8rupg/AdIcfUQK4A65kUFnMmI8vR444LpWM2mb4c1MHYuxUs6s5LICp3TDio9PvOweUHYDw6X9Wv/QqAFmLMJJTn34/gMy5E5lz4df5iz9dU/cjrN+1TfgKrm0ON8u9BjKl4Ucx/fAVVG0XQK3+6uH/Kk9MRyrWSjbXyB4kw8vmGtnSVYjfEQ6LWJbFMgBXOkj+OVK5eUUH0Go4g5IzXM4MiXutaQNwpaDTtwGsbg/dxC51Qk+HetMEcOl6J5N1eIEbkPUBtrpaz9K3utqgDiCD4CSK2sj5072rWP5o+HO54oG0UVqQSgAgnQmpgTxRxhUMaPqgnP3Q+VQmBWC0PKTsAFifAxC8eTV95zkA6YPgrLKv+LsqgOpkuOS9ILAcEwDDu4dlLh/eakDicxUxszX0vByQeNkLjHLE+TIImvb1YRw0T28C8PbD73hYqXDpKz0NwP6IzLDf80frC7KsOGF/xTkU6ToDyg5gUOlrruUHo+GG2MCV1DtHnyrLBnNOHsBoaQDF/EFi4bwHwOTyPuAeJIV3Czl5d21A0A/xpDgH4Oaue3M3/MWbu+PirTsilb2bBkwhvs8pOl+LjBOvn2quUoXwpoFK4X4EwNPe3mMIf+OgUQqaDSo1UG0zMdFYo/DuqF16NbYPJdp0vhbPfomMjzgGHJXnpUIXD4AGPF2SAxpwSMyOnSDMXPxT+S0q7z6dQgnedY81qQ4An4k/Q/eEPcYcMmaRKEdSG7+tuLmV2EnC1gEX/7UxYNu/NjK36350FMo9D9F085NLAKu24recJ1+j+jkK9wEYdMT9mDF1L5n5/bZBSdyp4FEZYAyKMI38uBPnR+HSpSdLCtufp3MtfYMqfSgNeIvCW+ZUYrHtKIm7SufUSwPBv81QDWOIu0uROi5BUBL3jB1vNiktL9HcaqYU7xCfocgxTQmnntiBFFai4sKmTsbH91iapCINQmJ7HKbmE7vXRJoiBllyxL2oUD35u99P4VXpUtE+PRVP3H06jtylIEYqHUvOMe/EXTxB6H48kcnzVA6aGTF+zvR9qnnlMfHLeW2cl+rtxpbxEjsVRqmWUcXE4vQmCC6dWOgfXohnQHyLigtT2/GLcK2e3HmoChU3MD0f/6HX1cTGs37sapkEkdKTU8vQESMaeEH8h84uUzn+4q743NAck5hCiVItU6GICpmgWibLUs0rGrXMWPDC3F2jf7O56um5Xrko3FgdbuZprZfWwtGp2M2GWAZQV0NLiKpymYwHgHNvAmC1pp8OTRyzVcaP8n4CLwlCrihJbSu0Wegd+7lLJM3in74FIKhkADANld3XLp89Y/nTANa1/GK6C0Cxg1QaAFih4DsdAFqrwGsvsc6uL4xkM+ErYFIIhq8js2q7GZnxZ+9WvuPTZ9dv9gEsnsoOKi/88R4Azxc4NtTErf2XFoD0rAhA27bTsyIOcsfYmiemOXXPqZhb5m4kMqrhp1u98OhAz8rkJBVAhmMkBtbBx5UYGN09luMAcLbuiSkAWVeXfQNAKdABtJgUBMzfRTIX6/vk6vXb2k7H7retbFEC0G9bKcOY9SLcWR/ily9y67VgcWr4gVhJBwDehcsPKnkzazmiC58H68IHUDiTLlUi3nVu23QZG4DPeGzADcpX14YeY6+uZe5bUl++1PO8yK5zNmV//HRnX
Y14Rv/8izPLRWOlrSwXw86rughgz+YmRG9QUSpevRT0U5g+MJcyQVd785nMxJDhNU3dE8Ktwud41nMBdL/0VQDW2qa0FPZcNpqqagNY7j+zkn0UgGqmtH4KgKRtAbDScwCu4FSj1a+UsgAGlUfPngGgZMtGP3yIz/32L5aLi8//5Rfe9eEPDlrm5py5+UdHXm4e7g30LsPah1RF7zJy5wE4I9GB/EBkGXvG33zA/7NNZphoqSafqclnAbCy7Juh2s1NbzOiE4ycKJzySEe9BuvAi9AjReROZCsdywJQkKSOZfV9t+mTUUL5S0YqH5JXveul8lzXCQANgNp2MkVBbTsPFFMzH1ya+eDizhdCc1d2uVq73Fb3jUx1uBPM3EUSzdoX1lELvzuz2gHA3FlFSvYDj2W4QckayBgZjY9EFXF6pIdri+l7bEpl9MxBtjimx5Z2p+z8UDk5nxI/9APvu7bWOLtUubYWLs/Pb27oXDflDf1cGVdZ4Au6P9S8SxK7XJK2tWFLnhda7dAnynb7AEQ+23fkti8UDwhF2xe+525ZjSrweSt9JoWVDpYLALDSQa3XW+92Aay7W4t86GbqbIcapK2tibm5PQDIkVzR9dnz03c16tXy5H7ztTvP3vPmtTU2940ZFcCpSftmPeSDmf/H75F72vJkPRMNV5ZZ3c+sRQKkrFnVdYvcouXJ54mW0yU2G2W4JWZJYpZGWwzX3tBJ39jcTdI0elU+f1WKGKPuZDbuZCIBF4uT2sMfJi9ceZ2MP/fzv+mvRI17P/C3Mj/wtyKr4tf+pPnrf0I+1XcvLREtUr7HCZFvyHtd3ousk5Y32TqSKE4pk4KI3d/z3Yhj7xSMyaggXCjgvnvJL/2HT5Ebe41NE4ffJtX+lNaP9kntMuSSSy2SAbvtNhmKbfo95an3RCJDOS2j/xJ5bO+FbVJOtN5Y97qRyfapC/1P3Rt5qs/fqHz+BundZBzxtXfOktyT7HJS1DugubLx1c/8BtHt3UcOx09l+tPpCPnYR3oviIzMvrC8z5MJhn7qSELH9vpeaz1yPOJdj9/xyBN3RDq5JjSSPP29f/Qa0fLEibWJdGSsXlY+/bLy6dEWm9u1+R3iwnvdRaLlWZU8cL6gpBbkyAvuOMa2S9pplgxypjVNUln0xJmn33/mK6MtasNb/TKp5MmVyC3n5o33qNEg3v5dk8FdkeOTrAFWJxUXDZcUyPpsuh8Nv1VWK5V+5OaL8+z3fTvJA33P75BhSObMuxQ/Mm8n8+xUIfIMK/u7K/vkbK/PkAduf/o9lTvL0RMGfTJAyo1O99+8GgnICMDvkCtOzBbEkSy1APb4+j4XmUXvkmsPK+S8uk21TCG54J+6R5VvhaDsbwUryblt0EBhqAKasUK8RqJQoFLSqUy8hofydBwrUJwQaVO5DBGUfSyWy1Tz6uiav21kKF6QEkaHSvanAUHZ3wrmRYq4HRzV0WeejT9VpFGkZAGQycbn/TgSDHE8+hRx7RfnEpswxdRxR1q+bYwh7poVf6StE82T+1aQ4hJTppemqEjDMoVl754ziVmVjYBqNrhGfPaMTue4lb8JQpin08YmhMYqVRj9rBhP3NMeVXYhg2L3whEGeSze3IsfK86nOgffo2Ce+h7VTi+wiTn+1nbPJnUrkY5NOWa82YjfCG8cnEv62iDYdnqMIe5pKf6xjp9zP2fFuxm0aoltOZQgdDJfR6Dh7gEwfPxk4IuJWUFXmol9wcqJhaRuRQklT+FlwFONVTUdv744OqYhR8E8UXLuhhsfhmHyDqpoIpnjzZr55WepPBEMjio9GQ2+42x8NJHTBaqII54dv6duuWMmw7g0exT5QQo8ldhI4+yZIOferifmw5MgShypCxsLlj/WGDtTPtX2HFDkh6KE34v/RUq1DA3Ky1TEfVc7XimbjrjTqGU8OnXfph3fLctSaUBo1DJqIzG3+oAuTJ4UJKZYKznxHqjFNJXA9GYzsWBznBg/Z7bHE
Xf+Max+GScew+o6iotoA+ivmd5SqlPzAXRqAYBiVc4IMoAdQQYw45hpgytNyKUJubVnHpbnpnOff633oXtyAFbq1krdOr8wtdtN8X4HAB+0AezLUjow2pJRtJSirbRFo501GNVhfcdnBdZ3APiskJ7LZeZyANStnrbVm3xkrveaehW4J1itIzRklb2WexAM0/UCALOzvgAJwKB0YAmQ2C4TyGBMAPDzAdtldvcUgXN4XjAsXTjgWV57w5ic4Ot77kEpsLwMgONlx+ywvMzxMp8Op5GriazgAXD01L2PB5dfNKoHabW0Violi+revpgeMqF+9sCOrztBSmAbum0bAERR8TwHgOe5UzPK3Fx1e7O3vdmbnc/NzuduvNSwIFlCEUBOX+2lTuhqe6HTa7aDcpFptgMA5SKjMgwAjWHSQTCoMHBdS2APzJD+wXKTlyvmSkNergCQTlTwZDNfHGqoLDPwvTwAhrEABIEEgJ06gV7f7/b4+Vm/12dzWTYnpF99PX3xDe3CXQDSF9/wIcEtZ9c3+4vz2fVNAP3F+Xk1D2Cuu7WVD90ArnU1rprmqmlvX+OqafvyHldN/+n18uly50azcLrcOV3ueIEI1gcANsDAC5gNJibTAPZq5sSUDKA6Je0KejrQlrM+gJU+C+B02n1uTVnIOfN5F8Bml2c49v77Zx94YBbAyy9vDypzWQC4+Ub91F2TN9+oA3jg0bvV1VsAZu9dBrB9aUVAWtrsbbb0vCIA6BrO2YK12jSYlBLoRmCEJm6fm2ACL2A41nMClmV8H4DnC54vcqwNwPNFsB4A8A58FqwPn4XtPP1CE8DCrLKxbQzKC6fTtZY9VRIPy6v8HRNpTTG7ABSr28ovmLrIqAajmf5kEQBbbzNpWSwJYiDajC0GIgCV1bZMLccLPdfJ8eHE/v5H5i5t9QDcO5cDcGmrt1Yz1gM1z4sAuu5B1A0PABSfMQ7882xWBGBxouTZg0rJ35721+r+iUl2dVAyG9dqolZfD7eiycW07zgbq0NuZuHETLfTN/Rgci5b3xraE2umn65kgkqaaWhBJc1erTuZDEzwMswuBnyOa8GxWwqnGJ4xKIGQHbUyM7wd3q2YrrK2nR9xldZUsGXNb6bZshYYIqPYAP7wFR3AuWn56q45qJSEfEnIt5xuScgDaDndQOqU2Jx5EEZNFoK6be31QzJ9T7b4Wr8tum5aSQNIKxnNCDfOAssBKLJc2/eKLAdg++kv3Hn6gAJcfgrnH4cNzD+OQ2bJwumFHF7Fx07JN1pDQ8XldkjKvSCsGH1zzuyU9+oAmhOTABR+3ZhGBoYKZVBW5bTNT4pu3eYnLX4ya77Wl+9h1n/3s4ji31+s3upE+DV9xjdmSB78Hz5MWji/cfYrRMtnLy3sdCKCdjPdbKZJiWz5VVJWeNe330O0rPziNaJFc6BFjTezM/43f5rUk7z8OZL3/MoN5cs3IpvhubPmuXPkM/zhy+QW/V1nSSboiU8oEzMRX47tW972SuSxOp540SITj7ErK0TLt33b8tx8RPJ446L3+kXSPFWsfZFoeUUhQzSbvQLRcv6xufOPR87lqru9G39GWuprRyJ0Cstn2ExkHM5/4Vfu/OIvE920I/kNlZOk08vvvf/bt6qRsILBmuevRV6QYy1FJgWdj3/PHUTLSW8tHUR428D3/CDCMzIsyx/Ncn7kGE5miRy91z736mufvUg0/ocnSX3RT/3cE0TL6rM1rRmZRY99ZOqxj06Ptmys9n7jl8lh/8CdpHh+i3G0aACMtc2l9U3SY0e+n5R619rkAb3ve3T2+x+NDPuvPLvxn56N+NFnwC4fScm9mSZH5uPmr0z7a6MtGxudjU3SIKE3yNRjnU2Srzz9r/+IaLFUxtRIY1ifORI8coFU1l84YjUxbM9wIvNKt4ONJnlY8oPLpL9ToDuIHhS/avevWaRm/BuqpEuSf+RE2UOv/quHXonkfcR3/Pf4jv+e6Ib6FaLh+3+Np
Kv3b126fyviT3XzgnjrQmSpynJlZvJ9xIWJncMcC4Kyj4WsUp1QLd4Xn3xne4fqddZb8UJoo5/Y0Y8CRyU2EpR9LHi7Q3Mrlov3WOjXElMveqBS3cztxUcD5zgqkx1B2d9uvLSWWGoaSmQoFiY/fdw5Una5pdg+HkUMKAD6zTfib4XEDsTqlDoSihAgRQolCQCSso+FGu8yBGD6iGs/JW6TuD86S+X58MARv+nbhrkbr419+CEqN6zFUny3Rj+xldPxqIwKzz0b/wl5m8ptw/fid6bsVGIxyr0jTN9YPHfnI/G38qhWjnbEQ/8oWI5qbntm/NY7U0gs7A8lVBz3L74DQUnc1eDt5VDfRlDknAEdcVekMbzv3/y4KH2qeSxTZHjZ3k7sdSrZxOxCHZ+KsX3k3VSZg44ZhE5mLDjQiSb78Zw7JVQK4k4JGuK+0znuZULDuft0Iu+98/EuGWk6OjDtrcX28ek499Spu2L7cEjMHTlF6RZAxxDQYHv6vfGd6LJFvjJH6qgpMeZlThbiZZjNPpVcvNuNp8hGlmpAaTj32Vmq6U4o3MeCknMnFO5jUaDwMaCEK8aHEEoWvhofZI2Sc09QLUODBKNsUnLu6XJiLqHHDI1OUCAU7m83ElTLUP/kO1FgSkAts8p5g3KV86YmWgAG5SGWGOtxtj+oDFp6TXLlX90LBuXgX1Ykt/G0hXSU1nFC+AyzM0Ory17NGq3v1SyjpgNgqjmm+tdyIs+/Itj2kOO2bW/wpydgtFyuqBxrAuBYc/APQI5jDksAywXPtZ3BPwCDEkC6yByW1RPs3o7Xarq3bkRm4X4rovHQDVk4iFYxqAzKuTkWwCOPhCqUl57/a5XgNm+rkgpAT6W8dB6Al84P/gFYmJUOywEOde4cyw3+dWv97m4fwKAEkJkOh9HjR6xYQib8dwgxA2D++lPDFkkKxPC3BhWCczcV3lR43TT3Ox0Ao5Xq1iqA8899YdAzL7ULUqhoGlQ8T154OLT+5Wdzg9LZXQcwKA8r7Ua73QivPawM0Gl2AAS+D2D18hqAzn6HGNL2XjjanVq7uVYf/APQXKs31yIhBOS0eOcZBYDAkMIcE7CHJQDCmhowLAImCADgsFw4kWMOHFQOK6+ukJ++Dq91Zad1ZQfArf/y0qAiVFihEv6WUGGFMgsgsJlBOfh3eIesGPZ8Zb0zWvnMV9YAZAUJwHQqnAMWPBsegEE5WmEFkxXMQWUvmN8L5gEMKoP6Bz4SmnlPnCycOFkQUqliPg2geOD1X8ynJ+eVex4tAZicD5lxg2MH5eE/YgTA+iJKiqQoklLOlQAoUngt241QkFtdrd5XAQzK13dqAHqebfmedeAodlg5ihW1M/xD4gBcbjT39ZCVPKwAUA5i+NQtf/Dv8M92l6SEfzH1t/+6X3z1pYN5IqYArOyNkR3F8vCpWidCpmF/rjooG3OhVrzfUPoNBcCbz+3t7Yc68MMK832f+03ivndK3bukzmiL0K4KHTIg8hd3C0TLRok0Kwvqfawb6TaX35ovRCz1U9XURx8nXZL/Xz9LOpOce+x0thpREfQvb/cuR6LpThbtDz1IaqUvr5HaqM/8NrmWHr9LefxOku36e79Fygo/99PkvrJzk+zT61R6HdIg4ZfJqL8/8B2k3v+X/x05wmbWNDMReiEz/CRH6kk+9UlSMvjd3/K3oiEWhJTHp6K/yAEKyadoeJBo+Z7/9KGF9S+PtnQm7upW745cxXLr4hE9yToZ1zDNyxwbEXT6p+f7ZyKfnpuYlD70EeJC52d/imgJZDGQI0ZOJgCRaZIByzGkSOF4pGTAu74QTQIup1NyipQ4f+bfk6/zX/3MtxMt11+pq9FIDI9/qPr4B8nJ8L/9wz8kWv7JH5HhXz69+zLRkv3e785873eNtrg9Rr1MupdMnyQnw6V6v2NGPv18rjCfK4y2WI5Z75LPICyRlht+N8+YkWH/9J2Xv/k8ebrwc//xdaJFniXVIq9Vv
/H16sdGW9I9J9UjecGZKnnhyiZJRm+pG6obWYYTpfxEkTQpTfhkCKZbNvnKal13omOVkdNpOTIZdIbb4Eh90bs88qn8/Z7fiNDDycr0ZCXiOmX46rZ9g7zwo+Q+Z/XP+25k1Tdeae+/Qmbd8BdWiZYxKhGCsr8VEJT96wjX945bQLvvoXhNlxlQWYzn4o9i0GJ96fGkbuUG8WYMb49KAg34eG1YQKdqYIP4c3YbO4kpizZvJeZiQAmCso+FRBHXKFnspU/H9pEoQqJTQqQ8EEWBFMU0poTCJneAbtzJ0zHEfY/CY6FjJ+YpmCDqbSrl7/RE/MOfmaCyBGRLFJNGoNK57+7E30pmqIZ9K7Fg4ElCojiKzE3QWZX5xAxfnB+/ByzMHDfta4hUxyBpUJDfiUt1QiM51ttGmk8sZAVPkQxHp4syT7DtbzvMMVP0b95b5v/GOxnve+pfxPZJ//UKzVF4FN0oOXe48RSZoZvbHhvfLUHOff7I2a6xaFCEcuRzVFHk8sdL3Js7VK4WNJw7JTQ33tXCZqh8eIxuPB+WIOdu+FQBJAidzHhQcu40KIhU+gFXXovtU9un8gfq71MNRFJIUi3jUPlhTc98vcYgowShcH+HgIZzTxCUapkKRUCY40cgvxOnqEpB3CkhJLcRMhTel5RqGZaP91gbf+HRpj3361Utc/ywDIptnE4t8+qL8b7blDr3LZqA9ceaNpkWx6+WcY5kAjkKSrVMthDfjZJzp4HbS0wlbTlUoglhTR2L8kxiRxCs5HJMU8KhsE9QqmVoQMm5U2GcWoY/+3JKz3qpA7duPeu9YHzyckHb7IYeafP5LdlJnVZauyPW0R23+KGHOwBSxTSA/Vt71ZMTj7efBbCYYQCsq8FihqnhmZfX2ncuvTK4ar8z3bCFf79yYfDnjGjs2Moko/d/4Y3Z6fT2bijQzU6nT59iVBVTUwCgqshkoDVrzedUq9UHIJWyVqvPSymrAd8GO6LLrUbDb9gm+OdCdiMQGQCMHZSnlFu6547kljQk3wucIPAZhg0CH8ByBTMFNuqRgVf/NJ8tef0Wly15APotjpmVAFieCEDibMsTH0ndOJ9+etB/165Mi43n9DuecpetQAIgMRYAK5D+4/+6NzPvz8x7Lz0jzMz7O5vsijBcYCmG1QP/A3n37DRmDyzz2wZEn/nqlh6MeHcwHPdvP8PNTmN7JDnMez/glw9cs69eYc/d4a9s86t7GRwEkwpMnZFTluMBCCAysAOIAH7wz++tlR4EMNV6adBz4wvewvpTAJiHnhi0fG7jyq9tXgVwZnv/+sFB5U+xKW05Grvmf/szALM7a/1sYdDSDrqBM2SySpZXv95qbgT1ujY5mR6Ujnnlyj/4+cd/9Duf+j9/e+mhOwGsvfjm5/7iB6rz1f3NfQCDisvJjd1Wpjh0XlJbnX6rPXli6HHUb+teICiFFID2WqO4VAHgd3sAsqXM7q369MlJAEZH42UuM5lX693MZB5A+9pu+0YdAEaYescPAAQj8V6aUk7wPYflDkuWa1XZlgTLgiTBAiAFed1TAHgBB4BjPADrF1kA8gHTZqr48Q9OnZoYOmDc3DMWHn5vZ2s/d3boSnT18/X27w6P7AvVtLOvBd+0DIDRuCDtDcrahmjYjCIGhs0AUMRgkc1D4vOcUODFjmsD6FudvUY4XQQh5Ti6Is8uZS8A6Ni1gjgFwPTU1kYfAGQHpjAohYIdiA4ARvADh2UE/9abq8/dfB1Ac6dbnsk3d7o31u1Wl2l2LQDlvNTsWuW85L4y9BY7cXZ29dr2J37kip1TFoXhfLgYFF+1C4d/+n4gcPgTcxiH5wxfve7uf2rRN0xZkc1Wp6DIpiKbj06+a0fvzaRyh6Upiuue7gcsx7pBwAIwA/+W3QaQY/meH1Lwx/wFnTFTgTwoATDvUXtds9s1AcwvFHpds1zO20a4iaYzoqbaVUZX1MaOLcyIzmH55GPV1L6rV3kAqX0XQO4ml
B01MARGcQJDANBNuU1nux8YWUbpB8YsV6qVOTF9HoCbCWcyr/bu+RUNgD5lpWoh798pbZneZXl/C4BZnQPwYC6Y+I5wuW3uiwCWy1rBNQHc6rMns/6g5AGkRg7spPpckWutdIep6Ta7cxkx6JkR16JKIRiQ9QGqJydwQNYHGNRlKIeUHUC1sPva1jDuz449nNCHlH1Qr5xMZw5m/2FlQNkPK8auOkhWfpiy/L1PkDvhaBwI5oAR2GrBPZI1eEDTByWA59Ygj2NT+i3usARgG1IghrcdkHjLE3Fw72mxAWBKalh6GPpqQOLD199kdzbZQYX4Ff3gMWZHfK5mFexrCKJ+e4HnAdwoZQdQHjl0c+4OHwAjCIeUHQAjp1yfDRC+RTByCumQrB/8+eIhWSdwSNmXSyWtREZGG2B7Zmn4Ry+afVDimhJXr2sADkut2wTw1P/52wDWXgwXdnW+elgOKrs7/VHKDiBdSKXykQ+WLqYdP/wSA8oOIHuQ721A2QHkZgsszwIYUPYQUV3N+q4TYAwX6bDcaMl2GiFNx1BWG5D1w/oXfi10EzRHpuooZR/8aQOjlB1A/uyCNmInd/Y1nAlPtAVp77Ac0HTjwOHdsBlTDBalcKkWeBEAx4j9g4S6jqMD8Jw+BAAYUHYAMpcJk8INuPUDnp0RfKLS3OmOVsp57sZGeOxjQOKbXSs/on5YvRYS+lHKfhQsy6wxERXWdTeMR6bIJoBSoTP402bVmVQOwGG54pp+wALwDiaAcXA66ZCyDzCg6amD+IvoiLkCcvnwz0ElnRmujnRGhKYDmBEdohxQ9sNKDhjQ9EEJoKt7fmAB6AcGgG2vxWhpdzLiAO1mcoMstYeUHQC/96rsh+8+IPHIDRmp+eqACIZ/nsz6h+Xba1A1kztjpjUSk9E21MTk2UPK/tYhU/jkUaJJcVyeZY5b5mWExLwaZCUxraDvJjYOQYt0Pf56gchRZY1IENPL52L75IPEJsz/NTGGCqywZEbpoyjnEzNDTbJU9pDkKAMV5gpU3Rg7sX1CoSDuDbrcoc12fDc/SOzJ2wbVFj6qk/nrIMiJfWafLp4UL8erUNd3j9uQ6Kcpwqo0qYZdZuM/tO1R+e35bvyQNrtUxpzdlauxfbpMYpZSmcInihIO3ZkA82xiOYhuG7f5zs3u/+1DmTAMJjFqWy4eK1deVBIL8ESJDkVKbpah4j98Cq9KSjAl8gzkbYPfT0wIMP2v47y7NBD9eJ8Tk9InqpBYUqfCH92mi8vtQRjnanGbNJqSc6dRy9R9KmaNgu3DxhrVbvmeyfiH3+rQ3AksxWRYdxNzkKAEDedOqZaxr70Y24eSc6dRyzgmFbMmU7isUXLuNFicpjvfWIoP63biHjLIxFi41cT2CRrOnVItw/KJbYQJqmVsNt7nJEHOXaDzLDpmOOOON/Lq7EfE3k0AnhTOp29VtBsqf0/GrNt83eYnRbefqi0tPQNgr7MA4K6lr774wnuu1nK9Vj9XCh3ss+VchzdXet5ybijnspmzm6ZYlbtXOgsA7ihsmFpp2hIBZFwWgMr7j08G3/x+dns3mJ1mtneDdz3APP9y8OrFslzUXUPgFQeAawjzld5MyXjXGRPA89fld50xf/2zpZWGCIBlXD/gWcbN5bgrL3O9tpQrWtmi1W9LANaLSNs+AE1kdYGpap4pYUZ3AWRcP+v4uwrfRnpDQHckTXCryASvdwKbYcQAB+GZ7vnwNoDJYrgn19tZP4dLzbnADxiWGZRf7i+97oZLZVHsAnhKnzlT2up7CoAsF9LBVSEHwGJ5yXcH5dKU7QdeJe01NA5AJe01XP5ahwPQsFgAFclfbcs9AwBsJxxkUfAuzERoa8dgZYN/4a+UbNHLFb1em8sVvW4T95dru04my9kAMqxtuCmeU0qs1/K5Qdn2uJ9bHOY8ej5ffa5QfY9UCP78t56YmgfwZG3zqdrmd1Tnbl56NVMtq/vNQall/dbZ0wBu1TYBnJya/
6tLz3zs/EM7VX5m3x2UAMrX+Eqn5LoKzxuuq7ieMjsrLcy7rbZ26sTEzdW9UjFd1yrlu2emz869+ifPDR5j6szc5//oRQArV3eWz82sXN0BoHKZjYb9/MtbAN71wNzzL289fv/09Vc27rl/5rVXdgDcc/+M6wvf/ANPDG7CMa7tyRzrfuaXvvTcKzuP3B/mjXrulZ0Kz6XAXHh4FsDFF7YvPDzbvlH71EcXwDFgWfg+vKDWsiWG4RiGZ+AG8IKAY5hcZ0M0u7acF80uAFvOVyYNTM70m0a2rPSbBgAlK7m2Pzi8NfDyd+0gP60CcMzhavzZz+0AuLGrfdP9E597Ze+b7p/Q/+L1UycjQWnqVrn80U8Ld4ZP7ry5Ixpp9obsZFxB5Q/LssCqXrhtZzgGwA7UgAvVSkogAtCyZYuvAOAOrLpMIGlBG4AE0YI9qAQpBgIDJ7xboLA9dZLlAwC2nwJgeekHJ6+fPHEWQGm60tptlKYrxSv1zi+EDIHFhCT1W7753dvbzdnZ8vZ28+GHzrzw4vU/eaM9vXwHgB0nA2BGUNs2V7Vsh2UB2AwDwGaZk/5M0c+22f5heaPTz8oGgN12abrY6puKrerTstDUQ3VQQ/OmJn2XtViBBxD4PsOyxSA4dUD+GggqYAD03D4Ay7ckVhqU03YOsgdAgODAESAY/+mL/dcj2aBO/eTHcqcmAeQPzMTdrqr+ztDYzc+l3C2d0/sBLwBgXAdAwAvfdJd48jHx7BR7reYDuF7z0xzzpdd2AMiNPgCzkpUb/RvnS61qZCti1s80yx8EUG6Gv9LasBjd1Ws+gNQUq9f8nbJ08XQYdeQ8p1/2Upf9FPPf/iK5Ec2xqoIIk6/MvKDMRDi49esLa9fJaF8bOTJoV744L0YD0geNHJoRx5szC+pPfg95Fvk//OIc0fKJB7dnShEq9tSzuaeejdyqlHcfvpfkIl9nSO66/4bTfyOiRc3cl83eFxHutl4xtl8lb/WZn14nWv5i6866HuF6On2v24+oHTOie7pIymhPb5Ojd6qqn6pG+FbV4TUnohHudsVul5ROPnCOTFu415T3mhHNYKlolIuR1+ECPuWQ8uyDTTKKnBukDp1qBkjXG+la5Bdt0e9USE3rxGOPES2P/XaruhkZ9kpZqpYiZz2Y+Tw7R4Z82vjFzxAtT19Tv3otwq8VERSjwWTO37v4T3/u+4kL3/1+Mj/OqSPC66c/tvSpj0XyqD3/ovr8CyR7+F/9OJnvLTdfElKRr5MrV3PlCI1+8S9fevGvyKBg/+Nnybx07+mR2dcmvu/7qt/3faMtbDcQXzui4C6R5oE9oWuxkUY/VQ7SEe8mzrC4I3HtuSOncDRjxo0egvnuMy9/1+nI62xerv32T/85ceEP/lNyMvxc+l8SLbm+nutH5r/DKC5IBffVHklkbNUnrOJ81uEzkVee4diZI4En31RJ0nffnctES+8//0XvV/9ytGX63ac/9rs/QXT7rp95mmh58Dny5h/97vQ3fnfkEMAzXzKe/RLZ7bnvJU2ePa3kRBP+nH315tlXb422bJelP3qUDFF3m9JKvpxYkrYzC1Se/DvtxBS7VnKnT6eU+HFQ6Q58ldLxVrt8PjGdICWYd+Rhp41GYuNAo1F610NUJwkJyv5W0OXiNRJ+nspIQ1D28bc64hk8Fi7F8cbNy2SoxbGY88gQhkfBBlR27AT9nVQtfjrsPksVFaefiyetj76fiqbxPEU+mfKYA7FjnkAPjvX06fUNqpUzU0zMq1KiCAqWo1Oz1oz4ZHUZulANLS3+F02L6nScpif2BQm2/W0HRXAPAJvN497kksLMSTKT+NsNyU8sICLPx6ub58+TabXHYosjk33fNlg+fpPrJ+dnTAka4r65ShfExY1nGmaaYxbOmCdIMfE/2W3GEzUADl0eqXcgesfu/UYDy3wnhmehBKGTGY88VRye+XL8dL98idShjUWCjj6OTsFh3dqhuVXeS2zh0HDuC
YKSuNOAAxXtYyk4mSydK1pfT8xYmu0da8yiJDl3SrWMQJcBlgbHrJah5Nxp1DKZI+moxoJGLSPJVEoSzfi6jfyTHOd+/t7F2D6gU8tQIkG1TIJIkHNPUC3ziPOFt/w4IXyqLeCdiPkTb+865XOrf2oVTkudoSLJLC9vlIdK/Xxgd9oLeS1CemwY5YnJ5l69PBLpiV8I6W+72S+WswAsi5uyIhNCKHtCNGFTX3effoPcY1aiX6zIsDtq8OfPDX0KT83pWsrTUp6W9tIap6U9AJKAliU0LbEs2U0rXGn33zVcv7s9djrnv3iHcr3tdgUZQN4xu4L8YNmYzvWnc/2Dbtn75/qXvlS0FRlAptVWS0UALzYipGcmpT67N8cxjOmN8AU8kxvJCmvZniRyu26YgSXL9vt+6F80VTQBqCYPICO7rMCsGxENVUFwTTbckPnAdRneYrlUOrIzOTazYacBLIjaoAJAmXRHNTOMja6jaEbkQxRg7vQzAEqy3TLDsZpEZDNW4fz4hfc9Wds89JYBkIbyxL3vGe32l9cu3rNE2oenZu95prf7aG76sOx/v1zdjBDlreu1Ny5tnJgqrdZCt+4TZ2b2XovYEufuWrr7I4/Wb21OngydAeq3Nv/Rj5EBY194Ze8D7ztz+OerX7l6/uGTTE6OZMV0vL/9dz80etUrL608+t55BRG27kyBDXzg8JtyAUYcP8I2BKwSjmfgugzPB667djnMz1eopjv7GoB7H68y3PDmgRc8/I0PfeWzlwyLB6BI7qDy6Qsht3u1pp6bygCYP5JIyClO2K/32UkJgF+3AIAJbEVwlMiQBlL4c6LH2ZwHwCSSiQdmeWe91HlztI1xvJwQfoJa6SSAqdYtuZTbyc7N9IeZb1fl/XI7EhLAP7XoelmWtX1fBMCytsfJfooduXMQCIzL8QBY3/cPXBI39FODUQ8OiEHL9fYdqyTwLccFMKik+fDnJD5luTqAohQ+p+GlFU4D4CupgB1yflov4LMMU4q8tWE6ow67Cssbvnt6OnV6OtJtl+Vh4GV1f0ZM7dg6gBTPKqWQxLGG5ivpfEVUNxvqVjMzV1a3Qs8CrjJcOL7uAMhOe2nGTRcCrcOkCwGA3vb+y78XDub0+fLu5SaAvcnQPy2tGlpGAZDZizhf2GnxjtatjlBe1IbRsE/Nd08vRAjypLN9pxkxsV7kp/nc6ucQxbXJn1K50XdOV9pwe5GYwuKEUJrkS9HACOWzh1eFtEzaYaXdyKpIlUylEIn4rOru02+SxH3Vizz6KrDxXEUyhkqJvwB6Fa23pALAgZVY9UVjvwjgBobP/71zoyHCPQC/8cXia0PxPw/gsXLv43eMJPabBYDW7Pnwtgf+yy/tk8Jdz+EcP6IqyQlBThltEdpOpWeHVPuQq7l3+fqgcrg33tDyRpTJano4DGsx2FVYzlWixF1JBwOafkjZAWR4C9Fg36qtGFHivgkwHQHArZGxmsxGiPsk8PGpxf/hQkjK/weMx3/92IebG3tE48zcPHAfgMPyn7B/iShN3trcePHijS+NtDz/lYsplQgI/tR33/zXo2n97gYm0ZOiEbqdVMlOD93Mf+i/g2U5jX3Ss+LvRIk7/i68AqkA8S5vepdHcnk72KjbNinXM+wwpuugIq29cjiFQu+X/MT+hSdG1gjH3Hxzb2c/nAyHD/fN95GReYJTxSATkQNW16prr/fx+nDl+wXo9xPXwXcPnyocH8YpDijvwYOjtHPr9JtfHb0qK3czSkhl7z1snJ9DNGbR3d4zVjSF9FL9tHPizNDo7sH19EAZLudAAQCP5QF4I6s82Ar8qAdZm3cbvLVyEJZnUFkQyS84WQyJe+FgkJlMlc2M0KtZ6LxkCFF5pdFp94e7YBs2gH/y8TsQxY9+5forjT6AVw7m4AmOWSpFFE1NHzd/9xniQm55yFhwZQAIMv6JCR0ADtie7s7+yy+RSrm9b3vfwYuF/83WN4g+j5gbZT8yS5cfE
E4+EOH673x1R32VuPDiGLVMtnntaCMBe49K1cD341Vdqp6YWKXRnCkCLq1TKXZpQFD2txvB0TzCtw0nsQOxCcKRqDQbEkXuBcs6bqNrpnCsJ3WD+CNTAMBSLAqbQt8CwE3OLDmbobKI0IBJTvdLg3zlnah8E6fGGEHHEIt+mXTgfVuRSVEpnkbZ9r8Oaf+4x11gE0zLkphu1KPxcBWoFuoDU+SBg9vGiVQ8NXLExAYhQWzUqPYJmnjup+6kOqEKNX73Ykimdjz85BYFn1yEjG013iIi0wWQoEKKave6vxLvuZenMOYDmDDjbbPdKpXrVJPCL1aaHnPGeAwVUEvxxJ1LvxNjy6STo4+UoOHcJTp5QqHwamA8qunO0eWGpsHLta3YPrZOZQVd1eOpkWBTSYQWRb5jiU4IoElAQYl+5514MJ0GIoWPIyVmT5HWl7cbAY1LHh1xf6URf+amS+eGuyfH/2KvOk1zqzPObSZVH0OjM614tQwluOSC69JAo/P6uneRihglBSs51ilJtYx+3NvzByrx0UYpOXcT8d0o1TIBHy97LUxRfUEazp0WmcS2nGNWy2zfJFXG/9cEDeee29+N7UMJa3dMXE9e/PCSd6vj3epwJwverY744aUSx5SC6xvMmfd6n91gz2wwZxaC6+5s2t5OA+ByDgAuxXZUrtN3lqZDPWNHdaW6re9ZqQlJ37Mqd2f1um1pQUNzKmmhoTl3TKSu7OnzNq9uc3fe6b35JletBhMT/t4eSWWaNg9gtmzNlMwXb+Rny9Z2U9pPO5LBA+iz/qzLAZifsVFy36zxAO6ccs9Pu/ub0v4W9hhMBACgARqD33yD++67vN98I2Sx75rw17QM4Mgpkum+tJsCcGlX+f77m5d2Uzzn85zveiyAQeVV15k+SAS663vTLJdivV2HBVDhGAANL8gJeL3fm5CGan3NCHpOUFGyDaPfMPoVJVtRsq9te/fMcvV+MJllALy27TmCn8sAwFa9l0tLPc06N5+r9cIpMpWTaz2TDRjeCNfqREYEsKfaOh8AaHTNs/N5AM2e1WW8/b41lRMATGWFWt/xHF+RZACtvl7KpgC0TM11hiNfd9R7UlP3++orbOZ+Xx1UBv9r5T/92+Uf+K9X/tO/Ld77cPvSC8V7H9ZXLg7+1+y3/F0A3asX+41u/tyFQWP36sXe1YvSJz4BoHznBQDNNy8CeOmVv8KJRQCoFHBlHXcsApg+NQNg6tTMq3/+0uDPVC403Xev3cqfDfeDxnprf71dXSzur7fveOzkH//OxVOL8rsfCZnE//3fPP3gBy48+IGi58B3AiHFeA4kSfylf/fHgw7nHwqPzDz37E0A9z9w6v4HT7/y0o1XXr75Q//w/ddfCtNH3Hhp55t+5CEA4FiMELLNugU2AMAwo81gAgZAwARMNH5yoZLpHPKAhxcwDIIg8H2T82SPcw8Ct7msD+DKbsh+3jGd+oNXmucC9ux91drqiNjEhGY9phTuNEwBgsY6aZOzedbhnbQpaLLGsrzguQ4HYFA572qB79zptt7kw+BRp9126d6IpC/xXL9R4CwTgCcNCf1+d+hhUs0rgeODYzzD5RTeM0JTGQMfQACWgR+ABcD7DACXDWSXc1mf91kmYBgwAHzGZwM2QDA9dWsTs17WFXYUAH7W8Xhn4JeaPXAZkCAIHBzTVIp517B4RTLaXVOUAci2CWBQTx3o3Pf2DQDplMDkeQDn/vLFxsiRsXrX9DqmtFiw1jvSYsHtmp//ta8u3zO/fM/CymvhhnSyzq4ZbCvjl9SwbAiBmQ7OaUxDRMXG1XTw4ZRUSIsdzV6sZtf3+4X0wcb/2ibumQ8rwJlz2e6WmZ8rAuhutRfedWL9D3faeoTmqNXpE1MegNUad2LKW57yVmpc75Yg9QUrO+RTb/qlU3yrbcsAiqLZtmXAu3JTBnD1pvzNH+1cuSkD8q5bBnDZLZ3nQ5sz8ys7v4oo3v0XXyFaVgr3rRQujLa0u
9zF10lN0DRPBjnZckkTk+SIshPhSnQh2Dxyqv5/+pZNouUXPju7uhO52w+/v/PD749I+r/zUul3XiYdD1yHvFWBmxKiMSu6bq/rRjzWxUAoegXiwmtLpGLBVcmIjym/p3hkJJk2GTsEUwHJKwW+j2i6HxO+EVWwcAEju+Se5Hsk7+kJXBBVj7Kez0W7OSz6MsmIvTT160TLxSdf6+xFRyYrSlFHDtNw69tkzntlYZZo2Zpa2JqOyOxuac4tRnT6js91LXJklp76EtHyW3/+8mYt8i1+9Mc/8aM/8YnRlkvPXfvJ7/5fyWcAKbRNSfkpOWKM+sgTMx99f0RefuZS96uXOsSFP/VTjxAtclHmhMjXyVeKhUqB6PbDP/RbRMtnnyF9jRaVnSU54llR/PD3Fj/8vUQ3bJKvs8WSLP//4r5xrx89jfEeF++NyLh7N40v/n/IZfLM06QEf+9sqaBEPv2p958+9f4zoy2169t//q//kLjwUz/1fqLl19lPrjORQU6zWoaJzKLAywUuqUrelsk16Oy7zn5kbj/4xtWH3iDjxf/qqfNES6lOBhIQ3v0RouXyxuab6+TIPP/DDxItn/rMa0TLN33b3R//1ntGW578hRef/EwkQtc3/tiZj/74meh1+JWfIBd4p0A+QN8QVIP80JtzHaJljGzeK1GdPqWBRGES0YXEFMTf8WBiUbBFCq0uAIajUgS9A3Fkj3jb0csmNq8SRIaPd52an0pM33L1KknHx6JAk/Deolo4JGX/OgJDpT719PhxqCtUHjVBl85ITYEzd9CleqeAbN5mnqy/ebtoyvmbf4ajsCmc7SjhJkf/Pbog7AGFVwOFqhkACtV4imwaVM6svUz8rSi9jxam6NwAKZDhKU5d1hKzN167RkXcE8Ql9lj31Nr17fhOx45JI7FwDvdPZ+M70eHm86SqYyxMmSpP1lGM5dwLsZd1ulT+i1ZyCROWpxM8KB4PSs49USTmRMzSOdXQYO3NxOxjOTUxFnKjlhiHVTPjn4qScyd0MmNx9iydK2RyeI15JwpMC6CKsUMDX4tfOJSc+zsTvBsvXJoSXSamuZuJnS/IsfE8XYruNM3K7nHncqNB4CW3ByTnROxTONVQqmUKE/GkIcF01ZSHwmg494vPUTl9EQr3saDk3D0nXuw4d46KuHdcCvZQouKcvs87VveVqTOkrWUsNpBYdEyhGj9nako6tg8AJh8/rx6YSoxzp4TLx6unLErivnWKKuISDRLk3BOEcCQDwFFQqmVYMbmwlxR+ZhxdVmvm2AOc0iDXj+eRKdUyX724EtvnwiOJncWj5Nw9O/7hKXXuhDV1PLpU2jAatczezcSmMSVxp+HcGZZK505YU8diyiAN/mPhb96K7fNyLbHkqDfo1DI0nLtkjUuz5zxOEoK/tbKFaBTvu0s37i5HLLalCfPvfKgevQ7/PvjXREu2XC8UI1HiWlx2i4sY3HnbnWuTX/FnfpXc2DNGbxYR3dPnXhb/6Hok+YhoBpJJ2lQzR1S9dWOvZUVo90SOP1GJSAa2qjkqmU/gjE4Gozj5T76BaFm//9vXHvg2onE2Tw59TyaPSwQ2jyjtTsl+QSEVLO0e+b3EAnlzSeMZL7oNcD64yDeVfZSPyEzcD5KJipa/mW/3IvcXZUFQSHllYY086+SXyadarpHEyPrF37N+8X8ebelNz1/+xHcR3fY+ch/R8mNZcj3f++iyI0Z+Ua2UP/5vfoLoFqikcs9PM34mMg4T/YbZjRwbsTWX4chX9hXyGb76K39JtFz49kfv+7ZHR1vumJgq/wNyrH7zv3mIaPnKA//P/VLEEUtZ9cQbEQpliagfybT6F+nfIZuWC0hHH76u4jcj0++UGOQ/SUrGokMO+8XXm3/0lQg9+tbF3skPRVRkOzdqpkwKOoFFjt6f33yYaDGCbAsRplhQdUEjY0wunyVphTgfsFJkUTTPPvJr3/Hp0ZYzafz8Mslofu8//g2i5R+LrXP7E
c+2n7Vyz0jzoy1faiFfezN6HbanSXqlPPNG5Y03Ii21rncmMsjOYvk7vkoO+2mZXCb5j5GU9i5n6rQdEQH/KtBvBOQeMIaz/kBx/mjj7UE8QsKOwhWphHqPixe+PIqw/QAMioT3nEh1bqX9QTKR21HwHJ2+hYIrNyhPX/EUnDudWp6g7OPvZB7roTAAzHy8UD9bpUoCQ1D28bc6slGNRZZCWXTr5cSUJCbl2Tg1sRg7F9+I5zRnTlPFcz+hJHZ+hwZn0onpPB8tUsn0992mh8sYmNfih3SZGaMfHkPcV41442xrLzHnMN6mki65I97cY/rQJdxS+MSURcUv/OfYPq5HpySh8IShsNgNfpJiKtONgSLFbwKsTBWIjRXjn6o3Q8VYBJvxQv32PlX6xiMZdsfdqnncIcMr7fgT5zIl0Z6kUjfTYGriWM2SXnKBhj67R7UGz1FoBZ9pUz3VL27Fz3Z7kTyUMxZ8OX4mr4xLSXibZK40QWViEhWqBUYDW4hnVCg5d0InMxYc5Tn45Xtj+1By7gwbP/8oLHZ/A0iQc8/tkOc1xoKGc3+YLiVQ8E6M8YdGkTwcdxQSJXHXjvUoxs4NqmQdCYLQyYzFxyeo1uAfUCS0+4fLVPaJ+3PxT5X+8nWaW9GAmnOncA6jhNpOLOeW6CQmXSoUGh6bjCo+HsaJeOJOybkHfvz8UxKLVUwLw4rf/hPk3ClBw7m/QJcS6OuXc+9SqZ1AKtzHwaLb4aYmjtVjzadjsHwrfl5dp3CXBHAHBef+v68kJr44lJx7JTnOnUbnXpqk4twzxfgFlqDOnRI0ahlKnbuyeuktP87/D3DpiAxpTR0LOp17MRf/k5Scu6tSZDfcpeLckY93R6PUudOAVuc+Ga9zP/kAVdDEKyfPxfahVctQcO6UQkBtL/6sCaXOnQaCSsUj03DulDr3KxSeuJQ6dxoI61TeMm4jfiaP5dyZ3/qH/zL7yc3+Hw8Jemf5PQDevLh554X5Ny9ufucPPfrFP3vli3/6ysUD3d2UjSkHJ68+W10QJ+alvU0LwP6G7fzIj8r6qqSvWamlQelf27C+FLobc+cXvMsb0g99qtz02kuTxbV6e2ly+cnX+jPlq98VxmoIeJ9xWcZjg16eF1gAruMD4AXWczSl5ADweoE4y9nbniEJ/XaETGdZZFjyK14U1SlfqrHWlC+pjKsy3oOCwh1opbuWn5fYrXogyYw+skFKOa50tuj3+mwu6/f6/Oysu7195vd/FUBu52Zv5tSgW+21F//EOXPBXwVQY4pTQXv6wn2P3jt/Imh+kT3zAf/6KlPeFCobl9YzGzcAZNZvAFAXT6/+1N8DIAqcqtmZtAig0TI5MfBshhPDyeq3PKbnO3oAQEgxAMAw+ZOi44QdBIEB0FsxsGoDQJFD28MJ0Z1K+wXZkzgAnOV5Ejf/pafnv/i0Ojujzc6kt3cAdE4t7bznPXZKAiDqFgA7JX3D//FLS/eEM2HttU0AP/bZ/0mQea1tiQonyLxjujcvvblx/crm9cvzZ85vXr+8cePNhdN3fvtP/GMAgWkzshiYNoCfffozg/vM5iYAbPf2fvBz1j2/flG7bxgbMv3JC73HT+SeCL0ytn76l7NPXsk+dfX5B+6f2xka3H75X35YMhyXHy68V373r15/+g0Ad9x/orHb3t/tfM+PfeJv/dgnAQxiVzHwb15c+8+/+ZKx0wCgbzdSsxUAb65uT52YAVBb3amv7gCoQFg6N585O61e2wWQOTv94InsgydyzMjB4oAX/vKX3uhwvByE+5PJsPf8o08RM61xadeoLgMwqicBKPu3zqV7PEoAZpe47TVvZ9VlONdSbwEw6nulu+8y9vYAtFmuIjiNg5hLDVfMn8guXn6tsLbRWVroLi3m19bZ6vQdezem9LVaagnAlL62a00bfshpLtZurk+dAvDdv/cIAOybALBnYkLGjRZ0H/1BUC4FO60/uGr84TXz03fm/vDN3qCcU9jvv
CttN53MaUW9YYhlAcB+3nN6Ympe0zfTQs4W8s6NP+ip285qkwVQUAIAfzHrik8Unt7aB/DfvuuOf/X8lXtc8afbEoC+o2eFkMn9X86Y7yqW/83qdQA/fuLMv1m9/vwn37WtLm6pC3OZjdnM+ra6KKc6N3uRTF0aJut7J8WTitd2uKLgtR0Aayo3Gah1JgMgA1uFKKlckXH0/y937x0nyVWdDT8VO6eZ7unJcXdmNucg7UqrCAgkEBkEBpNtbMA2trH9Gtu8trExxtkmmCxyxkgoIaFdrVa7q81hcs4z3dM5Vvz+6J7pqTuN636r0lh+n9/8SnevbldXV90695znnHtO2g7A6Snk0vaa9HNSNqM2tXCzZV2hIAmL1zLbt9cD2La9/vq1BQDTU6NMTtKd4urR4/SdmFA/0Kl/fozZH8C5OPa28H15OSs4XHJ5YcsKjkNc8vy8AmBfAw/g/LxSW+/3Zwz6dZ2XSYLZ3ej82rnIO/eHLs3l5mOFBgYAOElRRR6ABDX+yh3cWDk6Vu2s48aWPnxHLYDZtLfJkyo1UmlDvJ8j4pqXLksXFwLttfGJZQCdt20eSI+Fupdm3A3NmfnSsSU9z3tePQ2gdCwhyE7buZu372kBUDouCLi0xiuzIEIpprqAyJQUmaos+r5oOcGTLTdROmb6K1Ffat8UAO/Pnw+EmwITiwBKR8/css5XdDqd13ReE4Sy1OZXGu42tWRncB4AEJu4fBSa0YoqAL5qtv8CW1w9AojrateKYu4TWQBpmz6zYFj/cxk1uN/LerwASke+qck7N1L6v6uN/5S7AVxiy0kHFxh/J9QOfRnAHdoQgA59uUNa/sUzT609uXtyWFzxjbpXssp5fFxRVVYlOwANKEl2rDScYQ4rMr2C8ZVHEFdL/5R3VcKkSiK+/F2zc+7ZMqER2dGTDZX1X9lVMVNKMr1y6wYnW3Z1uQJl4kWw8wvTA88+/H0AU8PlgDB9xQpg7OLqcRWzqfLcdV4YA+C6uCaU+NW7VyU7gOY/f5eOb+vHBw6dv7D2DLa8DIBXKg9bcJanY/+FSrgqAx0As1Lzbejy5PLzlexRudkogMUVmb6KWkZIDy6kB8smZnpwIfOKbnR4db6iCsUWsgD8asWIsUOL6yRHkd52VF7TmQ91jS7FPZEYgLnxymcddXXEcXghHTWyvW1Pn/TnEgD8E1P+iSkADUvD9a4igPrcRGlMvTajLVTIw7aF0pw8DAAhe+V4IY/FlWHpiur9k+up1eNMXpOWZQCZ4TyAUltoVQWfDMDZUv54ZlYG0FFbeVsDTv2xmXLs4N+e6QdwhZfcfACAm6/QF6cTs6cTZS21JOK/ODjz3p5KPaYm9+Sy5gu4DDF/MlMnehwAuICwekRGK0l2ABmIADiJyUnl+I6SiM9l7Pa5fn62MpM5SQAC164tACgdAfh5CQCTqxwvzkoA9/kxBsC5OAD0L+WdNtkpVe6bWMifz5ffypKIB9CxSFIUJzIcgMtzWQBfOxcB0MDqq943TlIA8Bxsv6iES/JjSwCa7+cBNHvL3HizNzk1pU1NGvLrOSMuCShJdgBjTw8HarMtDfMt6XkApSOq0jI8a07LRD1UkT7KgrkdN3ukk+ZUctqc2LWvU9urIsCYG1+OIFU4UItobqPF5xM0pyqq5gSITMFsABAGzOOdOh4lUy1WRcsu8yTslBjdSjFnju0wHwNcePyM6ZhXvfNWmlPlKfipmnrLAk4YzjL6Xi/QnWrR3HvU7KAKrGg4aM65/1qBig3bGzSfDC5YlofHQfGeAkjL5pKBMtSumWJeKXShFsmEdYnDeMZcuAfTVLls7LvMPQaeaao8IZzN3NdR0CzbmZmPUjkVpiXLorVsnDmxK7ipZpbca1mQ7fRl8z17lOjqo5gzx6/SnGrvyw6Zjhm+RJVFw0ERMBZboPKu00BXqej7ktr+34MN0LnXd5mnJ5zJUykN6Vnz5eQyb1ngQxZUqRp4yfwJ5une0waX+
cUX6Cj3FGiS91HJq2TS/HXOLlfRP6rcF0U3d2pRau40SLdYluGPEjXsxmruc1Srl6LTuTgpwEbNPZyJTe1WfV3LZnLj7o2DTnOfHzUv/rd5N1UWDRrN3UJYqbnn6QJjF8xjLSg1d0+T+cq0iyKaEMDeWnMBUqdTrfSKaP4EKTX3+az5xfOWBXbQau6tbeaz3VVbRf+4wTj3LXPm3wegcNncHdz0rHmeEGsxqpqvz45aqqgvCzV3VaPIfb9It+GLQrj7RyZoTjV92fzpnPr5j2hOtanfMs19ftQ8r+zDXztBc6qcbi4i4/MbrbnTgHFYJmYoNXcaPEixKR3AhWXzybDEUK30NIhnqIg1j2A+GRS6JZVGc3dIljENVTV3vv/7lXookevu0LZMfXiiJnSyuHjNFt5e6t+uSe/faryOrU03MaT469xiH49rHQF2PF6eK+Ef3Dz6nEG+p6flzIzB+ZDpEuqPnwLgnpzOtJUZIclu8JsDmOA62TW+wXSBqVMRZiuLkxvMkq5nVHK56lKDGWMZyZAuzudEO19ZzB2OYmdLMeDV46nyI2H7r7Q9cS27qRwV4xoZyW7alGuqk71lN5GQysle536vJug6gOZabmZZBSDk81dOl1MLLcyk65s9iykmVxeSXS4hW5ERtf/8H8XNXbbhMulR3NwVu+tOSdO5NeVNBAdjr+VEByet6GiCi83GFCmviWtULWm3nU1UPqX52e0DJ7VM2Wavmxtbauz0xad9QcNNSHR17LlUyYXSsDgyH940vBgDMLSw3F1f5tO6Mzld13RdZxgGgK7r8bgyMWXQg1o2b9F4FoDOsoymlY7iRPnt5eIFNWAH8NV7tMm61NO78wBuu+QA4K297nvOQNGeODje+c5EqX18d/HYJdvx3cXX5xKiXJAEe+kIoBgmmUPd6bPJhhDQW95850TaGdpSfoKR/hEA6ZOnQ631kakFAKXG4POjUV7wKGqa5zyKCmAODJIGX5Gr1p4xbg4uskzAFgDA8Q4AqpIH0MvE73Y+MVRo7F5J+6U48oI3NZ2pa3EvTWfqADCcwnHpVntkqhAqHQHcVYgMyQb71bOv3mOMXHY5WKfdMLcFn0/qrSSxEjc3SsNz+XOkOvWdC+n2onGx59nbt1ZkQXtIlNPKNx6vJGW6Mlvc2WR7b4dPEjRRZiVBAyDK7I+uqK6VwOUsy7o0rU3CGxOGc+9UxMl4zilyOUl1iuWb9ptLdaNeA88594TUUGuYkEuTC1x32TerTse5lkCzNBCQyQpHmde/f7XtDooAilfyWqp8KpsbxQxsjU22vTahp0UeLJMQjrRmWzDIK09bsFCoCCLv/QdTPznbPK81hGxL40kAdR0+AInFvLASblC7qWl5ZFYrIJcGgFB7M4DIxAyA5A5FGFkGIG+qLTUOx4MpMKGmFgCR2WkAgTqlpk727GlIXyw7PJ2tDa5nDMLQ39RwvmFm3u2cczsbM7mGTA5AYtiWdJTvXlZQXTKHbbX2fQZp7qrN1tRUdvY5gl356Cjz7vAXiNvXteVM19aza3sag5sbQwZpa3cL9T0knZIfIfNFKJ0+pdNgf13+yciVn4ys7ZGSjJQkV7lEK5lK6fFXvIro6dSYTuPGn2WGia5LnFvLkJsOcuvS3YZ9mbDPkOyNP3FOeOYcMSx9jAxS9qqyYLTrly5GI5cMuYfYJi/TTBqh9n6yxNfAb3wg3WXwLfsUxrsukcDs7LqkdB2kMvjW75Dl5ViRYUVy2VsX7oEvPkL2fOZHf7Hz5m1re772j9/66j+RGZd+NPgQ0fPlj/4+0XNix/SJHYbMKi1zLa3zhls60zA600iy/F8++Vai57PPJUeXDbLvA6/f84E37F7bM5xzDOfJX3jmxz8len7+k+cW5gzJ5h7YE37bXkOw9kJSfvQaubNv15//MdHzJ87PET1KOq+myWBtQU8QPalR0v4rtHRqTsPbq4guxWacyYLA+Envpe/0eaLn9HT6zIxhzuze5
N6zifzgvvf0ET3P/S0527/zXGbaeNs3CeomwWBNZiRkJHLSygXyB9plF2sMKmccKTgNN7k+k61fF+r++AnyB8p9MS1luCqhwSE2Gh69Nierc6TVK20hp4d9aIFNG9Z1zhNgvQZBpyQZdZ28Ohcg80t3zbe6C4bn5dxWcGwjA0yyPyc6MH14gujRvkGO8W2t8W4zXJVfZ3brpByoQssQkr0qCpmNLi/XNWK+VfelmPEd0GZvsJDKehSLVLTMUh1FhhY6Qo6Q7FXx5t9+gOZUk+ENrfdWI1Ddq4Ymc5//QpJqtk9qluUoJyR7dchUVzWb2tC0bqJ1lLSFYDyWXRZL58bO2M13YKlUe5jA7DQfUxUvbr51Qm2vCm5dmeYXGzQBk1qbZS+qhbDZLGNsKfHg33/fdMz1s1RE+WSdZcKdUNurIiZT3av5WfM3rN5H5VmZUs3nDAOqJYfNWcby0+D8oHVlCaxDhm6XuIUg1PbqY+jkVX3CPDvQi40qwn20j0y1vB7+BssifykRrzHXsDa0EB81GO9GZ4SpW6LYxL+hQSIAcOtV85333sxGh04RnExVUAr3WwSSx3txIVBdVRPF9Ds3aNlaQlGwxGIwXvM1QE9TXZbcsNHTjwY66XeogkS1nLIvrubOrqvCsR5qwbJ8Uv+r4R01jyinpGWoQPfkf+3332g6ZttBy6IaKPGybvPkTZS0zJ4DpOv+hkFDy+igsieoaBk60NAy+3uovq6FLscODViKLC5uiSpkXk+ZD7OQlqHEgt889RtHlTcMjPlGhergxdvSANQJmzohltoBbTbh2ZX07ALgS18GkPbfzrK5Ra4lrE7XazOXhZvsC5d3uxfC3vrF1ELYW39l+lLYV3/+QhpAR4M4Pi91NIgA7APxyEKudktguT8OoHZLQIsX23YFE4s5AP6wE0BkMN90S2h+KAmgobucRSGnybNLHICmOhXA7BIXbJi+qhR2sHMArmqNAHw8hvQmALUrQRI+RU2CBSBAAZBjbIKuyoIkC5IgiwBkQXLm3S2XvhMP7wDgW7iWrN8OwDM0qDp5pqVJn54tHfWpOUlQAYgyt9oAEBtccNS688uZmp762ODCUlEBEAy5opHslm0hAIsFhQXkJh+XLqiecrB8Ma0k02UmwecRkmn5YFAo5FS7kwNQyKmFnGYTuVWTXVfA8CgmNbi4YlYHIGU10cWmYxpYCAG7HC9wDh6Amlf28IndfPySEgCwm49/pdCx+Lr7by8OAYgMR0KbQ30P99W0BUoO1Zq2QGwyDkDnAZataa4FEJtZLjXwyFBXsx3A6Ezh7pt8Y9PF2OhgbHQQQE1XT01Xz8jj/+VMj968zT69VBGdqdG+1GhfqVHqaX7ZG+SVNP2yJDvdjlwmz9wS/HPfy59Onr/Nt690/PLco++66eWlYZemRwB0bjn4kMoAuNVf8ajrT40DYDg7AHB2qIXHh3Ksq6Bl7Xw4qSz6+HBSceQlFbKqu0RGUiFysOsSgCs/fCK8pQvAYv8ogNR8ZYO7tymcml2MnO8PcHyAYwCMSXqnyLhsnMRxRZ61KVqRZwEsJmWPwgGQWE3U2FKD45MAdM0OgGELAB7p8xzhZADX+iQA27eKmblk+4qj3uFx5NN5AD973g5gb496YZArHd+4zSCeGAgzLfXuXDEZrAHgi8aSwZoaWQoospTXAIgONhNX3W5tbGRpcbi8jTO8uQ5AbZ1HWUrzdR4h7JEX00LYM398kgP4ogxAsQl8URYZF2s3iGmfnwPgdcDr0GdiTHONnsozMsMDYNdsv2AKSvlYUFA6g6pAYBzNDgDOZmduJhfwu9QMs7yUA1Bb5wSwvJTLy5yv3g5g6mKidY8fwNJZuXaleGF8TgbQuK2mYKzExEYWMZJj630A2AafNl+m9ZZXCt0tL2YAtOVZAPlYZUIKDZg7HvO0OdKT+cZjNQDSk3k5l09mcwBaQqFULgtAn9MKszl7k7N0BFBYznGSnihqAPw2NlHUQ
mohu5wA4PHb04mCx2/XisxSJAXA53Ins5nWUDiZyzaywbQtB8BTdKZtOU/R2ZHX7N5ruWKd07YUTW0Peq/NjdRLNnJrJG/jYM+jsOIutOeFuAuAEHfJgWzpqItQwzofdiiL+dIRANt/VttykO0v+0eLee9QbdkyHppOdLf4h6YTzIc/9wni+1T+1Tpj0EGcDAhNyaFcbkx9lPjg5f/aTvRsEnJuYxbN0K7a0C5DcTC31+XykoqDOp4geiAnYAwO+zr2fU3ft7bHp2rbc6S+thgiCYiuZx/pfNYQF5IX6gpiiBjGrqvuPTRHxvDO5ciTe0N+37qi0lcukMv4e/aSj/l7b/gz8gKWdXaZtLYcB8jdVY/6/ovoSXQ2yC6Dd1kpFpWCUcHROazTIsceWRd5litmsoZL1RhFg+FBMBrYImn2nlgg3duvestrX/WW1+L/P77+x+8her6UTo8rhmt468Fjbzl4bG3PyPXpf/+zbxMfXF4kw/YbPSQ3um1f8/a9hkKg2ag0fpLcidb0j6RNc/mvH0n0GyLb7tqeuHNHYm3PwKz9Uz8lNbGH/4gMMxs/cuf4kTvW9vQkl7oThu3404PL3/vb54gPfvxhMrLoM6//MtGz41jLjtsMFNngUPLT/0g6Tj75PjIz5U9/PDU7a2DnD91bf/A+w8/h8oxzjlSTI+tS6Tp0EBUkkwUukTdMSCdXcHKk2fF+lqx9Vj9vt+cN36jquraujHCujYxrCERIwipdxyk2A5FgL/K2ovHkmppVSAL4aDNZkrCzkYwo6+vz9F03BCl5Grjue8jXpNBPZhEv7pO1gOHnOK+dcV0zZOC4stxwOdZg/Fw145zRzZNl2+UNTXX7koVIsccsk6aKaghFJl7o1ayAkOzVQVH4CQAh2V8INm83T2NLiQ7eMn6gqJo/HVeQyrNHSPaXCFq2mefgHRyi8nUTkv2FQKCYfTnVsnJvlBAKFEXqKfa3A8gXb5RMWQdCsldF2FmlbHcV4U6o7S82JIq6SABQLRv9jSEwNWI+CBsdw0MDnmL/NwDnIlXCA6vA0KXI6N5uWZaCcYrE9pu2WVYKeOMRb+kwHTM9QBVJ17ydVOhuGE1N5q4ObsNdaITaXhXquqrTVSHbLbv4grGA9QsBG7/Bq7pBh2pBMK9ABCCjWRe6Vy0bPYEk9+L6h28Mbg9VVEMk1P4iX4gRFPW4AbhdG609/e+Ff4tldSoSrebCnRIz18xLUfd0k0Tiiw2ZYvYJ63jRqig4LAvQITiZqlA1qq+zi5altKTBYq5K+ZoXV3N30z2elySoZJ9tY1cTLUUVwJijKAlkIXS6/EdD1/pf7Ct5kZCNWpbpkBL+qXHTMS29VMEW09ctI4toaBkpsNExtgW7+TdyBSotky9uqL3uqadieLj4DUoZNruyrmfny38lzl2PDOqRwVIDgG3mbOmv1G72l70TtaGO2lBHqbGcMmxZyhVtuaINQHhnxWfVsrdiLIsr6fwnx3KTY7lS48QvopNjudFp+fFnK16L0WmZtTHsyrrKexgA/pX8c96c6M2JAHwq+ZhljRMlBwBRcqw2AtPD4xE1ntMBxFeqYSTycum42lh/s5IrRbqTglhqJwXR5bQDKB0BsJLhGlhJX5z/la+EyPKlPwDb+o7/qmE7Oyrn3MJHiePCbKL0V7nOhVTpuNoAICXK7HmpIa2kT8nHUjFjkkVfc03pD4DO6MKalD6ldl1Xxd9Y19UU2twEwO3zrB5X4VoTazHy9NXVRulv7Jnra/9K/3fqeP/U8f5So3xkGKzLKhHKuFaPALIrk2XVRzByfRrA7oNNpb9Su3xVNb7VY7yoAogZw7MFVS/9rfaspoFYkw+ClBe63Uf8s7QLR1wpICOKzFK+vO7WBAwUh87bdd6+2g5Ml4W7K1XxeUw8H13bmDgbHWcrt7fULiyRPv+b3rxHB1b/ALDr0gOUOHeR/ZVCRJUZABpXOf4qMCt3sn88u9pQFVVVVADyCgfr0
HBpKHtpqDzmKw8tXVzZSKUXFb2oAJA1HkB+JSAyL0l5SUJxxQ4uCuV2USg6igCSAYPLVLYrxHF1MmpqpS3xeumv8hNklVkpRZ9lldyKNzhndAu7wjEAoV0jANKFhXRhYS5xudQGEEn6lhJV7CF7nbd09G9vBpCfK9/M1YaeGl49lhrqynqZHYyvHhNSOQCk1Fjl3H32BQCtgYs++wI/9jD5qJp8f+cQDbfJlZ4Jpgz7Yjre+pptb/lL4oOf+8TJ5ZTh3b7lw8G3ftigX6QH9fQQuTz++z+cMvz7SSzHRQCPn6zI94/8pr6pi1lVqDkv4x+z3TRs8FooohLzk5O7NkaGwTzZLwNAtPJiFO2popF+yCmYWVdUV9jRTvQcdfJB3nADlYwiLxgWhkwmO8mQJHiDk/S3PHVqJvfkD9b2vOd9m9/zO4YobGlyXJokdbpHvkHGObi2NPMeg+zwOGxup8ErqGpKQa640WKj0wBu/R0yAmT8+cGJc4bEDx2H97ffRIZFDX2XDNtY/s5KEouVu3jyE9984sMPrh3DC5ywroKuv0gSQY0fK/mHK/f51rGOdMLwcxq9oevjhucVbm58uu9DxKne+nZy+f/udy9cjRu28NS0FY4ZsyTqBS3Lkx9s0sj0LNF4CO2GycaHmYYGw9NpbXW97C6SclEC625C3rfjlCGw59I3rl188Nranuda5VOtpGX8jifJ3RKDJ0YV455stsh4pg09tTm+YV2V81SU1Cu37a47P2KI+1IVSVgwuPIGR4t//+8R4+fw1x9rX20rRRnAB/5jvm/U4Ku//4Dcss7QH3Gs8ytopMRM1c2mV/b6p2pSABhVZDTD9GA12IxLmg4s+EiCRcwVuUx5FpVGp23cgs1wH2qCsQNHDQla6nYPD36xrLPOxVfizdRjMOajD7Q7299izBe0UDP7U7LqQMj2r0TP4qlNCwlDzZyJUB0bMsSG2cTxo11fMn7u4g0q/Ne/TSZgqorthyyrFD48SpH8XqIyvsQOKocBDQjJXhUhkYq2bnWYx6VwPj/NqQjJ/kKQmDP32i1fo6pq7ah9KXpEtlH4G4N+Kq9JoMO8bHeLjy65v9OcWGtJWrYx5xU3NZkPAuZi5rTM0IhlqWxUytz3RfPYMG7D92ODSVh1pkzhBlnW//n37fhJKtGwucsyOkzotEy4RxXzqwrZqIT7VN58mJpM0JxKWZeJcD00jSr6iEa4O9fF9VdFftn8DbMrlnngF2NUc/s6hb9xcIJqd36g3Vy4N/sTNKficxsa7zQyQ5XbrrHGXGm47+VUZXwItb0qVJYuQM5mnnaELs8QOIpSboGgeb6KjYdNrOKkqfICEJxMVWx7K1n6/YbR3mpZUaeCiyooWx4zD9J30s2G6DqWfz0iRctCxcU2y4IoWJZKG/U30m2RtggKS6VieQLmHs6dm6n0PhrNvdZPFecen6gSa0zguUnLnmBzkmr1WoyaXxUlCE6mKn72GNU6sbXLXJURZcvSvNJNK6gUOQXj0RrTMQCg+6mGUSBTMP/GolRlXlWZH7Fs8/pOApS0DA0mpqhmAw0tY89S6cg0mvu6ja7V0WszN40pNXcarCfcq8JCWoYGjjrLlmdeoxJYc+PmVZgXl6lOtbRkmeyjASUtQ4MZH5XE2rHFfPUamaa6CQ0Umnv3Jqo0eTTCXRJempkA6cBMWHUmt/0GbYVqmrtgLm3rtvfc2Petx7GjVJtN7nmZucWkiHRbFSzU3CloGQs1d0rOnYaWoQQNLZNfolqeaWiZAk/1BBs7zGu5hWs3mmeloWUowWfN3+fpdf7Aqrjab847bWqx7MopOXcaWobT6GpYvjQ5d719I7+tKi3DfPubn+7o5saH1NXjPymv4yaG1fZKnEbbhSdbWA4AMzcKQG/sCtfw+pWflf7v4ni5vE5CKBvLqaTf60sAOLpvf6knMhILbaqJjMRuecOOGrcTgFzkBJsK4Ps/in3/R+RUvqOnrEpMRgS/UwWwb
5+rLtwOwObIF/MOAMom/+n49n37K2F8c3PeR8d7bHahWJABlBq5j90PgG3ZqU1fYVt2Akhdu6QqOgB9JQCj6R3vbnjHewEkrl/3b9sGYPYv/0/q8pUWm9pqU6eKHIBWm/pQxFj4qTa0RUR8tlJdyOHx5j3eUjUoNwQAGchBQbzJXzGsZrL5mWze5dK3ebnrqcr76ZGcLo0BUKuXX48TzW799vZnRqIAbtkUBPDMSPTRNx8GUK9HMnBlGGe9Hpnb0whAHignjRB6G91nY9pcQrSX9SypkHvw8WsX+8YujC7v7aotHbd2Bu+9o5u47cX5WLgjtPJYIwB0uNxevljkigXOZleLBU4TZFWQADR0hObHy3ER+25tUFd2lHMyowr60586zXFsqe6eqmoAGrrC3XdXtMil65lYJC2Eyp/KTisAGvYGem/bHDmXBBDa74ucS4b2+8587ToYDTqLUg1CnX36qiOS5Ow6U2D00nHX3Yd3veyQKksAOEFUZYkTxLHH/3Oov5wprHtLeKh/8amf9Xtq3enl8trgqXXX222elTiipUwRwNHdDY3dtQA8GtIsAOQS8vD5uMgwAFYf2OYPvx4Ak68QRB+6+RSAK4O2nT3FK4M2AC09aiaWlmvKDjEhFpf8NfPDWJzQd97GLk7o4XYGADNT4fRlpwNArYMrZbaQVJuk2UW2oKcLz3xl5sDrmgHM9acat3jn+lO768ri75lICsDJSCpzEd2t9odPJsoX2WrvbWYVuQiAF2ylRg6C32+fWsq31jmmlvIA1JxcmIkDkERGXCnsebDHsCfL5uZ+cj6lcwoARuV1scBI9t2bcXAvX0xVluTJJfW9M2WFwJfnfDl2qlY+86pGdsXHo7ECgKXrtkLKoEBdbViKBnNzQvnRNMru0+m0LWMbkgvdgn31GPnIe0sDLiz694YTAL6USQNoXwnOfnq85Q11sadHO99QvwCgL+MGsNWd+Ufe6QuWT56MugHU+MqUY3xZBBColdJR3V+bA5CM2nzBIgDbtO0jC+Uzn9NcAM5nkkyeTNByz87K3qDHZtlNXp1tHR4LjCwvdtWGR5cXuwAIE3ps/CYAfGRRCYX5yKL9YMZ+oHxJ2akaV2sMwPLDIdvm3QDU5QWuth5Ajet0cq4IINSaARCZcrdstyczHfVN6YVZD4D6pnRQcB31tw3NTQ7PTb1q/y2lc/Id3RyAtcfDbP/pdsNOcb2xCwsT5QYAYHFsSh+fwq9ASbIDmByYc65E70ZGYgAiI/Ga3U4AJckOYNsWx39TD6ItVJ4TdltZ6BTX1E5bK9kBRBmfzS4AKB1LjZISq01fWT3yAkNsX1+V8iXJDqDp43+9493lhIWttuoqUu1yJKYV18r7fDqVA+D1Acigujuo2eVodjmuZpfXSnYADrZQu64A7DMrLGep8fZtZcZsgQkRDaG3Mr2UGtEeq1jQJSl/YXR57fHC6PJ64Y4Vmb4KTXemVgImiwUDB7Uq2d0BUV2TK2S1raqauuYnhrcZ1MO6bW4hosdWWBFXCw8gFUkDCO0ve2hXG9DZyhGIJDkABUZfPZbArWxE4AQxNjO5KtkBrLZXJXupvXlz5arq3DYA0kpMfXrlaTj9grjSuXoL1or1Es5Mtxxqmd7ZUwRQOsbBr0p2AHJNgIEebmdLMr10xIpArwqRK4pcEUARKEl2AI1bvAA2tzuRK8+xW0Le0vFNPx0ZnqooxcNThZDXVruS0p0XbAAgayWZXjoC4Jzl90X81SWbixkVALNS4JuRVjKepgzvUsJZUZKTDjXpUAGwa7z3K+0q7M2qZC+1fQ5ciRcADMmV4ypKkh3A7V59nK3QSrd1TCPrKkl2AFvdGQAX3brPXTl5WcrLZeEeWKnm6qstrgwoN/azFSN4P5sFsN/Lf+G/pYte3qQBKCnSteHR1WNqopOPlCdhqVE4614V7iXJDqAk2QGUJHsJJbG+pm2vb0oDKB0BBAUngO7Gtu7GttWRVWiZ05plO
UBocL3fMmYt7KbKf1TIm3Mpjp+uK1z4IiMkm3PE37g+YzoGAB8z9ze+4U4qYo1hNrSeojdExQ9sazX/gTXNbaZjAKRz5qeKjFNFy9BA18xdRwAkinQ9WYdlkUVqjuopb6o150l2uem4FOuwVrL/KuzJWJY05nyWSsiMBcwTWNkPmrOLlBjIkXsLUFW4H2YptoknzYk8AKtq+3+DbevK1N4wFjOWpciQe2+0cOGNIiKY84KrmvsLxw+eJEv6VgXzK+yPtXDTBZP4Gs29oJS4PmX+jbEZcnvIDSPUQVXLosm7oUViBdkyInlVc//vMUJR3fDyhldXpsG8dQX7GgWq+IjO+CbTMYWzlr0RvU5yqyaqCvcZvco4Em37zMcAy4UNDdel1NztDvNlXBigqG1lKWg09yt0rksaUGrumm4eIJFJUKVeSa7Lhr8enjqq6R6i8yXSwOM0f+9ziY0WWG5xQzegSMtUpslLU3Pv0MytvQbrkgM1iFThQDSaO99EdVmSYh6OTKu5U2Hy/A1+cB22Wqe5U4KGltl4zd1CKDXmAqtvzDxmGQDLmAfeUGruNEivS41yw4jNUmnuNLRMNr7RicMy0oaGd4i1VKZJLG9+VQ/OW0Zh+SiUMNDRMhZq7vMSVThQIE8XDk8Bt33IdExQqPIE+b/5qsFL1j/B7H7VVNjdD8C1PJmtbXMtTzbZOjZnbQBqJB5ATFRQezN3i2HGp6aS97Z1t7U0TE7Pt7U0nDh1AcCrPjDX21TsaWYBDM5oPc1sw5L8Rx0oFFEswmaD3Yb58XQHS77SBcYGgIWqrXiweCHjsmUBKJqdZwsAnpneV+DrL05u3dPWB+Di5FYA/uAwAIFX5JW9juLr/lWKDMvRYSG4GYAcHQ507S7Ks54duz0798x98yuNb3tX/PKVQp4BUMzBVlJVW3f5pgfgW0l5VsjB7tz6kY+FDldKrkROX9SOPxc9fb6kc5dolTrOoYuelCYDaBYcKVUZSCeOZ+YBTKTSANq9HgBbHEKBg10FgJKf8heJjMhwkwUFwGRBvtXvCNUHvuRqAbCfd3y2sAxgp91b/9TjmY4uMRGX/AH3+CiAzM/Z4i2bbM+MqG01SmsNPxXTtrUojhxKUQ2cwqj8n/zGkfqw49Z9bQBOnJ8E8LtvPdw/MJdXZAAOXsgrco3D+fZ/u9Tb7ByYKQv0gdl8b1Oxpyk0MLswOLv4moO7AIwMzh/d3O6vcyaWcgD8dc6xq7HXfmTv6owCoEPQGF035sSXM4XurQ0A0isuuIXZ3NCAthzVa4Pl1/hQq7uxrjIhHTYmX9SX80FJtbnFNABJFQHUcHGV4+yMWtA5O6M6WFXIxqKROACbTSwWJQC9B7Z11icBdPWGRwfKjqzYRDKRzLe31SQSZU/PhclEJpqL5KTQigp/X7O3my9fQ6mgT8Mmz0Cy6MzrsgAAggwAH3n5yOmRmuaaPICZmKO5Jn/pawNLHY6F8TyA+g4HAFVnFY3NjuZcXU4A2dGcpsLV7lh8YjF8dzgzlgXg7nRFr6c9be65E4uNt4bTkxlPm3vz/WFJA8foHKMDUHXmM2cj2ThzZiYD4FCzG0BOk35nuyfBKgD8Gp9gFb/G3/QnCzPPuL1tK3FrkyLDqPPTvNvDS0VNtLFSURuS8hMoS6jJYq7N5gwXuDq7HYBicwDgi3kA/y6cHWVjXVoNgE1aDQBeDooCOE5VVY7jVADPj/LDURZAcy0DYGZZd8Dxiaby07xeVLbZeADPXDRIupBf6kspujN6zjkPoFF2A8gt2SdmXIlZAYC/SQbg0bUmh+RVWK/MpgQtxWvNef67n58G8N0vTG/f5712PrV9n9e/l7/93SGJV0Wl/NTOz6a+eKbMOt5U7wegFqIf7nUDWHQ4F51lY3TM3QKA13SFZUrHTaOxaI3NmVcBOPNKzsFfj7Ifz9uOcTqA4yoD4BjHX3fannDaANydK99GJacPrfiiu0XmZ1m9+ZRW59UBLA8CQG0P/
GK6fuu8uy4NILPkcdelZzL2k8+7gjUagGiMjcaZYEDXZ57xe4MAEqmo3xscn+4P77+7roZd1NsBLGntdezEA+oTe5WhGFsxWTYr6mseyQA4Uu99diFVOjLdr/oXGPGyzeNhj0Ha7mA272QMFJIUSiVv6yM+eEAnd0l98j+G/uazhmXnN97S8ZtvbSeGfeVPybLxrs2keX7rQTEUMKzkz+Cek7hnbY9oywZDZOKk9E9IrsbWWyf2GnL6FCSuKBkWOXvf6cb/+ybig/Y5MmTzZMNu8uShensdmdf7c9evET0f6Wglev55mlzhfnNf72/sN1YvYtK6QHLlmQJpfmltfq3NkI+iORBoChh6spnixChpyu1//89AYh4wRCls89Vt8xmynjV3177xD44QH3vq0w8RPW/52L6WXsM1/PhbCz/59iIx7JsnbiZ6PvNHpJGxPBGVcgYFKrB/r//A3rU94VrtZTeTGvdH7yeSK+FLT08QPa850vaao+1re0Zns6NzpEL6rS+QsUYP/Uvf/IiBN3OocCqGSavruq6QnFJOJk3JO36rq67LoIv91c8jf/Vzw/N6YJ/rgf3ka3IhT+7hUJ6s1ZcMiuvxYvyEZJjJDYzjPo5ML/PTtl8SPb2xhi1xQz7wVJFPGl+cPU367kZSwb8QIWf7QPh6ym54MecGvXNDJMFy8wHy5RUfJoXMHe8O3f5uQ63E/zg3+9lzhtCDQ0HbN4+SqvSjzXcQPd5E1pswPOjvpGzfSRlIdpuQsfHkZDgskOyl86Gz3LzhJu+6w7nrDgNLMZOx/2BkXW289TtLOu9Cx51rOx7wPfGA7/G1Pc/FHG98nnyCVWgZQrK/EBCSvSouPmVe1Y8SUpHKuiQke1UUth6mOZXvpv2mY84tWZe2X6cKJiEk+4uNmSGqkkCEZK+KLbupNruKLirekwZ72v2mY7qaqOYVFdbV9qyKpRHzd/DqHJUngJDsLwSEZK+KBTqvkKdo/qBbGql2/3XsMXcLHaIrlGgrmN9SRbXsfs5k6Bhpo2SnRxXhvpi2zId7y35zV0ADRS49AJGYOd8n2qj4PjVqPszed5rmVDTYX0dVcOuwz3zS6BzVQsgkzKNLXW5K+WgeG9DcTZV/5tRPyOLU69F/iUo2SFnLUg9enEiYjomlLePcmV+dM30ttr/cvALnjsaNdl1G7Zalapj1UaULpEE7hXA/Y125FZ6jOpXSYM65H66nizeJm784z8WrrBPVhHvGMj3l6AHz135+nGrGbO0yz+Jis1lmc1DCd7NlmvtM0TwChFGpimTpfnONYGmRMvDGstwJN9/fadWpCE6mKihzy9Bo7pRo2GyujeqUmvuouf7xrfNUqgxTZy6M5nXL9prU0+UZ8hbMo5aTaapA/l9+mSo0gAZFu/l6Sam58/MkhbseMxm6rFMB8xfnpkCVJ1iNlnGbTxo5RCUaaGgZWs09bv5WFIuW2RyUSJ4ivQU3jJmCuXC3UHOvC1Nm+zKff5S0zPSAuZ5CScu4Q+bDNj63zIVHzLeYMetKSr0U0MBYFrFGScsQhHtV+DyWFek8E6Uy9QjCvSpUzbrIG0rcqObO20OGJVTJFkPtPZ0NBoHLJydtLWUNTpnhAdR4g9/57GDPfhHA4LmyatD5Jh2Aw4F8vnz8rXs3P/N85c1fzOTrPC7NmGw23OlL5MglOqBq4ppsuhLHanDIjNGk0NGU+mqpmbbt9hQv2WyZ+izJgbD1pGUQL3AtY+VrXgyEw/HFEV/rlKtR5jlBUWWeAxDoe05zVTxmjLysC7W8seiM5hb1nNb8zveu9qQuXfDedJBx8MXRKVtX2Yn0gJS5dXb8H37y7O/df+S5gembelsAdK9b0v4pnZ8pqKeuRQDcvD0EYHuLW2quvCtcxAZGZANrdiEkcvA7i/3kCqrk3dqanZCCg3+yb6S9MXTi/ESp59Z97Y+cGHz51rJvyuUSslkZwBtf1XV9KLatu2JR2nQZyF8ZngWwc3MTg
KDDFXS6ACxOJACE2/0dW4Jabp2Cz4rQVTAVe0vOiHLa8GK4nDaPn/P4OQDphOrxc047r+ahFgwSUF4pr8iq0DgAkPJFXobGgtXKRzCMuhIbzqi6zjFiNjLyTKSmNRCbqiwqi+uWvYLMbmkq/95IOh/yOGrcTtnoBZUVtjaHnADnGkr2m0+Trvvljpr5kVQdoy3p7HZOuabyO21oEDSf06BCffoaSey+odkhS7IgCqUjADjYvGAQbXub+Q/t9QA4M18EcKjBxkh8XcoJIGuTXUUha5MB7Eq3LYoJAGHJX774HkEOiwAiS/lQnQNA33T+fS0Vk/r8dL7Vi77UaCgdiHjWLMCLAQBwFwAgYwdQWPCWlq9mvz6TYACEAozPwRaLOgCbjQEwLRQfWWcHcFoKwLKWqmW9y1oKQGEguCQZomxrWDtXw7XUlCfMdEz1CrZ2PzoCtvF4sSNQZhFTbxSWrqt128rDlq6rdVvZ/1oYXnsqnVVe2WwQdodqxTOx9JmY4f29NTOcCda6o8ulIwA9KABgJEEX5VJjvz8fMGrzPAunLX0uZ3s+b/vN2vLrOSMh5DD86rYDrdy8Qde0NbBxY36Rgsa0Cdk2ITspu9qE8rrS0UUu/zF+KDZj2FjKDo4+n5EANPZycwMqgFuwcGJdZQLeFjQoQbYgOrdv6aw3iJ4a74jNU57KpXt86dnETz+bBQwL3TvuIpXr1JS+K7yGewqjzuNSWYMonxzMJ7OkcK9Zl7gju8B7ag0kTzj1MzlliNipUTpqFNKZ7m8nV2P3ANkT3+S4UtcFVDJezB95g/uZtUJzMwB+gaR9dnzmnw0/553vkZREUUng7kr0iFfO/l6i6fdee3TtB0c6yB8YLMrBoqEzLibiQmL1nyWR4MoYAgMAFPpJ94AUD+tiRYzmUXzq5Oh3f/nwas9ffxGvPdgbrS0vhKtm7Zvu7QIMBb2cmsDrxjqCvK6WCk4eK+/v51no60hwhhMAw2uhyoKcMfQ4nWL3CmFaChrYv6umME9ObmldTXpe1rV1ESaKsDJMYABMp/jCWVLlWUyScqezzm8XVq8qAKDW7ZQVw0toLzK1ORAM47fWCXdpOaArQqkU7HWFB9Djw3Yj9Xoqqn4/Sl75r211OOAAUDoCWJrL85sMv3pvk7Bd8wD4EMrvJivxQimDUElkpQFAzzsAQ2hKLqwpraVv9Jd6evYzqi1BXMPLnn4MxiAv20APAKyJZppJsCuNlS4H21JvuFeKwD6qk3sjAtKqbJot/Sc251ieNXDl7dvt9x5YI5G7UOPGwU1eALeveafHbhXwxjWz6I3CZ5ZOw1gDfBdb86omw31/eHHpzwdJdvRNyWEMDgMoH4GEr0GusesoTxId+QOF4stthrfSJmg2QUetYZ14lCENlq1dtb5mQ77bMac8phrWdQ+X+zXvBAA4KnFQt9eTL/jw9KMjM4ZriA7h7LpipfvjZBxdFVpmUz0VT0KD5vaN3qC0weB0yyxHp2pOy5RqfFuC/lkqppKQ7C82zl0mIyP/H8OpCNWE8bSZk78aXYJrxWbOZ15OUGUMz7jMd7TVylSO+hwFn17jppp7zaK5j7DbZZkfUaHLDmQhYilzyVDIV6FGX9xdzs3tllWNqN9m7kuM8VS1LFSHOQGX7qXKnqYy5nO0XrJszx4ldNGy6hlFxnxiKRufLHtjIdHlV2Bt5sN+fwuV7KMR7ixdxWBbyvwd3+U3D+0AIErmVzXopEvunzK/+FiGyvm8wVBVy4S7WzJfLAHEkjeYbKPKg3/k0qzpxy6dpMriMjNhfvVtPVSGQiZiWRgWjXD3DFBkT6PDAoVmQQmVowpt5tLmT5ASHMXyz1unITSGqe6VFrcsFLIomUtkVzuVo15Nm1/8qahlWXEsxIMTJMV0wwjSae4Or2UmLw02u6nmlRAzjw2zUST4A+ArmvtdMyKV7rupxfxUvEAXCkmD3Uep8i/OTJiHbUwOWia1HZqfZhiXt2wLDA0Wq
qV9uDFQ1oPXbJYRazQQLBTu9VRilA2YP8Gil+Qu/xdh7oS5VsTQae4qBS2z07+hu95Ap7lTooXi/Xp4kSocWa4xjw2zkJbJrNvXesPg+SqnIm9xMJ64Z/fB0//x2OEPvnzm+VEAM8+P7LwDasPYUt/Y9jfctdQ3BmCpLxF26YtZBkDYVZ49V68ndmzzl46lfwZ7legAn7eXLTVHwVuwpb54teRzwnt33HphcfJKZhFgkit0XtKV82WdUxzTqgonxWzrSlTp8GJ0q6xvb/R+59zM9kbv9kbvo8PFDLyH61IzWVuzq3h6ybvTVt9kw3xEnosojSG+ISSc78s79dGbOrqmE/GZeKzU6HJ4uLxNaAIAsRHSHAC45aTE2gBInN0tJ4ubOyAqkHgYmU2VLZfsWvV7O5nZnN4U5M7mtCYAYYwldFetHltmaqJMTVCP1csVWiZrywNwFR3SYlYMu9JXlmxhV3Ex69lZlypKbr+opVXWwwFgPRxb5JrDd85MPun1d3h9nTOTT3Iq7/LXAcgmllYbAGzhMAAxXCctLgGQVCQWFgD46+sBFDKZQiZzz7EdwxOLAEYml+45toOX1ZVpwSqKZrcLiqICWI2WuT4Uuz60/Ouv3ArgyvD0zs0t3/j5qZ2bW1ycp3mr9/hDU23d5QVeYLFju1+VVE1SWZHjRE6VVJ11MVoWgM6s2PKMCKAUh8LrKDAQWQ0oHwEIrDa3kNmjkxuyGZTLZq02WL9NSRVLT6GU6UXjwOosAFZjFU7hVZ7LLmhgdICDDkADw0J3iCyAvKSVGk6By/FcRDLoayXvnQ4OAAMNQHY8ozE8qyvaWhZuTWUJyG4IGUYMjCVyfsGekAsA/II9IelgfGBs0FJgbNCLAHi7E4BSyDn8IbmQVQo5ESIACZIIUYQgQa5tcwJQ9QoNIhcgFxgAgl2WC4JglyWN+c/TC/ub3Qea3c/PZM7NZPY3u21s4UosBeBtXc3fHJ15W1ezYtMX2UW37gaQYTJu3c0XWFVnAagFTvTLaoHdbg++373lvBQFcF6K7BNDAPgm76XZ1O4m76VZkmlp7SkHq/g7VCzy84GUu2ADkLEX3QWbHmOYGl2PMQD0OMsENLdkz4gFUeUlTnFLdgAOr/Kqo/4f/DyzdbO4dbMIIDKlAfAEmXRUb+hhMstoC+MLA8r7e/nzUe18VHt/L/+FAeVgWE5LskcUvGL55sgTHFYiyKQJHgDaAeAal9yu+q5xyTrNvsQUauJCbUwc7srWxAUAsYDcP5UH0DeZe/0ttT98Znlrm7PnbjdULHNK7UpNEk7XAAlAqRp5kkUN4AK+8MsogPffHgRwfjznUQrFWrtU6xCX87blAoDpsGcoU+xYSaraqTtOXZvSV1LwN+wIzV+NzF+N7LqTFd0OAFImX2qs4sSCcms9/1eXivaofKDBNycLjYKcS6aCrS1Lfnlxi+weqGgAtuvRTb9zd+z0GICaw52lBvPJPR8intzocDxjTMrc1jXRuslAZ6tSIBcjs/5+pY80lusPLttrDJ2zYmLGZqB07FmvPUvaATYf2SMt+LSCgfITfBDW2Q87LpHbpvi9J4ief/ubXyd6nl7acnzJQLLbJoaCD/4TMcz5D/9J9DRzjzsZg6vek2c9ecN+qyKnR1ykxh3/NunCOvyuzlpj3nDRtcPmMianVFJQyDftysWHiZ6L3zuRmDbkIWHrWtg6g+/eDtRrpE533189ARKkwd7ENjaypP/jk3+7l+g5ezxB9Oy4rW7HbYaNl1N9c5N9ZOT+K994K9Hz798ha35l+iNKykCnBA4drjl009oe2/x1cf468cHHnyETIn1zgXQs39PTeU+PYdtIcjqfmiIN9itHHyR6RiaCo+MGc+GjbS0fbTfc9udTeHcfqfo90k4+CM/bfEKrYbZnr0m5dTGUr/j8BaKnqZ3kS9/+arGt0WBbebJud4a0kFrz5CZBeV057NH6ZNZumMnPDcVODRm2LxSj7OIJUmXc0
UoSCwcOePYfNFyDYNd5h+E+XJlRP/ZD8rb//pvIk//oL0l6wHu74rnDcJ3cFOv8BnkN16fIjRff/Ns7D+00TNELTPICm1jbMzGR+/JXJogPfvkm8qrON7jONxhuYOxHM/EfG3ZCvPF9vW98nzF5FLD3U+Rs73vPUaLnuCt/3GX41Q1u372bdhDDbtCiViTL7LiCi8oDQ0j2qnBlqGy94bEF80HWochTkXSxCQq/K0tFKBGS/YWBbhOdRagJUdF9hGSvPsZDUZYACIvm88rupdroH4tbZmXT4MocFZ9JSPaqsNNRDW6Kd5ASjU3mPPLOZvNN6ZRQW6newTNXzdmbcZr3FJizLq/92XlzB2eDu8qLc4PC3eY23zRFCV6iElis3bKCCcPj5sLdNjlsOoYS3iLVHK1pty47FQWoswqYB1/eeRdJpFQFoba/EPBec9GguqlS+tDA5qNSGmoCVMEPVuHKPFWyjck5c7lm2/DwPhrhfmWGyvm85Zj50+GmqATdoR3mc6bDuvf0+nmqcOSDDeYaz3ymygJQ5Te73RRvDp3mTnAyVcHTuddp4M5QrZabO8iUvOuROvaqF3w5//9QS1PLjU5zb79pq+kYP22YmWVZR5boVB4a8J4NdYknp6lugoWaO8HJvBBMUQj3JJ1xuei3bDLMzZrbXldmqYR7dML84rlJOuG+01z/aLcuvLvvApVw//cLJFGzHrTCPZOhsHnphHshtqEvYcZNpd1Tae4T5llxABCE+4sOzbIQwAKtrmau4o+NbXS+NiVtfh9s6wj3GwYlLRNPmL/2z9OlXsk/Y24EvH0flcHUah0tYyFohLuFUNvoaJkr5nvoJijCuwE0WldL9gCF5m4lLcOLVMkqaTR3RbBMYFGCRnO3kJZJ2agUkOFfWpb2feI50m24HtS0jPkqPj5mWTBrbZ2fZtgGa+6FlGUv6gfpKpzzFJuYKDl3CxFOmHN0hQjVOkFDy7z90Ian6LIOhDe1KtZ7U60F89F/Jx3uXhfvcxtoLHc+6lpXgHXm6kmip6uVtNqObtsU9BrYhmQCySTJCLQEyKd4KkZ+XQsrtHJGck0rEprs1QxzbZ0SuU8m5di0g5S2XinlkQw6FRf28nXkYpjpJ4Ne+KCftRmjGhQppxi0Eq9d8DhIWvDJU6SpdfTe7UTPyPD46IghSMnrsff2rosUZEii8PKFpSvnDTrIPS9rvedushrOAkuqM+lR8l5lNTGnGwWNpDOS4QkyNpWrIx/9pU+dIXpu/81ddV3+tT3Hz2ePG/PWtofEd95Oxjt97aekJhEfXpAzhkvl99zJ7zEU1nHkx1rmPk988CNfIVeFJ99E6tJSNF2MGuQmx+mOdXMmJZHTmO/ayrgNe4PnHHOzDkM4UJsitCqk4O4Pk7ErD8cX540BmvsV/wHZbxiki5xq7AF0H6ki5OLeXNxwVR4GxD4I3ZXX3eSjf83zZInnv9las8Po7eB5O8+TLvdcjPyBPjup7c7y+pxguIGPDWYfGySJuw/sJXdsfP7H5GTYf5j0adXMIbAuQ1HwD8lTaTOky93dV+CM20Ebd3JNOw0asE101NWQr9ITNaQh0jadIHrUjKplSAHyh5kJoifyMfJUNzXabmow3OSOvRMdew2SYXDo6ODQLcQHq2judorK68Us1Q7VaNLcYPfaqfyNpGSvhjq6lX6DjdAURXkXAEm6CvRWQWKoSHfZugQVS6NUc4YGhGR/Ibi8ZFlIBiHZqyK5bkG98a/TLePlGZnKY3yVIkhJlaleLkKy/y9CUaJyPCR85mFmT0gJmlM1e8yfTrC2Ci9/g69usRp/X+UrfeYbDosK1WNOahuawUSlq2VBqO1VYbNue34qbZlQo4QA89vO0PFO21/WZjqmnXJ9tg676izLB6Atmudzp0SDaM476SyVNiAXLKOwHmi2rF6ChyJDS4Obat1NpzZ0nbCJVMn77EXzzeRd3IsbZ1xF7hQkitipavz9iwofXX0yGhRYy
2aDmrYs+o1Gc/d6LJsNlP4skpOpBp0u1pMGE0tU1yW4ze+Ds0CVL8VKzd1lrrkn2JdibhlQZASzFmluQyWy0Er1lBUvRbk3uoQwNBhVLdPVosskU4QbpmVsro0W7husuVOCodgCU3xJZk38X+yrokPO3mU+CLgcsSzJiZ41t/b8GpWUmZcoogx0qmco02RSEqmYQxpaRlOoaBkazb2Rgo6wFjxFct0iXSrHgs2yi59JmxsBPd2kBxQvdspfwptaFZSyj0ZzD4tU6oBPMT8V66IyZnXJ/K2gpGV8tZZtjiC8qVVhYSQaJS2zNJowHUNJy4gUFkzeYVnJ1pcomA2NJqQEy1umks9RCDUAacpiwBsLGlrmbtFPcyoazn25mubOt1wp50txJMaW2+50JsdPxu63iWxstlDTZI/NFgDUSSd3blcz8+WC5em5mfpu1+G3bTKcfYELeZqlQlG027KJFACX31t378/+4IGeU1ejz15d/oMHej79rcE7bqk5PxV555HeS9PRy1PRXa1BTtc9nL6vsw7A+bGlc2NLAC7947uTUgGAT7QDSEqFB6d/8uDcz34t9IrL2ZEruZFfC71iYDl1j+vWaEEN2rnScUliGlS7W9Y9ip7mGQAeRX9QKwfeTMpFAG2CbVlLnVy+Wurc5KoF0FJgGxc8QS8PIJpSgl5eRzyckK5fmAk1eCPzqdIx7PJ43E4A6Ux59a7fVBPRFmu8dQBiqaUab522Y3vb7rvLN7ehQZmf9+VitqGrADiOUVWd4xgAbCEPloWmgWUB6Cy3PBUTRFs+JQs2FoBc1HYevfn2d71lfrzsKpkfm7pwYuD9b3rivjd2Dl2PAxjsi/dsDfzeH9wOzrB+eF2QFQAQeAgcZBVLU1PHfz7Vtrl1crh8tsmk+J+/KCf4P31uAcDv/ubuz3zqA7lEwun3R8cnnAG/0+/f7n37r7/pMIBL18uEsqgy0wPlkIytvY19A3MAPvel1zv8fD6hOPzlicg4RQAMz+iKXmowGZlJFsqXyrNQNEZlGaYsphnGruuF8YjtsMaoOsMxOstA06HqDMcIAKtDAcCABaAz9rwYZjWJ1WUAGiNw7JRd+xGv9wMQ9D4AMrN1KLe4rGSX5Wyt4CodP31309obtSssp65Fxn+6CMAe9DiC7vjAfPO9e4MHep2bKvlznJsa09cGhJYwAHm6vHC28ExqfNHbEfZ11CfHFwB89tGxB3/0XG/IOxBJ9Ya8AHpD3gN7Gz2yIU4jk6+wN4rsBXBc/iGAaD4NIOjwRPPp0bM9hbEggPyow9GVz4868kfiozdP1XG2Os5+TUoC2M8G93B5DooKXmE4m15UwX/iJ2x3kz40ywzOMj1NOoDfum9aLthsdYtK1s27MgAG59gfTMauKpEdfOiqEgFwT2Pj6xwtdpUrcCqAAq/aFe5P6hsGCoXX+v0/TiR67XYA1wpJRtSWZGVJUep4fklRwllfZFkDcHm2rOXsahLffsANQCkwLK+XFPmFOtZe1BM+xr7CQ1yfLf7T8RSAl3c7Rpbl0WVls6iJFxKamwfAZhTNzcMuXC2yPh87PSV7fVwqqXp93Gs+IEbnNADBRrbUiD9jS5wzrCjpDPqhLytM7cpK0+ITti+xAEolPjUZrIBrv4wsjVe40LoOVzxbEF5u0LHe6WnewjVE9EiICQHI6jk76zzfV/Y49vVlQiExFBI7GR5AYEovHeOtzHM/n1neIc9cTc5cSwFo3u6NNwfc97QBCK1QcyFW+8vvNgIYDxXH66SOJXG8Tvqrf2sH8K8/nwPwoVc2nh1O5zffvHo9qYlZAB03HahZKZElFSfcvtvqWzHHD/jb4v62eGIy4G+LA+AdiUoigdrJJwFkI+XqEiXJXsL8+edgRE29RvwzMQrRbgPg8lem76e/Nbi2cepq1ObD154dKHVenooCcGlKSaavRUmsV9qCAODByKOlngcjjx5gb44WVACrR7fKNRTLV+VZ8dOWZPoqJuXiQ
qFStWQkuwzAmQ00AtFUebGNphRAj16YARCZT60esUaslzAaGVKEYixVvvhYaqlzx3a+oRKtyDc0SDHOyZWNUI5bY42WiCZNA8Bomq8umE/JAOSVn1BqNHSU1+SGjtYLp2IAfvb9yiMb7IsDgGp4FuE6R2y54tMXVpiAVckOoM0nlWT6Kv7xs5c+8yk4/X4AwY72Uuexo1u++j1DGT8PeDfKZyxJ9hLyCWX1CIDhGaIRWcjWtXvKl6pqABilItwBlNqqzpSOauWFZQEwazKYaqwNKGisqK3QS4Le79AM/sx8+spQvhzRuCxnS8ddYdLS0lccMIVouhBNA2D93uAr9q8doylKSbIDWG347KxvZcNEqfHgiUcBDERSq8fekLcxb0iypmm8LJMmWjSfJtrxJyrVM/KjFQ/eklpcUivzmYNSOpYqgnFQBmeZwdnyDS81RpPZzS0JACXJDuCqEr2qLAMoSfbyz+fVAq+u/ecWu3OL3Q7gtX5/qTPDZa/lyzJhSVEAPDGZv9JH2G3M2w8AAG/XsaLIF+wo2JlSo4RHh8rz87GVxthkgQPYlWBBNqPkNCaVVFNJFcDqMQQEV7ZllRpxQIobyK4ctBwDAMsrHNHyCWX7PgGAtvL8NRlXf2mIt16cyIkf9LMBw6s071EBlCQ7ABfjhIy+vkoQYCQiRSLS643BpoEpfWgpPvztStTmzLWUq7m8ZSSyQs1FNK5EHXZEbB0RW6mBQwDwoVeWp83BzZ7hekfaXp4PnvYmACKzKK7kBRHt7aVG+61lsVCS7HixaZkdPeYuJlG3jJJ2apb9HB1UVAPNDiwxblkar7239tAMWyvZfxUuj1m2ldxCTNPVsshnzW1er5uK9CxGzP3YmkrFDxzYar45jhKeNnOuYW5dqdKq2NxizhwOpy3bD7Wz0TJvTj5H5Qlwt2yoT2shQbXpcnjJfJtn7RLVD1yV7P8NYo4qmy6rSMOgaFkdn+3dG+13pUFG2dCtfdmubTTDcilzKbP3GJVwp8GuTqqIrmO3UJUbfAkitW7DSFUUlizbXvB8n3kuCl21LDinkaEK2xie3tBImCtzlnkCHM6NjuGhQb1/Q3dHvxC8uJo7DVTGsi1FObodIjTCnYFlL6EQsyyvACU2dZsv9RZq7t09VMl16yiipG/estHvs73O3I/NUmygg6Wae3rS3OSl1NxpMEKnuddo5lr5ZbqkMVvqzU9FqblnpilCLZqpBB27yfyqKDV3C9GQILcKU+IGNfeGHqrtDNeGzPc6cTqVe/1ywryoaY6jMurd/IZWoaOEkyKy6MLxQau+jlJz32BMR6iW5wIFLfPSBEeXSanxVsv2Q9Fgk4fqjYix5oJ7F0XSGEpYqLmHrUsNv0BXLZ0GtYuWTWOHXEWf43+RuQ3AmNTeKU6UGvvqhna0nQ6HytNrMdLc5Co0OQ05bi6eT/fPFa6eVwDs2FfWa975od7Qti2R62Up7AyF/qPO9qWvVHLX7O6xP3UlHZOMRVJ0/UhNJYZhd1Pw0mz0aqac+mpJitaJQQBnr6lA99oPDrvyKSfp5vV4bwaQEhWvxJcar01VTt4regA87SxfwH5P+Fx6cb8nPBpfrpH5+lrXwspOovpa12K67DZprAvMLcUBaM6yQMynympvs16jCMaSQPEcd6l8VezCjFbfrAFySGeKDADdpms2cCkmxGlFnQFgY3Qb9JTOZuYX3W1rMo5yyMwn5lOTqx2Z6cTc3NwrXrdt05Y6ACP9ZYOAkzhGZXWuLBY5mV+aSmXjaQCl4CUAQndr3RZDsFTfePrPPlwuG3TsUDOA42dmiokppZACwNu9AJRCqjUsvOvttwDYvbP10pUpACyP3oNNQxdmAXTvbRq6MGtPZnyFtF2RCnz5xbYrUr9uWP6LujAxW/C1lR1qKqtyGudy6NuDcqpY0Yu31nv1IrnqFFyGNDWKuJTtVOR4TcERt+cDAAqOeD4rRgcMS3s743utu3WLGALQL5XdHuHbt
gJwtTdmJ8qu4NiZed6uAmAFXpMVAJzbwfocrI3XVqLZGEV57N8q1zB6Kdq1O3j/H+4qX56NtRc1AF12x2tetffafIVsbQ24JFR0FxXggK9O9QG4mIjs8ZctHpveycf8AJSaBAA+5veNueuI/bw2UY4bAkCLBfcvfNGxQq7T7gRQatzUW1hO2wDUeoqlxrWZQtJWmMtKjS4RwLnFDOI296APgBgtn9DdIaTW7YmdzY74uMpDTKoZtuDXp3xMbUWhbs/J+5qXJ3IVgd7WYJ9aUyYlZWO9RW3ml4Y0JPFxlQkqrwsavvHwHudPrxpEcEOH58RzalbhALhWnL2B0xXpkZ9hHM16ZNIGQFkxtXmoje1MfZuBDwg0Cjm5/IJIGiuymqSx3ccCtW2O5ck8gFJj0yWW3VRZUTokXr4cHastz8/EXMbf6F7mtXMJUmfdvjXiSbkBpL2ZUuOBd28ejxlMq8A29kDr6GjODaDLmRnNufOvEb27KtqMuFOXrjC6Xv6BOrRSbJg0OpmJLUlj02JnCwBpbBr3bSlur+iCGbvoeJ5JDxsiwQAw/mOniK6P3P7lzXWGrDRtzZvam8gtIbftIzOOndc+TvRETvy4GDXYAddmCtdnDIRAs8P3hiYy//gjruNEzz9/jbTRirXDUq3BjdDqaHpry/3EsHfmu4ke+Eh7NssWcozhqhiO4xzkdL8SJd1cdQW73bgtRXMXNGMOJr7RwzeRmlH/568SPWJHWOgwpJPWo4xuzLGn+hTVR672vgSplZy/OHrhoqGayh2vP3rH6w15hRxs3smSzEwmRzJIY1FP1liIuWBXC8Zaa/7ped80WS3v+8+Soejv+5if6JmdUGcnDBLZ63T3tnQQw7469BmiZ3gRRHYZOS0o62ou//zguqT8HyNrQ57/7S/FLxpme8u7b29+z21rewZPTv/9q79DfPBvp99O9MiPz+qLhluazEvJdZmF7nrsF0TPe6VDRE+dHTbjHs5ZV2rWSU6/iJ302m1dVzU74ZQSToO5EL1kj14kn8477txE9KRrzhE9+YEONW2wL2u9sVqv4Rr03S36HjL15fgfk2TAvgNCoMYwt+ecypzTMK9OX2NOXyM52+0Z0vSZLJIv18238jcfM7wUGQUjadKsd2rkvbqzSDIZ8rQizRiu6ilRedJGPtNP3k1anA6H3+Hwr+2pd0fr3ctre/IecW4LmSZv03dJP+XoudjI84aqnMKbdolvNhTgVK7kcn88QXzwBjn3S+epUngTkr0qZvJUaWqaw9Zl+6JwajEc1Z0pcuY0gpamsuPYgDnTxRaormp+wdxTr+gbvf1vfsrcCE3lNjo1PCHZX2xcjlHlyqaBzFpm1O/qNHfSWAtCsr8Q+HhzpisjU3G/cYqLumPd8lkV65NlroeDTjLEZs3dY/zOKt71GxTuu/dRce62IGkp3DBmFq1LRkGxQ5USNpVis6vHMhZSs1NR0vPz5hKEZywTDQUfFWM7N00Rv+i0LDXVDg+pE1VFYA9pKKxHz9EW0zEACLX9haBA8Zwl1rIs8wtxqisn1PZfcS4qXS0eM3+dm+voUgoq5uErboFqLUlQZJ0ap9DnNh7KlSre9RdXc6dBs4MqXPLwLusCe9ZZVeuhUeQVoASl5s75KcI26DT3hgbzChuUmrvbZtkasO/Ihtba3umlEu40GDw5TTOM22mZ/uuzLiMuwclURX2AyrvOeSjCRusti35upiuCS6O5U8K/scnA89apff8DmruFmFkwn+5TeboIfetije0UeaA0ippwAORx84QwlJo7DSg190zRMvbm/LPmOfA8TqoEO9alc6fS3IeepRLu6pWY6RhKWqZIkVdL1KiCSfw5y+KyLdXcLZvJVJq7dRxkB4WxDkBRLJujsbkbtAj5V3SNjMRrRmI1m2rKU/OC/EpX+0LjxA/T/i0A0v6ttW7pv740+ur3tPzXl6YB9Oz1AmiolxvrFQBzC3xjvTK3wJ/42g/adm0FMHm57
9Z3vuHE134QUGYBJBIqgI4OcXxcSmTUGh9iSQCo8QGAw5/ICHkAbtkx7yw7HM4NJRpr7QDmlgv7u/3nhhLJuC3MlfWLjKa6WW4KkCN+KeIDIIaSUsTX6mxCN05O9R9t3XJyqr/VF5xKRt/Zsw0AAjziCgDEZDT4wKto9AHAXBKNvjPPDu5w4vRY8nCn7/RYEoDqEvmUdnIue7TRtXp8Q5DPugVXRs66BUHSREl9Nhm5vpQEsK+25vxyDMB9bbULM/G6oAPAUjQPoHFnXavu0lA2BVgIADSW01iO1dRSG4AoMYzEoAgAukdnJOZi/4KeYgBcHF/Y01EPYMee4MM/urj2+W3e0rDJGwIQz8oBl1BqLM6n2JXgAQacDjWzPJ9envbUtswNnfLUtgB4dpTxcenjZxaPHQqvHl93p2tiPNXe4Z0YL/vuJiN6IV+eWy1dTdOjs6qdU+18fVdwYTS6+2W9lx4f6LbJYQE6w2gs1u400FkwGlQBADgZ4QZRVVhNZllBA8DxmiqzIa6i6OW0/HxsPM4MAthecwuAa7Fn6hxtMwnGa9cBpAoMAK9d18CEi5UPhor+YNAx58kA2NkUBPDNswM72r3gV2xVWYAgA8BUFACmo2gpRwppuYK91gPAFizrK7agWz01wLYEATAtQfXUQPeRlqOvaARw8tG5o69onBpJt27yME+OojMAAGNleT2UzQ08Z1Av6hs8y34GwDan+3oucz2buZ7LNOQD84743njHvD3RUPADgAtLyAOIrJQjZxj73HhZOhzsCJwdj6NJsmkMAFuGAVB06yzL2H2iwpT9hLzOKYy6nOKXjY5lvkEpFERnjRQdcQc3ZaIjbkA5XB+cyeSa3WV1r97mKNYwhUBlRfGNalqwA4AWLOdfY6Nj/B2b9PElAFznShj+8zGpKQRAbgoKs1G5KQiAkwMcV754js0DUO/xr70k7pFE3M/9QmE7V2iQMY0RlhSbu2zj8kGxOJidWWLqhcCCLO9xOOcVGcCCLPM7NADaEgOArdO1JSagFLdtcQGYGtNaO1kAS9PM4AWuZ69aOkbn2eV5RomobBPYJmiz0GbBNsETygBwKxVzJK2600Ar7zlZnG3lvAAa4LgW4OryFc1ynFMBvDFoMEPFXBGAkGNlZ/kF+PkMw/PFnhA/GFF6QjyAJ85oxzqY5y8zB3aVz7bjCDznKpFFxcacbc559ewMgB0H666eXQIQbnL1KwWBYUdUCcAmTgSgXxroUpc8D9yV/tYvPA/cVbw6ZtvRGbrXAyA3JAFwdou5IYn52l//GYy49NoPEj2Rzz0U/dzP1vb467Q9d5FsQ2RynujxBcO+oGFnR9MmpWkzqTOmJdIA+4vvkzGOvZnNRM/AXLZ/1mAnutqWnW3LxLD+P/o00YMGP4w1Zz/5qcf/5lNPrO2Rwh45TFLJ146QxdE/dGnoYsKwAeSwy3nIZTCR9t+xZf/tZDjQ2a8/T/QEO8h4pC89fOnLP7+0tscBtQmkHfCe33wN0ZOaiSdnDRri5h1K907Dbf/mY9y3HifNjo+8xk/0jI+ShcJZu4u1GX5gW9j+9jvI/TvfP02anLt3unftNBp8UZceNSiDQ9pzw5ohlQ2AX06QJufdSzvrioZL3X7IteOw8eR5CbPrVOkYSU1O6zqhF7lYOI3ca8plT63LEho4T9a++dGl6A8vGerZL3XbFnsMH9w6Wb91ct29ar5E9IjkPcCWoNBbS97SiT3kzzl/miRYOE5ljZv7Drv9h13+tT2ZOowYahQCgKP5PvKqmHnWWHyXZRVuHfVvi5DPayY4SfRcPZ5MRgwCpHMh3rkuEICxkbUnrxwhT3VLc03QYbgz/We5vucNc1ssqv5l8sW5+wFy68wXFFKmOeRGp2TIDtQm4J3rmKf0uhieT8bEfsmg4yuDOWVdHcHzf3Mb0fOTbzxK9DwoZSd1w8t7jM3dZizV4u1u3fK7DxAfrGJiuKNkWNt6JJaobBMbhX+sqFJZ4
jnO3DaRk3R59CmqiXPZjd6HJhesc8elLTtVIGDOXLfWUZHppGSvhlqGynW5aKOw/fMbnRG3f8F8y2hfm3mKAgAqRR3tgpuK2WAo6im6rdtDzcqWcZ4WQrJRXVWDdbvlabCvw2/VqVJD1pXZa99BxdgWKSLbOIbKdelUzX0+wroA9uqgKFan0uVzp8HcuGWJwxwURe8A2D3m92pHF9Wp4nHSErphXL5imRN+R8q8Yh9qqNxCNMugN0vFn26pN9ctQkmqq+IoNqjaM1Qvr66bC6wMneuSUNurQhOsKzXFUKl9UYpVXCxSXdU8xW75SbpIi15xQ4NqvN10lZgsBI3mTgkazZ0W62yo9bBQc2/soEq9QoM83fOi0dyvjlKdyuEwF1hTS1Syj0Zzp8SSLWE+iE5ztzAJA43mTgndPLUMLVjrSnIrMI/C2njNneBkqoJSc9/Hmg9ro8uJ8OOMZT7cNtb8VB5K4Z4JNq7vJJBYpNvjQ6G5q3RF3K0U7hS0DCX2+C1LUyPYLZMzFmruL01Q0TIU7zzoNPeiYNmLGvFZZr5Q0jI0mjslLcNS3S0qEIT7SwRzFOnH2+mE+xYKzX1fp5/mVJOaOU2S/u9pmUyaL/0duv4IgDYt2qZFS43WNhbAkVv5lja2pY0F4A9rAPwtjf6W8krgb2lsbOsFUDruv/W1WKe5b725sgM+PswDyC+zhVjlGq4PZkt/pX/KOV7Old8rp+rIMAYfQobJtodtx3b6ALSHbe1hm5x0drDh0t8dfGV77tJ8tHS8fmEAANKFhSsjC1dGAJSOAHZ6HTu9FZmoumwHedtB3gZgtQGAF1ibU+AFtvS36k3dFwrsCwVKjVJPMOwv/RloGZYBW+Vl41i9mFwGIDHK6hFAS2to9YgVWqapqampqbw77MBBw851nmcAJGfjqlKxPIr5NABf8+bSsdS49+33rA44duuvLDdqtzvcAlv6A1A6btsT3rYnXGpU+YxeeaD8Gjs3Ol8EALWK5cuIBUYk1f/RlH00ZWDzd4jle7vDY5hXdUGlLqgujS8tjZel1NL4UklzH59JAvjlc5WpPxbLrm0vrPwzupgF0H91CcDUYn5qMX/ySuzkldjUYn5xpvyUB88tAlieqwjoq97Akq0ybXp2Nq1td++q/PNge+1v39YdSrp1UdLcGQCrDQC76wOlv7W/yx7mSn+rPUsysyQzpYY9w+biBkmTiwuhOoTqAFSO7T5nu69sft3eEmr3OZkaAFg9MjUrtIzbD7e/3AAACP1RoT9aagCQtPJ5VhUyrSBqeVHLV9ZRJV5+NMVfocK7pzwAfKEqS2/3zq2lv1IbenZzd2VZLbXb4uU3cbWxFsnl8tHP5wD4+Zyfz7U5oq60DEBficZZbQCIZLj+RVupUe5KiWRjBXK6+JCqAbj2K+whGYwMJq+zWUgAWsVCq3Fit7SrLe0Vjmg8myQaJQxvbl9tt7F8c5Nn9a/UmWhsWm2U/mIz4wDWHpnfuZ9M3PFnHyct8YWhiYXhibU94pZt4lYyTXnNPCkjbA0CZzfo+BOnTkycesbwMxaL//QLUnMIrws+djQkOLuB7toZaNwRMBgZ3qzHkyFtf91N7i8vTJIO94BeG9CDa3sElvfbyVM5j5IPWwVPWB6S31YMGESSu97pWsfG9n+ZTOmTbCODKGaT7EyStJDCKdJid3pIgnbq6sTUVcNvfNVH3vCqj7xhbQ+naXZlnUawLtvML7/x2C8ffGxtz+3v2H/bOwyFijKzqdEfDhAffOw5kuW8/y3kD+QUlVPJYZt/dInoUXfuJHrgycK4oeb6hfnrFw3BWlmZmUiT8kVZl2nWs26rZB5cniE/6HeRCht//36ip7fN39vmX9vjcOTtDsO7Pdaf/txfk6k9f+c9O4iev7/cP2SMwoqNYXmUGAU/Q/6cu99NrpE9sQavZMw4VqsWawyPPuP3De4mb3Lzy8kcOJnXdmVea6hMK6Q1Pm0Qc4quZyTyme77Kcmeb
9vxKaKn4/a3dNz2FkNX6pyeJoPKHn87GQF13qdd8Bu+sVaK10qGYVmGHeXIJ1iUSAGdznOycYeBpkAz/ppkrzPZS1rG/7pEnnyZXZIYw7OQPbrkNUy2bI4fnyRN/8XmA0TP7rplv81g6Mzr/nn41/bUj52sHydrZP/P53PfeLB2y5ylum5+A6UMlQuGL1D4hRQq6pCQ7C8EE5fXSZR1cDdRMcTRJfMfeCZCx1pQ7DF20W3yXJeDsgp4jiqIgpDsVdG5hYrH6/GZ31Leupc37aciKqVec86dEpGlo1adipDsLwQ8Zz5n7FGq15mQ7FWRzb241QuqzI/jx82vXo1QRYAQantVDC9SuePUgjmFWhSpiDytsKFhjpTC/aWJ9l2/krF5iSMrU0lknuLhKNWopPWIJs1n8lg/VVmMwaR5mT3eurg9z7octjcMRaO6V05XFY6YhM3c+QegoUARDsRQLYR5ybL10q3/zxehq/Jj2tst83erFAmQNoepoqRFv2Xl0HgKPUWmcGJQQnRTrc+K3bJEE76w33SMylLNYxrNPTNrLokocSi00WktLETQZz6TR+mEOw0Uuio3NKDU3GnAV3Mp3SCK5htuAMzbKQL56Qo1Oyi8oIUg1essrdts+KIiE6CLlpmYsMzModHcN4epSBIpQRX0SgPFOj2FYcxnA6XmbkuaMxIST/VokosJ0zGcRjXdaTR3SlomWGe+etHSMhSgpGVoYBc2mr2koWU2HtIWy2gZC2Gh5k4DSlpGhLlYczmpTkUQ7vS4Qc1djVIFT9Fo7sOLli1xRboaZjScu0ARWwq6OE5KzV21bajmTgkazZ0SNJw7reZOwb8t5ekSPFlHe9LQMl10nPv/87RMLltF0yThJV3WVdFAlyqVBgpFvjZKbLDmXhW8/LLb2FhCq/GXjgC+/NMnO9KXAUz0Rdu3Bif6ot4Gt6e+8tY1BZXlZD72J1+s7WxYHpsHUGpk5hYBNHb2AJgbG2zs7IF+eeuR7uTcVQC+xh3JuatcwG/bY7ABtwN/we8AsLXB9/2LU9vqfQDSbgHAYjoNIOzxLKbT3HA2OOuPuhLBrB9A1JVYHkhejBlSeXTsclyPPlLjqollYzWuGgCxbOwPv/2H/u4GAImheQD+7obpH5/SCgVvW2Vb3rLbkXl6rvCqLgD8cEzZXONSpbpMHIA+WWBWisPteddFAPvrxXML0v56EcDgmkVuV9hzeTF9s0+/ya/7wgEAycW4LxywO20vay3HHZbKy6msKuVjmlp+/CxnA+BY9oj+doUr8mp57XniqQvHB6fmpyp7RDU939LAjc9OAOhoai813vfuP1wdEHTnoxlH2hPILs8prMprHACFVYuazBRZZiWjqc7q55/tv/7slXPPDpw/2Q9g39EtAN7xmnIqD19jIDkX9zUGlgu+2n13rZ5fTscvnq8/fyra0i1MD8kAWrqFzEzq5t1ltkFTOAAsr8or3nwOqgoOAKdpNlXRGEYDw0JndX1OdT2Wb61nswtaxSzT4/8KKQoxCCEIOQrgdXNbtovMNUl/s4f9bloD0PzgomMxDaBzkwPA2Ej+2Pt7D//xztreSkDO2X94bro/AcBtFxcTmbDfDUC+vWXnPbsBLI4shDfVA/DpWrRvAcDCwGJ9b7hhS72kYLxvqbE7PDe0WDq2b6sLeAxqyuxAdOS75wF4kykAKZ8XgHziTKQtXJwsTwlbWx2nO+SJDADnJg+A3Eh6qd4+PRsHYLPxAIpFBcAzP+wD4As5ASQjOQB3NCl32OxzK9ZVI8vGjzLX9gHA3LR64GZhdlpramELx4vNfh3ATIKZiaM5gNo1VegKXsae0peKC7MriQydLlsuW3TWKTZ+WY0luRpf6ZjvObBNrGQ8iIRSoYj3J8zltT/Zr7trrh/0dG/TmSKj20pHaTlZXE4o+YpM5B3MV/7SILgHpgvqNjm0pawyRvrV0BbOG/oDwf7XieFLAPybdwN4/vtPPPQbf9Pc3QpgZ
mgKgLtny5T/3aVP9S12ANgaHm93/F1a0AFkBN0tMxlBf/e7gm/k9KtT0o5WcUeL7VvPpl9+2B1TfXOThcY2O4BzJxIHjjaOjnu6m/0AhmYS3c3+oZnEltfuBPDD7118/Zv2AOi/Pi8yxT27gxcvRQHs2R0EcOWXM7dAOzuePtjhKR0//WTkj76bZuqgr9FvLxyNXlCiABpY57yWA9CdD0Rl3bsaVClBtdmnMwEA81F7Q7AAIF2UtnRNaCs7eFjVoXH5lw0FwunMoscNIJzOAMj+l56dgdYpsmOSepebGZMaXUn74fIb58rlAAgL8D322rW3XamJ8gBKMr10BIDZ8YmZstCc6DNIz/L/j/KaZgfyJckOYLUBYG5scLXRu1ctSXYAqw0CfNa7dWVX0Rv3lKfFmXQOQHilbm/Y4wFvA7SSZAcQzPqZIrOMxNpTTQ4k+CBi2RhQPgIoSfa1DXfYC5Bmb0myA1A21wBwraQQWpXsqzi3IK0ebaxdZMv62uXFCp2aXIyvNuwdFYnDaSUxx61KdgBr26uSvYS1kr2EkkBf21iLaMYBID51pWSSKWyFxmHW5KpmNIbRmM9/6serPSURvyrck3Px0lHwGCxxwRNQCwBQkuwrjUrECbuOOFJX8lMuRdT6EM/qOrtSVnRRdQBYK9lPXPoepCgASNFyAwBwTdIBfDdNGoJjI5XYzbWSHcCm+3rj4+VKkCXJDqAk2QGUJHsJDVvqV48lNHaHjUdSG23qDS6tUa5LIl4GuyrZARQnl1jdycEBIDdSnhurjWKRdOqUxHoJapHhbHqj0S/S1MKuPQoZLegvX1izX2/2A0B+TSyrPUVedi5bBKDGkiqfLDVKR8fYJNZU2QtFqpBCCSZjH5j2dG9jdBuA0hHAWskOoKuRNIt7W+xLa9JmrEp5rIj1tZhZsxknM9jf125IYti32GFzVk6VEXQA7e0igB0t5e994IhnWU0CaFx5c/ff6oeGkmQHyo3Vf5YkO4At2xqcfB5QS2K9hIPtHkwkD3Z4AKwc3UBaNzIXJckOoCTZAUyzGQf41BoLcy7qmM+UL2k+agfg9kjamr2ZpXZJoIfTFX5SSgEAOyYB4H6RAYAdjpJM/2/Ax4JVLBpmZmx95/88kuYMD2OjMoUErzl9v2SzzLMX7iTju6uCs29oqbM9R8htClXREDDn6Da3UCXDokFbPdVVWQjrmHkqTGYtc9TL7o32BLh7yDqrLyqSdjIR7A1DZ6kyQ6i6+Wx/etS6YgJ0kCniFYRqr2CV+aE3d67vvDGk4tYlmvCZT2W9aFkAe12RyrPHUZS8WByjkn2c3Tz/IstQpShw+KptHDXiy3/3fZpT0WCZLhmWhXAsWhZzQhMwKfJUKwC7TsFfj2Nhy5JMCBkql7hMV1zCKvR2Ub2Dgt1vOsZXGKY51cSEuQOGVS0ry3VbF1V0n8O6Kjc0yO2u0lntwTeZ16ZRVctSQyguywLpLESWt8y9uThOl+W1YJ5/UdUsy9G45wiZYr4qMnnLNNudWy1bemkQH6GqeSSDItaCzjdLgwm60CnOZtltFyhqfrL13VZ93U8fp1p35ULCdMyU/x7TMQAmxs3tdZ2hyorDMZYFCtIgk6aKiRIoRjkvVem8wVkrCFSBid6A+c3is3RRX9bRMhZCpahEGu6gomVowLGWKSAvTUwuXKcZll9XR2U9uu6xzAAV6bgblWKdsBBClkpzFyiyQqqXH6Y5VWZwxHTMa15mWSo9X8H86wC0d2yo0nCsk8r2kinW1IZGqrKgIoWAlKvJGP71uUcAzPD1AJqVhRm+/shrJO733/Tc9yuvmbPRd/HUPKepAFSWUxl2Zn7xqfMjk8vZttqKlL9FDts9Bmne9MkPyIuDQrhn9Ti37B37+S8BTI9oLZvY5x5RX/ZqrihPh5zOSC4XcjoBRHK5g1vZZKEcpOazywA6v9Qzdy7XuN+5esxkPdlsCEDH3prxCzEAXYdd509M7Lxtx+LE4uJE2
eXxvQ9/C0DL7tbpS1Mtu1sBjEUK3z6xAODgFn9pTJ0tbx++BsCzozN9dQxATX2Td8duW4AtxssPyRZgP7jXkCLmQL0Ae1Dj+Euz6d1NHgC7mjzfP3eJYZbgKSJtg6fIeItLs0lRLifN0FiZ1QRWE3IiFB68AmVldXBwjZqi6jrHrKgPAu9saq1JJfMAvD5HKpl/4829iURoW2vr9ampNx89+t2TJ7e1traGogCCtmK0aIsWbUFbUdrROr1UnhFttTUANgvCxS9+fe3F+zva7rmzA8DmzrLLdHgsPucbytiX07ZoQ7Kn1Fnvztc7FzOxEAB3TcRdE7nyxF0zM/WijZOKKgDRxrldBThTAMol9QDwUjEXYDloKnhB1zSGZfXzUkNDfZmmy9i9AK5fuHai//zk3Mjk3Mit+18BYHJhutXTC8BnC5aOU6mBk5fPAAj6aqPJ5aCvdmBy+JYtHUea69gGFwBtPgvA1tNgDwbVYsWgHH9o2OdyCnz5/sqKIquqZnNoNgezklRH53mfrDkFuywXBMEuywUA5yZTj/VP/cvZ2UNNHgCHmrwir//Vy3dwrEvVshzrAsCxzt+9Xt5Vt8vLA7icUpwpFsDWWh5A37ICYLTXu2t3TW9WGnCJvVkJQCSWFHPgZKgCOBmcDMmJB20CgDZNAzDJsgDYR/TDTSqAxSWE67C4hKYdklAnqQMprterRYsA0kHb3CDr9rCZtAbA7WEX5lRlVzMAbzGXsjm9xRwAze/Lb61w5eJsZKq5zbM4mGyo7GDw/+Rp5jf+KHDvXYWhMXt3eVFcFu3LNodTKZsaOV5oEXjx4Ue4oRG1u3xCoafbcXOPksoDUJIFADMOH/+lb7IX+7Q9W1ePzr/4hCCIsiwJgghAlqVffv1KuLOcDm9xLBLuDJ36+fDksicj8W5RcYvKQsa+cy/uORoHsLm1MDxl39xaAHD6VAqAIKVk0cuqRQB/0/zRQnfP/ctPDzjae/MTAJpnLzTNnBdT5fdITCqZloLj6JvtNZVSMIXY9OlvfQtAairScnRLcioKoPPuQ76OVlbLynwdp2UBXJq8/pUvRA922s6OFZ8fl37rTs+j/fEPH8bhZhxqxj+vFMzavql1b8B/IZ7YG/ADuBBPvDfU/s2huR2B8lJ3NZ5O6uLOJvvJGfVoM1c6Klr3sGzQkt/U2DvY7omt7FOpCftji4nozaJ7UxBA/EI0sDdYs6d2+qdDt9/V8OylCiXgDblGazPOHe7c1cqRGfzUb8EI4XA3ayx0cOHU/MVTht1ixy8NnLhM5op6I0Mad++7/Ami5/h/Pnn8C0+t7XH6xfZdpC/xgSMkKxI+0lx3c9PaHiXFKGmDrpTPqTOTpP31yKeeInpOpsRn04bz38rNHuPJ9FuveNdvEz2ddrKGdcFDbusYzvQNZ/rW9uzZfM/ezaSB+b0f/CPR0+R+A9Hz6Pmrj5w3hBhtqvN/8PZdxDBRJK9cyIuMkWnN64WcZjBreLvgXpf6+PkuMp3ZZnvWzRoW7IFT2/tPGRJdNTSk7nsVmQzrW18hdz9tevd+d7vhdn3/oZ98/6Gfru1pDTS21pCbzk9eJlPlvP3wnrZa/9qe229rv+N2soLHiXf9hOgJfOq1RE8H73YZs3h/4olr//cX19b2HOsI//K9dxMfFPb8PtHjTjK80QVjO9IiHjHUlnLPZDt/OEF88Mw9TURPaFiuGzYwnw0NmcYGA+MhyXwiRRrQsQMkpypsahQ3GW5plnFl19XBaH/nXxA9PwiT2e7eVSSTdin3vVK+91Vre7qnr/7e9/6UGJZ94hGi5+v/9xrRMzm9PLUuNuxvf/ououc7ryDfyqW3vi3ywNvW9txz7nuvPP+9tT22Y2+0HXsj8cHvfvTNRM+O973Z12EogvX1z1148HMXDGOatL99HUlKd/jWlc6KrSNFnDKMu5YUjcsWyZscZckdANEeR9FjmKI1klQjG17nKUmfkkj7sgotw1KUsGmrD5qOA
TB/boJmmFVwOOnyaknmwzbtPviCL6eMhWUqv1BRtaxgk0ZhEloIQuj8KhCSvSqm4lQ7zmmQGKgSxfuigmY/lGuGKouGa9mczxQFqsAbQrJXhf/HT9OcaoGxzElY327O63Zup+IzszvInJrrUTxuWfjAzibrip+oVPKKkOxV4auW266KcFeHyTrXN4w5CuGeS1D5Zgm1vSpi1lUAiC1Q0WF8caPjomjAUgRI8A7Ltmaev2D+aCjhc1jG2FIiq5uLyNs6zaOPKLF4mKqinStmLtyzOaqwDTVmvvQWtrTTnGqBfXGzGBIYu2ZZiC3XZlmI7ZVZ6zIZCFTyypY2nwzJarntXtwwqX2/cZvpmFC7ZYF0DhfVStgqbqhP3EJ0hSxL8FSIW1YTrqGBKt4pM0EVvmIVknSau4tCG/3Ek1doTmWniMgIn7asFrVAp7nTCHcLMdRirkcDWJgwt2AoNXca8O1UsWE0uEop3CnCZwsyVUgejXCn1dyZ2g3VniITVBHl2WlzCZLPWia1KTV3jbPMULVx5qVWH7tuWaJ23k6lhWXUDQ3X9dktm3tt9/dadao/v3NdtZAbRbaZKswsW7OhlUgLve00w3ar5stX93T1jeg3AErh7rpq/o0W0jJvO2jZNjSeo5JXNLRM9fN/KUpSSPcMFdgaAcDEUJlzUKOxFhgYYW+9a/tNHTtuMkSb3bFs0CtHLy+FMrPsih8vYwu4i/Gt3a7QfaEf/CzyhvvKsixSZF0i2+5lJlJ6u7e8/pyfygFo9JUF0FxSZs4m9zvKJq3iVPgcf/LU3MlnDSI4n1M/8L6biZ+z7ZaeyFQ01FpxEtQUsd+YSe7i47PbXnsbgOs/frrUcCpaU0NlXU2nVY+H+/tHyAVmhysJoL3FPzGdKPVIAq/xWwAw7qieCTLuqCZtGY4vDiXKKlu3v24osZTSuJRWeWZeVoXjTKntkZvSwqxHbvK3LxwRbNMzSktzWcJykivmJllpP6MWbSoAX0os2NSiTXUxzJBvqT7lXfCmANSnvHmN8SXtABjdDkBnClrenm2aJk715BNkLGlnMOP0qgA2hfmRRQXAjlC6aavhGvoXE9Nny6vOgSbf87NJAAf3GNaq5QXbI0MXtqodAPpGy7WxRifnfLbKnCkoBWjM7CC52As+Mjb87Pn4Yo0OoKnRPjtXAHDrsQ5FZ5Q19ULnHupnRR2ArqJUWElXocoKAIZl9ZW0LWfOXSop700t4dnpRQBiZOl2r+EFZqOzVy4r4XDD4mKFsbztjo6J8Xh7RwBAqcEVuJnhRLi3BsDiQCzcW5Mo8q0t9QAWk5mwzw1Ak6PBgAggV1Cd9vIE6N1HekE97SpzfMm1rRZA9vqya1ut3a5LDp2bnVKb1jjcSs6VoSUA6K4DINSTXuXEY2e463POnsbcYPmpqbqmQuNe92r1R/8FgNnSI/7osSldB+ADkitHpUiKnssMCyCs64srOcvs1/vtuYoO7tq2LXru5NfdBwDskmYvi2W+7i3raOrdtzecezbKqprGreiXrMJqFSNDZ2xPfeuZQ68+ZBPUUsU+m6ACiNb7ATgzBQA5tx1A/fL1nce/UPrUWPu+zonzgcjYmMoCCDB6XGcA1DVvnn/oGwBqu3cCWB66AmByKOmrtSeXK8zqws8m2w6IAM4+Vw6duPzcbCpqsHGfvay9efc6jWfHXVgaQ90aeXj1uGFAXkPbZoRbsTSCunKs0bPHzz3xC9Kx/LY/zxi+FQAATy1JREFUbuQirBrSAHARFsCFz14uyfeG/fXz5xYAKI2C3Ci8c2v91/oqzNV9jdkIGwAQ0somMlP77g8QZz86VVObN6h122omt9UYdEbtzt3qHbuJD+4aIAloXomwukFeaNmsmjVYZExe5WZIjeCjZ0ib+nV373jdXQaL79Off/7vP2+ownX4UOt3vvU24+cwMEjq4B41L+iGiSu5HZLbEL7qVLTWDMmICe97muh5l4v079eH2utDhhdsoTm92
EyaxvYF0iz11ix7aw2/2qsEfArphEyDfOU868oBTtqXJx2GwIYtk91bJw2budVwrHgXWcPsgXeTq1f3ETJf4JHmpqPNBh9d32Lyr39xiRh26sMvI3pu/lfSsexjGJ8xF2t2Ghky9gf8LeRN5q63M0lDmMHv/N6+3/m9vcSw67/7PaLH9vuvInquPnI2OW+4VxILomaDX9R960LdJ2tJR2VStMuc4ZN2e8Bm3IqpjUel/zSUmQSQ/MzriB5R1VnjF6p5p5I3aP2aTZaC5PPKF1uIntn/84XcNUNCEUFL8ZrhseYYT5YhZ9G4kyQN7DaZM/rqQ9l0XZa8Bvv97yV6nvjqfUTPc2fip88YaLqFoeGFYUNg+4H77zzwmjuJD37hH/6C6LmDm72dM2gbs/bAjDGZByPLXIp8Bx99MkFeZ2bromKMKs8lkTPcq30d3GffT4a49HzsQaIHw09j2CjfNx/D5tuIUTZ2H9FzJv7rRM/37z0986xhis7e5J+52SAZfqst98F2kmitQssQkr0qGLpdl4RkfyHYYp1Ti5DsVeFUqHziC6r5vaqfsYxqsMwgpEYuaU7L9FOkjwfwnkPmyXNcpGi6cUQeo9oPRUj2qijQZYIlJHtVsB1UYWYbDMG695QNNlh1qud/8iTNsHZmQ50K58fpuN/YhOmQv/zE51/gxfz3uEGHKkOXL0VjNnTz2OFDpE1aFfK68sfrkeP/F1eXTQjm3KgWpnJvOn3mC8oWuvTxF9bZZ+shWZZXHK4uqrgUGhToXmdBNVcItHEqN69GsZpoFIVkNx5a1LJYuwP3k2r7DYORqfQiUm1/kfHxPydZk6og1HZ63KAI0+7cfWMfvGH0j5EbiNbjn/6FNHhfbMxr5ortwjpO5qUA4QpVcVQLNfeLs+bCnSYBHgCCk3mJQNBeilFYju2WpWGgAaXmPjNjHkM8O0CVnraDNX+/dIEqLiDMm8+/fR2W+bqPP32OZljzEXOT9/lkFQqhinBfdphrBJS0DA2YPNUrQUPLHD5EUd7lpQqbw7LYRBqoYSp1gEZzf/1OKoOJBjQ5kiiRHaUKOvQ1mL859g0NXaEFn3oprnC0wp1ipX9pYm/nS3E2nE1UEe78vtrgvtra88vLAM4vR9/f3dM3FQFwx+u3PvXDvo4tIQAdtbn50bJTq8GVmc+66wFlNFHqUUcS3CY/AMB+/dlr245sj0wtAQi11kWmliSBm15Mt4Q9LfXu6YUMJ2F2Sgewu5tZWNYBiDaxlLVmWeW6BWlIFmtXIoQYiceKA0d3QXcxTFbXXQxcYJZ0uLB/Z+P+XQ2fe/D8/p2N567MHT7UpuuarmsMwzIMq+sagPH+6Yn+mfYt5f3BE/0zB2/q3NzbYKv1ASguJwGwbsez12YPbK4D8B+PXD+wKeRUtNYGP4CcrgAQwMrQ9m2t2be15gs/GNm3tSwRWiZ1IG/nvABsnBeAHIwDbQCyvWOugU4AtQUlW5AydhFAfSJTavi1CmEV1G1RpsjlbbYcrzjL7Cefs/GiTVc4AIxT1XMcANWpABiNFLtCttFIeWTtSgbBmcU0gJt2NHYj/DwmuhEG0I3wEBb99VprQJ26xLXuVgEkF9hn+pXz2dybX+O8NlBey7f3CgA8PiWd5D0+BUA6yWcigjtUHrDY53CFlFlnYcZVBNAcsAGYiRdTUsFvExJF2W8TAKw2vvTM0t5W154218XJ7IWpigP5lrbQrW11JyaXrkxFm1jbrFYEUGp0BGxtHfZL40kAuzt8AErtdsE7IafaBW8H7wMwvTU90ee546D81Fmho0nraFI7mlQAC1cX6nfUL1xdADB6esYPyGtyJyisVqvIADK84F5pNAU9npwyk0w1+7wAZpKpZp9vLJ0E0OYqR/IsFpJhpx/AYi4BoNTWZR5Am8xPCgqAqZWo87uy+i9cTKcEAHZWzxbSBbtYsNvshSIApW82B04By6/Ur+ChsgrDKqzGaxqvl9osqzEqdKHC8zCcx
Ns0TREBaKqN5YqaTeFUEYDKSWLRLdkyABpnp+eaWvadeW6+uXmuqaVxdrr9WMsT3ymfZN+trfuOtVz+5bV9N9/+lU/+BMCeW8oBownVe/b09G//zk3/9k/PHTzcAqCRF5+9MH1kb0vpCMDGlkMMDm1t+5cfPHNoa2som3bKyvbdvd/96k8AbNvd2z8zD8CjM6VjmtHTTHl+piU2I3ENbnk+I/hWFqaiDAAFGbIi5PIOpyMvy7ysCAIv8zq4NUXAeaDIMMgrel5lHBwAOHgAYls5D5I0OSi29djaesTFSFAIRmf6APQeen10tj/YtGXk5/+pOHQAfJ4pNRoO7ps/e371/A0H9x1ZZhYVJ4Aju8rhFX/3uWR3iAcwFFHu3Wp/qK+QTjN9CVyP440d6Etgqx99CWS/+629b35g/tqaoMz5dN9ctsMvAhhPSAB28pfB+1f/f2T88vgz5wG8+w7HxXHl4ri8p0O4OC5rsxIAdVYSDrpL7cYOT4mZqenx1fb6lwcSs0CroEzJfGtp7sl84xwz2ycCaNoqlRozfSIz/uOfwwi+O8S6DVz5I9+9+PPvXFrbE3IotzSS1tBigozCXvbaJGNxYbvmsWmkX76hjVwMf3mKJGH++J9v6d1tiK7LTmjZCQPLydo5+7py22+/+f8QPX/9k4/tOGIIgv7EF49/4ksG1/ZHb+/66G0kcTHnJbW8U/82szxi0EG0Q1PaITLEsP40GfTSzW0hembY6TxjUN4FxS4aCzMNZ3P/PE7e5G6GDLz5/Gvu399oiOWQa7JKjSHE8Jn+6Cv+yujNB44cI/WpZ79L/uSmLra5y0AJs6xq40kr++lBUpV47P1332KsW3L2zMzzpw3BMd6Affs+UvV77jkyMYPQsMw6DD5AdcKmTZDRHZt2k/Pq3l+7g+hBhkXGaL9KTJX6lw7SrtcHSONV5VSNMxg6Vzn+qnFXZ3I+0/cwyTY4P/5qooeRWdaYaV63FzW74bJ0+DWQm9pe+/l/I3puf+QdRA9kG2TD7lZdkZBMEKOy6yx9KUq6AjRG1FnDFJ2eSn3/O33EsDf9AxkONPYU6e6++NzMxecMk+Flb77p5W++iRj2qQ9/lOj52J++tXeL0WRnwmANHpeFhcuPP/Z7xAd3/T6ZdSq0eCa0dHZtT9/x+esnyODjT6VIsuHDrgTREw/4YwHD02ngI428YSbbWCZsI2+popOJT5IeW8JrmNtyNiJnDYapMilkv0aavTfIuQftljlzRDtVKAIh2atCo/R8bSzE1IbuAwJASPYXAm9oQyN0fAHLalnU1G9oAl5K+Bqo9mMzNB7V/9cxeo1UkqpioN982OLCZdMxlJhSNrZ2F1CwmRNBfFsVgXyDwj1aoNrfSKjtVWFbt3a9FHBqY/fKW4tzc5al36KBplnGQk6PUd12rWBZKRVIFNOPYhoD0JkNzdcGvBTzGlHCEdhQh8Gu3evMlxvFdLUsLjcGC305yiSdQ5XgZKoikt9obXTgknnSRPal6fmig0Pf0Ol+os+yJJQCt9ExeazdPONSbGGjNSwaJOepkm1sbEUgWjDW5Q2z+83TMHRtt27Xg3U4YtvoIGkazV2dqCKQb/BCQw7LrPVi8aX4ElIiuMkyGoEg3Ktis8uyBeD/vJ4k/V9s3EJRKLylk3ROvCRgnbK28YhfsSywjQYtrRsaKk6Jy5e+bj7IUhCEe1UkZcvmFddeRSD/z2/V2Xha5tqzZGWJ9Xhu/AY3DqyHmLJM4RnObmi4JIBUxNxEkym26QJ4hmLjWzJuWYScw/1SpPsooW+wCSq/FPdDPf7d5zb4G51Z83SBzxYtI99s7Is7RfkvXyLdEV96/WeJnpubXUebjGaUhoN/cS8xbOervkH0qM+zasIwT9/zas97Xu1f23NxcPmDnz5DfPBNXWT+5Qd/f7SxKbG2R9bnJN1ALjvEkF0k/a5dr38r0TPdF/nGm/9pbU9PIv09jYw5+eXfkPkifLVkdsDcVln8X
QNFEDsjxf7DEIqQ1tnhdQnW+xNkzaP37+jp9Bli+ZdsswnRkG2jztH0D11HiA++7809RA8SRSJVwSe/M/SJJwzJ8zhf3r2b3Ez47MPkRoG77yGThk8NKFd/ZmANRJEL1JK3fctBMt7pt//sGS1iWAY+9Kc3ffSHDxDDtop/RfRcGSRzlaRHE4qx0vTcYX7ObliH1OVE8cy6VL0H30f2PP4gkoYApE9M5j8xYaCzjwXcv9yz2fgxcP/0EHkqKQvVcNvvfuWel71y99qefJaDjeRcnOtSv74r/1SHagiHiM8VE3EyiMclkLtw/4whE/jUjR5xG/Nt/P3nHv/M5w3b/d7x9r3vfDuZmecHf/040XP7r991x7vuWtvz+Oh/PDH272t7pAs3yy4yxGVPO5nk5x+HL/1T/6W1PV25TV1BMj6N0cjNCnk7uanCVeMZGZ9Y23O6+OnniuQ37lF/g+gZfjVpSn55fG8k61/bU7DliyKpcJy+/GWiR3q6n+j512/9AdGz69ZX3nrrR9b2jM8lvvwz0tP70H+QkuGyi5wwvS1NW5o3re3ZGlza+rukp62K5t7QZl6FubbHskrNe6w7lYXQ6bJxZuvMh3noCNROn3kKmixHt4NzYyFQcIKUOHvCspzGnLEO369E2LIdWP978fVvXDAfZClOj5qbcTffu9+qr6tfoDoVIdmrYtt+y1JJdzSafx0AF4WpEMlV8WHcIC1T22O+r2/jIauWsRY6SyWRMyHzYemNtrGpoCapvAWxpPkPlNflhr1hPH+cSrjzbnMiSF1OvNCreRGQm7EsFwVL59/s7my36hs7dptnMuAaqOIXaXDqIard+RuMbfvXGcrV0Bw2T+8xPpd4oVfz36Ka5t6+oar0xUEyoesNQ1GpClR6ZPNhjHXhfRbCpVrmreJ8lrHbkkQl3JmQObF74NiG69GL5svJ8ThViAsNnM1UKUIJTqYqONayeNBdOy1L5ajOvxRDXBbqqdaJ0Lq9SP+rcYOa+/KgZf7GS9adiueoytxYCHfEfA2gpGXGkuY6HS0tQ5HSkvNaJtxFkWoh1COWOZYJwr0qtBhdhkkK4X4sQFcJUjUPIXM0WZb/WdWoKnAOjU2Yjrl8xbJUjhbCQlrGQlw/Zx6O8RLB/3y0zO6XJMNDScvQ4CVKy6QsC+Kk1Nw3GGyNZfVmLUR+9qWYItRCzV3cS/oDq+Jwl3lc7PSQZXvx3BnL9mxbCEpaxk1XTmA9+E7fYId3MF6sBRCwLY+netyHc294QyOAHXscVy/mAYRFMZ0oLwP5WaX2kB1Qk8+WowXsLd1KchnAkx9yX54pv+eXZ1UAe+psj55HYy07t6wBaKxlPbXy55+bWv36zlrn0JDUw+wDkEPaCU8O6ZyeGaw551Q8DtmTF9I5Pu1UPAI7HM5WYjl0NVHT1VSzJqgm2BIKtYSmzixs2ls7cmG5pqEsvGqiGoCWRsf0XL7UqG3ekpiKpicjjbduS08uAeBkLT+y4N7aDsC9tQ0AI8mZi9e8nS2psQqH2BRCbDQFoKarTI/8+IvPAsjOaq4mNjur1R3kr1zH5XEuaNcBRAsMAIcHuivuYu2SXtHsGoKVB1aQAODf5k52JSqE2Ghu+QPh/e3cLgCSLQuglBnq6/Gh0oBJqcwVvK+4CwCUNbEim3MI8QAQUbDVjojy5+F9x7rrARwfXfzzl+/8xGNX3n6g92eTI9s8rdfTUwCuZ6a3uVvuf1X49Hj0cEfwn58cONQZPNwRnEjnj/XWf+NC/9v3brkyH7kyH20/Wgu7DuCWPeFPfvkqAGj4+6+WowWkgiraOdHOpR9yCLWavMwCEGo1ALzDXjzCcSmdT2rFFo5L6ZlYfPmpYQCOjtr8+LIQcGxv8QxsDTqzMgBnTo6GnM6szPEhAAwjamqa5Tyamv6Hp66dvLZ8uMP7j0/NAPjdO5ojLPvAy9s4TVdZBgCn6Zkim1dsAJSVtMw8qyDxLfArESbKEvg65F1Q1pgUi
nhMy/25J//n7kpVjaelHLQc1np0WAGNK6ujg0NeBdA65WpLeAAkubJV1KA15gWnynCiKkmcyOlqni0WNFWDzoLhGEbWNRbMe44ynbUYW6Enx6P4h18n1cOeN7/x7t8oB0o5unrzowPzJ3+xMFeenNkVhiaslX9LhlHdOpdh1O5jzYCCAgO7jgID4A8/ceTWu9qO3NT67HNTAJ59bnL/vnAwXJQlVpYZp0stNV758XsBxCaXa9pqS8fJC5fVRHkLcWx8HsAzc89/oU9qbGAAzM3rjQ0Mn3ff2zHm6xhPjnfY/AkAxYR/3189AWBfWxOA85Oz+9qakkvy+7bU7Qu6AOwLur4wsDScUCdS5WT3WbkYcnpthYLGunRG1BkBAKPLOiNklnVFAi+idARwYughpkcDkOXKRtjwlP3cKD83V37F5ubZ/fuW5q+dBRDy10QSsWgyFvTV/N2XFpIDpwD4esu1OcPnftC1p3wnRy9Gu/YE+//xG8WZiVLPSMq1yZudiSd+/2V/3LWzc/TKWNfOzq5dHQCiD5eF4czYGIDmzs4lQfj6L4K7enILUQHA4rLwSM1waO/Xt3OViDhX0P577/Q+d7nYXF+eoi1h7hWN3aX2nm3uL313/j1vbvj0l2ZiGfVD+0Nn5nKHGp3/ei7ya7/e0OfiGzvZc79QGzuZuTG9CP0X/oYlXzm2rS5ZCCfzzNNfJAuPOduaOZdBrUsnmEzSoOO7ijm3RNr1Upo0S6+OM1cmDB9cYnMR1vBBJcqnniVtVfv+k0TPHf66OtGwdbb7tt3dt+1a28PqOq+Q18CLJBuQ42yqsbobJ9pYkYz54yJkXYX2OtLSf+AdDxM9I3GMxg17E1hRE2vIIDafn+jAlBTPGQ3tOz0dd3kM1TXntOyPixPkB99FxnjhFhVBo4t9sAhjEcRYXj2/Lu3q5kbyOtua1um/djDGW/XJL1z75BfIIsVOJ6nOsz0euA3Rir91y6YP3mLwOz07Gr//38nIDe2h3yIv4Z7PET1vv6v11+42xHHmYtnZs+PEsPf+9SuJHjyWx6LxUnMMCKeMQ0MTea/Yz/2C6LnlDMnedLx+U8frSMfayKfJEMMHT7yd6HlfiEyPdd//efer//Q9a3tiM2Nnf0yW8jn5MBn9eez1R257/VHDlSscpxjmf1EqRpMkyz8VId/K7OVJJWmIWfhB7Nnvx54lhv3xbWTqq+/PjhA9b+toeXuH4Xl95yF852eGMa9+y733vZmsz/fRt/0z0fOaP7rQ2JtY2/PdpyPfPW54eWs08YBEuhK//FVyBdW1Pl03ZD2Tjj9WPP7o2p7ZfOCHs3uMn0NdgsyD9sSSZ6lokDyhdwZC7zDs0XsNW3c/S2Yyt82RDyJ6rSgZ4xoS2x2JHYaXcCan/HCW9AxVoWWUnGUxJ4sJc4NCXt7oTAaEZK8KRqLa1rFli7nzWZMs474aWTqnQtCyfRZFyZxHvmUfXc0jt2UP+tad5la2s4buXi1ScEp5qieY9JifKnZqlOZU9/3hy2mG0aB9C7l3YT2KMlWZPd5vvkH6aHsTzal2UmzgHLw2RHOquQHLdjXrMM/J0eSwLOvUoE4VAGLzmU+/mTzdDlXeaf4IszbL9sELtVSZDAi1/YWA081ln75O36+K/n7zUB/eTbVO5Cj8Y3PrdlpVR9Sy5cQmWrf0ZixLWXHiijkbuzxCVawDYQqPiMOyxdLRQuVhGnyWVHVvGBP9U6ZjvC6qKCwlYa72TSWofP5X4pYVVGzstUzaMjBPPTubp1pLwjbz2T5AJ9yLSfPpd7iWJB5www5VUaESWDSaOyWWJHPlgtGoXkIqzZ1uoz+d5k7lUHVaF9m2wZr7M+fpxCiF5v7sSILmTDSae+0my2qoWghHC5Vo6DmyyXRMTTNV/TwazT2VpZLINJo7JWg0957t3TSn2mDNnRKLRfPZ/hrWsik6k6PT3AnC/YUg7LcsMw6N5q5yVGKURnOHYF0GPDrQaO602FjN/
U/ev53qXBSa+5FySS8LkIvRWTn1lsUyJbyW0TI0mntshqrEKI3mbhOozGIazZ0SNJo7JS1Do7kHNMs0p5m836pT/XRdcoWqkCg2ElYF7289RnQtObt4Y/ZBp/x8Z+LRBGr9WE6gFoCfW/7ikxVybWhc6u4QmxkVQGRODjWWJWNXG9fVBAAtdTKA6SWhJqAOzpUdNT0+22CyiFZE/DyA7s0sgKFhDQBCZIjrpHYtJNr4vABAcch8XhBD/qbuYwDy6QgAhyeUil+V5JVtzU4gBwB/9N5nduwKXb0cAbBjVwjAB/7wvkLR4B+7dLr/8pmBtT2MJO98VYfH5kwXc6vHpkDFxaQoDp7P/+Ax8s3J2WWHcbdOIWHPLJSlrZyF4CqdP8vbeaWg8HYegFJQApwjp8kAOIlVRQ3AicQcWysAGM+mOlxeAE8tzpz40/ecnp4GcLilBcDp6WmEVl45l4ysAAARDX1KJWAmxCOpYm2qUh41NnZfIxuPVQRuoIZns7DbuEJRLR0BjM0nYmmDJ9bpElwuAUAyKfl84uRkemY++2u3lFXpXa2ey1NpANcW1q1Vds6fkOOiHpCYuKgDYJbVCWOhZBfDvPFoxRja1uq4PpUfvH5pOVIJxK4NNdzRIqSGDa9rKjX/mVNnumubV3s8SdbNsQCKjGzThVIDC6QUS0W1dJvBvpQ1SKoeq+dqFsovlcsOuaPebwxc+807NwE40Fnz/Fh5o8YtNf6hy0kAd/1awy8enO/c6Vma1l3DFWXQvZzNhLyPOit3RhJUUea+/bekT9Lf1bTFJwIIbQpFRiIAWnba5s4YPLGaQ7DVOjgHr67hW4W8yMocAE1QWZnTBHXTlm7WUVbXGJ7XFeXUU1OnfmmYt0WpkJRlAM+di9y0v0xNyOMzrT0GAv3gK7Y22wxBk3fNIPN1Q3Bnq+R9esWPPTGbam/yTsymOK6iL8rzrNCgyYz29XTZn3llcXlnuHZW9TT3egA09aZnBzyeoDQz/8zffu9ib8uh0rCB6TO9LYfCrvIL7hIyWdkN4PrDxbPnDUrk7JLWUDBoEjkkRqKkmpz40cOF/mH7ls0AEj/6uf91r4zZIjqiABIjcQD+TYHoKBdJGvIN8L0Hb+sy5Jjq2Lnpysf/FIDP4wSQTOd8Hqeyv33tmG29dcWxxaVLcssCO11fVi6LhfiYVK7zFx+fAxDoaHzoQnlin7qaAXDzDvd9QVVKarlM3uku38Zzc/bpPk+4RViclsMtAoCZvFJ3MltsrahitimFGe8nk3ZdbDhA9HQO/1vHkCE90FcfDXz1EdIaunWdpfX6X2eaiS2HWR8yBoJP4vSYnVTwi3kyxuD4JJnG69gr3nPsFYb4gYKyFMmRMQz3tpMBLZ/9xq/vO9S+tucrf/ftr3z6O2t7jrxp95E37yY++MoeMk3uPe/67snnDbXB/B35QIdh5SgkbPOXyIlV20lqLsV1RJfEsTJnkDsfv++2P7uXXIxRv041WwKItF1OFg4jRSYA64nWNFn/4ScnR+aiBhU45HXW+UhfpRAl1eSvnCZpNDWdVjMGh37PtoaebcYIa4eCRlI9FOfJXH2nT/LLRutkIjQ+ESRjY97s2kf0fOL+DxI9A55CijdoRjlBywoG286lc+0yac5ml2aInoIuFTXDU8xeyuYuG+7MrJT7cZzcoO94kmRFXvPRHU3dhtfJKaddMjms0E1yxMc/fp7oueV9t97y/lvX9nzqEyf+7hOGCosKmPQ6I/7V7eTX/dm3fmPLIQMXdP1zl65/9hIx7DdAvqoNUdLLatucErsNq0KHo6HDSbJtJ6MDRM/tM7cQPf95leRSakKLtUHDCzAXb5hLkLH8pzaRudJG/UQHRiRm1FjvcNOOzg/+zXuIYRc/QAZ01dy6VwwZTlccWyyOGVT1dGIsnSAn7bse7iB6vr3Oud7vrul3G5w3QlQSIqQ6VcV+90jmBNyvv4LKiUFK9hcAv42MGXopgJDsLwQ0lNJf/
uxpqnPRERI0aArSbc6kgCZZxjvVBM3pvm0i1cYcj2IZhaXQ0H10mBsyZy2UWqpwoNZ95i8hJXnad8acU3oIVAXtCMleFUnZMhbI46BKIGFdlQoQkr0qPH4qr0k/xRZ+1VlFfFSZ2WnR3HV+aaSKc3Y9ZijSQIl0+68SRTKF7A2DUNur4tnvXaI51R9/kMxuesOgkQwfv+82qnNZl4VhNmr+VnA5Ku86K1rGe8bWVWpej+sS1Zb6NG9+3110iYZ4Ckc9JRq7zf2NlJg6b1mizdd/+G7TMfdil+kYAOqyZZOhwWXuHvPYqTYGW1hfTookTMekE1Reky03uoX/BjV3Srw0Nff//JenTcccedNumlM987xlOfCskwwbrbmrTirns4WaOw3qOCqbg0ZzX/r/qvvuMEmu6t5fVeecu6dnenLOG2aztItWEkggISSQeAIFgokGbD8w8Ex6gI0NFtiYh4zs90BIBmSCCCIph03a3dmdnZ2cc3dP93TOqer90b0zU3eG1/db1c6Tf9989dXeraqpqbp17gm/c46E6s53WHOXBqheM+GTeS0YO1teGFFq7hKLaJPBkyivW7hDVGbcDmvuCiUVz4dGc5cktwm6XqXmTgkazT0upzIKjcryxSgosUc8zf36faLVwKORDLRumf+6mjud7kTjlqEEjeZOCRE19323lecvUrplFsTT3AmH++sENJp7pYnKjNthzZ0SNJr7tm4Z6djESZulNJP8gUWbpUafRkyuA8CsjPNVbQCYlYkE44oz1Vp+Kc5UO7gzrub2t96iAjAxHQfQ2qQFoItGXZ2a5ZEEgOKOvdkUC8kU8jCATNaokIflwMOXSivRvopSkEpSsbFi6uSyWDZXZQEAhXnjGw6f8nJRwbMPeyfC3omwd9xY0Rb2jgOobjyqDmlyyqwsLS9uAdTuMtX1muYHN4IEK7lQJ1Oj5Nk0wyl5NizJp1meN7Yw4UkAvLEFQHVXhbTAcCwPgGPA8mA5JrypGGE4kTVq5L99cRZAnVVfZ9PP+6N1Nv1IelUNSw5RGfTFbQGZL7635uVLkWO7DMUtgJdm3CPLgXsOtAIYWVkbWQ68541dcrXse78c6Gt39rVV9I97sxKmoSBfJ431mAwNHY1Dc6ftxmpfeKm47a4/fObknKvGWF1jKu4sL4bZaIFR5qs0VgAribUqjRU8mw8pF9yx2krdgjsG4OihSgC+2dJj8c2G7A0me+UVOlAuD5kUclmV0zQzHUqHc6Y6TWg+YarTGA3q2prSdItEUgAYtYLJJrWOEhElvpqJeUuB2ZamDX5R0s8PDnE6WTKWUwNQsDm9U26tYgBYq7C2AmsV+JxkLamxytQA1nJJAFaZ2qdJAtBoNtSclaV49RWKS02NCYA2kNXaSspgl61q2L9yvKLNOxlmJCi2IWIY8Dwg0aMQBasAU7pbSYpJKUo2ezLLA7g8tXp5uuQD7O61A7h1b41CVhLccrmkWCjtFW8SwAVPaq9TtbdC/W8Dgfu6NTJIAMg5FkCW5eQ8278aHlmNAOh0GDocxtHVSD6RLISSEpO6EEoCkJjU936x5O4Yenmm+1jj0MszKh4qqzIYSJstSrNFEQxkZJIqh6MyvjyudW2QN7yJywBUCm0w5lHJdQDibFbLybNMIcsU5LwEgKHaEfaVXofRrgz70l0t5k9/VkCaePb3K+3dlQDOXWm4ur+nYunUy8fv7Jsbc8+Nu4/f2Qfg1BOXjvy3XQBCc7HQXNRUr9f2mioerPf+cK7iwfr4YEjba/oz1K/+MbPf0QLg3OokgP2OlidfuHigow3AwY52AN/++S8LAfltncem4zMApuKzt1bcnNYH69SVddWVAOaX3HXVlYOz03tM1QAuhZcGw4sP1h25FF6ql2nq6jUvvuCrry+tbQ80OACcmvAdabUXt/9+erXFWt9irQfw2/EXijuHDNVnvdEDFRua62VFetCcAnD/lOnx5hAAxUnOZlfVmNQnZwI1ZhWAO
BDPMrFEXKcpGYInnjtTbTG0HO6YPD1a3AJILq+0uKoml1csej0Ai14ntxnVJpPKbA7MzFgaGwEkYoxC6Yz6S1Mr4veaj7wxPFQSfdKWHgD5ycuf1uPUUAbAqeHskS75kW6FN8t1mGCq0YQWEwCCSwmrLC8bWABQcBoknpJ80PXVZX0ZAHK7IuvLyO0K5ptfvwtCuPJqldDJqLJUqywCFVVaCMsKZEzV2kVaNdFZVXROwKp8fGD2sQGBcXdTp/zGLnLtvf2ug8TIhacm+58S8F6b9mib9grKX+i1jrbG48SJv2PJmGe9s1WjElgnT37v0pPfE5iTN9yoveEmUjNieXIN9YwG3KOCJNXJdGoyRXJOznyV9E+dvEAqCbZGg1zYgyI0EgiOkCaZ33iJvIdlkoojNWYkRsE9pP3GlF/gxjVnMpYMeZ/v/DbZOvFfv/rSzJiAjfDG21rfeLuAHMZJc7ktAStFjrT/PvU3I5eHBR6/+z5guu+Dxs0jfFTOjZF5YSssqafznJQX1trkJLECK/Crrq5FnjsxKjwPX7n/74iRZ5PzvrwgcPfkb4Z/+eth4rD0wMeIEcNfkSysj3abP9ItmCEP/3jw4R8Lir1okW8E6f89y5H9KRGYQVb4SLV7oSP5wd7BHxMjX/vIt4iRzgN9HQcFJ9Y0K2qaBZTQZIRfHiE/3pyGnB6es5FUUGClGQ7pjAcF7i/FpYJigHQR1B0gyW+X5ybDCcFzqOhUOjrJSB77R7Jhk6KNfHqSpFqaEnyq7iA8AcGckeazmhj5Ke3Vk9PD8YO8Yl442RQcL2yLqOIl1XmSOlVtJ7sb3vzhB2/+yIOC+1xOSVYEMy2kkl2sMhInXj/wVWIEWvKLePrRmacfFQS3dbe06G4lc76u0pDkWKqAaiZU3mKa9VNR9AnJfq3x4vNU4XVCsr9OQEj2bZGSiGaCsnkqnzsNGD2VK5anqKK8uiZa6OhoH1W9FEKyX2vEvWSxtm1BSPZtkYpSubkIyb4t8hWi+aaYaapvkJDs2x+T39F4D4CGvl1ljzGl6PqSbynRuM0h09sIoqsV7gyVcKfBrI9KuPfdXj4XWacVLZ23roEqml/ZsaNdq3JSquleCJd/OyqKzhKU4KRUc7S3u3wsh1uh6mXBMKI5ymnwSj9JtN8W/zokWtsZyMtHg+V0s92/XL4Oj0pPxVhTmSm6G1aIVnEEZqpvMK8qH1hOK0Vj9Kboeu807ivPGgqp6LQiefmSAfKmbQTRVQp3li+vG1Lipk6qV+gWrxsf4ZN5LaDR3N9/g1GsX0cp3BllecG985r74z8pnxPA6KjKE/J8+Xnb07bT/d76HOU/Qg3o1tR4+f7R2ThV8rrNRVFB00AlkWk0d8Wl12PnFhE1dxVd752Z8+VZQ2EVHR9URM2dZmmi1NwVJjq7QySs0BmqiZRoBjuN5j4wT7UQZigYXbK8eAqIVLSaKnkFVce+nq7yayofoypywtBpTzRwULRm/PyHyALl1xwUmru2opvmSjSaezJC5ZYxNZUvHCaiW4YSNG4ZSpAO92sMY4puyZGUV54y/w/h7p/nR18q/WH2Cquu0qSrJDmY535eKki/Mur3DW3vAe+/yBZ/iv+MzqlVRh6AaksFsesbS9ef9Req9PYqfcnG3F/dvb+6G0CQy6Wu0ANTPOeeDGQjpPjLxZXrPwD0WgeAv/thP4ATgxtzOjMSWt8p7qvyaQCmtIBNvLfD9IF3NOztMO3tMOltG7T6qvZeADpriYuZ8axlPKVWADF/8vq7awDUdhqKOwD2NDDvv0lS3AGwu04JAPorNDLXjds+Op1DDWB52L087AZw5okLcpdesamfssKlk7t0AHT2Kp19wwtsdBqMTkGwlL2iuRtMtevbzXDUSFX5DfnIVzr4vg1JsbISXt8noqlGdUnFCyYy69uAOwsg4MkGPKXJGvBk567wLkKr8fXttojMZta3sUzpwfpWBJM15FkLXXnmAIDSbUQXfcWf4
j9jK5HYSqS4s7oWYf4EzZHnGABcgeEKzFCm9AcWi/wUoQFb3DqYjcFsgZzDyisFTIo7++wqAGk+v74twoDC+k4apQ91z7GmPceaijsAkEoBQGg7x85mVTGzUWoiG+iXS2TyTV++fIsUKBRKv7qmXWewkQvn6ZOlq6373N3RdHHbvxwC0D+wuv4DYDXoWw36AFyeLokCLaNwsMb1C2oYJQClk1E6meJOcZtTJFL60nPOKUpelFioTGLRhbN+AAOzifVtEYyKL/4U9zlZDuZqwQ/g83mHhweKx/t8Xp/PG0gIjMJAInPY1XXY1QXgUwfeCeCwq6vzvh4A6gaBb3Bfb/fmfY1Rx5g2Fjm2saTemaz6+nZXcQdAcGE2HlgEUNwCiAcXAVwKzl8KzpdOSeUAmKRs8ae4X/yvQXequB10py67k8MXFD63FIDPLR2+UHqPdXtd61vdLQKvdVUiB4A5pLqDeKZf/s+jHQcFNSuefWjwmX+8tHmk/Yjz878im9pYq8jo82PfVezfI1jJ1TGot3zmvJzkz75gIzubXP7e6OD3BIMt+1ta9gv+pJHV+JefIcvpPWQnjeX9D/Tsv1+g9bAJlo0Lq0fptLHmZuLE39zyQWLk1ofe4iT6T7qn4RZmaeva0PoZ4kQkyHbs337nP0y/Kqik0feBI30fvI44TJEkbaZwhEykkjJNDASqn1w2qZAK+s6sRGpXIiSZujFO9vFJTHsTU4LcYEmDQ1JPJpRFRsikG7OUVOcvTEYvTgi+570uTV+14D4z2awvECZOzKjJS8mDkGQF74tRMYxKMNOCNj5gI8Xx5259iBjpN/lDcsFn/9xjl55//NLmkXvf03vve3YRJ978wE+JkVuPmN98nSCmeuqJoZNPkMSb5wpfJ0Ywu8WUlK6vX+uIgxFKw3wFCmTyx/vu/AQx8ulHPtq2V0BW+ebfvvDNv31h84it1nrDe8iZ9vCXnydG3rab/HTvfuBNdz9IlD5JgyNDFIEI+Qa//8kTxEj/XOrCHHmYSkVaxkNjW4Lb2g9DKhAgz3z/u8/+4OHNIzq9Xa8nJ+2Hf/JDYuRrTz065p7fPHJ33dF76shqTvd87YvEyLvqya5tVQfI6NHTK/Fnlsn17JUvkb20jt59LzHyPsMbiZHmA/rmg4LrSy66JQOkibaNDUVI9m0xdkq0duk8Q1VheLWfrmg4BQjJvi3yWioHCCnZt0WMLH501ZDmdrprVY6ycC4FCMm+LRR0JQoIyX6t8ZNHqbIuadB7TLw8IJYqACMi1mI0rjwqh+3uN5XP0mJZunhjnqqK8g4jEy3vOXzvMbKJ4Lbw5MsX8uKc2zARthHuP/tnUgHfivYjVOm857YwXq8pRv+07b8ZK4Pl1wlpnOpSA4+TDT+vGs0H28ofRAcO5W++yiBa4iIlnJby/vRIjGotyWnL+0YpOBS06NpFlR1NqO3bgla4i7d4EWr7tQdVhMkzU76+Aq1wV5aveBOLUqmGhNq+LUYWyDqOV42BBbKJq7jYRrh3UmjulPjY+0VjQHv7yzdJ6XCIFm+UxaiEe0UvxSJX+TaaS029Wl7B5yRUFEAJRCvVkA2Ufw4Zn2jUKRGhSu6odg9garF8YHnwZapaUbR1Ginw6397uuwx/gXSq7At1uIUmjtPFV2nQV6YXPanj6N7pBS4a+8byh4zujhPcykazZ0S3sJVUmyvbWj73EUKYUSXD9X74c6yx3TSCfeVy+ULTKacoslHEd0ylOApNHdKaJp3tNKyQUfFfJDFywvulJpKQBIO92uNnXfLtO4t37HPVmuluVSbUzRCoXdatB6qNNDpqXICxjzzZY/pqKmjuZRCLxobbbeivO3FU7plaHzu7Yep3DJU4Kj0vh32uVOCyueuo/K3TNNo7qxoyTsrEdEqdko0okUCMnSVIwt0xeZo0BAXLenh9am508BeRyXcqXzuDFWlwx32uWcy4jnpdhw0PnfGs01Ai/n8Z/tmX9U3HIyGlhUmV
ya0rKgwZWoqXL7ZZQD2BpdvdjmiNUS0JbJdbqEAQDMpNWSUAGp7LbW9llcem6zttfRPX1q/bo3JDyARUiTCyuorXaSXxgJBptLZWNfZIP/ps7HORgWAkZlMbddJAI3OGgAznsVGZ004agxOl4wyc5M6OJ2stLFKBRPzxXV2rXvIW9ldwWmli8aIvlCiJUUlSWfa5kxbfzHifXvnht4tv84LIL2QB6CslQLQBXWeS4KAsG2fmZHx3rC3wlg6MZ0w5Aalvv5Ze1+Dr38WgL2vYTw2C0CRzwPISKWKfH5huSqdKLmSW/fr2vbpMZO74zY5AEgjyBuQM0IWBnfl15kaACA0W1pW9Rvr6JMn0k+9EAeQt5SyTu58o+3OmzcOSBYywSn3z/7h0c13bml0HfjoXXKlTn4lMyseWnn4o18fP3tp82Hv/OR77vzYbTKlLZf2y5Q2AEMvvfCt9/4TAFtbq398orj92Ll/hBBfPP7EhL/EQG21ySb8ufve3HDfm+sBJFlezTHF7f2PnBv1xwF02LTFnR/dWuOLpAFoFNJEJg/gK0/EAZjUTCjJF3dstQmJvfSW61WauVTiqNW0r1IJoJDiJSqmkOIBrPVudMzhbRYAusF+WSQEgI9zjJYFwDXt5pt3lw5yu1FZKYlFVRNj6yfGuZiW1bXeKbD0VXknImsp1qxJhxNKY3E7edHTUC0FMDadaW9SABgdS7TYcqrGjtTMRjgqJTGNeze4uW0VsnQ0x+VKqy+vkTOJrKPSEvBnO3oqRy+7Ry+733Ff3+hldyJw3tzgCs5upHSFZYauXe3Dl0p327WrfezsCABbld2/UtJprE4zX5CuLm18wz1HK8cm/wjAZmvy+6dttiYAv/hmaXXMMREZbwBw7K97ANgcRv9quKO7bnRovr2rrr2rtLSfeWXu0NH6dIz3jhfOzq26jNoqk2YllACgVE8PDIYB7O41DgyGd/caR1e4aIqPeAIGp6W4bdMqel2WzCZSY/+I//1fPFncv+5IxclTXgD3HqgYOeu/5xOdP/2Xkc4DNgDv+PzRziMu74QfQEWr7dKvRzr2N+Q8ycFhP4DeLhuAOU/sxPlVV7VqeSkFoLjT6CsZbY5mfnWKAZB3/A5ARVOfd7q/uDN/MZuOdmeyqwAUckc0PqgzN0I3AeFiUaO0eS4PAnD29HouD+6574Gf95+sGXUDiNh0Bn8sYtMVsklLV8/U0khzdSeAqaWRnqq9iRmBn2SZlT5zcmJkId1ZqyxuAbz08AENZD4k7FdqtPqj0tNrySaF/uno8pv0LgAjnEfnKFF+rdamtbVpAKpzJT19IRmpVRtOBBbUK/HJ2UtGvTMc9QAw6p36psqUsW18NtbWoBufjQHoNcZ0BvcMF29ktcUtisIdQmjCIPLJNwv3ddh+S7JcVC4yuuibI88KMpVBkNpuUbhvBuMhbckGF6k1hPXZsF5wo0XhThxWFO6bkbqYKoQFHjHeludtgpHCKpt+mgwABluJAXjnHevCvYg7brHf8aYtNqBkjBzZYjM9Oaj75aBAiySEO4Azv3rx1V+9RJx4z/dJat037v3EVuF+z6feu3lk/NWhr9/7N8SJW4X7l9/37Oh5wQNcF+7r6F8If2hLYPmfDpMP4eXh7MvDAsXcWpu01QmkbbVGfk8jyX7zHrmJGFEtzKoWBPqaQLgDAOTuZbmb5OQRwh2AKiBjs4J3kc4UMlnSQsqvhYmRlIRUUTcL9yIclRaHUzAhV+bH3XNkGC0sIz8TNscznEB75woyLi+wkFjtqkRLOhhfeYzUiOuOOGoPC3xrm4X7OubPk6mzgQzJEVoO8stBwR/YplW0aQQEp83CfR09ID/eLz319s4jrs0jWU8q5xHW1YplBqbJYIDiAmnoF0yv8nJBVkQ62pOOCqxzXh6HmXzsskmSO1jg5LzwwwwaJEEDefONabIT3id/QlK3P/euhs+9W+CC80elvpjgDUY0voiadEisC/d1DI0PDY8LO
MopY2vSKBBGCkXMbCdzj67S5y4PUHmUZBR58EoNlfctFBXNIyExlb95iUO86iVs+VAwAEKyb4vqtjqaSxGS/Vqjr9ZIc9g8XREhGhCSfVsUdFT+FkKyvxYQkv1ag4tThUMM1eUDUekYlRuIkOyvBYRk3xbhuGgufiYrGtVCxVHFCI92U7mnaEBIdnpsM7NpykBlLaJ9qAo64a5SlI+heZRUYpQGhVXxQs18+aRtAG2O8pE9VxupMmyLO/7iveUPokPHvvISpH8hLNavc2mpeO45Q/kvh1K4c/LyAotLUoWFZNs1TCCgN1J5t2mULkYumh85TVcVUq8SjYD0s6+/WvYYI91kYHKiFe+jKUinpBPuNFBmqegDdmv5bzCToQuopilkkWaSKtYhp9DcMwnR2kVSohAqvzIVvDtdIqO9orxwXx4XjWNLidHz5ZlFFxaouqXTYJlOWcsbyzPK5W6q3uU0sVlGJlrEWEcp3HfUBoCSriokjXDvH6ZiVe4weDkVs4imIF2a3WniL41wVyi2CaheWxGWoKg9SwklhebuTFMx9AmH+7aQ94rXTpEOY97yOT6Umvuvv/2DsseMn6UqskY43F8nkIbLM38pNXeaZFdK4V7IlV8nYuGdln3G6vLqIaXmTuOW6euiWr3u/gzZjeeqwWnKd3qgdMvw11geElDkqDR331p5BUu+XQdw6dx3OgEoXQllVTK9ogaQkIe19kQgzFqMXCDMAsgHC6lCGoC+tmR4umpzLYeZVECqsuQBpAJSANKDb1E0tADIzE7qb3oLgNoXXzK3tnsHpwD0PvjmwR/+HhGf/4oVvxaWWo358fnoyAtGvT0X9cn09pJL6F2fql2bS1rrN4wIyXTaVVV6Q4G1REubbXB2qata6U6kKzXK/tVwpVYJYM1XUFfnc9HSS8pF2CceSgFwtZbu/Mxvsn3dcwAuDs857SaPL+S0m9i8E0rN6izXe7N08Nl8781S7wy3+4F5ib90A6xfzdmS5lS1SlH6DFIZRqXgMzWnskETALk5FJ+q1zbPyc03Q2IEe2X94CRgch/9XXzAk9/tlO5xSi968gAeflc1AGRzACCXIZv71U+ErVXs+iF1PDI8U9dinp8M1rWYX/7t1KFbXcf/8mZ7s8M3tQrAN7lqb3EUvGSkKLv/uwA48x7p9P8ujqQtd4Sfe0bZ0JgPhaQmE4BsUpI3tAEoKGwAJBl/QWH7tz8fadhjmL0YmbsYrd+jB7C3Lq/VhP1RhU2fKW4d9oqJqBJAIC21KPOBtPSO4x1tN9T2dth/+PORXR22S6N+ABZNKQM2E5MqdHkAFS9zMllOy3Bxno1zrJblKg2a4y2lwrzVOt1ptzseMbz7f1nbq1IdVanRFRWAjqqUwXMGgH90o4TO7n0tlZV1eaVJmi4ZDfNLGu/5kcWhjfBUTbe9iztd3Feo9Qq1AcBLD/kB2Fp1/olYcZsySyQZBkBVlWFlJbLijt5xz4HKumoA2sqabCySjUX4XCG07wNMYAnBpWJdKgSX7CZDJiyI42k5ucJSUrKSqx4Apr21nH+J15XcC7xOpnFr8wMlzisHNc+rGSZpc6YyaU6hZDNpDoDOJE3GZQByyTwAlUWVT+Xy8YxCTdh2qgLjkssNCpk+llgCIJcb8pFpAIxMyefSALhcpqbpLoPZDCASDAIwmM0r04mXfn2x/xV339ESL2s+GGCVbEOVQDcaG5y8PBgA0NNr+Y/HJ3p6rMf3W9OeEACl05T2hJROE3+oTtdWAUBuVWXXUgBu2WX6zPLGAvaZD7V9/ZFxPo7mbnNTt+UPPy51LPrWW//20L1Hz/zklUP3lvp3ry7zz/ze09PruDy4CqCn1zExGuY4uaNGDWB1MVnc2dOogp4DgCgLPcfr+NP/WRILZ85qDh1IABhfPpvMbPipF9YWr2+/buEXkZ59utWVDABHlQLAjbfvAqCsrUgvlDQY+2rUYjWurUUBWK16AGErHzQxfl8cgM2utdu1P
l+8W6oPruXMVllwLRdcy5ut0vtWBMr1gj/982eXRkZKs/H80Fr/8NqdRyslV8idruqi6ZkEU4ohx8czALRtihNnnndYTQAqrCXfo8li1FsKFqs6sJYE0NxqG+GNTZ+t9o9lAPjHMh136f/9C4HUCu9J8E4NA6C4w7xbI+gFBSCiDUe1AkM7H5QWhD2VDh5lDh0lVR7+nk8TI/VGjUYucESuvnzK+8pp4rDvfJdUZ77x4iFixB5mlTnBb+S4JF8g3Y7TW5rZf/mDZF0tlhkABH8gm3eyeUHZa3Nddv97SPWwOkKWEpuHNyHMtz7esvvGVgFt46I78edPkezmM58lL/XAh6bPXRQYj10tmq4WgcbhbFTd/hGyTHlqYp4Y+dWF9l9faN888vbuybt6BArO5ELmW4+RIYrgCvlOnY6FSrvg+uraA5raA5tHapvZ2mbysftPPkWMXHih0P+8wGY6sN+6f7/AYTq2LPnqz0hP60H+SWKk66YDXTcJ7uHccyvnnhNwYypt2bceJ+dVYG5LYVENk9UKbv7wLQeP3HKAOGyR7SBGHAXSpuFlGp4VElpkQVYmmEXRxcjoE5eIE81tpF8rzyk5oYtAgiTLCMgkLDQsSBfqY589Q4y85//8RV2fYLI9/s2zj33z7OYRa6fc1kUaji/+J5l4cUuLzKkVzJCbPnj4pg8eJg6TDJDF+wY9pK38wo9fXF0UEEXGFhVji+Srd9nIYvQP/g9y5PTTq6efFryLONxxCCaDEho7yPSxr7xEij77atjhC28eCVn5sEVg1sjyMEfJ2f6jny4SIyMTayPjgu/LZZZXC9uPGBKscUse9ZSd9Bm0tipb2wTUxPx19YUjAiN+fGjt63/zCnHiNmZIRl4+F2N5QbQsi5UV0Wq+p3jRAj7Bebo6+uJh/17RAvo0mJwXLTMzEhBtMrS7RAvUu/1Ub7AgLz9nIoxoBTmiS2GxLsVjp1vHeWMUoYAYVfkBe41oj3SJohtfGlTBZ22ivD9dnqMSMr410cLdhGSnx7bCvfxfuExXdYpQ218LlHTPlA7lA4BNb9jpknvnLoj2GyfcdFE7Cug0YbEu5Z4VLUpIqO2vBTTCXcFTFTkh1PbXAo4issdANP1jbYRqnajQUbikdVSSyLdYntimkFF1bqGBAaI14MzKqFQZ/1r5OZOhu9RV4yoDCC7RctfFhIqhfFjliXQiau4X3aKt4ZWNVKxKGtx2jCreSCPcw0Gqx+6e29Gc+r7O8hWGKZFhRHvslGB3tkksJag0dzrQaO6ZHJVxWd20oyYvpeZus4o2Z9bWypM72rq30ee2Ee6KrGgUl0RWNCs7oi4/sSjdMjxvfK13cwUaiurVeyqpYuI0bhn3DF2dPApMLlB9ObGEUazfWFkvmu3lm6WiOdJAkhVtyWHy5U1efbWR6lIo/+FwdK6G+fNTNIeJBjq3DA30aqpu6TRumQzds6IBpea+wxgf2oaFJb31L+vW/zH9arjpoHFiJuUZLNVCCRQYAHUdEp2w8Xkmw506jepqAFhaAoDqahyXpQBM5VTNstILnjsxPjew2Hy4Y+p0qSiHLhfSV15ZZGJJAK596gdbBQ2VAAw9sxEP6X5j1dAzK/FgOh4syaPdHfaBUd+eXtfAZLS3SQFgcDoDwGnIKnWkgdnYW3INhbycqYIFkAxWJEIlSZrN+uRyu61Sa6sSzCSNy1eYF/iUGCP/wnkyPMBKZazUoHKkU6slKb+gzg5XCYI54/HMsVYJgJcnCseukHYGLscrHHLvanZ9Oznru/OtmbEJCYD21gKAqXFNvFAK+ssZR5ZfnZoEuC7iHnhGwvAFnpEBXHG1rpNNZ6TBuXzrDcrfFI8x5wvLIcH6r9Zjj4vxRHnnJo5zxEUmQ2dSskDYGAxv2Drdlfla2yiASMJm0PgBQGmf/anH1FkRGvECMHVWAEhryLIWWf7VRIGXs8hyKG4jUlVQLWCs5/O5//FF7cmX09cd21g1h350IBksS
XO12ZUMLlvMsOBsElVqrBS3aoZhJAwAngPDgufgiRsZ4xXzK52EUg3AHQ1X6tPuqNIdK3kPLMmgxSRw02VSfJZX57BpSWbw9LcfazrQO312IyP/bQ+U+uMoFMhkoFBgZnhi9MJ8cfCuj9/x5Hd+3bG/ouNAqVoRI+P4HMulpKFlMhLr3WRj2ColfnfBYs37hdkDBZ7Zqr81tvAAVDomFSvtZGVKABIuX7jiIzrx8spS8iXBaVLF/Z8j+y6FUzNNLh2AP5713HLACaBdrwJw/oJn315ncRvjeauOSS74AahrbckFv7rWHpmLReY3/gAmlvrh7wW5+Mf6bKdfEhAKWrqr9M0OnBw58t7jSwNz1bvrARR+41ldEjyZRD4zn8gbZapwLgWguLN6Iq+tNcQXNjIfq1qTVa0lJXV5inc1M1y8mktUT7qXWypLebBJpGTQA/C707ZKZXFn6ZJn+UqNKdcuJ4DZy2QeydGbnS8/vkFjq+01jQ6GmNSGTryrw3RpNOQd8xls+og/CqC447DLGEZp39TasKPH0dFLMtaVw1HvZLiixbg+8offLwAwq7lgsvRbbq3Q660bHr9MPCPVKiQ/FGSB6Yf811Vprq/aMBf+/pyfCfq/T/y+333yD55BwVOu2a+u3i+wMqanuG9/m5R0L4y8nxj5wrsfGT4reK977+jZe0fv5hGLVXPd9WQg+69uJrtPnZ7wBIRKAauxsRqBcec0ZG/rJlewvJnkhs/36xJBwVrVdohvE9JzguPLwQmyMsmjJ0nbZ4+JdAt23KLsvFWgzkt4iYYnlXfPAOn26ewmNaxLwysDw4J7qGtqvf9jZMe+xOwF8uJP/II8xlaTsJKuNMksqWQlqkjy+//80m5i5K3vWHvr20mH6XOfIk2K2r//PDHynQe+Q4z03tjSe5OAyHHoiOTgEdKa/MRtpFg71vsLu1HwZP74ouuPLwp4RH0HZfsOkql2X/0GqVwfbzhr1wgYjXve+b7d7xSk+E6dHfznBz5JnPiNF39EjHzvr787c1nAZ73rIzfc9ZEbiMN++mf/QIzMpUk/IRuJICdQU1K8NiWcRSazfPdBMgHw+d+RpO9prySeEdhM93/upgc+LyzXE4vyU2SLHnaLy/t3bvKTt7gTZqHX8dFXZh59hSwOcRAkU/u33L8SI09+5dVffFUgsIJ8/jxHatz/3Ubaf21fIh+CcrVauSoob5BkUksgrb0XHyFZPcweIzHCe5IQVrxJgJnb0kulC6QPsPuGOoNd8L4OvKV1/22CArGFUDp5kXwyu+8m+U5//4XK7g4hW+bUZOEkKS72f+ZWYmSn8zC3wmoVrXm500DlaiAk+7VGgRHNNzU/fW1bt1wdQjNUrtiqtvLlRHYevnj5ZNfmA71ljwHQ0FO+crqICAXFY8vEtzRx3Q4WuWjiYugl0ZxFyXnx8g09omWfRnyiOYKGRst7unQ120zjbd6Ws1e0PhWdB8q3JlgTjzNECY25PPlSZaWKN8byojngEgnRIjBye/loFRuiYqC+4Q3lM1RDM1Srl6t9S2vjq4U/Un6dcC/vaItHALOXSU1wK3wTJCH6qmEyi8fW1YpW1H5XrWgFs0I8ldROzu/0ixYLEpNo0c1tcZVL8dQUlVAbOUsm72yFiJr7xUWqCAwNKIU7DSQ8FR80Gb9KNutWKCiEO6/aaaNteYx0c101bIbyAVX3MpU90eUQTYUkfDKvCbnyWrnRQmWAEj6ZP3EQleYe2FIJ+Zqika5Hm7pONL41tKKRWQmfzGsB4ZOhxzZf+MXHLr2me9mEYQrhTokARRTeaaAyVF+fbhm1tvwfeOyWO2gulfGVJxEzKaoPdX6+PIfH1Cja13XmFNWzIhzu1xpTm+Korx+EA6Jl/1Fq7jRumUviVZEL0mnuNFDzomlOGvG6ZBVCVF4gGuGur97GLSNdnXKvTq04mqsczZWXf3/e0VwV5tgwJzGyBSPLAQhzrHuCubCWb2tmAYxPcW3N7
PgUZzVxNlPBH5LYTIWxWZnVVBIW3uklz3Qp419pVKfDSdOmJl7h6bXi1thkLW7P/na01qVS6fSpWFSl0wc9K2ZnlcWmBGCxKSdHwy0dxoA/3bPL2bO78j9+cKFnl/PyJU/PLqfTZhma2pjfvqQEQJrXSpksACmyecilyAKQxkqxdU6hBNBxvcxWaZgfLsVJFkaShXgGKIWPCrEMAIlOYTx8UCFZWz0x7bi+KbEQBNBXrwDgDhfcoXylSQrAoGW4OPQmNhri9CZWb2QAzI2m6jtUc6MpAMUdLcc2dmhnRuPF7cxYvK5CplOTTUtefr62rqF0q/OzhlAijitZ1DK2NKUYHjyzEdDhgVQsrdJt6Dj5bF5/6EBseBRAZmYWgKKxIW+tSbiTAKBVwBuDVg4ZGHUeOZbPses7M3OlZW9mXt5YlwWgZNbskliCK4l4DRsHZPFspVbuzhZ0ckksnq0MzSwAkGhlfJbjsgUArFwCIHLxhGHP9RnPYtqzYNhzfVa6oRZlpbKMVJaVyiSJYEFjlvmnOLWloDFLEsHl6Q1N09VkX572GQupNKtTcrE0W7LMfOGqhuPHAMQv/li7511Zz1BHS25xRbAOdbZqNFUlF4HWVb169pSmquaB+6KDl+ODl+O9Pdri1tlQy6ss3uGLFV2lfk+OxioAfNLHpVYllm4AfMp3+8ffPXmuFG2eOHu59UCPlAtJ+GBG0ijlQnnWBKCxp7HtgGvy4vzUhYXmvbXFt8Nj42Vx4H2TixzDsPyGgOAZBoBVxyezDIBkBgBUGtn0Wq5KhXMhVCkBQIpMipdF06Wokl6psEoVAOQKaTaTL+4UChyAVJ5JFxgAJgUHoMLAhVKFQoGVSLhCgQUw9sro6CuVAEZfGX3H598BoCCT59UaAPJkIqvWSHI5SS57+sRcda1xaSF8+Pr60yfmAHQd6z6xHOqprVkNR1YjEYfBYFMocimBhHrfHS3qasX5y/59PbaHf7QRpG0/1A5g7MxY+6H2sTNjoWlPrF6tq61aOXGu6vr9sYWVxr0WAMXtzIXAmz7QfP6C7/z5xHtvqhqY3QhXOvtynhfTAJw3KONz+dh8PjlfqN7bBMBqrlsLzgNIrJZUhCQTBaDm9WusVyvbWCqyBTZbYBt2CQTi7KVgw27T2lwKgEEqieQLBqkkIslyDoFs9a6m7awywecBaBipj0trGCl4MEo5q5IXQnFGKQfAquTqClnSm5NpWZlWkvTmACxeLAnGxQvLNXtdqNRNR8IAKhWl79edSe+xZz0JCQBPQlLcHxpNOcxJe4XF5w0AsFdY5rWWgjYAoC4enNeaAVQB04Fok6W0SE8HogCYf3nnPghxdloXjAt022CYCYYFxl1vfb63gVQcNCypOM+ndIm84FKsN8p6SRvwY0++iTyxnyzybqg2GasF7rzh52eGniMN4ayVNOWsWZI/03xDV8sbBITCpXMzS2fJS9nuJ8Nokw+R8UxWxbMqwUp+YSh2YVgQOmfBMFvC6+96L2k9LM00L88IqCMVrpNOl6Cpzb7jd+678U7ixMVTzxMjmpZGdYugn4vnjxe9fxCQahiVROoknbYf+lqYGGmXhvWM4EV3vaWm6y1ku5+pR/qJkayG1Ep+M1OxEhe8nRtaUje0CuwVTsKlNeS84idID/vhL9xn7RCSfyYnMSlwsORM1uje64kTzSO/IUY41yGoBPVtuLXLhQCpqmta7ybvavYJYqRgbuNkAhVYLmVkUoG2659cfPEh8kSNkVSc/zCe+sO4gKRhAmfaUgj4a58layt+6xGS9MIU8uAEj1SSjUuygkDX7R+/8faP30ic+MhnfkWM3PjWIzfdIeBQen2zqz4BfVBSgGqLCarcvZ8YCQycCgyc2jyib2o0NAkmbS6UjvWTzZKWjKQbbe++B01mwWToP/Vi/+mXiMMqVaQN2vu2TmLk9PeX1uYEj71gQMEg+MBjq/nJF8gwobyGFH3732I3OwV0o8jAanhAwI1h+
qrQR5bKYX5A8p1yZh8nbOPuNTR79AJZoeai1VmyG9S1dbyqJeUNq6qu8nQFSvAy0f4cWYVoaW87n/Mgs5R/pHxKtDBUyi1aLihbEO0N5k1UNRiYZKDsMXILKQW2v1S2/HOwtZRvDE2JhhrRwkITZ0XrEyARL7opo4s3hoLzYv1GS51opIYgBfGGp/twWIqsUjW3TdTkKr+lwTmqyEOyIFqAQmkQLbJsqROt0ARD8fdR5mVGg+Ubypx/4Zd0FysPRiVeGEo85BRUbtbAaPnaRtKQaJXTswFSJ7pqjDx1qvxBO47WA1R9AmiQFY/Ck6NzSdNAJ9vpOoAigqMo9pVkt1npr1K4V5iownGET+a1QKkvH1Vg6JpYiincKXKRKTX3aKi8ul1Z3172GAAyi2h0NMInsy2S4mnusgyVNmDpIBOyrhq8uvyaKlFRlTDk5eXJWvZWsmLztphaK//Y6yk1d768Lv3Ud0jP3rZoaC1vdsjpyPdJr4iU0Lqyx8Qo2EcAAvPly3vE6FoBN+2haDDnpiIpET6ZbUGruRMO923hDf3/z37aioJaNEMh5xWtRqOI1Sx3HlG+/GRQV4rGQBURlG6Z/7qYW6QSDTRo2V8+H4UShdejQQgFS3VbhMN9W+js4v2FlaI51tak22R+XKWM7q3f6S50NGDFaz8vos+dE8/rXlXfVv4gIBcoT0cT0ecuIijdMjRQzZK9Jq4aErVopp6IPndazX1nQSncUx6yhc5W5ENUNciIaOq2yHCvx9kuIrbV3P8vPjScT/RGsL4AAAAASUVORK5CYII=) <br> Beirut Skyline by <a href="https://twitter.com/gorillasu">Ahmad Moussa</a> ``` #@title Setup #@markdown Please execute this cell by pressing the _Play_ button #@markdown on the left. For setup, #@markdown **you need to run this cell, #@markdown then choose Runtime -> Restart Runtime from the menu, #@markdown and then run the cell again**. It should remind you to #@markdown do this after the first run. #@markdown Setup can take 5-10 minutes, but once it is complete it usually does not need to be repeated #@markdown until you close the window. #@markdown **Note**: This installs the software on the Colab #@markdown notebook in the cloud and not on your computer. 
# https://stackoverflow.com/a/56727659/1010653 # Add a gpu check # (this can get better over time) from google.colab import output nvidia_output = !nvidia-smi --query-gpu=memory.total --format=noheader,nounits,csv gpu_memory = int(nvidia_output[0]) if gpu_memory < 14000: output.eval_js('new Audio("https://upload.wikimedia.org/wikipedia/commons/0/05/Beep-09.ogg").play()') warning_string = f"--> GPU check: ONLY {gpu_memory} MiB available: WARNING, THIS IS PROBABLY NOT ENOUGH <--" print(warning_string) output.eval_js('alert("Warning - low GPU (see message)")') else: print(f"GPU check: {gpu_memory} MiB available: this should be fine") # patch for colab cuda from IPython.utils import io import os if not os.path.isfile("first_init_complete"): with io.capture_output() as captured: !pip uninstall -y torch torchvision torchaudio !pip install torch torchvision torchaudio with io.capture_output() as captured: !git clone https://github.com/openai/CLIP # !pip install taming-transformers !git clone https://github.com/CompVis/taming-transformers.git !rm -Rf pixray !git clone https://github.com/dribnet/pixray !pip install ftfy regex tqdm omegaconf pytorch-lightning !pip install kornia !pip install imageio-ffmpeg !pip install einops !pip install torch-optimizer !pip install easydict !pip install braceexpand !pip install git+https://github.com/pvigier/perlin-numpy # ClipDraw deps !pip install svgwrite !pip install svgpathtools !pip install cssutils !pip install numba !pip install torch-tools !pip install visdom !git clone https://github.com/BachiLi/diffvg %cd diffvg # !ls !git submodule update --init --recursive !python setup.py install %cd .. 
output.clear()
import sys
# Make the freshly cloned pixray checkout importable.
sys.path.append("pixray")

result_msg = "setup complete"
import IPython
if not os.path.isfile("first_init_complete"):
    # put stuff in here that should only happen once
    !mkdir -p models
    # Sentinel file: its presence tells the next run that first-time setup already happened.
    os.mknod("first_init_complete")
    result_msg = "Please choose Runtime -> Restart Runtime from the menu, and then run Setup again"

# Inject the status message directly into the Colab output area via JavaScript,
# then bump the page font size so the instruction is hard to miss.
js_code = f'''
document.querySelector("#output-area").appendChild(document.createTextNode("{result_msg}"));
'''
js_code += '''
for (rule of document.styleSheets[0].cssRules){
  if (rule.selectorText=='body') break
}
rule.style.fontSize = '30px'
'''
display(IPython.display.Javascript(js_code))

#@title Settings

#@markdown Enter a description of what you want to draw - I usually add #pixelart to the prompt.
#@markdown The renderer can also be swapped with other models such
#@markdown as VQGAN or CLIPDraw. <br>

# Colab form fields: the `#@param` comments render as an interactive settings form.
prompts = "Beirut Skyline. #pixelart" #@param {type:"string"}
aspect = "widescreen" #@param ["widescreen", "square"]
drawer = "pixel" #@param ["vqgan", "pixel", "line_sketch", "clipdraw"]

#@markdown When you have the settings you want, press the play button on the left.
#@markdown The system will save these and start generating images below.
#@markdown When that is done you can change these
#@markdown settings and see if you get different results. Or if you get
#@markdown impatient, just select "Runtime -> Interrupt Execution".
#@markdown Note that the first time you run it may take a bit longer
#@markdown as necessary files are downloaded.
#@markdown #@markdown *Advanced: you can also edit this cell and add add additional #@markdown settings, combining settings from different notebooks.* # Simple setup import pixray # these are good settings for pixeldraw pixray.reset_settings() pixray.add_settings(prompts=prompts, aspect=aspect) pixray.add_settings(quality="better", scale=2.5) pixray.add_settings(drawer=drawer) pixray.add_settings(display_clear=True) # by default we'll turn on textoff pixray.add_settings(vector_prompts="textoff2") #### YOU CAN ADD YOUR OWN CUSTOM SETTING HERE #### # this is the example of how to run longer with less frequent display # pixray.add_settings(iterations=500, display_every=50) settings = pixray.apply_settings() pixray.do_init(settings) pixray.do_run(settings) ```
github_jupyter
```
# Require the packages
require(ggplot2)
library(repr)

# Default notebook figure size (inches).
options(repr.plot.width=15, repr.plot.height=4.5)

# Result directories for the two experiment families (lemma id 140).
ladder_results_dir <- "../resources/results/ladder_results_sensem/140"
bootstrap_results_dir <- "../resources/results/results_semisupervised_sensem_7k/140"

# Accumulator: one row per (iteration, sense, count, experiment).
lemma_data <- data.frame(iteration=integer(), sense=character(), count=integer(), experiment=character())

# Load the naive-bootstrap runs (per-iteration sense distributions).
for(exp in c("bow_logreg", "wordvec_mlp_2_0", "wordvecpos_mlp_2_0")) {
    data <- read.csv(paste(bootstrap_results_dir, exp, "targets_distribution", sep="/"), header = F)
    names(data) <- c("iteration", "sense", "count")
    data$experiment <- exp
    lemma_data <- rbind(lemma_data, data)
}

# Load the ladder-network runs (population growth per iteration).
for(exp in c("vec", "vecpos")) {
    data <- read.csv(paste(ladder_results_dir, exp, "population_growth", sep="/"), header = F)
    names(data) <- c("iteration", "sense", "count")
    data$experiment <- exp
    lemma_data <- rbind(lemma_data, data)
}

# Fix facet order, then replace the level names with human-readable labels.
lemma_data$experiment <- factor(lemma_data$experiment, levels=c("bow_logreg", "wordvec_mlp_2_0", "wordvecpos_mlp_2_0", "vec", "vecpos"))
levels(lemma_data$experiment) <- c("Naive Bootstrap\nBag-of-Words\n& Logistic Regression", "Naive Bootstrap\nWord Embeddings\n& Multilayer Perceptron", "Naive Bootstrap\nWord Embeddings\nand PoS\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\nand PoS\n& Multilayer Perceptron")

# Stacked-area plot of sense population percentages, one facet per experiment.
p <- ggplot(lemma_data, aes(x=iteration, y=count, fill=sense))
p <- p + facet_wrap(~ experiment, scales = 'free', ncol=5)
p <- p + geom_area(position="fill")
p <- p + scale_x_continuous(breaks=seq(0, 20, 2))
# y axis shows 0-100% although the data are 0-1 fractions.
p <- p + scale_y_continuous(breaks=seq(0, 1, 0.1), labels=seq(0, 100, 10))
p <- p + labs(title="Population percentage per sense for lemma \"limitar\"", y="Percent", x="Iteration Number")
p <- p + scale_fill_brewer(name="Sense", palette = "Accent", direction = 1, breaks=c("limitar-04", "limitar-03", "limitar-02", "limitar-01"))
p <- p + theme(
    plot.title=element_text(size=15, face="bold", margin=margin(10, 0, 10, 0), vjust=1, lineheight=0.6),
    strip.text.x=element_text(size=10),
    axis.title.x=element_text(size=12, margin=margin(10, 0, 0, 0)),
    axis.title.y=element_text(size=12, margin=margin(0, 10, 0, 0)),
    legend.title=element_text(face="bold", size=13),
    legend.text=element_text(size=11),
    legend.key.height=unit(1.5,"line")
)
p

# Save the plot
ggsave("~/Google Drive/Posgrado/WSD with WE/papers/esslli/plots/limitar.png", plot=p, width=15, height=4.5)

library(grid)
library(gridExtra)
options(repr.plot.width=10.5, repr.plot.height=18)
# NOTE(review): p1..p4 are not defined anywhere in this snippet — this ggsave call
# presumably relies on plots built in earlier (not shown) cells; verify before running.
ggsave("plots/population_progres.png", plot=grid.arrange(p1, p2, p3, p4, ncol = 1), width=10.5, height=18)

levels(lemma_data$experiment)
```
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/4_image_classification_zoo/Classifier%20-%20Weed%20Species%20Classification%20-%20Hyperparameter%20Tuning%20using%20Monk.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Table of contents ## Install Monk ## Using pretrained model for classifying weather type based on images ## Training a classifier from scratch <a id='0'></a> # Install Monk ## Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` ## Install Monk Manually (Not recommended) ### 
Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git ### Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` # Used trained classifier for demo ``` #Using pytorch backend # When installed using pip from monk.pytorch_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); 
#sys.path.append("monk_v1/monk/"); #from monk.pytorch_prototype import prototype # Download trained weights ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g" -O cls_weather_trained.zip && rm -rf /tmp/cookies.txt ! unzip -qq cls_weather_trained.zip ls workspace/Project-Weather # Load project in inference mode gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50", eval_infer=True); #Other trained models - uncomment #gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet101", eval_infer=True); # Infer img_name = "workspace/test/test1.jpg" predictions = gtf.Infer(img_name=img_name); from IPython.display import Image Image(filename=img_name) img_name = "workspace/test/test2.jpg" predictions = gtf.Infer(img_name=img_name); from IPython.display import Image Image(filename=img_name) ``` # Training custom classifier from scratch ## Dataset - Credits: https://data.mendeley.com/datasets/4drtyfjtfy/1 ## Download ``` ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ" -O weather.zip && rm -rf /tmp/cookies.txt ! 
unzip -qq weather.zip ``` ## Training ``` # Using mxnet-gluon backend #from monk.gluon_prototype import prototype # For pytorch backend from monk.pytorch_prototype import prototype # For Keras backend #from monk.keras_prototype import prototype # Create Project and Experiment gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50"); gtf.Default(dataset_path="weather/train", model_name="wide_resnet50_2", freeze_base_network=False, num_epochs=2); ``` ### How to change hyper parameters and models - Docs - https://github.com/Tessellate-Imaging/monk_v1#4 - Examples - https://github.com/Tessellate-Imaging/monk_v1/tree/master/study_roadmaps/1_getting_started_roadmap ``` gtf.update_batch_size(8); # Very important to reload post updates gtf.Reload(); #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` ## Validating on the same dataset ``` # Using mxnet-gluon backend #from monk.gluon_prototype import prototype # For pytorch backend from monk.pytorch_prototype import prototype # For Keras backend #from monk.keras_prototype import prototype # Create Project and Experiment gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50", eval_infer=True); # Load dataset for validaion gtf.Dataset_Params(dataset_path="weather/train"); gtf.Dataset(); # Run validation accuracy, class_based_accuracy = gtf.Evaluate(); ```
github_jupyter
## $k$-means clustering: An example implementation in Python 3 with numpy and matplotlib. The [$k$-means](https://en.wikipedia.org/wiki/K-means_clustering) algorithm is an unsupervised learning method for identifying clusters within a dataset. The $k$ represents the number of clusters to be identified, which is specified by the user before starting the algorithm. The algorithm goes like this: * Initialize the $k$ cluster centroids. * Repeat: 1. Cluster assignment: Assign each data point to the nearest cluster centroid. 2. Cluster updating: For each cluster centroid, average the locations of it's corresponding points and re-assign the centroid to that location. The last two steps are repeated until stopping criteria are met such as a maximum number of iterations or the centroid velocity drops below a threshold. The results of the algorithm can be highly dependent on the cluster initialization step, especially when there are a large number of clusters and data points. Performance be improved in a few different ways such as running it multiple times and averaging the results or using different initalization methods such as [$k$-means plus plus](https://en.wikipedia.org/wiki/K-means%2B%2B). Here, we will initialize the $k$ cluster centroids by selecting $k$ random data points. Mathematically, the cluster assignment step can be written as: $c^{(i)} = argmin_{k} \left\lVert x^{(i)} - \mu_k\right\rVert^2$ where $c^{(i)}$ is the centroid closest to sample $x^{(i)}$ and $\mu_k$ represents the $k$-th centroid. Similarly, the cluster update step can be written as: $\mu_k = \frac{1}{n}[x^{(k_1)}+x^{(k_2)}+...+x^{(k_n)}]$ where, again $\mu_k$ represents the $k$-th centroid and $x^{(k_n)}$ are the training examples assigned to that centroid. First, some imports. 
``` import numpy as np np.random.seed(0) import matplotlib.pyplot as plt %matplotlib inline %config InlineBackend.figure_format = 'retina' from sklearn.datasets import make_blobs ``` Next we'll define some functions based on steps in the K-means algorithm. ``` def initialize_clusters(points, k): """Initializes clusters as k randomly selected points from points.""" return points[np.random.randint(points.shape[0], size=k)] # Function for calculating the distance between centroids def get_distances(centroid, points): """Returns the distance the centroid is from each data point in points.""" return np.linalg.norm(points - centroid, axis=1) ``` Here we'll generate some data using [scikit-learn](http://scikit-learn.org)'s [`make_blobs`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs) function. For this example we'll generate a dataset with three clusters. Using this function will give us access to the actual class labels for each group so we can assess accuracy later if we would like to. Normally when using K-means, you won't know the cluster assignments or the number of clusters in the dataset! ``` # Generate dataset X, y = make_blobs(centers=3, n_samples=500, random_state=1) # Visualize fig, ax = plt.subplots(figsize=(4,4)) ax.scatter(X[:,0], X[:,1], alpha=0.5) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$'); ``` Now let's implement K-means using k = 3. 
``` k = 3 maxiter = 50 # Initialize our centroids by picking random data points centroids = initialize_clusters(X, k) # Initialize the vectors in which we will store the # assigned classes of each data point and the # calculated distances from each centroid classes = np.zeros(X.shape[0], dtype=np.float64) distances = np.zeros([X.shape[0], k], dtype=np.float64) # Loop for the maximum number of iterations for i in range(maxiter): # Assign all points to the nearest centroid for i, c in enumerate(centroids): distances[:, i] = get_distances(c, X) # Determine class membership of each point # by picking the closest centroid classes = np.argmin(distances, axis=1) # Update centroid location using the newly # assigned data point classes for c in range(k): centroids[c] = np.mean(X[classes == c], 0) ``` Once we've finished running the algorithm, we can visualize the classified data and our calculated centroids locations. ``` group_colors = ['skyblue', 'coral', 'lightgreen'] colors = [group_colors[j] for j in classes] fig, ax = plt.subplots(figsize=(4,4)) ax.scatter(X[:,0], X[:,1], color=colors, alpha=0.5) ax.scatter(centroids[:,0], centroids[:,1], color=['blue', 'darkred', 'green'], marker='o', lw=2) ax.set_xlabel('$x_0$') ax.set_ylabel('$x_1$'); ``` Look's pretty good! In another post I'll discuss some limitations of the $k$-means algorithm and assess what happens when $k$ is chosen to be greater than or less than the actual number of clusters in your dataset.
github_jupyter
``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV from sklearn.preprocessing import StandardScaler, LabelEncoder, OrdinalEncoder from sklearn.pipeline import make_pipeline from category_encoders import OneHotEncoder from sklearn.metrics import f1_score, precision_score, recall_score from sklearn.linear_model import LogisticRegression from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, HistGradientBoostingClassifier, RandomForestClassifier, BaggingClassifier, ExtraTreesClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.feature_selection import RFE import warnings warnings.simplefilter(action='ignore', category=FutureWarning) # read in data train_values = pd.read_csv('data/Proj5_train_values.csv') train_labels = pd.read_csv('data/Proj5_train_labels.csv') ``` #### Label Encode ``` # Label Encode categorical features le = LabelEncoder() train_enc = train_values.apply(le.fit_transform) train_enc ``` ## Modeling with 10% of data - For faster processing ``` # grab first 10% of rows train_enc_10pct = train_enc.head(int(len(train_values) * 0.1)) train_labels_10pct = train_labels.head(int(len(train_labels) * 0.1)) ``` #### Baseline + TTS ``` # baseline model train_labels_10pct['damage_grade'].value_counts(normalize = True) # establish X + y X = train_enc_10pct.drop(columns = ['building_id']) y = train_labels_10pct['damage_grade'] # tts X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y, random_state = 123) ``` #### Modeling ``` # Random Forest pipe_forest = make_pipeline(StandardScaler(), RandomForestClassifier(n_jobs = -1, random_state = 123)) params = {'randomforestclassifier__max_depth' : [6, 7, 8, 9, 10, 11], 'randomforestclassifier__max_features' : [15, 20, 30, 
35]} grid_forest = GridSearchCV(pipe_forest, param_grid = params) grid_forest.fit(X_train, y_train) print(f'Train Score: {grid_forest.score(X_train, y_train)}') print(f'Test Score: {grid_forest.score(X_test, y_test)}') grid_forest.best_params_ # grab feature importances pipe_forest_fi = make_pipeline(StandardScaler(), RandomForestClassifier(n_jobs = -1, random_state = 123, max_depth = 11, max_features = 15)) pipe_forest_fi.fit(X_train, y_train) forest_fi_df = pd.DataFrame({'importances': pipe_forest_fi.named_steps['randomforestclassifier'].feature_importances_, 'name': X_train.columns}).sort_values('importances', ascending = False) forest_fi_df[:5] # test to ensure X_train.columns + feature_importances are same length print(len(X_train.columns)) print(len(pipe_forest_fi.named_steps['randomforestclassifier'].feature_importances_)) # Extra Trees pipe_trees = make_pipeline(StandardScaler(), ExtraTreesClassifier(n_jobs = -1, random_state = 123)) params = {'extratreesclassifier__max_depth' : [6, 7, 8, 9, 10, 11], 'extratreesclassifier__max_features' : [15, 20, 30, 35]} grid_trees = GridSearchCV(pipe_trees, param_grid = params) grid_trees.fit(X_train, y_train) print(f'Train Score: {grid_trees.score(X_train, y_train)}') print(f'Test Score: {grid_trees.score(X_test, y_test)}') grid_trees.best_params_ # grab feature importances pipe_trees_fi = make_pipeline(StandardScaler(), ExtraTreesClassifier(n_jobs = -1, random_state = 123, max_depth = 6, max_features = 35)) pipe_trees_fi.fit(X_train, y_train) trees_fi_df = pd.DataFrame({'importances': pipe_trees_fi.named_steps['extratreesclassifier'].feature_importances_, 'name': X_train.columns}).sort_values('importances', ascending = False) trees_fi_df[:5] # test to ensure X_train.columns + feature_importances are same length print(len(X_train.columns)) print(len(pipe_trees_fi.named_steps['extratreesclassifier'].feature_importances_)) earthquake = pd.merge(train_values.head(int(len(train_values) * 0.1)), 
train_labels.head(int(len(train_labels) * 0.1)), on = 'building_id') earthquake sns.scatterplot(x = 'area_percentage', y = 'height_percentage', data = earthquake, hue = 'damage_grade'); sns.boxplot(x = 'damage_grade', y = 'height_percentage', data = earthquake); # referenced https://seaborn.pydata.org/generated/seaborn.catplot.html sns.catplot(x = "foundation_type", data = earthquake, kind = "count", hue = 'damage_grade').set(xlabel = 'Roof Type', ylabel = 'Frequency'); plt.suptitle('Damage Grade by Roof Type', y = 1.05); ```
github_jupyter
# Biological question: Are there differences in the binding distance of the same TF-pair in different clusters? - PART2 This notebook can be used to analyse if there are differences in the binding distance of the same TF-pair in two different clusters. In "Outline of this notebook" the general steps in the notebook are explained. The details for each general step are described directly in the notebook for the general step. **Needed input for notebook:** .pkl file with performed market basket analysis for the second of two chosen clusters for comparison + -csv file of the results of the distance analysis of the first cluster (you can also have a look at TF-COMB docs) #### Exemplary Data: WP2 - A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts vs. A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts ### Outline of this notebook: This notbook presents the second part of the analysis to find out if there is a difference in the binding distance of the same TF-pair in different clusters. If you **have not done the PART 1** yet, **do the PART 1 first**. Here the second cluster is analysed. The **results of the first cluster are then imported**, merged together and the comparison of the binding distance are done. 1. Implementation of distance analysis for second cluster and transfering in dataframe 2. Importing the results of the first cluster 3. Merging the results of first and second cluster 4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters 5. Comparing the binding distances between the same TF-pairs in the two clusters 6. Possibility to have a closer look and to compare results of different clusters ## 1. Implementation of distance analysis for cluster 0. Creation of folders for the structure if necessary, so the needed path for the output are existing 1. 
Read in **path of the .pkl file of the first chosen cluster from already performed market basket analysis** (alternative: perform normal market basket analysis) **(-> adjust for cluster)** 2. Using .simplify_rules(), so the duplicates of a TF-pair (because of the two orientations TF1-TF2 or TF2-TF1) are not displayed 3. Selection of TF-pairs by cosine and zscore 4. Implementation of distance analysis with considering the noisiness (lower noise, "clearer/better" analysis) 5. Creation of dataframe df_distance_clusterPART2 with the results of the distance analysis of the second cluster so it can be easily merged with the dataframe of the first cluster 6. Reducing the TF co-occurrences by selecting the TF co-occurrences by peak hight above 2.8 (good proven boundary from other applications of distance analysis) ``` # The following lines, initally check if all file/paths are available. #If a result folder does not exist it is created automatically import os import pathlib if not os.path.exists("./results/distanceresultsfordifference/"): pathlib.Path("./results/distanceresultsfordifference/").mkdir(parents=True, exist_ok=True) if not os.path.exists("./results/differencedistance_distributionplot/"): pathlib.Path("./results/differencedistance_distributionplot/").mkdir(parents=True, exist_ok=True) if not os.path.exists("./results/differencedistance_plot/"): pathlib.Path("./results/differencedistance_plot/").mkdir(parents=True, exist_ok=True) if not os.path.exists("./results/differencedistance_table/"): pathlib.Path("./results/differencedistance_table/").mkdir(parents=True, exist_ok=True) import tfcomb.objects clusterPART2_object = tfcomb.objects.CombObj().from_pickle("/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/analyse/results/wp2/main/A8CPH_esophagus_muscularis_mucosa/A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.pkl") clusterPART2_object clusterPART2_object.simplify_rules() clusterPART2_object_selected = clusterPART2_object.select_significant_rules() 
clusterPART2_object_selected.analyze_distances(threads=6) clusterPART2_object_selected.distObj.evaluate_noise(threads=6) clusterPART2_object_selected.distObj.rank_rules() df_distance_clusterPART2=clusterPART2_object_selected.distObj.peaks df_distance_clusterPART2=df_distance_clusterPART2[(df_distance_clusterPART2["Peak Heights"]>2.8)] df_distance_clusterPART2 ``` ## 2. Importing the results of the first cluster 1. Import of results of the first cluster and saving them in df_distance_clusterPART1_csv dataframe ``` import pandas as pd df_distance_clusterPART1_csv=pd.read_csv("./results/distanceresultsfordifference/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts.csv") df_distance_clusterPART1_csv ``` ## 3. Merging the results of first and second cluster 1. Merging the results of the distance analysis for the TF-pairs that are in first and second cluster in new dataframe df_distancedifference_2clusters so the binding distance between the two clusters can be compared. The columns with the suffix CPART1 have the information of the first cluster and columns with CPART2 have the information of the second cluster. ``` df_distancedifference_2clusters=df_distance_clusterPART1_csv.merge(df_distance_clusterPART2,suffixes=('_CPART1', '_CPART2'), left_on =["TF1","TF2"], right_on = ["TF1","TF2"]) df_distancedifference_2clusters pd.set_option('max_columns', None) pd.set_option('max_rows', 50) ``` ## 4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters 1. Calculation of the difference between the binding distance per TF-pair 2. Calcultation of the average peak Height of a TF-pair as an additional assessment factor 3. Selection of TF-pairs above 100 counts as TF-pairs of interest so they so probability is higher that they are important for cluster 4. Sorting the dataframe df_distancedifference_2clusters by the difference in the binding distance for plotting them 5. 
Calculating the Distribution in the difference in binding distance. This can be used for comparing different two clusters (2 clusters same celltype vs 2 clusters different celltype) concering their distribution in difference in binding distance 6. Plotting the Distribution of the difference in binding distance and saving the **distribution plot .png file (-> adjust for cluster)** ``` df_distancedifference_2clusters['Difference between Distance'] = abs(df_distancedifference_2clusters['Distance_CPART1'] - df_distancedifference_2clusters['Distance_CPART2']) df_distancedifference_2clusters['Average Peak Height'] = ((df_distancedifference_2clusters['Peak Heights_CPART1'] + df_distancedifference_2clusters['Peak Heights_CPART2'])/2) df_distancedifference_2clusters['TF-pair'] = df_distancedifference_2clusters['TF1'] + " + " + df_distancedifference_2clusters['TF2'] df_distancedifference_2clusters = df_distancedifference_2clusters[(df_distancedifference_2clusters["TF1_TF2_count_CPART1"]>100) &(df_distancedifference_2clusters["TF1_TF2_count_CPART2"]>100)] df_distancedifference_2clusters_sorted=df_distancedifference_2clusters.sort_values(by=['Difference between Distance']) df_differencedistance_distribution=df_distancedifference_2clusters_sorted['Difference between Distance'].value_counts() df_differencedistance_distribution df_distancedifference_2clusters_sorted import matplotlib.pyplot as plt differencedistance = df_differencedistance_distribution.keys() occurrence = df_differencedistance_distribution plt.figure(figsize=(15, 5)) plt.scatter(differencedistance,occurrence) plt.xticks plt.grid(True) plt.xlabel('Difference in Distance') plt.ylabel('Occurrence') plt.title('Distribution of the Difference in binding Distance') plt.savefig("./results/differencedistance_distributionplot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png") plt.show() ``` ## 5. 
Comparing the binding distances between the same TF-pairs in the two clusters 1. Selecting the difference in binding distance over 10 since now we only want to look at the TF-pairs with a difference in binding distance 2. Sorting the TF-Pairs by noisiness of the first cluster and selecting the first 100. The noisiness is selected as an assessment factor since it distinguishes a clear signal from noisy signal in the distance analysis what was ranged as important factor for the qualitiy of the TF-pair binding distance. The reason for the selection of 100 TF-pairs is for the readability of the figure and can also be adjusted reasonable (as well as the other factors) 3. Sorting the dataframe df_distancedifference_2clusters_withoutlowdifference_noisinesstop100 by the difference in the binding distance for plotting them and **saving finished distance difference table in .csv file (-> adjust for cluster)** 10. Plotting the difference in distance over the TF-pairs and saving the **difference in distnace plot in a .png file (-> adjust for cluster)** ``` df_distancedifference_2clusters_withoutlowdifference = df_distancedifference_2clusters[(df_distancedifference_2clusters["Difference between Distance"]>10)] df_distancedifference_2clusters_withoutlowdifference_noisinesstop100=df_distancedifference_2clusters_withoutlowdifference.sort_values(by=['Noisiness_CPART1']).head(100) df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted=df_distancedifference_2clusters_withoutlowdifference_noisinesstop100.sort_values(by=['Difference between Distance']) df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted.to_csv("./results/differencedistance_table/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.csv") differencedistance = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Difference between 
Distance'] TFpairs = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['TF-pair'] averagepeakheight = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Average Peak Height'] plt.figure(figsize=(20, 5)) plt.scatter(TFpairs,differencedistance, c=averagepeakheight, cmap = 'cividis_r') plt.xticks(rotation='vertical') plt.grid(True, axis = 'y') plt.xlabel('TF-pairs') plt.ylabel('Difference in Distance') cbar = plt.colorbar() cbar.set_label("average Peak Height") plt.title('Difference in Distance over the TF-pairs') plt.savefig("./results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png") plt.show() ``` ## 5. Possibility to have a closer look and to compare results of different clusters #### Possibility to import other plots from different two clusters for comparisons (-> adjust for cluster) ``` from PIL import Image image = Image.open("/mnt/workspace_stud/stud7/Datenanalyse-2021/wp6/analyse/results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__ACCQ1_colon_transverse_c3_Enterocytes.png") image.show() ```
github_jupyter
``` #!pwd import pandas as pd import os import string from nltk.corpus import stopwords from nltk import word_tokenize, WordNetLemmatizer from nltk import stem, pos_tag from nltk.corpus import wordnet as wn from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer import os import re cwd = os.getcwd() filepath = cwd+'/data/df_test_indian.csv' outputfilepath = cwd+'/data/df_test_indian_with_negation.csv' df = pd.read_csv(filepath) df.head() # ffile1 = open(filepath,"r", encoding = "ISO-8859-1") # df_standard = pd.read_csv(ffile1, encoding = "utf-8") # df_standard.drop(['restaurant_id', 'date', 'review_id'], inplace=True, axis=1) # df_standard.rename(columns={'text': 'review', 'Sentiment': 'sentiment'}, inplace=True) # df_standard.head() # mask = df_standard.sentiment == 'negative' # column_name = 'sentiment' # df_standard.loc[mask, column_name] = -1 # mask = df_standard.sentiment == 'positive' # column_name = 'sentiment' # df_standard.loc[mask, column_name] = 1 # df_standard = df_standard.rename(columns = {'stars':'rating'}) # df_standard.head() # df = pd.concat([df, df_standard], axis=0) # df = df.reset_index().drop('index', axis=1) ratings = df['rating'] sentiments = df['sentiment'] reviews = df['review'] sentiment_trans = [] for r in ratings.keys(): rating = ratings.loc[r] sentiment = sentiments.loc[r] sentiment = int(sentiment) if rating <= 3: sentiment = -1 else: sentiment = sentiment sentiment_trans.insert(r, sentiment) sentiments = pd.Series(sentiment_trans, index=ratings.keys()) df = pd.concat([ratings, reviews, sentiments], axis = 1) df.columns = ['rating', 'review', 'sentiment'] df.head() df stop = stopwords.words('english') snowball = stem.snowball.EnglishStemmer() wnl = WordNetLemmatizer() reviews = df['review'] def negation_Processing(reviews): def neg_tag(text): transformed = 
re.sub(r"\b(?:never|nothing|nowhere|noone|none|not|haven't|hasn't|hasnt|hadn't|hadnt|can't|cant|couldn't|couldnt|shouldn't|shouldnt|won't|wont|wouldn't|wouldnt|don't|dont|doesn't|doesnt|didn't|didnt|isnt|isn't|aren't|arent|aint|ain't|hardly|seldom)\b[\w\s]+[^\w\s]", lambda match: re.sub(r'(\s+)(\w+)', r'\1NEG_\2', match.group(0)), text, flags=re.IGNORECASE) return(transformed) negation_reviews = [] # Append elements to the list for doc in reviews: trans = neg_tag(doc) negation_reviews.append(trans) return negation_reviews # Remove all the punctuations and numbers def removePunc(reviews): comwoPunc = str.maketrans({key: None for key in string.punctuation + string.digits}) for i in reviews.keys(): comment = reviews.loc[i] reviewswoPunc = reviews.replace(comment, comment.translate(comwoPunc)) return reviewswoPunc # Convert all characters to Lower case def convToLow(words): reviewsToLow = words.apply(str.lower) return reviewsToLow # Stopwords removal def removeStop(text, stop): for i in text.keys(): comment = text.loc[i] comment_nostop = " ".join(filter(lambda word: word not in stop, comment.split())) reviewswoStop = text.replace(comment, comment_nostop) return reviewswoStop # Tokenization def token(text): reviewsToken = text.apply(word_tokenize) return reviewsToken # pos tagging def posTag(words): reviews_pos = words.apply(pos_tag) reviews_wnpos = [] for i in reviews_pos.keys(): comment = reviews_pos.loc[i] comment_wnpos = [] for t in comment: t = list(t) tag = t[1] if t[1].startswith('J'): t[1] = wn.ADJ elif t[1].startswith('V'): t[1] = wn.VERB elif t[1].startswith('N'): t[1] = wn.NOUN elif t[1].startswith('R'): t[1] = wn.ADV else: del t t = None if t is not None: comment_wnpos.append(t) else: pass reviews_wnpos.append(comment_wnpos) reviews_wnpos = pd.Series(reviews_wnpos, index=reviews_pos.keys()) return reviews_wnpos # Lemmatization def lemma(text, wnl): reviews_lem = [] for i in text.keys(): comment = text.loc[i] comment_lem = [] for t in comment: word = t[0] tag 
= t[1] t = wnl.lemmatize(word, pos=tag) comment_lem.append(t) reviews_lem.append(comment_lem) allReviews = [] for j in reviews_lem: reviews = ' '.join(j) allReviews.append(reviews) reviewsLemma = pd.Series(allReviews, index=text.keys()) return reviewsLemma if __name__ == '__main__': reviews = negation_Processing(reviews) reviews = pd.Series(reviews) reviews = removePunc(reviews) reviews = convToLow(reviews) reviews = removeStop(reviews, stop) reviews = token(reviews) reviews = posTag(reviews) final_reviews = lemma(reviews,wnl) ratings = df['rating'] sentiments = df['sentiment'] df = pd.concat([ratings, final_reviews, sentiments], axis = 1) df.columns = ['rating', 'review', 'sentiment'] df.to_csv(outputfilepath, encoding='utf-8') ```
github_jupyter
# TV Script Generation In this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new ,"fake" TV script, based on patterns it recognizes in this training data. ## Get the Data The data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text. >* As a first step, we'll load in this data and look at some samples. * Then, you'll be tasked with defining and training an RNN to generate a new script! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # load in data import helper data_dir = './data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) ``` ## Explore the Data Play around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\n`. ``` view_line_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) lines = text.split('\n') print('Number of lines: {}'.format(len(lines))) word_count_line = [len(line.split()) for line in lines] print('Average number of words in each line: {}'.format(np.average(word_count_line))) print('The lines {} to {}:'.format(*view_line_range)) print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]])) ``` --- ## Implement Pre-processing Functions The first thing to do to any dataset is pre-processing. 
Implement the following pre-processing functions below: - Lookup Table - Tokenize Punctuation ### Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call `vocab_to_int` - Dictionary to go from the id to word, we'll call `int_to_vocab` Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)` ``` import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function # return tuple return (None, None) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) ``` ### Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids. Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( **.** ) - Comma ( **,** ) - Quotation Mark ( **"** ) - Semicolon ( **;** ) - Exclamation mark ( **!** ) - Question mark ( **?** ) - Left Parentheses ( **(** ) - Right Parentheses ( **)** ) - Dash ( **-** ) - Return ( **\n** ) This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||". 
``` def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenized dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function return None """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) ``` ## Pre-process all the data and save it Running the code cell below will pre-process all the data and save it to file. You're encouraged to look at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail, but you do not need to change this code. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # pre-process training data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) ``` # Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() ``` ## Build the Neural Network In this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions. ### Check Access to GPU ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('No GPU found. Please use a GPU to train your neural network.') ``` ## Input Let's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions. 
You can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual. ``` data = TensorDataset(feature_tensors, target_tensors) data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size) ``` ### Batching Implement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes. >You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`. For example, say we have these as input: ``` words = [1, 2, 3, 4, 5, 6, 7] sequence_length = 4 ``` Your first `feature_tensor` should contain the values: ``` [1, 2, 3, 4] ``` And the corresponding `target_tensor` should just be the next "word"/tokenized word value: ``` 5 ``` This should continue with the second `feature_tensor`, `target_tensor` being: ``` [2, 3, 4, 5] # features 6 # target ``` ``` from torch.utils.data import TensorDataset, DataLoader def batch_data(words, sequence_length, batch_size): """ Batch the neural network data using DataLoader :param words: The word ids of the TV scripts :param sequence_length: The sequence length of each batch :param batch_size: The size of each batch; the number of sequences in a batch :return: DataLoader with batched data """ # TODO: Implement function # return a dataloader return None # there is no test for this function, but you are encouraged to create # print statements and tests of your own ``` ### Test your dataloader You'll have to modify this code to test a batching function, but it should look fairly similar. Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader. 
Your code should return something like the following (likely in a different order, if you shuffled your data): ``` torch.Size([10, 5]) tensor([[ 28, 29, 30, 31, 32], [ 21, 22, 23, 24, 25], [ 17, 18, 19, 20, 21], [ 34, 35, 36, 37, 38], [ 11, 12, 13, 14, 15], [ 23, 24, 25, 26, 27], [ 6, 7, 8, 9, 10], [ 38, 39, 40, 41, 42], [ 25, 26, 27, 28, 29], [ 7, 8, 9, 10, 11]]) torch.Size([10]) tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12]) ``` ### Sizes Your sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10). ### Values You should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`. ``` # test dataloader test_text = range(50) t_loader = batch_data(test_text, sequence_length=5, batch_size=10) data_iter = iter(t_loader) sample_x, sample_y = data_iter.next() print(sample_x.shape) print(sample_x) print() print(sample_y.shape) print(sample_y) ``` --- ## Build the Neural Network Implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class: - `__init__` - The initialize function. - `init_hidden` - The initialization function for an LSTM/GRU hidden state - `forward` - Forward propagation function. The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state. **The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word. ### Hints 1. 
Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)` 2. You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so: ``` # reshape into (batch_size, seq_length, output_size) output = output.view(batch_size, -1, self.output_size) # get last batch out = output[:, -1] ``` ``` import torch.nn as nn class RNN(nn.Module): def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5): """ Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary) :param output_size: The number of output dimensions of the neural network :param embedding_dim: The size of embeddings, should you choose to use them :param hidden_dim: The size of the hidden layer outputs :param dropout: dropout to add in between LSTM/GRU layers """ super(RNN, self).__init__() # TODO: Implement function # set class variables # define model layers def forward(self, nn_input, hidden): """ Forward propagation of the neural network :param nn_input: The input to the neural network :param hidden: The hidden state :return: Two Tensors, the output of the neural network and the latest hidden state """ # TODO: Implement function # return one batch of output word scores and the hidden state return None, None def init_hidden(self, batch_size): ''' Initialize the hidden state of an LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden state of dims (n_layers, batch_size, hidden_dim) ''' # Implement function # initialize hidden state with zero weights, and move to GPU if available return None """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_rnn(RNN, train_on_gpu) ``` ### Define forward and backpropagation Use the RNN class you implemented to apply forward and back propagation. 
This function will be called, iteratively, in the training loop as follows: ``` loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target) ``` And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that you can get this loss by computing it, as usual, and calling `loss.item()`. **If a GPU is available, you should move your data to that GPU device, here.** ``` def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden): """ Forward and backward propagation on the neural network :param rnn: The PyTorch Module that holds the neural network :param optimizer: The PyTorch optimizer for the neural network :param criterion: The PyTorch loss function :param inp: A batch of input to the neural network :param target: The target output for the batch of input :return: The loss and the latest hidden state Tensor """ # TODO: Implement Function # move data to GPU, if available # perform backpropagation and optimization # return the loss over a batch and the hidden state produced by our model return None, None # Note that these tests aren't completely extensive. # they are here to act as general checks on the expected outputs of your functions """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu) ``` ## Neural Network Training With the structure of the network complete and data ready to be fed in the neural network, it's time to train it. ### Train Loop The training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section. 
``` """ DON'T MODIFY ANYTHING IN THIS CELL """ def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100): batch_losses = [] rnn.train() print("Training for %d epoch(s)..." % n_epochs) for epoch_i in range(1, n_epochs + 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make sure you iterate over completely full batches, only n_batches = len(train_loader.dataset)//batch_size if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}\n'.format( epoch_i, n_epochs, np.average(batch_losses))) batch_losses = [] # returns a trained rnn return rnn ``` ### Hyperparameters Set and train the neural network with the following parameters: - Set `sequence_length` to the length of a sequence. - Set `batch_size` to the batch size. - Set `num_epochs` to the number of epochs to train for. - Set `learning_rate` to the learning rate for an Adam optimizer. - Set `vocab_size` to the number of unique tokens in our vocabulary. - Set `output_size` to the desired size of the output. - Set `embedding_dim` to the embedding dimension; smaller than the vocab_size. - Set `hidden_dim` to the hidden dimension of your RNN. - Set `n_layers` to the number of layers/cells in your RNN. - Set `show_every_n_batches` to the number of batches at which the neural network should print progress. If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class. 
``` # Data params # Sequence Length sequence_length = # of words in a sequence # Batch Size batch_size = # data loader - do not change train_loader = batch_data(int_text, sequence_length, batch_size) # Training parameters # Number of Epochs num_epochs = # Learning Rate learning_rate = # Model parameters # Vocab size vocab_size = # Output size output_size = # Embedding Dimension embedding_dim = # Hidden Dimension hidden_dim = # Number of RNN Layers n_layers = # Show stats for every n number of batches show_every_n_batches = 500 ``` ### Train In the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train. > **You should aim for a loss less than 3.5.** You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # create model and move to gpu if available rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5) if train_on_gpu: rnn.cuda() # defining loss and optimization functions for training optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() # training the model trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches) # saving the trained model helper.save_model('./save/trained_rnn', trained_rnn) print('Model Trained and Saved') ``` ### Question: How did you decide on your model hyperparameters? For example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those? 
**Answer:** (Write answer, here) --- # Checkpoint After running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() trained_rnn = helper.load_model('./save/trained_rnn') ``` ## Generate TV Script With the network trained and saved, you'll use it to generate a new, "fake" Seinfeld TV script in this section. ### Generate Text To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores! 
``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import torch.nn.functional as F def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): """ Generate text using the neural network :param decoder: The PyTorch Module that holds the trained neural network :param prime_id: The word id to start the first prediction :param int_to_vocab: Dict of word id keys to word values :param token_dict: Dict of puncuation tokens keys to puncuation values :param pad_value: The value used to pad a sequence :param predict_len: The length of text to generate :return: The generated text """ rnn.eval() # create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq = torch.LongTensor(current_seq).cuda() else: current_seq = torch.LongTensor(current_seq) # initialize the hidden state hidden = rnn.init_hidden(current_seq.size(0)) # get the output of the rnn output, _ = rnn(current_seq, hidden) # get the next word probabilities p = F.softmax(output, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # use top_k sampling to get the index of the next word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze() # select the likely next word index with some element of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) # retrieve that word from the dictionary word = int_to_vocab[word_i] predicted.append(word) if(train_on_gpu): current_seq = current_seq.cpu() # move to cpu # the generated word becomes the next "current sequence" and the cycle can continue if train_on_gpu: current_seq = current_seq.cpu() current_seq = np.roll(current_seq, -1, 1) current_seq[-1][-1] = word_i gen_sentences = ' '.join(predicted) # Replace punctuation tokens for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' gen_sentences = 
gen_sentences.replace(' ' + token.lower(), key) gen_sentences = gen_sentences.replace('\n ', '\n') gen_sentences = gen_sentences.replace('( ', '(') # return all the sentences return gen_sentences ``` ### Generate a New Script It's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction: - "jerry" - "elaine" - "george" - "kramer" You can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. (You can also start with any other names you find in the original text file!) ``` # run the cell multiple times to get different results! gen_length = 400 # modify the length to your preference prime_word = 'jerry' # name for starting the script """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ pad_word = helper.SPECIAL_WORDS['PADDING'] generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length) print(generated_script) ``` #### Save your favorite scripts Once you have a script that you like (or find interesting), save it to a text file! ``` # save script to a text file f = open("generated_script_1.txt","w") f.write(generated_script) f.close() ``` # The TV Script is Not Perfect It's ok if the TV script doesn't make perfect sense. It should look like alternating lines of dialogue, here is one such example of a few generated lines. ### Example generated script >jerry: what about me? > >jerry: i don't have to wait. > >kramer:(to the sales table) > >elaine:(to jerry) hey, look at this, i'm a good doctor. > >newman:(to elaine) you think i have no idea of this... > >elaine: oh, you better take the phone, and he was a little nervous. > >kramer:(to the phone) hey, hey, jerry, i don't want to be a little bit.(to kramer and jerry) you can't. > >jerry: oh, yeah. i don't even know, i know. > >jerry:(to the phone) oh, i know. 
> >kramer:(laughing) you know...(to jerry) you don't know. You can see that there are multiple characters that say (somewhat) complete sentences, but it doesn't have to be perfect! It takes quite a while to get good results, and often, you'll have to use a smaller vocabulary (and discard uncommon words), or get more data. The Seinfeld dataset is about 3.4 MB, which is big enough for our purposes; for script generation you'll want more than 1 MB of text, generally. # Submitting This Project When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save another copy as an HTML file by clicking "File" -> "Download as.."->"html". Include the "helper.py" and "problem_unittests.py" files in your submission. Once you download these files, compress them into one zip file for submission.
github_jupyter
# Custom NER ``` # !pip install spacy import pandas as pd import plac import random import warnings from pathlib import Path import spacy from spacy.util import minibatch, compounding data = pd.read_csv('./label_text_product_attrs.csv', dtype=str) data.rename(columns={'sale_price': 'ppu', 'final_price': 'total_price'}, inplace=True) row = data.iloc[0] row print(row['text']) attributes = data.columns.difference(['image_name', 'text']) attributes a = 'ABC12345ABC12AAABC' def find_2nd(string, substring): return string.find(substring, string.find(substring) + 1) find_2nd(a, '1') sub = 'ABC' import re [i for i in range(len(a)) if a.startswith(sub, i)] a[14:] import numpy as np def get_row_entities(row, debug=False): row_entities = [] cache = [] text = row['text'] for attr in ['sku', 'product_name', 'quantity', 'ppu', 'total_price', 'discounted_part', 'original_price']: if debug: print(attr) if not pd.isnull(row[attr]): value = str(row[attr]) value = value.strip() if not value in text: print(attr, value, 'not in text') else: try: indices = [i for i in range(len(text)) if text.startswith(value, i)] if debug: print(f'org indices = {indices}') indices_ = indices.copy() for item in indices_: if any(item in cache_ for cache_ in cache): indices.remove(item) if debug: print(f'indices = {indices}') start_index = indices[0] end_index = start_index + len(value) cache.append(range(start_index, end_index)) if debug: print(f'cache = {cache}') if start_index < 0: print(start_index) if end_index < 0: print(end_index) row_entities.append((start_index, end_index, attr)) except Exception as e: print(row) print(text) print('Error', row_entities, value) raise e for i, item in enumerate(row_entities): if item[-1] == 'product_name': product_name_range = item[0:-1] product_name_id = i if debug: print('Product name ranges', product_name_range) splits = row.text[product_name_range[0]: product_name_range[1]].split(' ') ranges = [] for item in splits: ranges.append((row.text.find(item), 
row.text.find(item) + len(item), 'product_name')) row_entities.pop(product_name_id) row_entities += ranges def is_overlapped(entities): ranges = [range(item[0], item[1]) for item in entities] if debug: print('Total ranges', ranges) return len(reduce(lambda x, y: set(x).intersection(y), ranges)) > 0 from functools import reduce assert not is_overlapped(row_entities) return row_entities row = data.iloc[56] entities = get_row_entities(row, debug=True) entities print(row) print() for i, j, name in entities: print(f"{name}: {row['text'][i:j]}") from sklearn.model_selection import train_test_split train, test = train_test_split(data, test_size=0.2, random_state=42) TRAIN_DATA = [] for index, row in train.iterrows(): TRAIN_DATA.append((row['text'], {"entities": get_row_entities(row)})) def trim_entity_spans(data: list) -> list: """Removes leading and trailing white spaces from entity spans. Args: data (list): The data to be cleaned in spaCy JSON format. | Returns: list: The cleaned data. """ invalid_span_tokens = re.compile(r'\s') cleaned_data = [] for text, annotations in data: entities = annotations['entities'] valid_entities = [] for start, end, label in entities: valid_start = start valid_end = end # if there's preceding spaces, move the start position to nearest character while valid_start < len(text) and invalid_span_tokens.match( text[valid_start]): valid_start += 1 while valid_end > 1 and invalid_span_tokens.match( text[valid_end - 1]): valid_end -= 1 valid_entities.append([valid_start, valid_end, label]) cleaned_data.append([text, {'entities': valid_entities}]) return cleaned_data trim_entity_spans(TRAIN_DATA[:1]) def train_spacy(TRAIN_DATA): nlp = spacy.blank('en') # create blank Language class # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if 'ner' not in nlp.pipe_names: ner = nlp.create_pipe('ner') nlp.add_pipe(ner, last=True) # add labels for _, annotations in 
TRAIN_DATA: for ent in annotations.get('entities'): ner.add_label(ent[2]) # get names of other pipes to disable them during training other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner'] with nlp.disable_pipes(*other_pipes): # only train NER optimizer = nlp.begin_training() for itn in range(10): print("Statring iteration " + str(itn)) random.shuffle(TRAIN_DATA) losses = {} for text, annotations in TRAIN_DATA: nlp.update( [text], # batch of texts [annotations], # batch of annotations drop=0.2, # dropout - make it harder to memorise data sgd=optimizer, # callable to update weights losses=losses) print(losses) return nlp def main(model=None, output_dir='.', n_iter=100): """Load the model, set up the pipeline and train the entity recognizer.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner, last=True) # otherwise, get it so we can add labels else: ner = nlp.get_pipe("ner") # add labels for _, annotations in TRAIN_DATA: for ent in annotations.get("entities"): ner.add_label(ent[2]) # get names of other pipes to disable them during training pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"] other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions] # only train NER with nlp.disable_pipes(*other_pipes), warnings.catch_warnings(): try: # show warnings for misaligned entity spans once warnings.filterwarnings("once", category=UserWarning, module='spacy') # reset and initialize the weights randomly – but only if we're # training a new model if model is None: nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples 
using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update( texts, # batch of texts annotations, # batch of annotations drop=0.5, # dropout - make it harder to memorise data losses=losses, ) print("Losses", losses) except Exception as e: print(texts, annotations) raise e # # test the trained model # for text, _ in TRAIN_DATA: # doc = nlp(text) # print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) # print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) nlp = spacy.load('en_core_web_sm') docs = [] for text, annot in TRAIN_DATA: doc = nlp(text) tags = spacy.gold.biluo_tags_from_offsets(doc, annot['entities']) print(np.array(doc)) print(np.array(tags)) break print(TRAIN_DATA[0][0][46:47]) TRAIN_DATA[0] # nlp = train_spacy(trim_entity_spans(TRAIN_DATA)) for i, row in enumerate(TRAIN_DATA): if row[0].startswith('08936034200116'): print(i) main() TEST_DATA = [] for index, row in test.iterrows(): TEST_DATA.append((row['text'], {"entities": get_row_entities(row)})) text = test.iloc[0].text text output_dir = '../pretrained_models/NER' print("Loading from", output_dir) nlp2 = spacy.load(output_dir) # for text, _ in TEST_DATA: doc = nlp2(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) ```
github_jupyter
### Requirement ``` aliyun-python-sdk-core==2.13.25 aliyun-python-sdk-ocr==1.0.8 Flask==1.1.2 imutils==0.5.3 json5==0.9.5 Keras==2.4.3 Keras-Preprocessing==1.1.2 matplotlib==3.3.0 numpy==1.18.5 opencv-python==4.4.0.40 oss2==2.12.1 Pillow==7.0.0 sklearn==0.0 tensorflow==2.3.0 trdg==1.6.0 ``` ### Import Aliyun python SDK modules - `aliyun-python-sdk-core` - `aliyun-python-sdk-ocr` - `oss2` ``` #Aliyun SDK Core from aliyunsdkcore.client import AcsClient from aliyunsdkcore.acs_exception.exceptions import ClientException from aliyunsdkcore.acs_exception.exceptions import ServerException #Aliyun SDK OSS import oss2 #Aliyun SDK OCR from aliyunsdkocr.request.v20191230.RecognizeCharacterRequest import RecognizeCharacterRequest ``` ### Configure Aliyun python SDK ``` #Access_key access_key_id = '' access_key_secret = '' #OSS endpoint = '' bucket_name = '' auth = oss2.Auth(access_key_id, access_key_secret) bucket = oss2.Bucket(auth, endpoint, bucket_name) #OCR location = '' client = AcsClient(access_key_id, access_key_secret, location) ``` ### Instruct OCR request body, set return format to `json` ``` request = RecognizeCharacterRequest() request.set_accept_format('json') ``` ### Upload local image with SHA1 hashed name to OSS - `image_path` is pointed to the local image - image format should be `.png` - image size should less than 3MB ``` import os from hashlib import sha1 image_path = '' #Upload with SHA1 hashed name filename, file_extension = os.path.splitext(image_path) key = sha1(open(image_path, 'rb').read()).hexdigest() + file_extension bucket.put_object_from_file(key, image_path) import json #Get image info from OSS info = bucket.get_object(key, process = 'image/info') info_content = info.read() decoded_info = json.loads(oss2.to_unicode(info_content)) print('Image Info ->') print(json.dumps(decoded_info, indent = 4, sort_keys = True)) #Struct image URL image_url = 'https://' + bucket_name + '.' 
+ endpoint.replace("https://","") + '/' + key print('Image URL -> ' + image_url) #Set OCR image_url request.set_ImageURL(image_url) ``` ### Send request and show OCR result - `MinHeight` is set to $\frac{1}{20}$ of the image width - `OutputProbability` is set to `true` ``` #Pre-config request min_height = int(int(decoded_info['ImageHeight']['value']) / 20) request.set_MinHeight(int(min_height)) request.set_OutputProbability(True) #Send request to OCR server and get response response = client.do_action_with_exception(request) #Delete OSS image bucket.delete_object(key) import json #Parse json response parsed = json.loads(response) print('Response ->') print(json.dumps(parsed, indent = 4, sort_keys = True)) ``` ### Parsed all `TextRectangle` and calculate the distance between image center and rect center ``` distances = [] objects = parsed['Data']['Results'] #Cal image center O(o_x0, o_y0) o_x0, o_y0 = int(decoded_info['ImageWidth']['value']) / 2.0, int(decoded_info['ImageHeight']['value']) / 2.0 import math for object in objects: #Cal TextRectangle angle A, start point A(x0, y0) and endpoint B(x1, y1) A = object['TextRectangles']['Angle'] / 180.0 x0, y0 = object['TextRectangles']['Left'], object['TextRectangles']['Top'] x1, y1 = x0 + object['TextRectangles']['Width'], y0 + object['TextRectangles']['Height'] #Cal vector AB = (v_x0, v_y0) v_x0, v_y0 = x1 - x0, y1 - y0 #Cal angle A rotated and 1/2 lenthed vector AB' = (v_x1, v_y1) v_x1, v_y1 = (v_x0 * math.cos(A) - v_y0 * math.sin(A)) / 2.0, (v_y0 * math.cos(A) + v_x0 * math.sin(A)) / 2.0 #Cal TextRectangle center point B'(x2, y2) x2, y2 = x0 + v_x1, y0 + v_y1 print('TextRectangleCtr -> ', (x2, y2)) #Cal distance between point B and O d = math.pow(x2 - o_x0, 2) + math.pow(y2 - o_y0, 2) distances.append(d) ``` ### Find the nearest `TextRectangle` index to the image center ``` index_min = distances.index(min(distances)) print('Min_Index -> ', index_min) ``` ### Draw all `TextRectangle` - ROI is **green** and others is 
**red** ``` from matplotlib import pyplot as plt from matplotlib import patches as patches %matplotlib inline img = plt.imread(image_path) fig, ax = plt.subplots(1) ax.imshow(img) index = 0 for object in objects: if (index == index_min): c = 'g' else: c = 'r' index += 1 ret = patches.Rectangle( (object['TextRectangles']['Left'], object['TextRectangles']['Top']), object['TextRectangles']['Width'], object['TextRectangles']['Height'], object['TextRectangles']['Angle'] / 180.0, linewidth = 2, edgecolor = c, facecolor = 'none' ) ax.add_patch(ret) plt.show() ``` ### ROI ``` import PIL from matplotlib import pyplot as plt A = - objects[index_min]['TextRectangles']['Angle'] / 180.0 roi = PIL.Image.open(image_path) roi = roi.rotate(A) def rotate(x, y, o_x, o_y, theta): x_r = math.cos(theta) * (x - o_x) - math.sin(theta) * (y - o_y) + o_x y_r = math.sin(theta) * (x - o_x) + math.cos(theta) * (y - o_y) + o_y return [x_r, y_r] #Cal start point A(x0, y0) x0, y0 = objects[index_min]['TextRectangles']['Left'], objects[index_min]['TextRectangles']['Top'] #Cal angle A rotated A'(x1, y1) x1, y1 = rotate(x0, y0, o_x0, o_y0, A) roi = roi.crop((x1, y1, (x1 + objects[index_min]['TextRectangles']['Width']), (y1 + objects[index_min]['TextRectangles']['Height']))) fig, ax = plt.subplots(1) ax.imshow(roi) plt.show() ``` ### Load image function for DeepFont - color to gray - resize to (105, 105) ``` import PIL import numpy as np def pil_image(img_path): pil_img = PIL.Image.open(img_path).convert('L') pil_img = pil_img.resize((105, 105)) return pil_img ``` ### Preprocessing function - Noise a small Gaussian noise with 0 mean and standard deviation 3 is added to input. - Blur a random Gaussian blur with standard deviation from 2.5 to 3.5 is added to input. - Perspective Rotation a randomly-parameterized affine transformation is added to input. - Shading the input background is filled with a gradient in illumination. 
``` import PIL import cv2 import numpy as np def noise_image(img): img_array = np.asarray(img) mean = 0.0 std = 3 noisy_img = img_array + np.random.normal(mean, std, img_array.shape) noisy_img_clipped = np.clip(noisy_img, 0, 255) noise_img = PIL.Image.fromarray(np.uint8(noisy_img_clipped)) noise_img = noise_img.resize((105, 105)) return noise_img def blur_image(img): blur_img = img.filter(PIL.ImageFilter.GaussianBlur(radius = 3)) blur_img = blur_img.resize((105, 105)) return blur_img def affine_rotation(img): rows, columns = img.shape point1 = np.float32([[10, 10], [30, 10], [10, 30]]) point2 = np.float32([[20, 15], [40, 10], [20, 40]]) anchor = cv2.getAffineTransform(point1, point2) output = cv2.warpAffine(img, anchor, (columns, rows)) affine_img = PIL.Image.fromarray(np.uint8(output)) affine_img = affine_img.resize((105, 105)) return affine_img def gradient_fill(img): output = cv2.Laplacian(img, cv2.CV_64F) laplacian_img = PIL.Image.fromarray(np.uint8(output)) laplacian_img = laplacian_img.resize((105, 105)) return laplacian_img ``` ### Generate Datasets - `ttf_path` is a folder contains all the font file with correct font name and `.ttf` extension - `data_path` is a folder stores or contains generated datasets Uses `TextRecognitionDataGenerator` ``` import os ttf_path = '' data_path = '' for file in os.listdir(ttf_path): if file.endswith('.ttf'): path = os.path.join(ttf_path, file) name, ext = os.path.splitext(os.path.basename(path)) out_path = data_path + '/' + name command = 'trdg -l en -c 30 -rs -let -num -r --length 1 -b 1 -e .png -fi -f 105 -ft ' + path + ' --output_dir ' + out_path os.system(command) ``` ### Import Datasets - `label_path` should be defined ``` import os import json from imutils import paths from random import seed, shuffle label_path = '' #Random image path from data_path image_paths = sorted(list(paths.list_images(data_path))) seed(10) shuffle(image_paths) #Use folder name in data_path as font name font_names = [] for f in 
os.listdir(data_path): if not f.startswith('.'): font_names.append(f) font_names.sort() with open(label_path, 'w') as outfile: json.dump(font_names, outfile) print('Font Names -> ', font_names) ``` ### Labeling font by the index of font name in `font_names` ``` def conv_label(label): return font_names.index(label) ``` ### Preprocessing Datasets ``` import os import itertools import numpy as np from keras.preprocessing.image import img_to_array data = [] labels = [] auguments = ["blur", "noise", "affine", "gradient"] for path in image_paths: #Labeling images label = path.split(os.path.sep)[-2] if not label.startswith('.'): label = conv_label(label) else: continue pil_img = pil_image(path) org_img = img_to_array(pil_img) data.append(org_img) labels.append(label) #Random auguments combinations for i in range(0, len(auguments)): for augument in list(itertools.combinations(auguments, i + 1)): temp_img = pil_img combinations = list(augument) for method in combinations: if method == 'noise': temp_img = noise_image(temp_img) elif method == 'blur': temp_img = blur_image(temp_img) elif method == 'affine': open_cv_affine = np.array(pil_img) temp_img = affine_rotation(open_cv_affine) elif method == 'gradient': open_cv_gradient = np.array(pil_img) temp_img = gradient_fill(open_cv_gradient) temp_img = img_to_array(temp_img) data.append(temp_img) labels.append(label) ``` ### Partition Datasets and transform - $\frac{3}{4}$ for training - $\frac{1}{4}$ for testing ``` import numpy as np from sklearn.model_selection import train_test_split from keras.utils import to_categorical #Partition data = np.asarray(data, dtype = "float") / 255.0 labels = np.array(labels) (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size = 0.25, random_state = 10) #Converting labels from integers to vectors trainY = to_categorical(trainY, num_classes = len(font_names)) testY = to_categorical(testY, num_classes = len(font_names)) ``` ### Additional Datasets process - **Variable 
Character Spacing** when rendering each synthetic image, set the character spacing (by pixel) to be a Gaussian random variable of mean 10 and standard deviation 40, bounded by [0, 50]. - **Variable Aspect Ratio** Before cropping each image into a input patch, the image, with heigh fixed, is squeezed in width by a random ratio, drawn from a uniform distribution between $\frac{5}{6}$ and $\frac{7}{6}$. ``` from keras.preprocessing.image import ImageDataGenerator augmented_images = ImageDataGenerator( rotation_range = 30, width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True ) ``` ### Re-arrange Datasets channels ``` from keras import backend as K K.set_image_data_format('channels_last') ``` ### Create model - **Unsupervised cross-domain sub-network ${C_u}$**, which consists of the first *K* layers of *CNN*. It accounts for extracting low-level visual features shared by both syn- thetic and real-world data domains. ${C_u}$ will be trained in a unsupervised way, using unlabeled data from both domains. It constitutes the crucial step that further minimizes the low-level feature gap, beyond the previous data augmentation efforts. - **Supervised domain-specific sub-network ${C_s}$**, which consists of the remaining *N − K* layers. It accounts for learning higher-level discriminative features for classi- fication, based on the shared features from ${C_u}$. ${C_s}$ will be trained in a supervised way, using labeled data from the synthetic domain only. 
``` from keras.models import Sequential from keras.layers.normalization import BatchNormalization from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D , UpSampling2D ,Conv2DTranspose def create_model(): model = Sequential() #Cu Layers model.add(Conv2D(64, kernel_size = (48, 48), activation = 'relu', input_shape = (105, 105, 1))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Conv2D(128, kernel_size = (24, 24), activation = 'relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Conv2DTranspose(128, (24, 24), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform')) model.add(UpSampling2D(size = (2, 2))) model.add(Conv2DTranspose(64, (12, 12), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform')) model.add(UpSampling2D(size = (2, 2))) #Cs Layers model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Flatten()) model.add(Dense(4096, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(4096, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(2383, activation = 'relu')) model.add(Dense(len(font_names), activation = 'softmax')) return model ``` ### Compile Model ``` from keras import optimizers batch_size = 128 epochs = 50 model= create_model() opt = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True) model.compile(loss = 'mean_squared_error', optimizer = opt, metrics = ['accuracy']) ``` ### Fit and store Model - `model_path` should be defined ``` from keras import callbacks model_path = '' my_callbacks = [ callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, patience = 10, verbose = 0, mode = 'min'), callbacks.ModelCheckpoint(model_path, monitor = 'val_loss', verbose = 1, save_best_only = True, mode = 'min') ] 
model.fit( trainX, trainY, shuffle = True, batch_size = batch_size, epochs = epochs, verbose = 1, validation_data = (testX, testY), callbacks = my_callbacks ) ``` ### Evaluate ``` from keras.models import load_model model_path = '' model = load_model(model_path) score = model.evaluate(testX, testY, verbose = 0) print('Test loss ->', score[0]) print('Test accuracy ->', score[1]) ``` ### Revert font name from labels ``` def rev_conv_label(label): return font_names[label] ``` ### Verify ``` import PIL import numpy as np import matplotlib.cm as cm import matplotlib.pylab as plt from keras.preprocessing.image import img_to_array #Load image and de-noisy tmp_img = roi.copy().convert('L') tmp_img = blur_image(tmp_img) arr_img = img_to_array(tmp_img) #Predict using trained model data = [] data.append(arr_img) data = np.asarray(data, dtype = "float") / 255.0 y = np.argmax(model.predict(data), axis = -1) #Display result label = rev_conv_label(int(y[0])) fig, ax = plt.subplots(1) ax.imshow(roi, interpolation = 'nearest', cmap = cm.gray) ax.text(5, 5, label, bbox = {'facecolor': 'white', 'pad': 8}) plt.show() ```
github_jupyter
# Multivariate Timeseries Classification In this project we have 205 samples of 89 rows each with 14 different features. The problem is similar to activity recognition and is solved using both traditional machine learning using feature engineering and also, using deep learning using LSTM, CNN1D-LSTM and CNN2D-LSTM. ### Import Necessary Files ``` from data_loader import DataLoader from preprocessing import prepare_data from models import FeatureEngineeredModel, DeepLearningModel from utils import plot_confusion_matrix, plot_class_distribution, plot_best_accuracies ``` ### Data File ``` data_file = "challenge_dataset.xlsx" ``` ### Train and Test files ``` train_file = "train.csv" test_file = "test.csv" ``` ### Test size of 20% ``` test_size = 0.2 ``` ### Prepare train and test datasets prepare_data() renames "ID_TestSet" to "id" and splits the data into train and test set and creates two new csv files which are used for training and testing models ``` data = prepare_data(data_file, train_file, test_file, test_size, gen=False) ``` ### Visualize the distribution of different classes ``` classes = data.groupby("goal").id.count() plot_class_distribution(classes) ``` ### Define dataset loader ``` data_loader = DataLoader(train_file, test_file) ``` ### Feature Engineered Model FeatureEngineeredModel in ints constructor extracts and selects the relevant features ``` X_train, y_train, X_test, y_test, train_ids, test_ids = data_loader.get_train_test_data() traditional_model = FeatureEngineeredModel(X_train, y_train, X_test, y_test, train_ids, test_ids) ``` ### Evaluate RandomForest and XGBoost for various num estimators and determine the best model ``` tm_best_model, tm_best_score = traditional_model.evaluate() ``` ### Deep Learning Models - LSTM, CNN1D-LSTM, CNN2D-LSTM ``` X_train, y_train, X_test, y_test, train_ids, test_ids = data_loader.get_train_test_data_norm() deep_learning_model = DeepLearningModel(X_train, y_train, X_test, y_test, train_ids, test_ids) ``` ### 
Training the three deep learning architectures 10 times to get average and best accuracy ``` dl_best_model, dl_best_score = deep_learning_model.evaluate() ``` ### Plot Confusion Matrices ``` plot_confusion_matrix(traditional_model.get_confusion_matrix(), classes = [0,1,2], title = traditional_model.best_model_name + " Confusion Matrix") plot_confusion_matrix(deep_learning_model.get_confusion_matrix(), classes = [0,1,2], title = deep_learning_model.best_model_name + " Confusion Matrix") ``` ### Comparison of the Accuracy of the best models of the five algorithms or architectures ``` model_names = traditional_model.model_names.copy() model_names.extend(deep_learning_model.model_names) accuracies = traditional_model.scores.copy() accuracies.extend(deep_learning_model.scores) plot_best_accuracies(model_names, accuracies) ```
github_jupyter
### This notebook contains some code for processing the atlas data: 1. Hole filling, masking to prostate, extract individual histology labels (Gleason grade) 2. Interpolate histology-derived data (5mm spacing vs. 2.5mm MRI axial slices) 3. Interpolate to isotropic voxel sizes (0.8 x 0.8 x 0.8 mm^3) 4. Write data to disk and save images for manual review ``` %load_ext autoreload %autoreload 2 import pathlib import SimpleITK as sitk from platipy.imaging.label.utils import get_com from platipy.imaging.utils.vessel import vessel_spline_generation from platipy.imaging import ImageVisualiser from platipy.imaging.registration.utils import smooth_and_resample # import colorcet as cc %matplotlib notebook from birt_utils import ( interpolate_image, interpolate_histology_lesion_probability, generate_sampling_label ) # Set parameters contour_fill_hole_mm = 5 input_dir = pathlib.Path("../../1_data/atlas_data/") case_id_list = sorted([i.name[6:] for i in input_dir.glob("*MRHIST*")]) print(len(case_id_list), case_id_list) """ Simplify the images/labels that we propagate """ labels_linear = [ "TUMOUR_PROBABILITY_GRADE_2+2", "TUMOUR_PROBABILITY_GRADE_3+2", "TUMOUR_PROBABILITY_GRADE_3+3", "TUMOUR_PROBABILITY_GRADE_3+4", "TUMOUR_PROBABILITY_GRADE_4+3", "TUMOUR_PROBABILITY_GRADE_4+4", "TUMOUR_PROBABILITY_GRADE_4+5", "TUMOUR_PROBABILITY_GRADE_5+4", "TUMOUR_PROBABILITY_GRADE_5+5", ] labels_nn = [ "CONTOUR_PROSTATE", "CONTOUR_PZ", "CONTOUR_URETHRA", "LABEL_HISTOLOGY", "LABEL_SAMPLING" ] images_bspline = [ "MRI_T2W_2D", ] images_linear = [ "CELL_DENSITY_MAP", ] images_nn = [ "HISTOLOGY" ] data_names = labels_linear + labels_nn + images_linear + images_nn vals = [] for atlas_id in case_id_list: im = sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "LABELS" / f"MRHIST{atlas_id}_LABEL_HISTOLOGY.nii.gz").as_posix() ) new_vals = np.unique(sitk.GetArrayViewFromImage(im)) print(atlas_id, new_vals) vals += list(new_vals) np.unique(vals) """ Read in data """ hist_value_2p2 = 64 hist_value_3p2 = 
96 hist_value_3p3 = 128 hist_value_3p4 = 160 hist_value_4p3 = 192 hist_value_4p4 = 224 hist_value_4p5 = 234 hist_value_5p4 = 244 hist_value_5p5 = 255 atlas_set = {} for atlas_id in case_id_list: print(atlas_id, end=" | ") atlas_set[atlas_id] = {} atlas_set[atlas_id]["ORIGINAL"] = {} # Read MRI atlas_set[atlas_id]["ORIGINAL"]['MRI_T2W_2D'] = sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "IMAGES" / f"MRHIST{atlas_id}_MRI_T2W_2D.nii.gz").as_posix() ) # Resampling functions g_nn = lambda x: sitk.Resample(x, atlas_set[atlas_id]["ORIGINAL"]['MRI_T2W_2D'], sitk.Transform(), sitk.sitkNearestNeighbor) g_linear = lambda x: sitk.Resample(x, atlas_set[atlas_id]["ORIGINAL"]['MRI_T2W_2D'], sitk.Transform(), sitk.sitkLinear) # Read cell density and histology atlas_set[atlas_id]["ORIGINAL"]['CELL_DENSITY_MAP'] = g_linear( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "IMAGES" / f"MRHIST{atlas_id}_CELL_DENSITY_MAP.nii.gz").as_posix() ) ) atlas_set[atlas_id]["ORIGINAL"]['HISTOLOGY'] = g_nn( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "IMAGES" / f"MRHIST{atlas_id}_HISTOLOGY.nii.gz").as_posix() ) ) # Read whole prostate contour atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_PROSTATE'] = g_nn( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "LABELS" / f"MRHIST{atlas_id}_CONTOUR_PROSTATE.nii.gz").as_posix() ) )>0 # Fill holes contour_fillhole_img = [int(contour_fill_hole_mm/i) for i in atlas_set[atlas_id]["ORIGINAL"]['MRI_T2W_2D'].GetSpacing()] atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_PROSTATE'] = sitk.BinaryMorphologicalClosing(atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_PROSTATE'], contour_fillhole_img) # Masking function mask_to_prostate = lambda x: sitk.Mask(x, atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_PROSTATE']) # Read in PZ and urethtra contours (and mask) atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_PZ'] = mask_to_prostate (g_nn( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "LABELS" / f"MRHIST{atlas_id}_CONTOUR_PZ_INTERP.nii.gz").as_posix() ) ) ) 
atlas_set[atlas_id]["ORIGINAL"]['CONTOUR_URETHRA'] = mask_to_prostate (g_nn( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "LABELS" / f"MRHIST{atlas_id}_CONTOUR_URETHRA.nii.gz").as_posix() ) ) ) # Read in histology labels (tumour annotation) atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] = g_nn( sitk.ReadImage( (input_dir / f"MRHIST{atlas_id}" / "LABELS" / f"MRHIST{atlas_id}_LABEL_HISTOLOGY.nii.gz").as_posix() ) ) # Extract out individual labels atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_2+2"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_2p2)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_3+2"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_3p2)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_3+3"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_3p3)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_3+4"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_3p4)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_4+3"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_4p3)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_4+4"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_4p4)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_4+5"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_4p5)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_5+4"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] 
== hist_value_5p4)) atlas_set[atlas_id]["ORIGINAL"]["TUMOUR_PROBABILITY_GRADE_5+5"] = mask_to_prostate(interpolate_histology_lesion_probability(atlas_set[atlas_id]["ORIGINAL"]['LABEL_HISTOLOGY'] == hist_value_5p5)) # Generate sampling label atlas_set[atlas_id]["ORIGINAL"]["LABEL_SAMPLING"] = mask_to_prostate( generate_sampling_label(atlas_set[atlas_id]["ORIGINAL"]['HISTOLOGY']) ) """ Interpolate missing cell density/histology images """ for atlas_id in atlas_set: atlas_set[atlas_id]["ORIGINAL"]['CELL_DENSITY_MAP'] = interpolate_image( sitk.GrayscaleFillhole( sitk.Cast( atlas_set[atlas_id]["ORIGINAL"]['CELL_DENSITY_MAP'], sitk.sitkFloat32 ) ) ) atlas_set[atlas_id]["ORIGINAL"]['HISTOLOGY'] = interpolate_image( sitk.Cast( atlas_set[atlas_id]["ORIGINAL"]['HISTOLOGY'], sitk.sitkVectorFloat32 ) ) """ Resample to 0.8mm (isotropic) voxel size """ f_nn = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkNearestNeighbor) f_linear = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkLinear) f_bspline = lambda x: smooth_and_resample(x, isotropic_voxel_size_mm = 0.8, interpolator=sitk.sitkBSpline) for atlas_id in atlas_set: atlas_set[atlas_id]['RESAMPLED'] = {} for label_name in labels_linear + images_linear: atlas_set[atlas_id]['RESAMPLED'][label_name] = f_linear( atlas_set[atlas_id]['ORIGINAL'][label_name]) for label_name in images_bspline: atlas_set[atlas_id]['RESAMPLED'][label_name] = f_bspline( atlas_set[atlas_id]['ORIGINAL'][label_name]) for label_name in labels_nn + images_nn: atlas_set[atlas_id]['RESAMPLED'][label_name] = f_nn( atlas_set[atlas_id]['ORIGINAL'][label_name]) # Memory saver atlas_set[atlas_id]["ORIGINAL"] = None """ Write atlas data """ for atlas_id in list(atlas_set.keys()): print(atlas_id, end=" | ") output_dir = pathlib.Path(f"../1_processing/ATLAS_DATA_PROCESSED/MRHIST{atlas_id}") (output_dir / "IMAGES").mkdir(exist_ok=True, parents=True) (output_dir / "LABELS").mkdir(exist_ok=True, 
parents=True) for label_name in labels_linear + labels_nn: sitk.WriteImage(atlas_set[atlas_id]["RESAMPLED"][label_name], str(output_dir / "LABELS" / f"MRHIST{atlas_id}_{label_name}.nii.gz")) for img_name in images_bspline + images_linear + images_nn: if "CELL_DENSITY" in img_name: sitk.WriteImage((8000/255 * atlas_set[atlas_id]["RESAMPLED"]['CELL_DENSITY_MAP'])**1.5, str(output_dir / "IMAGES" / f"MRHIST{atlas_id}_{img_name}.nii.gz")) else: sitk.WriteImage(atlas_set[atlas_id]["RESAMPLED"][img_name], str(output_dir / "IMAGES" / f"MRHIST{atlas_id}_{img_name}.nii.gz")) """ Generate some figures to check data integrity """ figure_dir = pathlib.Path(f"../1_processing/FIGURES_PROCESSING") figure_dir.mkdir(exist_ok=True, parents=True) # 1. Contour check vis = ImageVisualiser(atlas_set[atlas_id]["RESAMPLED"]['MRI_T2W_2D'], cut=get_com(atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_PZ']), figure_size_in=6, window=[0,1200]) vis.add_contour({ 'WG':atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_PROSTATE'], 'PZ':atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_PZ'], 'U':atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_URETHRA'], }, colormap=plt.cm.cool) fig = vis.show() fig.savefig(figure_dir / f"MRHIST{atlas_id}_0_CONTOURS.jpeg", dpi = 300) # 2. CD check vis = ImageVisualiser(atlas_set[atlas_id]["RESAMPLED"]['MRI_T2W_2D'], cut=get_com(atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_PZ']), figure_size_in=6, window=[0,1200]) vis.add_scalar_overlay((8000/255 * atlas_set[atlas_id]["RESAMPLED"]['CELL_DENSITY_MAP'])**1.5, min_value=0, max_value=200000, name='Cell density [mm'+r'$^{-3}$'+']', colormap=plt.cm.gnuplot2, alpha=1) fig = vis.show() fig.savefig(figure_dir / f"MRHIST{atlas_id}_1_CELLDENSITY.jpeg", dpi = 300) # 3. 
Histology vis = ImageVisualiser(atlas_set[atlas_id]["RESAMPLED"]['HISTOLOGY'], cut=get_com(atlas_set[atlas_id]["RESAMPLED"]['CONTOUR_PZ']), figure_size_in=6) vis.add_contour({ 'SAMPLE (HALF)':atlas_set[atlas_id]["RESAMPLED"]['LABEL_SAMPLING']>=0.5, 'SAMPLE (FULL)':atlas_set[atlas_id]["RESAMPLED"]['LABEL_SAMPLING']<=0.5, }, colormap=plt.cm.cool) fig = vis.show() fig.savefig(figure_dir / f"MRHIST{atlas_id}_2_HISTOLOGY.jpeg", dpi = 300) # 4. Histology annotations vis = ImageVisualiser(atlas_set[atlas_id]["RESAMPLED"]['MRI_T2W_2D'], figure_size_in=6, window=[0,1], projection="median") ctr_dict = { label[-3:]:atlas_set[atlas_id]["RESAMPLED"][label] for label in labels_linear } vis.add_contour(ctr_dict, colormap=plt.cm.jet) fig = vis.show() fig.savefig(figure_dir / f"MRHIST{atlas_id}_3_ANNOTATIONS.jpeg", dpi = 300) # Close plt.close("all") ```
github_jupyter
``` %matplotlib inline %run utils.ipynb import matplotlib.pyplot as plt from matplotlib import colors, ticker # import cartopy.crs as ccrs import pandas as pd import numpy as np import scipy as sp from astropy.table import Table import astropy.units as u import astropy.coordinates as coord import arviz as az import seaborn as sns import kinesis as kn import gapipes as gp plt.style.use(mystyledict) %store -r out_full df = out_full#.loc[out_full['Member_r19']!='other'] print(f"{len(df)} rows, {len(df.columns)} columns") # slices of data gdr2 = df.groupby('in_dr2').get_group(True) df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby(["in_dr2"]).sum() df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_meingast').sum() df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_roser').sum() fig, ax = plt.subplots() ax.hist(df['radial_velocity_error'].dropna(), np.logspace(-1,1.2,32)); median_rv_error = df['radial_velocity_error'].median() print(median_rv_error) ax.axvline(median_rv_error, c='k',lw=1); ax.set_xscale('log'); fig, ax = plt.subplots(1, 1, figsize=(4, 2.5), subplot_kw=dict(projection=ccrs.Mollweide())) ax.gridlines( crs=ccrs.Geodetic(), xlocs=[-180, -90, 0, 90, 180], ylocs=[0, 45, 90, -45, -90], linewidth=0.5, zorder=0, ) ax.scatter(df["ra"], df["dec"], s=1, c='k', transform=ccrs.Geodetic()) ax.scatter(gdr2["ra"], gdr2["dec"], s=1, transform=ccrs.Geodetic()) ax.set_global() ax.set_title("Sky distribution") fig.tight_layout() fig.savefig('../plots/hyades-sky.pdf') fig, ax = plt.subplots(1, 1, figsize=(4, 2.5),subplot_kw=dict(projection=ccrs.Mollweide(central_longitude=180))) ax.gridlines( crs=ccrs.Geodetic(), xlocs=[-180, -90, 0, 90, 180], ylocs=[0, 45, 90, -45, -90], linewidth=0.5, zorder=0, ) ax.scatter(df["l"], df["b"], s=1, c='k', transform=ccrs.Geodetic()) ax.scatter(gdr2["l"], gdr2["b"], s=1, 
transform=ccrs.Geodetic()) ax.set_global() ax.set_title("Galactic (centered on $l=180$)") fig.tight_layout() fig.savefig('../plots/hyades-galactic-distribution.pdf') fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[1].scatter(cartx.x, cartx.z, s=1, c=color) for cax in ax: cax.set_xlabel("$X_\mathrm{ICRS}$") ax[0].set_ylabel("$Y_\mathrm{ICRS}$") ax[1].set_ylabel("$Z_\mathrm{ICRS}$") fig.tight_layout() fig.savefig('../plots/hyades-xyz-icrs.pdf') xlims = (-115, 42) ylims = (-145, 186) zlims = (-50, 30) totheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0]) height_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0]) fig_xsize = 3 fig_ysize = totheight * fig_xsize fig, ax = plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize), sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]}) for cax in ax: cax.set_aspect('equal'); labels = ['cl+tails ({})'.format(len(df)), 'cl ({})'.format(len(gdr2))] for dset, color, label in zip([df, gdr2], ['k', None], labels): cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity ax[0].scatter(cartx.x, cartx.y, s=1, c=color, label=label); ax[1].scatter(cartx.x, cartx.z, s=1, c=color); ax[1].set_xlabel('$X$ [pc]') ax[0].set_ylabel('$Y$ [pc]') ax[1].set_ylabel('$Z$ [pc]'); ax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False) fig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, right=0.98, hspace=0.01) fig.savefig('../report/plots/hyades-data-dist.pdf') gdr2_rv = df.loc[df['radial_velocity'].notnull()] harps_rv = df.loc[df['RV_HARPS_leao'].notnull()] xlims = (-115, 42) ylims = (-145, 186) zlims = (-50, 30) totheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0]) height_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0]) fig_xsize = 3 fig_ysize = totheight * fig_xsize fig, ax = 
plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize), sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]}) for cax in ax: cax.set_aspect('equal'); labels = ['', 'has RV'.format(len(gdr2_rv)), 'has HARPS RV'] for dset, color, label, s in zip([df, gdr2_rv,harps_rv], ['k', None,'tab:red'], labels, [1, 4,1]): cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity ax[0].scatter(cartx.x, cartx.y, s=s, c=color, label=label); ax[1].scatter(cartx.x, cartx.z, s=s, c=color); ax[1].set_xlabel('$X$ [pc]') ax[0].set_ylabel('$Y$ [pc]') ax[1].set_ylabel('$Z$ [pc]'); ax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False) fig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, right=0.98, hspace=0.01) # fig.savefig('../report/plots/hyades-data-dist-rv.pdf') df[["radial_velocity", "RV_HARPS_leao", "source_id"]].notnull().groupby( ["radial_velocity", "RV_HARPS_leao"] ).agg("count") delta_rv = df["radial_velocity"] - df["RV_HARPS_leao"] delta_rv_sigma = delta_rv / np.hypot(df["radial_velocity_error"], df["eRV_HARPS_leao"]) mean_delta_rv = np.nanmean(delta_rv) mean_delta_rv_sigma = np.nanmean(delta_rv_sigma) print(f"mean delta RV (DR2-HARPS) = {mean_delta_rv:-8.4f}") print(f"mean delta RV (DR2-HARPS) / error = {mean_delta_rv_sigma:-8.4f}") fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) ax1 = sns.distplot( delta_rv[~np.isnan(delta_rv)], ax=ax1, color="k", hist_kws={"lw":0}, kde_kws={"lw": 1}, ) ax1.axvline(0, c="k", lw=1) ax1.set_xlabel(r"$\mathrm{RV}_\mathrm{DR2} - \mathrm{RV}_\mathrm{HARPS}$") ax1.set_ylabel("Density") ax1.text( 0.05, 0.95, f"mean={mean_delta_rv:-.3f} km/s", ha="left", va="top", size=12, transform=ax1.transAxes, ) ax1.set_yticks([0, .5, 1, 1.5, 2.]) ax1.set_ylim(0, 2.2) sns.distplot( delta_rv_sigma[~np.isnan(delta_rv_sigma)], ax=ax2, color="k", hist_kws={"lw":0}, kde_kws={"lw": 1}, ) ax2.axvline(0, c="k", lw=1) ax2.set_xlabel( r"$\mathrm{RV}_\mathrm{DR2} - \mathrm{RV}_\mathrm{HARPS}" r"/ \sqrt{\sigma_\mathrm{RV, 
DR2}^2+\sigma_\mathrm{RV, HARPS}^2}$" ) ax2.set_ylabel("Density") fig.tight_layout() fig.savefig("../plots/compare-gaia-harps-rv.pdf") mean_cartv_icrs = [-6.03, 45.56, 5.57] vx, vy, vz = mean_cartv_icrs fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:,None] cond = (np.abs(dvx)<5) & (np.abs(dvy)<5) & (np.abs(dvz)<5) # ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color) ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color) for cax in ax: cax.set_xlabel("$X_\mathrm{ICRS}$") ax[0].set_ylabel("$Y_\mathrm{ICRS}$") ax[1].set_ylabel("$Z_\mathrm{ICRS}$") fig.tight_layout() # fig.savefig('../plots/hyades-xyz-vector-icrs.pdf') mean_cartv_galactic = [-42.24, -19.00, -1.48] fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_galactic)[:, None] cond = (np.abs(dvx) < 3) & (np.abs(dvy) < 3) & (np.abs(dvz) < 3) # ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color) ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color) for cax in ax: cax.set_xlabel("$X_\mathrm{Galactic}$") ax[0].set_ylabel("$Y_\mathrm{Galactic}$") ax[1].set_ylabel("$Z_\mathrm{Galactic}$") fig.tight_layout() fig.savefig('../plots/hyades-xyz-vector-galactic.pdf') mean_cartv_galactic = [-42.24, -19.00, -1.48] fig, ax = plt.subplots( 3, 3, figsize=(6.5, 6.5), sharex="col", sharey="all" ) dset = df cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity dvx, dvy, dvz = cartv.d_xyz.value - 
np.array(mean_cartv_galactic)[:, None] xyz = cartx.xyz.value dvxyz = [dvx, dvy, dvz] for icol in range(3): for irow in range(3): ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1) ax[0, 0].set_ylim(-5, 5) for cax in ax.ravel(): cax.set_yticks([-4, -2, 0, 2, 4]) cax.tick_params(width=1, length=6) fig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.94) for cax, label in zip(ax[:, 0], ["x", "y", "z"]): cax.set_ylabel( r"$\Delta v_{0}$".format(label) + r" [$\mathrm{km}\,\mathrm{s}^{-1}$]" ) ax[2, 0].set_xlabel("$X$ [pc]") ax[2, 1].set_xlabel("$Y$ [pc]") ax[2, 2].set_xlabel("$Z$ [pc]") fig.suptitle( "Residual velocities vs. position (Galactic) $N$={}/{}".format( (~np.isnan(dvx)).sum(), len(df) ), size=15 ) fig.subplots_adjust(right=0.98, left=0.1, bottom=0.1) # fig.savefig("../plots/residual-velocity-vs-position-galactic.pdf") error_summary = pd.DataFrame( dict( pmra_error_frac=np.abs(df["pmra_error"] / df["pmra"]), pmdec_error_frac=np.abs(df["pmdec_error"] / df["pmdec"]), parallax_error_frac=np.abs(df["parallax_error"] / df["parallax"]), ) ).describe() error_summary pmdelta = np.hypot( *(df_gfr[['pmra', 'pmdec']].values - df[['pmra', 'pmdec']].values).T) plt.scatter(df['phot_g_mean_mag'], pmdelta, s=4); plt.xlabel('$G$ [mag]') plt.ylabel(r'$\Delta \mu$'); deltav = np.hypot((df_gfr.g.vra-df.g.vra).values, (df_gfr.g.vdec-df.g.vdec).values) plt.scatter(df['phot_g_mean_mag'], deltav, s=4); plt.xlabel('$G$ [mag]') plt.ylabel(r'$\Delta v_{\mathrm{tan}}$'); mean_cartv_icrs = [-6.03, 45.56, 5.57] fig, ax = plt.subplots( 3, 3, figsize=(6.5, 6.5), sharex="col", sharey="all" ) dset = df cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:, None] xyz = cartx.xyz.value dvxyz = [dvx, dvy, dvz] for icol in range(3): for irow in range(3): ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1) ax[0, 0].set_ylim(-5, 5) for cax in ax.ravel(): cax.set_yticks([-4, -2, 0, 2, 4]) 
cax.tick_params(width=1, length=6) fig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.85) for cax, label in zip(ax[:, 0], ["x", "y", "z"]): cax.set_ylabel(r"$\Delta v_{0}$".format(label)+r" [$\mathrm{km}\,\mathrm{s}^{-1}$]") ax[2,0].set_xlabel("$X$ [pc]") ax[2,1].set_xlabel("$Y$ [pc]") ax[2,2].set_xlabel("$Z$ [pc]") fig.suptitle( "Residual velocities vs. position (ICRS) $N$={}/{}".format( (~np.isnan(dvx)).sum(), len(df) ), size=15 ) fig.subplots_adjust(right=0.98, left=0.1, bottom=0.1, top=0.94) # fig.savefig("../plots/residual-velocity-vs-position-icrs.pdf") fig, ax = plt.subplots(1, 1) ax.set_xlabel("$G$ [mag]") n_bright_sources = (df["phot_g_mean_mag"] < 12).sum() print(n_bright_sources) ax.hist( df["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", color="k", label="all (N={})".format(len(df)), ) ax.hist( df.dropna(subset=["radial_velocity"])["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", label="has Gaia RV (N={})".format(df["radial_velocity"].notna().sum()), ) ax.hist( df.dropna(subset=["RV_HARPS_leao"])["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", label="has HARPS RV (N={})".format(df["RV_HARPS_leao"].notna().sum()), ) ax.legend(loc="upper left", fontsize=10, frameon=False); ax.set_ylabel('Count'); df = out_full.loc[out_full["Member_r19"] != "other"] fig, ax = plt.subplots() ax.scatter( df["bp_rp"], df["phot_g_mean_mag"] + df.g.distmod, s=1, c='k' ) ax.invert_yaxis() ax.set_xlabel("BP-RP [mag]") ax.set_ylabel("$M_G$ [mag]"); # get tgas data for velocity uncertainty comparison hy_tgas = pd.read_csv("../data/reino_tgas_full.csv", index_col=0) print(f"number of sources in Reino selection: {len(hy_tgas)} rows") tmp = pd.concat( [ hy_tgas.g.vra_error.rename("v").to_frame().assign(label=r"TGAS $v_\alpha$"), hy_tgas.g.vdec_error.rename("v").to_frame().assign(label=r"TGAS $v_\delta$"), df.g.vra_error.rename("v").to_frame().assign(label=r"DR2 $v_\alpha$"), 
df.g.vdec_error.rename("v").to_frame().assign(label=r"DR2 $v_\delta$"), # df.g.vra_error.rename('v').to_frame().assign(label='HG vra'), # df.g.vdec_error.rename('v').to_frame().assign(label='HG vdec'), df["radial_velocity_error"].rename("v").to_frame().assign(label="DR2 RV"), df["eRV_HARPS_leao"].rename("v").to_frame().assign(label="HARPS RV"), ] ) tmp["v"] = np.log10(tmp["v"]) tmp.groupby('label').describe() g = sns.FacetGrid(tmp, row="label", aspect=5, height=0.8) g.map(sns.kdeplot, "v", clip_on=False, shade=True, alpha=1, lw=1.5, bw=0.2) g.set_titles("") g.fig.subplots_adjust(hspace=0.1, top=0.95, right=0.95, left=0.05, bottom=0.12) g.set(xticklabels=["0.001", "0.01", "0.1", "1", "10"], xticks=[-3, -2, -1, 0, 1]) g.set(yticks=[]) for cax, label in zip(g.fig.axes, g.row_names): cax.spines["left"].set_visible(False) cax.tick_params(length=5, labelsize=12) cax.text(0.95, 0.95, label, ha='right', va='top', transform=cax.transAxes, bbox=dict(facecolor='w'), size=12) cax.axvline(np.log10(0.3), c='k', lw=1, linestyle=':', zorder=-1); g.fig.axes[-1].set_xlabel(r'$\log \sigma_v\,/\,[\mathrm{km}\,\mathrm{s}^{-1}$]'); g.fig.savefig("../plots/hyades-velocity-uncertainty-distribution.pdf") cl_center_icrs_cart = [] ```
github_jupyter
``` import torch import torch.nn as nn import torchvision.transforms as transforms from torch.utils.data import DataLoader import torchvision from networks import * from advertorch.attacks import LinfPGDAttack, GradientSignAttack, LinfBasicIterativeAttack, CarliniWagnerL2Attack, MomentumIterativeAttack, SpatialTransformAttack, LinfSPSAAttack, JacobianSaliencyMapAttack from blackbox.CNN3 import CNN num_classes=10 #torch.cuda.set_device(0) #model = resnet(num_classes=num_classes,depth=110) model = sixNet() #mdoel = model.cuda() BBox = CNN() if True: model = nn.DataParallel(model,device_ids=[0,2,1,3]).cuda() BBox = nn.DataParallel(BBox, device_ids=[0,2,1,3]).cuda() #Loading Trained Model Bb_file = './saved_model/model_Blackbox_model_mnist' softmax_filename= './saved_model/model_pretrain_model_mnist' #filename= 'Models_PCL/CIFAR10_PCL.pth.tar' robust_model= './saved_model/model_posttrain_model_mnist_prox' checkpoint = torch.load(robust_model) Bb = torch.load(Bb_file) model.load_state_dict(checkpoint) BBox.load_state_dict(Bb) model.eval() BBox.eval() device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # Loading Test Data (Un-normalized) transform_test = transforms.Compose([transforms.ToTensor()]) testset = torchvision.datasets.MNIST(root='./file', train=False, download=True, transform=transform_test) test_loader = torch.utils.data.DataLoader(testset, batch_size=1000, pin_memory=True, shuffle=False, num_workers=4) eps = 0.4 pgd02 = LinfPGDAttack( BBox,loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=eps, nb_iter=10, eps_iter=eps/10, rand_init=True, clip_min=0, clip_max=1, targeted=False) fsgm = GradientSignAttack(BBox,loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=eps, clip_min=0, clip_max=1) MIM1 = MomentumIterativeAttack(BBox,loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=eps, nb_iter=10, eps_iter=eps/10, clip_min=0, clip_max=1, targeted=False) BIM = LinfBasicIterativeAttack(BBox, loss_fn=nn.CrossEntropyLoss(reduction="sum"),eps=eps, 
nb_iter=10, eps_iter=eps/10, clip_min=0, clip_max=1, targeted=False) CW = CarliniWagnerL2Attack(BBox,loss_fn=nn.CrossEntropyLoss(reduction="sum"), num_classes=10, learning_rate=0.01, max_iterations=1000, initial_const=10, clip_min=0, clip_max=1) attacks={'pgd':pgd02, "mim":MIM1, "fsgm":fsgm, "BIM":BIM, "CW":CW} model.eval() for attack_name in attacks: correct = 0 size = 0 n = 0 for tedata, tetarget in test_loader: n += 1 size += tedata.shape[0] tedata, tetarget = tedata.to(device), tetarget.to(device) tedata = attacks[attack_name].perturb(tedata, tetarget).to(device) output = model(tedata) pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(tetarget.view_as(pred)).sum().item() print("{:s} acc: {:.2f}".format(attack_name, 100. * correct / size)) ```
github_jupyter
``` %matplotlib inline import numpy as np import pandas as pd import pymc3 as pm import matplotlib.pyplot as plt import seaborn as sns import arviz as az import warnings warnings.filterwarnings('ignore') ``` ## Archimedes procedure for porous material density determination Experimental procedure is as follow, for further detail refer to standard ASTM D7263. 1. Measure temperature of liquid to determine its ($\rho_{\text{water}}$). Find object theoretical full density ( $\rho_{\text{solid}}$ ) and wax density ($\rho_{wax}$) from datasheet 2. Measure mass of sample(s) ($M_t$) 3. Apply two coats of wax to the sample(s) 4. Measure mass of Coated sample(s) ($M_c$) 5. Measure mass of Coated Sample(s) submerged ($M_{sub}$) \begin{equation} \text{density}, \rho_b = \frac{M_t}{\frac{(M_c - M_{sub})}{\rho_{water}} - \frac{(M_c - M_t)}{\rho_{wax}}} \end{equation} \begin{equation} \text{true density}, \pi_d = \frac{ \rho_b}{\rho_{\text{solid}}} * 100 \end{equation} Parts using two powder type are made. Parts labeled bimodal should have a higher density then parts labelled unimodal. ## Display first few row of excel sheet ``` data = pd.read_excel('./data/green_sample_benchmark.xlsx', sheet_name = 'wax', index_col= [0,1]) oil_density = pd.read_excel('./data/green_sample_benchmark.xlsx', sheet_name = 'wax_param', index_col= 0) data.head() ``` ## Display first few row of excel sheet ``` oil_density.head() ``` ## Carrier fluid density Density of the carrier fluid ($\rho_{\text{water}}$) is necessary for density calculation. Since the density of the fluid fluctuate depending on temperature and temperature varies from experiment to experiment, it is recorded for each experiment. For experiment 2 it was recorded at the beginning and at the end. 
``` def plot_hist(data, b): fig, axes = plt.subplots(1,3) axes[0].hist(data['dry weight'], bins = b, density = True) axes[0].set_title('dry weight') axes[0].set_xlabel('weight (g)') axes[0].set_ylabel('%') axes[1].hist(data['wet coated'], bins = b, density = True) axes[1].set_title('wet weight') axes[1].set_xlabel('weight (g)') axes[1].set_ylabel('%') axes[2].hist(data['coated weight'], bins = b, density = True) axes[2].set_title('coated weight') axes[2].set_xlabel('weight (g)') axes[2].set_ylabel('%') plt.tight_layout() plt.show() plot_hist(data, 20) data = data.dropna() ``` ## Compute density Each sample was measured 3 times at each stage. Since we are only interested in the uncertainty in the true density ($\pi_d$), a cartesian product of each measured weights (dry, wet and coated) and liquid density is done for each sample in order to compute true density. ``` from itertools import product from sklearn import preprocessing density_df = pd.DataFrame(columns = ['batch', 'id', 'density', 'operator', 'powder']) # density of stainless steel 316L (g/cc) taken from data sheet den_ss316 = 8.0 unique_id = 0 # iterate over each sample to do cartesian product for idx in data.index.unique(): sample = data.loc[idx] op = int(np.unique(sample['operator']).squeeze()) powder = str(np.unique(sample['powder type']).squeeze()) batch = idx[0] d_weight = sample['dry weight'].to_numpy() wc_weight = sample['wet coated'].to_numpy() c_weight = sample['coated weight'].to_numpy() exp_num = sample['exp'] # density of liquid and wax taken from datasheet water_den = np.unique(oil_density.loc[exp_num]['water density'].to_numpy()) wax_den = np.unique(oil_density.loc[exp_num]['wax density'].to_numpy()) # cartesian product prod = product(d_weight, wc_weight, c_weight, water_den, wax_den) for d_w, wc_w, c_w, water_d, wax_d in prod: #compute true density refer to equation on top den = (d_w/((c_w - wc_w)/water_d - (c_w - d_w)/wax_d))/den_ss316*100 new_entry = { 'batch' : batch, 'id' : unique_id, 
'density' : float(den.squeeze()), 'operator' : op, 'powder' : powder } density_df = density_df.append(new_entry, ignore_index = True) unique_id += 1 ax = sns.histplot(density_df['density']) density_df.to_csv('./data/density_wax.csv', index=False) basic_model = pm.Model() n_part = unique_id powder_mean = density_df.groupby('powder').mean()['density'] part_id = density_df['id'].to_numpy().astype(np.int32) obs_den = density_df['density'].to_numpy().astype(np.int32) # Relabel data to integers powder_enc = preprocessing.LabelEncoder() powder_enc.fit(['Unimodal','Bimodal']) powder_data = powder_enc.transform(density_df['powder']) #compute starting point for mean inference test_val_mu = [powder_mean[c] for c in powder_enc.classes_] A = 10000 with basic_model: # setup independent mu_i mu_p = pm.Uniform('muP', 20.0, 60.0, shape = 2, testval = test_val_mu) mu_real = pm.math.switch(powder_data, mu_p[1], mu_p[0]) # setup prior for part variance and error variance sig_repeat = pm.HalfCauchy('sig_repeat', 25) sig_part = pm.HalfCauchy('sig_part', 25) # each part will vary independently (nesting relationship) P_t = pm.Normal('P_t', mu = 0, sigma = 1, shape = n_part) # setup mu mu = pm.Deterministic('mu', mu_real + P_t[part_id]*sig_part) # add error and data to model density = pm.Normal('density', mu = mu, sigma = sig_repeat, observed = obs_den) with basic_model: trace = pm.sample(A, chains = 4, tune = 2000, target_accept=0.9) prior = pm.sample_prior_predictive() posterior_predictive = pm.sample_posterior_predictive(trace) ``` Convert trace to arviz data format for diagnostic ``` trace_data_az = az.from_pymc3( trace=trace, prior=prior, posterior_predictive=posterior_predictive, model=basic_model ) trace_data_az.to_netcdf('./data/wax_trace.nc') ``` ## Trace plot The sampler is exploring the probability space by sampling different values of the model input parameters ($\mu_i, \sigma_{\text{part}}, \sigma_{\text{gage}}$). 
The left graph shows the probability density of the sampled values. The right graph shows the sampled with respect to sample number. The left graph is computed by summing up the right graph. When the left graph looks skewed towards zero or the right graph does not resemble a thick caterpillar, this indicates a biased sampling and a lack of convergence. ``` pm.traceplot(trace, var_names=['muP','sig_part','sig_repeat']) plt.show() ``` ## Predictive check plot Predictive posterior plot overlays data generated by the model's posterior over the real data. This is useful to check the fitness of the model. ``` az.plot_ppc(trace_data_az, data_pairs={"density":"density"}, kind = 'scatter') plt.show() ``` ## Forest plot Forest plot on estimated real mean $\mu_{i}$ to check if the values make sense ``` pm.plots.forestplot(trace, var_names=['muP']) plt.show() ``` ## Compute split $\hat{R}$ In the Markov-Chain Monte Carlo (MCMC) sampler, the sampling is done simultaneously in multiple processes (chain). The rank normalized $\hat{R}$ checks for convergence by comparing the variance between chain with the variance within chain. If convergence has been achieved then the variance should be the same ($\hat{R}=1$) (Vehtari et al). [Vehtari et al.](https://arxiv.org/pdf/1903.08008v1.pdf "vehtari") recommends a $\hat{R} < 1.01$. ``` az.rhat(trace_data_az) ``` ## Calculate highest density interval (HDI) Taken from [arviz doc](https://arviz-devs.github.io/arviz/api/generated/arviz.hdi.html#arviz.hdi "arviz") : The HDI is the minimum width Bayesian credible interval (BCI). ``` hdi_data = az.hdi(trace_data_az) hdi_data.to_netcdf('./data/wax_inference.nc') hdi_data ```
github_jupyter
# Scaling up ML using Cloud AI Platform

In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud AI Platform. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates *how* to package up a TensorFlow model to run it within Cloud AI Platform. Later in the course, we will look at ways to make a more effective machine learning model.

## Environment variables for project and bucket

Note that:
<ol>
<li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li>
<li> Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket. </li>
</ol>
<b>Change the cell below</b> to reflect your Project ID and bucket name.

```
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst

import os
PROJECT = 'cloud-training-demos'  # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml'  # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1'  # REPLACE WITH YOUR BUCKET REGION e.g. us-central1

# For Python Code
# Model Info
MODEL_NAME = 'taxifare'
# Model Version
MODEL_VERSION = 'v1'
# Training Directory name
TRAINING_DIR = 'taxi_trained'

# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['MODEL_NAME'] = MODEL_NAME
os.environ['MODEL_VERSION'] = MODEL_VERSION
os.environ['TRAINING_DIR'] = TRAINING_DIR
os.environ['TFVERSION'] = '2.5'  # Tensorflow version

%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
```

## Packaging up the code

Take your code and put it into a standard Python package structure. <a href="taxifare/trainer/model.py">model.py</a> and <a href="taxifare/trainer/task.py">task.py</a> contain the Tensorflow code from earlier (explore the <a href="taxifare/trainer/">directory structure</a>).

```
%%bash
find ${MODEL_NAME}

%%bash
cat ${MODEL_NAME}/trainer/model.py
```

## Find absolute paths to your data

Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you

```
%%bash
echo "Working Directory: ${PWD}"
echo "Head of taxi-train.csv"
head -1 $PWD/taxi-train.csv
echo "Head of taxi-valid.csv"
head -1 $PWD/taxi-valid.csv
```

## Running the Python module from the command-line

#### Clean model training dir/output dir

```
%%bash
# This is so that the trained model is started fresh each time.
# However, this needs to be done before training begins.
rm -rf $PWD/${TRAINING_DIR}

%%bash
# Setup python so it sees the task module which controls the model.py
export PYTHONPATH=${PYTHONPATH}:${PWD}/${MODEL_NAME}
# Currently set for python 2. To run with python 3
# 1. Replace 'python' with 'python3' in the following command
# 2. Edit trainer/task.py to reflect proper module import method
python -m trainer.task \
    --train_data_paths="${PWD}/taxi-train*" \
    --eval_data_paths=${PWD}/taxi-valid.csv \
    --output_dir=${PWD}/${TRAINING_DIR} \
    --train_steps=1000 --job-dir=./tmp

%%bash
ls $PWD/${TRAINING_DIR}/export/exporter/

%%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}

%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete

%%bash
# This model dir is the model exported after training and is used for prediction
# FIX: this assignment was commented out, which left ${model_dir} empty and
# pointed --model-dir at the exporter directory instead of the latest export.
model_dir=$(ls ${PWD}/${TRAINING_DIR}/export/exporter | tail -1)
# predict using the trained model
gcloud ai-platform local predict \
    --model-dir=${PWD}/${TRAINING_DIR}/export/exporter/${model_dir} \
    --json-instances=./test.json
```

#### Clean model training dir/output dir

```
%%bash
# This is so that the trained model is started fresh each time.
# However, this needs to be done before training begins.
rm -rf $PWD/${TRAINING_DIR}
```

## Running locally using gcloud

```
%%bash
# Use Cloud Machine Learning Engine to train the model in local file system
gcloud ai-platform local train \
    --module-name=trainer.task \
    --package-path=${PWD}/${MODEL_NAME}/trainer \
    -- \
    --train_data_paths=${PWD}/taxi-train.csv \
    --eval_data_paths=${PWD}/taxi-valid.csv \
    --train_steps=1000 \
    --output_dir=${PWD}/${TRAINING_DIR}

%%bash
ls $PWD/${TRAINING_DIR}
```

## Submit training job using gcloud

First copy the training data to the cloud. Then, launch a training job.

After you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress.

<b>Note:</b> Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.

```
%%bash
# Clear Cloud Storage bucket and copy the CSV files to Cloud Storage bucket
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/${MODEL_NAME}/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/${MODEL_NAME}/smallinput/

%%bash
OUTDIR=gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}
JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
# Clear the Cloud Storage Bucket used for the training job
gsutil -m rm -rf $OUTDIR
# FIX: AI Platform runtime 2.x images ship Python 3.7; the original
# "--python-version 3.5" is not a valid combination with runtime 2.3.
gcloud ai-platform jobs submit training $JOBNAME \
    --region=$REGION \
    --module-name=trainer.task \
    --package-path=${PWD}/${MODEL_NAME}/trainer \
    --job-dir=$OUTDIR \
    --staging-bucket=gs://$BUCKET \
    --scale-tier=BASIC \
    --runtime-version 2.3 \
    --python-version 3.7 \
    -- \
    --train_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-train*" \
    --eval_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-valid*" \
    --output_dir=$OUTDIR \
    --train_steps=10000
```

Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud.

<b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b>

```
%%bash
gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput
```

## Train on larger dataset

I have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow.

Go to http://bigquery.cloud.google.com/ and type the query:
<pre>
SELECT
  (tolls_amount + fare_amount) AS fare_amount,
  pickup_longitude AS pickuplon,
  pickup_latitude AS pickuplat,
  dropoff_longitude AS dropofflon,
  dropoff_latitude AS dropofflat,
  passenger_count*1.0 AS passengers,
  'nokeyindata' AS key
FROM
  [nyc-tlc:yellow.trips]
WHERE
  trip_distance > 0
  AND fare_amount >= 2.5
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
  AND passenger_count > 0
  AND ABS(HASH(pickup_datetime)) % 1000 == 1
</pre>

Note that this is now 1,000,000 rows (i.e. 100x the original dataset). Export this to CSV using the following steps (Note that <b>I have already done this and made the resulting GCS data publicly available</b>, so you don't need to do it.):
<ol>
<li> Click on the "Save As Table" button and note down the name of the dataset and table.
<li> On the BigQuery console, find the newly exported table in the left-hand-side menu, and click on the name.
<li> Click on "Export Table"
<li> Supply your bucket name and give it the name train.csv (for example: gs://cloud-training-demos-ml/taxifare/ch3/train.csv). Note down what this is. Wait for the job to finish (look at the "Job History" on the left-hand-side menu)
<li> In the query above, change the final "== 1" to "== 2" and export this to Cloud Storage as valid.csv (e.g. gs://cloud-training-demos-ml/taxifare/ch3/valid.csv)
<li> Download the two files, remove the header line and upload it back to GCS.
</ol>
<p/>
<p/>

<h2> Run Cloud training on 1-million row dataset </h2>

This took 60 minutes and uses as input 1-million rows. The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). At the end of the training the loss was 32, but the RMSE (calculated on the validation dataset) was stubbornly at 9.03. So, simply adding more data doesn't help.

```
%%bash
OUTDIR=gs://${BUCKET}/${MODEL_NAME}/${TRAINING_DIR}
JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)
CRS_BUCKET=cloud-training-demos  # use the already exported data
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
# FIX: as above, runtime 2.3 requires --python-version 3.7 (was 3.5).
gcloud ai-platform jobs submit training $JOBNAME \
    --region=$REGION \
    --module-name=trainer.task \
    --package-path=${PWD}/${MODEL_NAME}/trainer \
    --job-dir=$OUTDIR \
    --staging-bucket=gs://$BUCKET \
    --scale-tier=STANDARD_1 \
    --runtime-version 2.3 \
    --python-version 3.7 \
    -- \
    --train_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/train.csv" \
    --eval_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/valid.csv" \
    --output_dir=$OUTDIR \
    --train_steps=100000
```

## Challenge Exercise

Modify your solution to the challenge exercise in d_trainandevaluate.ipynb appropriately. Make sure that you implement training and deployment. Increase the size of your dataset by 10x since you are running on the cloud. Does your accuracy improve?

Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
# Riskfolio-Lib Tutorial: <br>__[Financionerioncios](https://financioneroncios.wordpress.com)__ <br>__[Orenji](https://www.orenj-i.net)__ <br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__ <br>__[Dany Cajas](https://www.linkedin.com/in/dany-cajas/)__ <a href='https://ko-fi.com/B0B833SXD' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> ## Tutorial 14: Mean [Ulcer Index](https://en.wikipedia.org/wiki/Ulcer_index) Portfolio Optimization ## 1. Downloading the data: ``` import numpy as np import pandas as pd import yfinance as yf import warnings warnings.filterwarnings("ignore") pd.options.display.float_format = '{:.4%}'.format # Date range start = '2016-01-01' end = '2019-12-30' # Tickers of assets assets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'APA', 'MMC', 'JPM', 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'TMO', 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI', 'T', 'BA'] assets.sort() # Downloading data data = yf.download(assets, start = start, end = end) data = data.loc[:,('Adj Close', slice(None))] data.columns = assets # Calculating returns Y = data[assets].pct_change().dropna() display(Y.head()) ``` ## 2. Estimating Mean Ulcer Index Portfolios ### 2.1 Calculating the portfolio that maximizes Ulcer Performance Index (UPI) ratio. ``` import riskfolio as rp # Building the portfolio object port = rp.Portfolio(returns=Y) # Calculating optimal portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. 
port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Estimate optimal portfolio: model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = 'UCI' # Risk measure used, this time will be variance obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) display(w.T) ``` ### 2.2 Plotting portfolio composition ``` # Plotting the composition of the portfolio ax = rp.plot_pie(w=w, title='Sharpe Mean Ulcer Index', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) ``` ### 2.3 Calculate efficient frontier ``` points = 40 # Number of points of the frontier frontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist) display(frontier.T.head()) # Plotting the efficient frontier label = 'Max Risk Adjusted Return Portfolio' # Title of point mu = port.mu # Expected returns cov = port.cov # Covariance matrix returns = port.returns # Returns of the assets ax = rp.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm, rf=rf, alpha=0.05, cmap='viridis', w=w, label=label, marker='*', s=16, c='r', height=6, width=10, ax=None) # Plotting efficient frontier composition ax = rp.plot_frontier_area(w_frontier=frontier, cmap="tab20", height=6, width=10, ax=None) ``` ## 3. Estimating Risk Parity Portfolios for Ulcer Index ### 3.1 Calculating the risk parity portfolio for Ulcer Index. 
``` b = None # Risk contribution constraints vector w_rp = port.rp_optimization(model=model, rm=rm, rf=rf, b=b, hist=hist) display(w.T) ``` ### 3.2 Plotting portfolio composition ``` ax = rp.plot_pie(w=w_rp, title='Risk Parity Ulcer Index', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) ``` ### 3.3 Plotting Risk Composition ``` ax = rp.plot_risk_con(w_rp, cov=port.cov, returns=port.returns, rm=rm, rf=0, alpha=0.01, color="tab:blue", height=6, width=10, ax=None) ```
github_jupyter
``` import numpy as np from scipy import io img_list=[] image_names = io.loadmat('images.mat')['images'] image_attributes = io.loadmat('attributeLabels_continuous.mat')['labels_cv'] for i in range(image_names.shape[0]): img_list.append(image_names[i][0][0]) att_dict = dict(zip(img_list, image_attributes)) res101 = io.loadmat('../../resnet-feats/SUN/res101.mat') att_splits = io.loadmat('att_splits.mat') train_loc = 'train_loc' val_loc = 'val_loc' test_loc = 'test_unseen_loc' train_images = res101['image_files'][np.squeeze(att_splits[train_loc]-1)] val_images = res101['image_files'][np.squeeze(att_splits[val_loc]-1)] test_images = res101['image_files'][np.squeeze(att_splits[test_loc]-1)] class_labels = res101['labels'] prior_matrix = att_splits['att'] test_classes = class_labels[np.squeeze(att_splits[test_loc]-1)] prior_matrix_ts = prior_matrix[:,(np.unique(test_classes)-1)] print 'Train images = ' + str(len(train_images)) + '\nVal images = ' + str(len(val_images)) + '\nTest images = ' + str(len(test_images)) train_att=np.zeros([train_images.shape[0], image_attributes.shape[1]]) for i in range(train_images.shape[0]): img_name=train_images[i][0][0].split('images/')[1] train_att[i] = np.round(att_dict[img_name]) val_att=np.zeros([val_images.shape[0], image_attributes.shape[1]]) for i in range(val_images.shape[0]): img_name=val_images[i][0][0].split('images/')[1] val_att[i] = np.round(att_dict[img_name]) test_att=np.zeros([test_images.shape[0], image_attributes.shape[1]]) for i in range(test_images.shape[0]): img_name=test_images[i][0][0].split('images/')[1] test_att[i] = np.round(att_dict[img_name]) corr_train = np.corrcoef(train_att.transpose()) nans = np.isnan(corr_train) corr_train[nans] = 0 corr_test = np.corrcoef(prior_matrix_ts) nans = np.isnan(corr_test) corr_test[nans] = 0 def diff_corr(corr_train, corr_test): dis_corr = (corr_train - corr_test) dis_corr = np.sign(corr_train)*dis_corr return dis_corr.clip(0,np.inf) dis_corr = diff_corr(corr_train, corr_test) 
loc = np.unravel_index(np.argsort(-dis_corr, axis=None)[:100], dis_corr.shape) dis_corr[loc] dis_corr.mean() ``` <br> <br> <br> ### New split for SUN dataset ``` res101['image_files'] all_img = att_dict.keys() cls = [img.split('/')[1] for img in all_img] len(np.unique(cls)) res101['labels'] att_splits = io.loadmat('att_splits.mat') #ZSL_GBU data image_names = np.array([x[0][0].split('images/')[1] for x in res101['image_files']]) class_labels = res101['labels'] name2class = dict(zip(image_names, np.squeeze(class_labels))) class2name = {} for n in name2class: c = name2class[n] if c not in class2name: class2name[c] = [n] else: class2name[c].append(n) def diff_corr(corr_train, corr_test): dis_corr = (corr_train - corr_test) dis_corr = np.sign(corr_train)*dis_corr return dis_corr.clip(0,np.inf) def get_corr_shift(att_dict, class2name, train_class, test_class): test_att_mat = [] train_att_mat = [] for cls in train_class: for img in class2name[cls]: train_att_mat.append(att_dict[img]) for cls in test_class: for img in class2name[cls]: test_att_mat.append(att_dict[img]) # print '#Train instances = ' + str(len(train_att_mat)) + '\n#Test instances = ' + str(len(test_att_mat)) train_att_mat = np.array(train_att_mat).transpose() test_att_mat = np.array(test_att_mat).transpose() corr_train = np.corrcoef(train_att_mat) corr_train[np.isnan(corr_train)] = 0. corr_test = np.corrcoef(test_att_mat) corr_test[np.isnan(corr_test)] = 0. 
dis_corr = diff_corr(corr_train, corr_test) # correlation shift score: # 1) mean # corr_shift_score = np.mean(dis_corr) # 2) average of top n% dis_corr_array = dis_corr.flatten() top_percentage = 100 num_elements = int((top_percentage/100.)*len(dis_corr_array)) corr_shift_score = np.mean(dis_corr_array[np.argsort(dis_corr_array)[-num_elements:]]) return corr_shift_score train_classes = np.unique([name2class['/'.join(tr[0][0].split('/')[8:])] for tr in train_images]) val_classes = np.unique([name2class['/'.join(val[0][0].split('/')[8:])] for val in val_images]) test_classes = np.unique([name2class['/'.join(te[0][0].split('/')[8:])] for te in test_images]) get_corr_shift(att_dict, class2name, train_classes, test_classes) import time selected_classes = [] remaining_classes = range(718) remaining_classes.remove(0) start_time = time.time() for i in range(72): best_cls = '' best_score = -1. for cls in remaining_classes: new_sel = selected_classes[:] new_sel.append(cls) new_rem = remaining_classes[:] new_rem.remove(cls) shift_score = get_corr_shift(att_dict, class2name, new_rem, new_sel) if shift_score > best_score: best_score = shift_score best_cls = cls selected_classes.append(best_cls) remaining_classes.remove(best_cls) print str(i+1) + ') ' + str(selected_classes[-1]) + ' -> ' + str(best_score) + ' :' + str(time.time() - start_time)+ 's' test_set = selected_classes[:] train_val = remaining_classes[:] all_classes = range(718) all_classes.remove(0) train_val = all_classes[:] for cls in all_classes: if cls in test_set: train_val.remove(cls) len(train_val) selected_classes = [] remaining_classes = train_val[:] for i in range(65): best_cls = '' best_score = -1. 
for cls in remaining_classes: new_sel = selected_classes[:] new_sel.append(cls) new_rem = remaining_classes[:] new_rem.remove(cls) shift_score = get_corr_shift(att_dict, class2name, new_rem, test_set) if shift_score > best_score: best_score = shift_score best_cls = cls selected_classes.append(best_cls) remaining_classes.remove(best_cls) print str(i+1) + ') ' + str(selected_classes[-1]) + ' -> ' + str(best_score) train_set = remaining_classes[:] val_set = selected_classes[:] print get_corr_shift(att_dict, class2name, train_classes, test_classes) print get_corr_shift(att_dict, class2name, train_set, test_set) print get_corr_shift(att_dict, class2name, train_classes, test_classes) print get_corr_shift(att_dict, class2name, train_set, test_set) test_count = 0 for cls in test_set: test_count += len(class2name[cls]) test_count split_dict = {} split_dict['train_cls'] = train_set split_dict['val_cls'] = val_set split_dict['test_cls'] = test_set import pickle with open('sun_cs_split.npy', 'wb') as fp: np.save(fp, split_dict) sd = np.load('sun_cs_split.npy', allow_pickle=True).item() sd min(train_set+val_set+test_set) train_loc = [] val_loc = [] test_loc = [] for i, label in enumerate(class_labels): if label in sd['train_cls']: train_loc.append(i) elif label in sd['val_cls']: val_loc.append(i) elif label in sd['test_cls']: test_loc.append(i) ```
github_jupyter
# Phase Kickback (фазовый откат?)

В деталях об этом явлении можно почитать в [qiskit textbook](https://qiskit.org/textbook/ch-algorithms/grover.html). Для нас оно будет нужно, чтобы сконвертировать "классическую функцию" (оракул) $f$ в функцию определённого вида.

## Фаза

Для начала, напомним, что такое **фаза**. Существуют состояния-суперпозиции, которые при измерении дают нам одинаковые вероятности наблюдения, например

$|\psi_+\rangle=|+\rangle=\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$ и $|\psi_-\rangle=|-\rangle=\frac{1}{\sqrt{2}}(|0\rangle-|1\rangle)$

Если вы попытаетесь их измерить, то различить их не удастся. Тем не менее мы знаем, что если применить к каждому из них вентиль Адамара, то мы получим уже разные результаты! А вообще их бесконечно много. В общем виде все такие равновероятные состояния можно записать как $|\psi\rangle=\frac{1}{\sqrt{2}}(|0\rangle+e^{i\phi}|1\rangle)$, где $\phi$ -- и есть фаза. Приведённые выше примеры -- частные случаи для $\phi=0, \pi$. Но именно они нам и будут интересны.

NB! Непосредственно фазу наблюдать мы не можем, но она может влиять на результаты последующих вычислений.

## Собственные векторы (состояния) и значения (фазы)

Собственным (для матрицы) называется такой вектор, который она не в состоянии "испортить". Самое страшное, что матрица-оператор может сделать с вектором, -- умножить его на **какую-нибудь константу** (эту константу и называют собственным значением). Сколько бы вы ни применяли $X$ к состоянию $|+\rangle$, оно останется неизменным.

У матрицы 2x2 не больше чем 2 собственных вектора. Для оператора $NOT$ ими являются векторы-состояния $|+\rangle$ и $|-\rangle$ с собственными значениями 1 и -1 соответственно.

$X|-\rangle = X\frac{1}{\sqrt{2}}(|0\rangle-|1\rangle)=\frac{1}{\sqrt{2}}(|1\rangle-|0\rangle)=-\frac{1}{\sqrt{2}}(|0\rangle-|1\rangle)=-|-\rangle$.
Этим интересным свойством и пользуется phase kickback:

Если мы применим контролируемый оператор (в нашем случае $CNOT$) к собственному состоянию (например, $|-\rangle$), то собственное значение для этого состояния запрыгнет в **фазу управляющего кубита (или даже регистра)**. Такого мы не ожидали! Мы думали, что управляющий кубит (или регистр) неизменны, но это не так. Эта математическая особенность будет нам очень на руку при реализации алгоритма Гровера.

Вот так, например, можно найти собственные векторы и значения для произвольных операторов:

```
## TODO compute eigenstates on X
import numpy as np

X = np.array([[0., 1],
              [1, 0]])

evals, evecs = np.linalg.eig(X)
for i in range(X.shape[0]):
    print("Собственное значение:", evals[i], end='\t')
    print("Собственный вектор:", evecs[:, i])
```

Как проверить этот эффект на $CNOT$? Очень просто. Если подготовить:
- управляющий кубит в $|+\rangle$,
- управляемый кубит в $|-\rangle$,

то после применения CNOT можно ожидать, что управляющий $\frac{1}{\sqrt{2}}(|0\rangle+|1\rangle)$ превратится в $\frac{1}{\sqrt{2}}(|0\rangle+(-1)|1\rangle)=|-\rangle$. А значит, применив $H$, мы должны будем получить $|1\rangle$!

```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, Aer, execute
from qiskit.visualization import plot_histogram
import matplotlib.pyplot as plt

qr = QuantumRegister(2, "q")
cr = ClassicalRegister(1, "c")

qc = QuantumCircuit(qr, cr)

# Чтобы увидеть эффект, подготовим состояние |+> в управляющем кубите.
# Если бы не работал, мы бы увидели - |0> -> |+> -> |0>, # Но мы увидим |0> -> |+> -> (!)|-> -> |1> ################################# ## TODO ################################# qc.barrier() # подгтовим состояние |-> - собственное для X in qr[1] ################################# ## TODO ################################# qc.barrier() # Controlled NOT qc.cx(0, 1) qc.barrier() # готовим однозначно измеримое состояние ################################# ## TODO ################################# qc.measure(qr[0], cr[0]) print(qc.draw()) job = execute(qc, Aer.get_backend('qasm_simulator'), shots=1) counts = job.result().get_counts(qc) plot_histogram(counts) ``` [решение](./Q65R_Phase_Kickback_Solution.ipynb)
github_jupyter
# Language Translation In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French. ## Get the Data Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) ``` ## Explore the Data Play around with view_sentence_range to view different parts of the data. ``` view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) ``` ## Implement Preprocessing Function ### Text to Word Ids As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of `target_text`. This will help the neural network predict when the sentence should end. 
You can get the `<EOS>` word id by doing: ```python target_vocab_to_int['<EOS>'] ``` You can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`. ``` def get_id_text(input, vocab_to_int): return [[vocab_to_int[word] for word in sentence.split()] for sentence in input] def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): """ Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. :param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) """ source_sentences = [sentence for sentence in source_text.split('\n')] target_sentences = [sentence + ' <EOS>' for sentence in target_text.split('\n')] source_id_text = get_id_text(source_sentences, source_vocab_to_int) target_id_text = get_id_text(target_sentences, target_vocab_to_int) return source_id_text, target_id_text """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_text_to_ids(text_to_ids) ``` ### Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ helper.preprocess_and_save_data(source_path, target_path, text_to_ids) ``` # Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. 
``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np import helper import problem_unittests as tests (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() ``` ### Check the Version of TensorFlow and Access to GPU This will check to make sure you have the correct version of TensorFlow and access to a GPU ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf from tensorflow.python.layers.core import Dense # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) ``` ## Build the Neural Network You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below: - `model_inputs` - `process_decoder_input` - `encoding_layer` - `decoding_layer_train` - `decoding_layer_infer` - `decoding_layer` - `seq2seq_model` ### Input Implement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter with rank 2. - Targets placeholder with rank 2. - Learning rate placeholder with rank 0. - Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0. - Target sequence length placeholder named "target_sequence_length" with rank 1 - Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0. 
- Source sequence length placeholder named "source_sequence_length" with rank 1 Return the placeholders in the following the tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) ``` def model_inputs(): """ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences. :return: Tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length) """ inputs = tf.placeholder(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.int32, [None, None], name='targets') learning_rate = tf.placeholder(tf.float32, name='learning_rate') keep_prob = tf.placeholder(tf.float32, name='keep_prob') target_sequence_length = tf.placeholder(tf.int32, [None], name='target_sequence_length') max_target_len = tf.reduce_max(target_sequence_length,name='max_target_len') source_sequence_length = tf.placeholder(tf.int32, [None], name='source_sequence_length') return inputs, targets, learning_rate, keep_prob, target_sequence_length, max_target_len, source_sequence_length """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_inputs(model_inputs) ``` ### Process Decoder Input Implement `process_decoder_input` by removing the last word id from each batch in `target_data` and concat the GO ID to the begining of each batch. 
``` def process_decoder_input(target_data, target_vocab_to_int, batch_size): """ Preprocess target data for encoding :param target_data: Target Placehoder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data """ go = target_vocab_to_int['<GO>'] ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) return tf.concat([tf.fill([batch_size, 1], go), ending], 1) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_process_encoding_input(process_decoder_input) ``` ### Encoding Implement `encoding_layer()` to create a Encoder RNN layer: * Embed the encoder input using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence) * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.md#stacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper) * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn) ``` from imp import reload reload(tests) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size): """ Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :param source_sequence_length: a list of the lengths of each sequence in the batch :param source_vocab_size: vocabulary size of source data :param encoding_embedding_size: embedding size of source data :return: tuple (RNN output, RNN state) """ embedded_encoder_input = tf.contrib.layers.embed_sequence(rnn_inputs, 
source_vocab_size, encoding_embedding_size) def make_cell(rnn_size): lstm = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2)) drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob) return drop cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)]) rnn_out, rnn_state = tf.nn.dynamic_rnn(cell, embedded_encoder_input, sequence_length=source_sequence_length, dtype=tf.float32) return rnn_out, rnn_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_encoding_layer(encoding_layer) ``` ### Decoding - Training Create a training decoding layer: * Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper) * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder) * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode) ``` def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_summary_length, output_layer, keep_prob): """ Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param target_sequence_length: The lengths of each sequence in the target batch :param max_summary_length: The length of the longest sequence in the batch :param output_layer: Function to apply the output layer :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing training logits and sample_id """ training_helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length) training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer) output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder, maximum_iterations = max_summary_length) 
return output """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_train(decoding_layer_train) ``` ### Decoding - Inference Create inference decoder: * Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper) * Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder) * Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode) ``` def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob): """ Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param max_target_sequence_length: Maximum length of target sequences :param vocab_size: Size of decoder/target vocabulary :param decoding_scope: TenorFlow Variable Scope for decoding :param output_layer: Function to apply the output layer :param batch_size: Batch size :param keep_prob: Dropout keep probability :return: BasicDecoderOutput containing inference logits and sample_id """ infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference( output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size) infer_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, decoder_fn=infer_decoder_fn, scope=decoding_scope) return infer_logits """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_decoding_layer_infer(decoding_layer_infer) ``` ### Build the Decoding Layer Implement `decoding_layer()` to create a Decoder RNN layer. 
def decoding_layer(dec_input, encoder_state, target_sequence_length,
                   max_target_sequence_length, rnn_size, num_layers,
                   target_vocab_to_int, target_vocab_size, batch_size,
                   keep_prob, decoding_embedding_size):
    """
    Create the decoding layer: embed targets, build the decoder cell and
    output projection, then produce training and inference decoder outputs
    under a shared variable scope.

    :param dec_input: Decoder input (target ids, already <GO>-prefixed)
    :param encoder_state: Encoder state
    :param target_sequence_length: Lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary mapping target words to ids
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: Batch size
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # 1. Embed the target sequences.  The embedding matrix is also needed by
    #    the inference decoder, so keep a handle on it.
    dec_embeddings = tf.Variable(
        tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    # 2. Construct the decoder cell.  Build a NEW cell per layer — reusing one
    #    cell object ([cell] * num_layers) would share weights across layers.
    def _make_cell():
        cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)

    dec_cell = tf.contrib.rnn.MultiRNNCell(
        [_make_cell() for _ in range(num_layers)])

    # 3. Output layer projecting decoder outputs onto the target vocabulary.
    output_layer = tf.layers.Dense(target_vocab_size)

    # 4. Training and inference decoders share variables via the scope.
    with tf.variable_scope('decode'):
        train_output = decoding_layer_train(encoder_state, dec_cell,
                                            dec_embed_input,
                                            target_sequence_length,
                                            max_target_sequence_length,
                                            output_layer, keep_prob)

    with tf.variable_scope('decode', reuse=True):
        infer_output = decoding_layer_infer(encoder_state, dec_cell,
                                            dec_embeddings,
                                            target_vocab_to_int['<GO>'],
                                            target_vocab_to_int['<EOS>'],
                                            max_target_sequence_length,
                                            target_vocab_size, output_layer,
                                            batch_size, keep_prob)

    return train_output, infer_output


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, source_sequence_length,
                  target_sequence_length, max_target_sentence_length, source_vocab_size,
                  target_vocab_size, enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """
    Build the Sequence-to-Sequence part of the neural network.

    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch size
    :param source_sequence_length: Sequence lengths of source sequences in the batch
    :param target_sequence_length: Sequence lengths of target sequences in the batch
    :param max_target_sentence_length: Maximum target sequence length
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
    :param rnn_size: RNN size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary mapping target words to ids
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Encode the input.  encoding_layer embeds internally, so pass raw ids and
    # the full signature documented above (not a pre-embedded tensor).
    _, enc_state = encoding_layer(input_data, rnn_size, num_layers, keep_prob,
                                  source_sequence_length, source_vocab_size,
                                  enc_embedding_size)

    # Prepend <GO> / strip last token from targets for teacher forcing.
    dec_input = process_decoder_input(target_data, target_vocab_to_int,
                                      batch_size)

    # Decode; decoding_layer owns the target embedding and output projection.
    train_output, infer_output = decoding_layer(dec_input, enc_state,
                                                target_sequence_length,
                                                max_target_sentence_length,
                                                rnn_size, num_layers,
                                                target_vocab_to_int,
                                                target_vocab_size,
                                                batch_size, keep_prob,
                                                dec_embedding_size)

    return train_output, infer_output


"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
- Set `rnn_size` to the size of the RNNs. - Set `num_layers` to the number of layers. - Set `encoding_embedding_size` to the size of the embedding for the encoder. - Set `decoding_embedding_size` to the size of the embedding for the decoder. - Set `learning_rate` to the learning rate. - Set `keep_probability` to the Dropout keep probability - Set `display_step` to state how many steps between each debug output statement ``` # Number of Epochs epochs = 7 # Batch Size batch_size = 256 # RNN Size rnn_size = 512 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 10 decoding_embedding_size = 10 # Learning Rate learning_rate = 0.001 # Dropout Keep Probability keep_probability = 0.7 display_step = 256 ``` ### Build the Graph Build the graph using the neural network you implemented. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ save_path = 'checkpoints/dev' (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() max_target_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs() #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, source_sequence_length, target_sequence_length, max_target_sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) training_logits = tf.identity(train_logits.rnn_output, name='logits') inference_logits = tf.identity(inference_logits.sample_id, name='predictions') masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks') with 
tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( training_logits, targets, masks) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) ``` Batch and pad the source and target sequences ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ def pad_sentence_batch(sentence_batch, pad_int): """Pad sentences with <PAD> so that each sentence of a batch has the same length""" max_sentence = max([len(sentence) for sentence in sentence_batch]) return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch] def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int): """Batch targets, sources, and the lengths of their sentences together""" for batch_i in range(0, len(sources)//batch_size): start_i = batch_i * batch_size # Slice the right amount for the batch sources_batch = sources[start_i:start_i + batch_size] targets_batch = targets[start_i:start_i + batch_size] # Pad pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int)) pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int)) # Need the lengths for the _lengths parameters pad_targets_lengths = [] for target in pad_targets_batch: pad_targets_lengths.append(len(target)) pad_source_lengths = [] for source in pad_sources_batch: pad_source_lengths.append(len(source)) yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths ``` ### Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. 
``` """ DON'T MODIFY ANYTHING IN THIS CELL """ def get_accuracy(target, logits): """ Calculate accuracy """ max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad( target, [(0,0),(0,max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad( logits, [(0,0),(0,max_seq - logits.shape[1])], 'constant') return np.mean(np.equal(target, logits)) # Split data to training and validation sets train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = source_int_text[:batch_size] valid_target = target_int_text[:batch_size] (valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source, valid_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate( get_batches(train_source, train_target, batch_size, source_vocab_to_int['<PAD>'], target_vocab_to_int['<PAD>'])): _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate, target_sequence_length: targets_lengths, source_sequence_length: sources_lengths, keep_prob: keep_probability}) if batch_i % display_step == 0 and batch_i > 0: batch_train_logits = sess.run( inference_logits, {input_data: source_batch, source_sequence_length: sources_lengths, target_sequence_length: targets_lengths, keep_prob: 1.0}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_sources_batch, source_sequence_length: valid_sources_lengths, target_sequence_length: valid_targets_lengths, keep_prob: 1.0}) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits) print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation 
Accuracy: {:>6.4f}, Loss: {:>6.4f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') ``` ### Save Parameters Save the `batch_size` and `save_path` parameters for inference. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params(save_path) ``` # Checkpoint ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess() load_path = helper.load_params() ``` ## Sentence to Sequence To feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences. - Convert the sentence to lowercase - Convert words into ids using `vocab_to_int` - Convert words not in the vocabulary, to the `<UNK>` word id. ``` def sentence_to_seq(sentence, vocab_to_int): """ Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids """ # TODO: Implement Function return None """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_sentence_to_seq(sentence_to_seq) ``` ## Translate This will translate `translate_sentence` from English to French. ``` translate_sentence = 'he saw a old yellow truck .' 
""" DON'T MODIFY ANYTHING IN THIS CELL """ translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('predictions:0') target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0') source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size, target_sequence_length: [len(translate_sentence)*2]*batch_size, source_sequence_length: [len(translate_sentence)]*batch_size, keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in translate_logits])) print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits]))) ``` ## Imperfect Translation You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data. You can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). This dataset has more vocabulary and richer in topics discussed. However, this will take you days to train, so make sure you've a GPU and the neural network is performing well on dataset we provided. 
Just make sure you play with the WMT10 corpus after you've submitted this project. ## Submitting This Project When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
github_jupyter
## Quality control/Exploratory data analysis Notebook By: Megan Grout (groutm2020@alumni.ohsu.edu) Adapted from code written by Dr. Marilyne Labrie and Nick Kendsersky Last updated: 20200527 Import external libraries. ``` import os import random import re import subprocess import pandas as pd import numpy as np import seaborn as sb import matplotlib.pyplot as plt import matplotlib.colors as mplc from scipy import signal import plotly.figure_factory as ff import plotly import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot import plotly.express as px init_notebook_mode(connected = True) ``` Import function written for this project. ``` from cycif_modules import * ``` Define function to change header names. Not encapsutated in `cycif_modules`, so that user can change on the fly as necessary. ``` # This may change for each experiment, so I have not sequestered # this code in the cycif_modules.py file # This function takes in a dataframe, changes the names # of the column in various ways, and returns the dataframe. # For best accuracy and generalizability, the code uses # regular expressions (regex) to find strings for replacement. 
def apply_header_changes(df):
    """
    Normalize column names of a segmentation dataframe, in place on df.columns.

    - strips a lowercase 'x' from the start of a name
    - strips a single leading space
    - replaces every remaining space with an underscore

    :param df: pandas DataFrame whose columns should be normalized
    :return: the same DataFrame with renamed columns

    Note: pandas changed the default of Series.str.replace to regex=False in
    pandas 2.0, so the anchored patterns below must pass regex=True explicitly
    or they silently stop matching.
    """
    # remove lowercase x at beginning of name (anchored -> needs regex=True)
    df.columns = df.columns.str.replace("^x", "", regex=True)
    # remove space at beginning of name (anchored -> needs regex=True)
    df.columns = df.columns.str.replace("^ ", "", regex=True)
    # replace space with underscore (plain literal substitution)
    df.columns = df.columns.str.replace(" ", "_", regex=False)
    # fix typos
    #df.columns = df.columns.str.replace("typo","correct_name")
    return df
Input files will be derived from base # sample names, previous step substring, and filetype # extension ls_samples = ['TMA'] ``` List of columns that are not marker intensities. It is okay if any of these are not actually present in a given dataframe. ``` not_intensities = ['replicate_ID', 'cell_type', 'Nucleus_Roundness', 'Nucleus_Size', 'Cell_Size', 'Nuc_X', 'not','Nuc_X_Inv','Cell_ID','Nuc_Y_Inv','ROI_slide','ROI_index','Nuc_Y', 'cluster'] ``` ## Import segmentation files First, ascertain header of first sample's input file. This information will be used as a template against which all other input data files' headers will be tested. ``` # Read in the first row of the file correpsonding to the first sample (index = 0) # in ls_samples # We do not need to specify a directory, since we earlier changed # the current working directory to be that containing these files filename = ls_samples[0] + previous_step_suffix_long + ".csv" # Read in only the first line df = pd.read_csv(filename, index_col = 0, nrows = 1) # Verify that the ID column in input file became the index # For segmentation files, we need the first column to be the # cell index. In later steps, the cell index will actually not # be a proper dataframe data column, but the index of the saved # dataframe from the previous step. if df.index.name != "ID": print("Expected the first column in input file (index_col = 0) " "to be 'ID'. This column will be used to set the index names" "(cell number for each sample). It appears that the column '" + df.index.name + "' was actually the imported as the index " "column.") # Apply the changes to the headers as specified in above funciton df = apply_header_changes(df) # Set variable to hold default header values expected_headers = df.columns.values ``` For this entry point into the workflow, we expect the first column to be the ID index. ``` df.index.name ``` FYI - What are the headers in our dataframe? 
``` print("Used " + ls_samples[0] + ".csv to determine the expected, corrected headers for all files.") print("There headers are: \n" + ", ".join([h for h in expected_headers]) + ".") ``` #### Import segmentation files for analysis ``` # Set dictionary to hold all individual sample data dfs = {} # iterate through each sample in our list of samples for sample in ls_samples: # open the file # set the index to be the first (0-based indexing, so 0th) # column in input file. df = pd.read_csv('{}.csv'.format(sample), index_col = 0)#, #nrows = 500) # use nrows = # to specify number of input rows if you want # Check for empty df # if so, don't continue trying to process df if df.shape[0] == 0: print('Zero content lines detected in ' + sample + ' file.' 'Removing from analysis...') # Remove from list, so further steps won't be looking # for data on this sample. # Note that for lists, we do not need to re-assign # the list when removing an item, i.e., we do not say # 'ls_samples = ls_samples.remove(sample)', since this # operation does not return anything. ls_samples.remove(sample) continue # Verify that the loaded df are the right length # commenting out because this code did not work on all # machines during testing (failed one PC, succeeded with # one PC and one MacBook) try: verify_line_no(sample + ".csv", df.shape[0] + 1) except: pass # adding 1 because we expect the header was detected # during file import and not counted towards length of df # Manipulations necessary for concatenation df = apply_header_changes(df) # sort them alphanetically df = df[[x for x in sorted(df.columns.values)]] # Compare headers of new df against what is expected compare_headers(expected_headers, df.columns.values, sample) # Add Sample_ID column and set it equal to sample name for sample df['Sample_ID'] = sample # For cases where we have samples called TMA1.1, TMA1.2, TMA1.3, etc. 
# Using regular expressions (regex) to extract the characters in the # sample name from TMA to the following digits, stopping at the period #if 'ROI_index' in df.columns.values: # df['ROI_slide'] = re.findall(r'(TMA\d+)',sample)[0] # Add to dictionary of dfs dfs[sample] = df #Merge dfs into one big df df = pd.concat(dfs.values(), ignore_index=False , sort = False) # remove dfs from memory, since its big (relatively) and we # don't need a data struture of all samples' data separated # individually when we can extract information from the big # df using the Sample_ID column del dfs # set index to Sample_ID + cell number df = df.copy().reset_index(drop=True) index = [] # Iterate through each sample, and extract from the big # df just the rows corresponding to that sample. Then, # reassign the cell index based off of the Sample_ID value # and the row number within that chunk. Save that information # in a list of indices for sample in ls_samples: df_chunk = df.loc[df['Sample_ID'] == sample,:].copy() old_index = df_chunk.index df_chunk = df_chunk.reset_index(drop=True) df_chunk = df_chunk.set_index(f'{sample}_Cell_' + df_chunk.index.astype(str)) index = index + df_chunk.index.values.tolist() # Use our list of indices to reassign the big df index df.index = index # Remove the 'level_0' and 'index' columns that resulted # from the above steps. This is not removing the actual index # of the df, just a data column CALLED index. df = df.loc[:,~df.columns.isin(['level_0','index'])] ``` Let's take a look at a few features to make sure our dataframe is as expected. We want to make sure the data import and aggregation steps worked well. ``` df.index df.shape ``` Check for NaN entries (should not be any unless columns do not align), which can result from stitching together dfs with different values in their headers. 
``` # if there are any null values, then print names of columns containing # null values if df.isnull().any().any(): print(df.columns[df.isnull().any()]) #in 'if' statement, false means no NaN entries True means NaN entries ``` Check that all expected files were imported into final dataframe by comparing our sample names to the unique values in the Sample_ID column. ``` if sorted(df.Sample_ID.unique()) == sorted(ls_samples): print("All expected filenames present in big df Sample_ID column.") else: compare_headers(['no samples'], df.Sample_ID.unique(), "big df Sample_ID column") ``` List of header values that are not intensities. Can include items that aren't in a given header. Need to save `not_intensities` list for future reference. ``` fn = os.path.join(metadata_dir,"not_intensities.csv") # If this file already exists, add only not_intensities items not already present in file if os.path.exists(fn): print("'not_intensities.csv' already exists.") print("Reconciling file and Jupyter notebook lists.") # Open file as read-only, extract data fh = open(fn, "r") file_ni = fh.read().splitlines() # Set difference to identify items not already in file to_add = set(not_intensities) - set(file_ni) # We want not_intensities to the a complete list not_intensities = list(set(file_ni) | set(not_intensities)) fh.close() # Open file for appending, writing new items fh = open(fn, "a") for item in to_add: fh.write(item +"\n") fh.close() # The file does not yet exist else: print("Could not find " + fn + ". Creating now.") # Open file for writing (will over-write exisiting file), # write all items fh = open(fn, "w") for item in not_intensities: fh.write(item + "\n") fh.close() ``` ### Drop unwanted columns Here, we are dropping a number of columns that we are totally uninterested in. For example, in the current workflow of QI Tissue, we can either export all columns (all markers in all cell components--cell, nucleus, cytoplasm) or individually check each and every one we want. 
It is faster and easier for the user, and maybe less error-prone, to export all columns and then drop those we are unintersted in here. Not every marker is expected to express in every location; this is why we might drop certain columns. Likewise, we may only be intersted in Average intensity in some features and Maximum intensity in others. ``` # For development purposes, we kept all marker columns in the Cell and that were Intensity Averages. # So the columns we want to keep: # not_intensities, and any intensity column that contains 'Intensity_Average' # We will be listing those columns we want to keep. Alternatively, you could name the columns you want to drop, # or a mixture of both tactics. # To get the 'Intensity_Average' columns, we use list comprehension: # first get a list of all df columns not in 'not_intensities', aka, # those that ARE intensities, 'x for x in df....' # Then, we only include them if they contain 'Intensity_Average', # "...if 'Intensity_Average' in x" ## Explain how to add more, beyond Cell_Intensity_Average, etc. to_keep = not_intensities \ + [x for x in df.columns.values[~df.columns.isin(not_intensities)] if 'Cell_Intensity_Average' in x] # If there are more columns we want to keep, we could include them by # adding them to our 'to_keep' list # to_keep.append(another_column) # NOTE - do NOT reassign this to to_keep (to_keep = to_keep.append(item)), # since the return value is None, for some reason. So you would be saying: # to_keep = to_keep.append(item) # to_keep = None # to_keep --> would display 'None' # to _keep = to_keep + [list, of, columns] # here, you DO ressign (list = list + other_list) # In order to extract only the columns we want from our big df, # we need to only ask for those that are IN the df. # Our to_keep list contains items that might not be in our df headers! # These items are from our not_intensities list. 
So let's ask for only those items # from to_keep that are actually found in our df df = df[[x for x in to_keep if x in df.columns.values]] # What if we want to drop certain markers by name? # Drop specific markers #df = df.drop(columns = []) ``` Let's take a look at column names to make sure they are as expected. ``` df.columns.values ``` ### Nucleus size analysis #### Distribution plots ``` # Plot only cells where nucleus_size is [0, 500] make_distr_plot_per_sample( title = "Initial dataframe nucleus sizes - 500 cutoff", location = output_images_dir, dfs = [df], df_names = ["Initial dataframe"], colors = ["blue"], x_label = "Nucleus Size", legend = False, xlims = [0,500], markers = ['Nucleus_Size']) # Plot only cells where nucleus_size is [0, 100] make_distr_plot_per_sample(title = "Initial dataframe nucleus sizes to 100", location = output_images_dir, dfs = [df], df_names = ["Initial dataframe"], colors = ["blue"], x_label = "Nucleus Size", legend = False, xlims = [0,100], markers = ['Nucleus_Size']) ``` #### Peak analysis Find valleys between peaks in nucleus size data - unfinished, but left here in case it aids future development. ``` # Unfinished, but could consider using the following function m = signal.find_peaks(df["Nucleus_Size"], prominence = 10, threshold = 20) m[0].shape ``` #### Quantiles Get quantiles (5th, 50th, 95th) ``` qs = [0.05,0.50,0.95] # list of nucleus size percentiles to extract # Extract quantiles nuc_sizes = pd.DataFrame(df["Nucleus_Size"].quantile(q=qs)) nuc_sizes['quantiles'] = nuc_sizes.index nuc_sizes = nuc_sizes.reset_index().drop(columns = ['index']) # Display df nuc_sizes ## Save these data to file filename = "nuc_quantile_sizes.csv" filename = os.path.join(output_data_dir,filename) nuc_sizes.to_csv(filename, index = False) ``` #### Nucleus size and other feature scatter plot Scatter plot – to be most informative, ideally this would be cell size vs nucleus size, where color = nucleus roundness. 
Not all data used to develop workflow had all necessary features, so the actual data plotted below may not be terribly useful. ``` # Set string variables title = "Nucleus size by cell size for initial dataframe" x_label = "Cell Size" y_label = "Nucleus Size" # cell size - weewin data only has Nuc size! # Create figure fig = px.scatter(df, x="Cell_Size", y="Nucleus_Size", color='Nucleus_Roundness') # Update layout for the aesthetic parameters we want fig.update_layout(title_text=title, font=dict(size=18), plot_bgcolor = 'white', showlegend = True ) # Adjust opacity fig.update_traces(opacity=0.6) # Adjust x-axis parameters fig.update_xaxes(title_text = x_label, showline=True, linewidth=2, linecolor='black', tickfont=dict(size=18)) # Adjust y-axis parameters fig.update_yaxes(title_text = y_label, showline=True, linewidth=2, linecolor='black', tickfont=dict(size=18)) # Display plot #plot(fig) filename = os.path.join(output_images_dir, title.replace(" ","_") + ".png") fig.write_image(filename) ``` ### Delete columns as necessary Move forward with only the columns of interest ``` # Remove columns containing "DAPI" # use list comprehension to extract only column headers # that do not contain the string "DAPI" df = df[[x for x in df.columns.values if 'DAPI' not in x]] print("Columns are now...") print([c for c in df.columns.values]) ``` ### Create lists of full names and shortened names to use in plotting We want a list of shortened marker intensity column header values for use in plotting. For example 'pATR_Cell_Intensity_Average' would display as 'pATR' for readability. In the case of more than one column present for a given marker, e.g., the inclusion of 'pATR_Nucleus_Cell_Intensity_Average', the pltoted labels would be 'pATR_Cell' and 'pATR_Cell'. We want to create dictionaries of both full to short names and short to full names. 
``` full_to_short_names, short_to_full_names = \ shorten_feature_names(df.columns.values[~df.columns.isin(not_intensities)]) ``` Save this data to a metadata file. These devices will be used throughout the workflow. ``` filename = os.path.join(metadata_dir, "full_to_short_column_names.csv") fh = open(filename, "w") fh.write("full_name,short_name\n") for k,v in full_to_short_names.items(): fh.write(k + "," + v + "\n") fh.close() filename = os.path.join(metadata_dir, "short_to_full_column_names.csv") fh = open(filename, "w") fh.write("short_name,full_name\n") for k,v in short_to_full_names.items(): fh.write(k + "," + v + "\n") fh.close() ## Print contents to screen if the user wants #for key, value in full_to_short_names.items(): # print(key + ": " + value) ``` ### Import exposure time metadata Here, we want to end up with a data structure that incorporates metadata on each intensity marker column used in our big dataframe in an easy-to-use format. This is going to include the full name of the intensity marker columns in the big data frame, the corresponding round and channel, the target protein (e.g., CD45), and the segmentation localization information (cell, cytoplasm, nucleus)... We can use this data structure to assign unique colors to all channels and rounds, for example, for use in later visualizations. Here, we expect this exposure time metadata file to have four columns (more are accepted). These are as follows: - Round: The round in which the marker was assess. Should be in form 'r#' - Target: The target/marker used. This should be a string whose contents match in the imported segmentation data files. The capitalization does not need to be consistent. These values should be unique in this file, without duplicates. - Exp: The exposre time for this marker for this channel, in milliseconds. Not currently used in workflow. - Channel: THe channel in which the marker was assessed. Should be in form 'c#'. 
``` filename = "Exposure_Time.csv" #filename = "Exposure_Time_full.csv" filename = os.path.join(metadata_dir, filename) exp_df = pd.read_csv(filename) # Verify file imported correctly # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, exp_df.shape[0] + 1) print("Ran file size verification.") except: pass # Headers print("Assessing whether column headers are as expected.") expected_headers =['Round','Target','Exp','Channel'] compare_headers(expected_headers, exp_df.columns.values, "Imported metadata file") # Missingness if exp_df.isnull().any().any(): print("\nexp_df has null value(s) in row(s):") print(exp_df[exp_df.isna().any(axis=1)]) else: print("No null values detected.") ``` Check to make sure that there are not duplicate values in the Target column. ``` if len(exp_df['Target']) > len(exp_df['Target'].unique()): print("One or more non-unique Target values in exp_df. Currently not supported.") exp_df.sort_values(by = ['Target']).head() # Create lowercase version of target exp_df['target_lower'] = exp_df['Target'].str.lower() exp_df.head() ``` Create dataframe that contains marker intensity columns in our df that aren't in `not_intensities` ``` intensities = pd.DataFrame({'full_column':df.columns.values[~df.columns.isin(not_intensities)]}) intensities.head() ``` Extract the marker information from the `full_column`, which corresponds to full column in big dataframe. 
``` # Use regular expressions (regex) to isolate the part of the field that # begins (^) with an alphanumeric value (W), and ends with an underscore (_) # '$' is end of line intensities['marker'] = intensities['full_column'].str.extract(r'([^\W_]+)') # convert to lowercase intensities['marker_lower'] = intensities['marker'].str.lower() # Subset the intensities df to exclude any column pertaining to DAPI intensities = intensities.loc[intensities['marker_lower'] != 'dapi'] ``` Now merge the `intensities` and `exp_df` together to create `metadata` ``` metadata = pd.merge(exp_df, intensities, how = 'left', left_on = 'target_lower',right_on = 'marker_lower') metadata = metadata.drop(columns = ['marker_lower']) # Target is the capitalization from the Exposure_Time.csv # target_lower is Target in all caps # marker is the extracted first component of the full column in segmentation data, with corresponding capitalization ``` Add a column to signify marker target location. ``` # Use a lambda to determine segmented location of intensity marker column and update metadata accordingly # This function determines what the location of the marker is in the cell # It looks for 'cytoplasm', 'cell',' or 'nucleus' string inside the # 'full_column' column of a given row, and returns the identifyied # area of 'unknown' if none of them def add_metadata_location(row): fc = row['full_column'].lower() if 'cytoplasm' in fc and 'cell' not in fc and 'nucleus' not in fc: return 'cytoplasm' elif 'cell' in fc and 'cytoplasm' not in fc and 'nucleus' not in fc: return 'cell' elif 'nucleus' in fc and 'cell' not in fc and 'cytoplasm' not in fc: return 'nulceus' else: return 'unknown' # apply the function metadata['location'] = metadata.apply( lambda row: add_metadata_location(row), axis = 1) ``` A peek at our `metadata` dataframe: ``` metadata.head() ``` Save this data structure to the metadata folder. 
``` # don't want to add color in because that's better off treating color the same for round, channel, and sample filename = "marker_intensity_metadata.csv" filename = os.path.join(metadata_dir, filename) metadata.to_csv(filename, index = False) ``` ### Import sample metadata if applicable ``` filename = "ROI_Map.csv" filename = os.path.join(metadata_dir, filename) sample_metadata = pd.read_csv(filename) # Verify file imported correctly # Verify size # This part is wrapped in a try/except block because # it wasn't working on the PC workstation, but worked # on MG's personal PC laptop and department loaner MacBook try: verify_line_no(filename, sample_metadata.shape[0] + 1) print("Ran file length verification.") except: pass # Headers print("Assessing whether column headers are as expected.") expected_headers =['Sample_ID', 'ROI_slide','ROI_index', 'TMA_Core', 'TMA_row', 'TMA_column', 'tissue_long', 'tissue_short', 'Replicate', 'Type'] compare_headers(expected_headers, sample_metadata.columns.values, "Imported metadata file") # Missingness if exp_df.isnull().any().any(): print("\nexp_df has null value(s) in row(s):") print(sample_metadta[sample_metadata.isna().any(axis=1)]) else: print("No null values detected.") ``` In this case, `sample_metadata` does not need to be merged with any other df and then saved again ## Establish colors to use throughout workflow #### Channel colors Channel colors - want colors that are categorical, since Channel is a non-ordered category (yes, they are numbered, but arbitrarily). A categorical color palette will have dissimilar colors. However, it we will typically use a prescribed set of channel colors that are consistent throughout experiments: c2 = green, c3 = orange, c4 = red, c5 = turquoise. The more automated channel color generation will be left below for reference. ``` # Get those unique colors if len(metadata.Channel.unique()) > 10: print("WARNING: There are more unique channel values than \ there are colors to choose from. 
Select different palette, e.g., \ continuous palette 'husl'.") channel_color_values = sb.color_palette("colorblind",n_colors = len(metadata.Channel.unique()))#'HLS' # chose 'colorblind' because it is categorical and we're unlikely to have > 10 print("Unique channels are:", metadata.Channel.unique()) # Display those unique colors sb.palplot(sb.color_palette(channel_color_values)) ``` Store in a dictionary ``` channel_color_dict = dict(zip(metadata.Channel.unique(), channel_color_values)) channel_color_dict ``` Let's choose our channel colors instead. We can use the function `matplotlib.colors.to_rbg(c)`, where `c` is a word color name, to convert to the (r, g, b) tuple needed for the workflow. At the top of the script, we imported `matplotlib.colors` as `mplc`, so we can save time and type out simply `mplc.to_rgb(c)` shorthand when using this function. Note that if you use any of the xkcd color survey colors (https://xkcd.com/color/rgb/), you will need to call these specify these as 'xkcd:colorname'. I will demonstrate a couple of different ways of doing changing the colors we generated above, so the user can expand on the examples as necessary. We are holding all of our color information in several instances of a data structure called a dictionary. https://docs.python.org/3/library/stdtypes.html#typesmapping Dictionaries are a way to store an unordered collection of items where each is composed of a key-value mapped pair. In the case of this workflow, each color dictionary has a string identifying the specific thing to be colored, e.g., 'c2', 'TMA', 'cluster1', or 'r5', and the corresponding value is a three-float tuple (r, g, b) that is the color of that thing. With dictionaries, we can remove an key-value pair, add a new key-value pair, or overwrite an existing key-value pair whenever we want. Keys can be many things, but often you will see them as a string. Values can be strings, lists, other dictionaries (as seen below for the heatmaps), etc. 
Nested dictionaries can be complicated to intuit, but they can be a good way to associate a bunch of information together easily, coding-wise. Keys are not ordered within a dictionary. ``` # get a new color for a channel, overwrite/replace the original channel color in the dictionary c2_color = "green" c2_color = mplc.to_rgb("green") print("Our new color in rbg form is " + str(c2_color) + ".") print("Before replacement, c2 in the dictionary is: " + str(channel_color_dict['c2'])) # Replace value channel_color_dict['c2'] = c2_color print("After replacement, c2 in the dictionary is: " + str(channel_color_dict['c2'])) # Here is how you delete an item from a dictionary print("Keys in the channel color dictionary are: " + str(channel_color_dict.keys())) # If we try to remove an existing key, we will get an error if 'c2' in channel_color_dict.keys(): print("'c2' is in the dictionary. Removing now.") channel_color_dict.pop('c2') print("Keys in the channel color dictionary are: " + str(channel_color_dict.keys())) ## Add in a new item print("Keys in the channel color dictionary are: " + str(channel_color_dict.keys())) print("Adding in 'c2'...") channel_color_dict['c2'] = c2_color print("Keys in the channel color dictionary are: " + str(channel_color_dict.keys())) ## Let's finish the dictionary now channel_color_dict['c2'] = mplc.to_rgb('green') channel_color_dict['c3'] = mplc.to_rgb('orange') channel_color_dict['c4'] = mplc.to_rgb('red') channel_color_dict['c5'] = mplc.to_rgb('turquoise') ## And display the colors so we can see them # Instead of querying the dictionary to get each of our colors, THEN putting those colors in a list, # THEN feeding that list into the palplot/color_palette code as above, I will condense these steps # together. Here we are accessing each (r,g,b) color value in the dictionary using the key. 
print(['c2','c3','c4','c5']) sb.palplot(sb.color_palette( [channel_color_dict['c2'],channel_color_dict['c3'],channel_color_dict['c4'],channel_color_dict['c5']])) ``` #### Round colors Round colors - want colors that are sequential, since Round is an ordered category. We can still generate colors that are easy to distinguish. Also, many of the categorical palettes cap at at about 10 or so unique colors, and repeat from there. We do not want any repeats! ``` round_color_values = sb.cubehelix_palette( len(metadata.Round.unique()), start=1, rot= -0.75, dark=0.19, light=.85, reverse=True) #round_color_values = sb.color_palette("cubehelix",n_colors = len(metadata.Round.unique())) # chose 'cubehelix' because it is sequential, and round is a continuous process # each color value is a tuple of three values: (R, G, B) print(metadata.Round.unique()) sb.palplot(sb.color_palette(round_color_values)) ## TO-DO: write what these parameters mean ``` Store in a dictionary ``` round_color_dict = dict(zip(metadata.Round.unique(), round_color_values)) for k,v in round_color_dict.items(): round_color_dict[k] = np.float64(v) ``` #### Sample colors Sample colors - want colors that are neither sequential nor categorical. Categorical would be ideal if we could generate an arbitrary number of colors, but I do not think that we can. Hense, we will choose `n` colors from a continuous palette. First we will generate the right number of colors. Later, we will assign TMA samples to gray. ``` # Get those unique colors color_values = sb.color_palette("husl",n_colors = len(ls_samples))#'HLS' # each color value is a tuple of three values: (R, G, B) # Display those unique colors sb.palplot(sb.color_palette(color_values)) ``` Generate enough gray shades for all TMA samples in dataset. 
``` # Get list of all TMA samples # by looking for substring 'TMA' in all unique Sample_ID values TMA_samples = [s for s in df.Sample_ID.unique() if 'TMA' in s] # Now make a list of unique gray shades, # whose length equals the length of the list above TMA_color_values = sb.color_palette(n_colors = len(TMA_samples),palette = "gray") # Show the gray color(s) to the user sb.palplot(sb.color_palette(TMA_color_values)) ``` #### Store in a dictionary ``` # Now we will create a dictionary to hold this information # Here we are mapping the unique Sample_ID values in df # (note that sorted() ensures they are in alphabetical # order) with the color_values list we derived above. # This list does NOT have our TMA gray(s) in it. # After we associate the two groups of items together # with zip, we turn it into a dictonary: key = Sample_ID, # value = color for that Sample_ID sample_color_dict = dict(zip( sorted(df.Sample_ID.unique()), color_values )) # Edit our dictioanry # Replace all TMA samples' colors with gray by # iterating through all keys in sorted order # and replacing the color with a gray one. We are # moving through our list of gray colors using our # index 'i', so that each TMA gets a different gray. i = 0 for key in sorted(sample_color_dict.keys()): if 'TMA' in key: sample_color_dict[key] = TMA_color_values[i] i +=1 sample_color_dict ``` Look at the (r,g,b) values of the colors above. Any TMA sample should have r ~= g ~= b. Display the colors: ``` print("Our samples and corresponding colors are:") print([key for key in sorted(sample_color_dict.keys())]) sb.palplot(sb.color_palette([sample_color_dict[key] for key in sorted(sample_color_dict.keys())])) ``` ### Save color information (mapping and legend) to metadata directory ``` # let's look at the metadata again... 
metadata.head() ``` Add in the color information in both RGB (range 0-1) and hex values, for use in visualizations ``` metadata['round_color'] = metadata.apply(lambda row: round_color_dict[row['Round']], axis = 1) metadata['channel_color'] = metadata.apply(lambda row: channel_color_dict[row['Channel']], axis = 1) # This function takes in a dictionary cd, a column_name string # and returs a dataframe. This df has the information that was # in the dictionary--'rgb' is the (fl, fl, fl) tuple corresponding # to the color names given as the cd keys, an 'hex' is the corresponding # hexademical value. def color_dict_to_df(cd, column_name): df = pd.DataFrame.from_dict(cd, orient = 'index') df['rgb'] = df.apply(lambda row: (np.float64(row[0]), np.float(row[1]), np.float64(row[2])), axis = 1) df = df.drop(columns = [0,1,2]) df['hex'] = df.apply(lambda row: mplc.to_hex(row['rgb']), axis = 1) df[column_name] = df.index return df ``` Sample ``` # Create dataframe color_df = color_dict_to_df(sample_color_dict, "Sample_ID") color_df.head() # Save to file in metadatadirectory filename = "sample_color_data.csv" filename = os.path.join(metadata_dir, filename) color_df.to_csv(filename, index = False) # Legend of sample info only g = plt.figure(figsize = (1,1)).add_subplot(111) g.axis('off') handles = [] # To change the order of items on the legend, do # for item in [item1, item2, item3]: for item in sorted(sample_color_dict.keys()): h = g.bar(0,0, color = sample_color_dict[item], label = item, linewidth =0) handles.append(h) first_legend = plt.legend(handles=handles, loc='upper right', title = 'Sample'), # bbox_to_anchor=(10,10), # bbox_transform=plt.gcf().transFigure) # Save the legend to a file filename = "Sample_legend.png" filename = os.path.join(metadata_images_dir, filename) plt.savefig(filename, bbox_inches = 'tight') ``` Channel ``` # Create dataframe color_df = color_dict_to_df(channel_color_dict, "Channel") color_df.head() # Save to file in metadatadirectory filename = 
"channel_color_data.csv" filename = os.path.join(metadata_dir, filename) color_df.to_csv(filename, index = False) # Legend of channel info only g = plt.figure(figsize = (1,1)).add_subplot(111) g.axis('off') handles = [] # To change the order of items on the legend, do # for item in [item1, item2, item3]: for item in sorted(channel_color_dict.keys()): h = g.bar(0,0, color = channel_color_dict[item], label = item, linewidth =0) handles.append(h) first_legend = plt.legend(handles=handles, loc='upper right', title = 'Channel'), # bbox_to_anchor=(10,10), # bbox_transform=plt.gcf().transFigure) # Save the legend to a file filename = "Channel_legend.png" filename = os.path.join(metadata_images_dir, filename) plt.savefig(filename, bbox_inches = 'tight') ``` Round ``` # Create dataframe color_df = color_dict_to_df(round_color_dict, "Round") color_df.head() # Save to file in metadatadirectory filename = "round_color_data.csv" filename = os.path.join(metadata_dir, filename) color_df.to_csv(filename, index = False) # Legend of round info only round_legend = plt.figure(figsize = (1,1)).add_subplot(111) round_legend.axis('off') handles = [] # To change the order of items on the legend, do # for item in [item1, item2, item3]: for item in round_color_dict.keys(): h = round_legend.bar(0,0, color = round_color_dict[item], label = item, linewidth =0) handles.append(h) first_legend = plt.legend(handles=handles, loc='upper right', title = 'Round'), # bbox_to_anchor=(10,10), # bbox_transform=plt.gcf().transFigure) # Save the legend to a file filename = "Round_legend.png" filename = os.path.join(metadata_images_dir, filename) plt.savefig(filename, bbox_inches = 'tight') ``` ## EDA scatterplot Scatterplot of nucleus size by nucleus roundness, colored by sample This was not working on my computer, probably due to the size of the data. Let's run this chunk using just a subset of the data. 
Here, we will want the subset to maintain the same proportion of cells for each Sample_ID as we had in the original dataframe. ``` subset_row_count = 10000 subset_df = create_subset(df, 'Sample_ID', subset_row_count, 'original') ``` How many lines for each sample ID are in our subset df? ``` subset_df['Sample_ID'].value_counts().sort_index() ``` How do the proportions of cells in the original and subset dfs compare? ``` df['Sample_ID'].value_counts().sort_index()/df.shape[0] subset_df['Sample_ID'].value_counts().sort_index()/subset_df.shape[0] ``` Perform the plotting. ``` #By sample ID only # initiate figure fig = go.Figure() title = 'Nucleus size by nucleus roundess by Sample ID' # plot each trace separately for sample in ls_samples: fig.add_trace(go.Scatter( x = subset_df.loc[subset_df['Sample_ID']==sample,'Nucleus_Roundness'], y = subset_df.loc[subset_df['Sample_ID']==sample,'Nucleus_Size'], mode = 'markers', name = sample, marker=dict( color='rgb' + str(sample_color_dict[sample])), showlegend = True )) # Update figure for aesthetic details fig.update_layout(title = title, plot_bgcolor = 'white') fig.update_xaxes(title_text = "Nucleus roundness", linecolor = 'black') fig.update_yaxes(title_text = "Nucleus size", linecolor = 'black') # Output #plot(fig) # plot generates in new Chrome tab # Write to file filename = os.path.join(output_images_dir, title.replace(" ","_") + ".png") fig.write_image(filename) ``` ## Initial heatmap We will only be plotting ~10k cells in the interest of time/computing resources. We want these 10k lines in our original df to be sampled randomly, without replacement, with the caveat that the proportions of all samples in the data are equal to each other (unless a particular sample does not have enough corresponding lines for the desired final df size). If the size of the dataframe is > 10k rows, then we will proceed with the entire dataset. 
``` subset_row_count = 10000 subset_df = create_subset(df, 'Sample_ID', subset_row_count, 'equal') ``` How many lines for each sample ID are in our subset df? ``` subset_df['Sample_ID'].value_counts().sort_index() ``` How do the proportions of cells in the original and subset dfs compare? ``` df['Sample_ID'].value_counts().sort_index()/df.shape[0] subset_df['Sample_ID'].value_counts().sort_index()/subset_df.shape[0] ``` ### Get data structures to map colors to columns and rows... ## Row colors For the row colors, we essentially just need to map the information in a given feature to the colors that correspond to that value in the right color dictionary. For example, it might be sample_3, sample_3, sample_4, , so we need the row colors to be (1, 1, 1), (1, 1, 1), (0, 0.25, 0.6). These are the initialy colors--if we are clustering rows or columns, the labels will still match the data with which they're associated. ``` row_sample_colors = subset_df.Sample_ID.map(sample_color_dict) row_sample_colors[1:5] ``` ## Column rows For column rows, matching up the information in each column with the appropriate color is more difficult. ``` # Here, we want to translate marker columns to their corresponding channel information, # and then match that up with the right color, as with row columns # First, we merge the (L) non-intensity column values, transformed into a dataframe, # with the metadata df (R), matching on the "0" column present in the L, # which is the only column in there, with the "full_column" (aka df header name) # column in the R, only including all cases where there is a match and any unmatched # L cases ('both' [?] would be only cases where ther is is a match, and 'right' would # be cases with a match and any unmatched R columns). 
column_channel_colors = pd.merge(pd.DataFrame(pd.Series( subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), metadata, how = 'left', left_on = 0, right_on = 'full_column' # From that resulting df, extract the '0' and 'Channel' objects, # then only 'Channel', then map to the right colors )[[0,'Channel']]['Channel'].map(channel_color_dict) # Set the index to be the names of the colors. There is only one column, and that is the corresponding # colors column_channel_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values column_channel_colors.head() # Here, we want to translate marker columns to their corresponding round information, # and then match that up with the right color, as with row columns # First, we merge the (L) non-intensity column values, transformed into a dataframe, # with the metadata df (R), matching on the "0" column present in the L, # which is the only column in there, with the "full_column" (aka df header name) # column in the R, only including all cases where there is a match and any unmatched # L cases ('both' [?] would be only cases where ther is is a match, and 'right' would # be cases with a match and any unmatched R columns). column_round_colors = pd.merge(pd.DataFrame(pd.Series( subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values)), metadata, how = 'left', left_on = 0, right_on = 'full_column' # From that resulting df, extract the '0' and 'Channel' objects, # then only 'Channel', then map to the right colors )[[0,'Round']]['Round'].map(round_color_dict) # Set the index to be the names of the colors. 
There is only one column, and that is the corresponding # colors column_round_colors.index = subset_df.loc[:,~subset_df.columns.isin(not_intensities)].columns.values column_round_colors.head() ``` ### Annotations data structure ``` # Create data structure to hold everything we need for row/column annotations # annotations is a dictionary ## IMPORTANT - if you use 'annotations', it MUST have both 'rows' and 'cols' ## objects inside. These can be empty lists, but they must be there! annotations = {} # create a data structure to hold everything we need for only row annotations # row_annotations is a list, where each item therein is a dictioary corresponding # to all of the data pertaining to that particular annotation # Adding each item (e.g., Sample, then Cluster), one at a time to ensure ordering # is as anticipated on figure row_annotations = [] row_annotations.append({'label':'Sample','type':'row','mapping':row_sample_colors,'dict':sample_color_dict, 'location':'center left','bbox_to_anchor':(0, 0.5)}) # Add all row information into the annotations dictionary annotations['rows'] = row_annotations # Now we repeat the process for column annotations col_annotations = [] col_annotations.append({'label':'Round','type':'column','mapping':column_round_colors,'dict':round_color_dict, 'location':'upper right','bbox_to_anchor':(1,0.50)}) col_annotations.append({'label':'Column','type':'column','mapping':column_channel_colors,'dict':channel_color_dict, 'location':'upper right','bbox_to_anchor':(1,0.75)}) annotations['cols'] = col_annotations ``` #### Actually plot the heatmap ``` heatmap_function( data = subset_df.loc[:,~subset_df.columns.isin(not_intensities)], title = "Initial dataframe", # define method, metric, and color map method = 'ward', metric = 'euclidean',cmap = 'coolwarm', # colorbar info (legend coloring of main plot) cbar_kws = {'label':'Intens.'}, # xticklabels - want to have the nicknames instead of full names, # so we translate from full to short names; we 
also only want to include # non_intensity columns, to match the data we fed into under 'data' xticklabels = [full_to_short_names[name] for name in subset_df.loc[:, ~subset_df.columns.isin(not_intensities)].columns.values], # where to save the df save_loc = output_images_dir, # how to cluster on rows and columns row_cluster = True, col_cluster = True, # provide the dictionary of row and column coloring information # and legend information, as established above. annotations = annotations ) ``` ### Bar plot of count of all cells in all samples - no filtering yet ``` # Get counts for each Sample_ID, sorted by Sample_ID counts = pd.DataFrame(df.Sample_ID.value_counts()).sort_index() # rename Sample_ID to counts counts = counts.rename(columns = {'Sample_ID':'counts'}) # add Sample_ID back in, as what's currently the index counts['Sample_ID'] = counts.index # add 'color', which is derived from the row's Sample_ID fed into the right # color dictionary counts['color'] = counts.apply(lambda row: sample_color_dict[row['Sample_ID']], axis = 1) counts.head() ls_samples # By sample ID only # establish figure fig = go.Figure() title = 'Initial Cell counts by Sample ID' # Changing the ordering of the bars is a easy as iterating through a list # with the samples in a different order! 
For example, this order below: #for sample in ['TMA', 'GZ7.2', 'GZ10.3', 'GZ7.1', 'GZ10.2', 'GZ10.1', 'GZ6']: for sample in ls_samples: # add trace for each sample fig.add_trace(go.Bar( x=counts.loc[counts['Sample_ID']==sample,'Sample_ID'], y = counts.loc[counts['Sample_ID']==sample,'counts'], text = counts.loc[counts['Sample_ID']==sample,'counts'], textposition='outside', marker=dict( color='rgb' + str(sample_color_dict[sample])), showlegend = False )) # update aesthetic parameters fig.update_layout(title = title, plot_bgcolor = 'white') fig.update_xaxes(title_text = "Sample ID", linecolor = 'black') fig.update_yaxes(title_text = "Cell count", linecolor = 'black') # Display plot #plot(fig) filename = os.path.join(output_images_dir, title.replace(" ","_") + ".png") fig.write_image(filename) ``` ## PCA This is how you might save data for the PCA, if you'd like to. ``` ## for PCA filename = "[filename]_PCA_test.csv" df.to_csv(filename, index = False) ``` ### Drop any other rows or columns we want to before saving data ``` # Let's take a look df.columns.values ``` For the sake of example, I will operate on a copy of df, called df_copy ``` # You MUST do df.copy() # 'df_copy = df' would essentially # give you two different names for the # SAME dataframe, so operating on one # would also operate on the other df_copy = df.copy() ``` #### Operate on entire rows or columns ``` # Drop columns my_cols = [] df_copy = df_copy.drop(columns = my_cols) # Keep only specific columns (explained below) my_cols = [] my_cols = df.columns.values df_copy = df_copy.loc[:,my_cols] ``` #### Operate on rows and columns using filtering criteria ``` # Keep only certain rows based off of criteria # use df.loc[] to filter # df.loc[rows,columns] # df.loc[:,certain_cols] --> keep all rows ':', only certain cols # df.loc[certain_rows,:] --> keep only certain row, all cols ':' # Say we only want certain values for Sample_ID print(df_copy.Sample_ID.unique()) #keep = 
['TMA1.1','TMA1.2','TMA1.3','TMA2.1','TMA2.2','TMA2.3'] keep = [] df_copy = df_copy.loc[df_copy['Sample_ID'].isin(keep),:] print(df_copy.Sample_ID.unique()) # Filter on multiple criteria # '&' or 'and' # '|' or 'or' # you MUST have parentheses around each logic expression! df_copy = df_copy.loc[ (df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \ ## backslash above used to break line for readability, but tell Python to act like it's all one line | (df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])) , :] print(df_copy.Sample_ID.unique()) # Remove rows based off of certain criteria # note the negating tilde '~'! df_copy = df_copy.loc[ (~df_copy['Sample_ID'].isin(['TMA1.1','TMA1.2','TMA1.3'])) \ ## backslash above used to break line for readability, but tell Python to act like it's all one line & (~df_copy['Sample_ID'].isin(['TMA2.1','TMA2.2','TMA2.3'])),:] print(df_copy.Sample_ID.unique()) ## include example for cell types: cancer, stroma, immune ``` ### Save the data by Sample_ID ``` # Check for existence of output file first for sample in ls_samples: filename = os.path.join(output_data_dir, sample + "_" + step_suffix + ".csv") if os.path.exists(filename): print("File by name "+filename+" already exists.") # Save output files for sample in ls_samples: df_save = df.loc[df['Sample_ID'] == sample,:] filename = os.path.join(output_data_dir, sample + "_" + step_suffix + ".csv") df_save.to_csv(filename, index = True) ```
github_jupyter
# KALEEM WAHEED 18L-1811 Project 2 ### Import Libraries ``` import os import cv2 import numpy as np import matplotlib.pyplot as plt import random import tensorflow as tf import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras import backend as K from keras.optimizers import SGD from keras import regularizers from keras.layers.normalization import BatchNormalization from keras import optimizers ``` # Enable Intellisense ``` %config IPCompleter.greedy=True ``` ## Global Variable ``` data = [] labels = [] im_width = 64 im_height = 64 num_classes = 7 ``` # PreProcessing Data ### Generate new images Handle Class Balance Issue #### Now each class have 1833 images #### Remove noise/Irrelevent Images ``` for i in range(7): path=os.getcwd()+"/Project2Data/"+str(i+1)+'/' print(path) progress = 0 image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))] for file_name in image_files: image_file = str(path + file_name) img = cv2.imread(image_file,cv2.IMREAD_GRAYSCALE) new_img = cv2.resize(img,(im_width,im_height)) data.append(new_img) progress = progress+1 # print(int(path[-2])) labels.append(int(path[-2])-1) if progress%100==0: print('Progress '+str(progress)+' Image done of Disease type:' + path[-2]) data = np.array(data) print(data.shape) labels = np.array(labels) labels.shape data = data.reshape((data.shape)[0],(data.shape)[1],(data.shape)[2],1) data.shape from keras.utils import np_utils labels.astype('uint8') labels = keras.utils.to_categorical(labels, num_classes) ``` # Shuffle Data ``` def shuffle(a, b): rng_state = np.random.get_state() np.random.shuffle(a) np.random.set_state(rng_state) np.random.shuffle(b) for i in range(10): shuffle(data,labels) ``` ### Building Convolutional Neural Network Model 1 #### batch size 100 , epoch 50 , Adam optimizer Default Learning Rate ``` model = Sequential() 
model.add(Conv2D(kernel_size=(3,3),filters=64,input_shape=(64,64,1),activation="relu",padding="valid")) model.add(Conv2D(kernel_size=(3,3),filters=64,activation="relu",padding="same")) model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(kernel_size=(3,3),filters=32,activation="relu",padding="same")) model.add(Conv2D(kernel_size=(2,2),filters=32,activation="relu",padding="same")) model.add(MaxPooling2D(pool_size=(3,3),strides=(2,2))) model.add(Conv2D(kernel_size=(2,2),strides=(2,2),filters=64)) model.add(Flatten()) model.add(Dropout(0.8)) model.add(Dense(128,activation="relu")) model.add(Dense(7,activation="softmax")) model.summary() model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy']) ``` # Fit model ``` history = model.fit(data, labels, batch_size=100, epochs=50, verbose=1, shuffle = True,validation_split=0.30) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() (loss, accuracy) = model.evaluate(data, labels, batch_size=batchsize, verbose=1) ``` ## Building Convolutional Neural Network Model Architecture 2 #### use loss = 'categorical_crossentropy' batch size 100 , epoch 60 ,Optimizer Adam Default Learning Rate ``` model = Sequential() model.add(Conv2D(kernel_size=(3,3),filters=64,input_shape=(64,64,1),activation="relu",padding="valid")) model.add(Conv2D(kernel_size=(3,3),filters=64,activation="relu",padding="same")) 
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2))) model.add(Conv2D(kernel_size=(3,3),filters=32,activation="relu",padding="same")) model.add(Conv2D(kernel_size=(2,2),filters=32,activation="relu",padding="same")) model.add(MaxPooling2D(pool_size=(3,3),strides=(2,2))) model.add(Conv2D(kernel_size=(2,2),strides=(2,2),filters=64)) model.add(Flatten()) model.add(Dropout(0.8)) model.add(Dense(128,activation="relu")) model.add(Dense(7,activation="softmax")) model.summary() model.compile(optimizer = 'Adam', loss = 'categorical_crossentropy', metrics = ['accuracy']) batchsize=100 epochs=60 history = model.fit(data, labels, batch_size=batchsize, epochs=epochs, verbose=1, shuffle = True,validation_split=0.30) (loss, accuracy) = model.evaluate(data, labels, batch_size=batchsize, verbose=1) print("accuracy: {:.2f}%".format(accuracy * 100)) #accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` # Transfer learning on ResNet ## Variable ``` data = [] labels = [] im_width = 64 im_height = 64 num_classes = 7 ``` # Use provided Data ### Remove irrelevent/Noise Data ``` for i in range(7): path=os.getcwd()+"/Project2DataClean/"+str(i+1)+'/' print(path) progress = 0 image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))] for file_name in image_files: image_file = str(path + file_name) img = cv2.imread(image_file) new_img = cv2.resize(img,(im_width,im_height)) data.append(new_img) progress = progress+1 # print(int(path[-2])) labels.append(int(path[-2])-1) if progress%100==0: print('Progress '+str(progress)+' Image done of Disease type:' + path[-2]) ``` # Use Channel 3 ``` data = np.array(data) labels = 
np.array(labels) print(len(data)) print(len(labels)) print(data.shape) print(labels.shape) data = data.astype('uint8') labels = keras.utils.to_categorical(labels, 7) print(labels.shape) import numpy as np import os import time #from resnet50 import ResNet50 from keras.preprocessing import image from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten #from imagenet_utils import preprocess_input from keras.layers import Input from keras.models import Model from keras.utils import np_utils from sklearn.utils import shuffle from sklearn.cross_validation import train_test_split #Shuffle the dataset x,y = shuffle(data,labels, random_state=2) X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2) ``` # Keras ResNet50 Model 1 With Transfer learning ### Input(shape=(64, 64, 3)),include_top=False,weights='imagenet' ### batch_size=40, epochs=15 Dropout 0.6 and 0.4 #### Don't want to train all model ##### Last 5 layer retrain ``` image_input = Input(shape=(64, 64, 3)) model = keras.applications.resnet50.ResNet50(input_tensor=image_input, include_top=False,weights='imagenet') last_layer = model.output # add a global spatial average pooling layer x = GlobalAveragePooling2D()(last_layer) # add fully-connected & dropout layers x = Dense(512, activation='relu',name='fc-1')(x) x = Dropout(0.4)(x) x = Dense(128, activation='relu',name='fc-2')(x) x = Dropout(0.6)(x) # a softmax layer for 7 classes out = Dense(7, activation='softmax',name='output_layer')(x) custom_resnet_model2 = Model(inputs=model.input, outputs=out) custom_resnet_model2.summary() for layer in custom_resnet_model2.layers[:-5]: layer.trainable = False custom_resnet_model2.layers[-1].trainable custom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy']) t=time.time() hist = custom_resnet_model2.fit(X_train, y_train, batch_size=40, epochs=15, verbose=1, validation_data=(X_test, y_test)) print('Training time: %s' % (t - 
time.time())) (loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=32, verbose=1) print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100)) #accuracy plt.plot(hist.history['acc']) plt.plot(hist.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #loss plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` # Keras ResNet50 ### Input(shape=(64, 64, 3)),include_top=False,weights='imagenet' ### batch_size=38, epochs=25 Remove fully-connected 2nd last dense layer #### loss='categorical_crossentropy',optimizer='adam' #### Don't want to train all model ##### Last 3 layer retrain ``` image_input = Input(shape=(64, 64, 3)) model = keras.applications.resnet50.ResNet50(input_tensor=image_input, include_top=False,weights='imagenet') last_layer = model.output # add a global spatial average pooling layer x = GlobalAveragePooling2D()(last_layer) # add fully-connected & dropout layers #x = Dense(512, activation='relu',name='fc-1')(x) x = Dropout(0.6)(x) x = Dense(128, activation='relu',name='fc-2')(x) x = Dropout(0.4)(x) # a softmax layer for 7 classes out = Dense(7, activation='softmax',name='output_layer')(x) custom_resnet_model2 = Model(inputs=model.input, outputs=out) custom_resnet_model2.summary() for layer in custom_resnet_model2.layers[:-3]: layer.trainable = False custom_resnet_model2.layers[-1].trainable custom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy']) t=time.time() history = custom_resnet_model2.fit(X_train, y_train, batch_size=38, epochs=25, verbose=1, validation_data=(X_test, y_test)) print('Training time: %s' % (t - time.time())) (loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=32, verbose=1) print("[INFO] loss={:.4f}, 
accuracy: {:.4f}%".format(loss,accuracy * 100)) #accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` # GoogleNet InceptionV3 ### Input(shape=(150, 150, 3)),include_top=False,weights='imagenet' ### batch_size=30, epochs=20 Remove fully-connected 2nd last dense layer #### loss='categorical_crossentropy',optimizer='adam' #### Don't want to train all model ##### Last 3 layer retrain ``` import numpy as np import os import time #from resnet50 import ResNet50 from keras.preprocessing import image from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten #from imagenet_utils import preprocess_input from keras.layers import Input from keras.models import Model from keras.utils import np_utils from sklearn.utils import shuffle from sklearn.cross_validation import train_test_split data = [] labels = [] im_width = 150 im_height = 150 num_classes = 7 for i in range(7): path=os.getcwd()+"/Project2DataClean/"+str(i+1)+'/' print(path) progress = 0 image_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f))] for file_name in image_files: image_file = str(path + file_name) img = cv2.imread(image_file) new_img = cv2.resize(img,(im_width,im_height)) data.append(new_img) progress = progress+1 # print(int(path[-2])) labels.append(int(path[-2])-1) if progress%100==0: print('Progress '+str(progress)+' Image done of Disease type:' + path[-2]) data = np.array(data) labels = np.array(labels) print(len(data)) print(len(labels)) print(data.shape) print(labels.shape) data = data.astype('uint8') labels = keras.utils.to_categorical(labels, 7) print(labels.shape) x,y = shuffle(data,labels, 
random_state=2) X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2) image_input = Input(shape=(150, 150, 3)) model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(150, 150, 3)) last_layer = model.output # add a global spatial average pooling layer x = GlobalAveragePooling2D()(last_layer) # add fully-connected & dropout layers # x = Dense(128, activation='relu',name='fc-1')(x) x = Dropout(0.1)(x) x = Dense(128, activation='relu',name='fc-2')(x) x = Dropout(0.9)(x) # a softmax layer for 7 classes out = Dense(7, activation='softmax',name='output_layer')(x) custom_resnet_model2 = Model(inputs=model.input, outputs=out) custom_resnet_model2.summary() for layer in custom_resnet_model2.layers[:-5]: layer.trainable = False custom_resnet_model2.layers[-1].trainable custom_resnet_model2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy']) t=time.time() history = custom_resnet_model2.fit(X_train, y_train, batch_size=30, epochs=15, verbose=1, validation_data=(X_test, y_test)) print('Training time: %s' % (t - time.time())) (loss, accuracy) = custom_resnet_model2.evaluate(X_test, y_test, batch_size=33, verbose=1) print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100)) #accuracy plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() #loss plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() ``` # CNN Model 2 (60 Epochs) ## Training Accuracy: 0.9159 validation accuracy: 0.8642 # ResNet Model 1(15 Epochs) ### Training Accuracy: 0.9455 validation accuracy: 0.86 # GoogleNet ### Training Accuracy: 0.79 validation accuracy: 0.78 # GoogleNet(Colab) ### with different dataset 
### Training Accuracy: 0.85 validation accuracy: 0.13
github_jupyter
# 多层感知机 :label:`sec_mlp` 在 :numref:`chap_linear`中, 我们介绍了softmax回归( :numref:`sec_softmax`), 然后我们从零开始实现softmax回归( :numref:`sec_softmax_scratch`), 接着使用高级API实现了算法( :numref:`sec_softmax_concise`), 并训练分类器从低分辨率图像中识别10类服装。 在这个过程中,我们学习了如何处理数据,如何将输出转换为有效的概率分布, 并应用适当的损失函数,根据模型参数最小化损失。 我们已经在简单的线性模型背景下掌握了这些知识, 现在我们可以开始对深度神经网络的探索,这也是本书主要涉及的一类模型。 ## 隐藏层 我们在 :numref:`subsec_linear_model`中描述了仿射变换, 它是一个带有偏置项的线性变换。 首先,回想一下如 :numref:`fig_softmaxreg`中所示的softmax回归的模型架构。 该模型通过单个仿射变换将我们的输入直接映射到输出,然后进行softmax操作。 如果我们的标签通过仿射变换后确实与我们的输入数据相关,那么这种方法就足够了。 但是,仿射变换中的*线性*是一个很强的假设。 ### 线性模型可能会出错 例如,线性意味着*单调*假设: 任何特征的增大都会导致模型输出的增大(如果对应的权重为正), 或者导致模型输出的减小(如果对应的权重为负)。 有时这是有道理的。 例如,如果我们试图预测一个人是否会偿还贷款。 我们可以认为,在其他条件不变的情况下, 收入较高的申请人总是比收入较低的申请人更有可能偿还贷款。 但是,虽然收入与还款概率存在单调性,但它们不是线性相关的。 收入从0增加到5万,可能比从100万增加到105万带来更大的还款可能性。 处理这一问题的一种方法是对我们的数据进行预处理, 使线性变得更合理,如使用收入的对数作为我们的特征。 然而我们可以很容易找出违反单调性的例子。 例如,我们想要根据体温预测死亡率。 对于体温高于37摄氏度的人来说,温度越高风险越大。 然而,对于体温低于37摄氏度的人来说,温度越高风险就越低。 在这种情况下,我们也可以通过一些巧妙的预处理来解决问题。 例如,我们可以使用与37摄氏度的距离作为特征。 但是,如何对猫和狗的图像进行分类呢? 增加位置$(13, 17)$处像素的强度是否总是增加(或降低)图像描绘狗的似然? 
对线性模型的依赖对应于一个隐含的假设, 即区分猫和狗的唯一要求是评估单个像素的强度。 在一个倒置图像后依然保留类别的世界里,这种方法注定会失败。 与我们前面的例子相比,这里的线性很荒谬, 而且我们难以通过简单的预处理来解决这个问题。 这是因为任何像素的重要性都以复杂的方式取决于该像素的上下文(周围像素的值)。 我们的数据可能会有一种表示,这种表示会考虑到我们在特征之间的相关交互作用。 在此表示的基础上建立一个线性模型可能会是合适的, 但我们不知道如何手动计算这么一种表示。 对于深度神经网络,我们使用观测数据来联合学习隐藏层表示和应用于该表示的线性预测器。 ### 在网络中加入隐藏层 我们可以通过在网络中加入一个或多个隐藏层来克服线性模型的限制, 使其能处理更普遍的函数关系类型。 要做到这一点,最简单的方法是将许多全连接层堆叠在一起。 每一层都输出到上面的层,直到生成最后的输出。 我们可以把前$L-1$层看作表示,把最后一层看作线性预测器。 这种架构通常称为*多层感知机*(multilayer perceptron),通常缩写为*MLP*。 下面,我们以图的方式描述了多层感知机( :numref:`fig_mlp`)。 ![一个单隐藏层的多层感知机,具有5个隐藏单元](../img/mlp.svg) :label:`fig_mlp` 这个多层感知机有4个输入,3个输出,其隐藏层包含5个隐藏单元。 输入层不涉及任何计算,因此使用此网络产生输出只需要实现隐藏层和输出层的计算。 因此,这个多层感知机中的层数为2。 注意,这两个层都是全连接的。 每个输入都会影响隐藏层中的每个神经元, 而隐藏层中的每个神经元又会影响输出层中的每个神经元。 然而,正如 :numref:`subsec_parameterization-cost-fc-layers`所说, 具有全连接层的多层感知机的参数开销可能会高得令人望而却步, 即使在不改变输入或输出大小的情况下, 也可能促使在参数节约和模型有效性之间进行权衡 :cite:`Zhang.Tay.Zhang.ea.2021`。 ### 从线性到非线性 同之前的章节一样, 我们通过矩阵$\mathbf{X} \in \mathbb{R}^{n \times d}$ 来表示$n$个样本的小批量, 其中每个样本具有$d$个输入特征。 对于具有$h$个隐藏单元的单隐藏层多层感知机, 用$\mathbf{H} \in \mathbb{R}^{n \times h}$表示隐藏层的输出, 称为*隐藏表示*(hidden representations)。 在数学或代码中,$\mathbf{H}$也被称为*隐藏层变量*(hidden-layer variable) 或*隐藏变量*(hidden variable)。 因为隐藏层和输出层都是全连接的, 所以我们有隐藏层权重$\mathbf{W}^{(1)} \in \mathbb{R}^{d \times h}$ 和隐藏层偏置$\mathbf{b}^{(1)} \in \mathbb{R}^{1 \times h}$ 以及输出层权重$\mathbf{W}^{(2)} \in \mathbb{R}^{h \times q}$ 和输出层偏置$\mathbf{b}^{(2)} \in \mathbb{R}^{1 \times q}$。 形式上,我们按如下方式计算单隐藏层多层感知机的输出 $\mathbf{O} \in \mathbb{R}^{n \times q}$: $$ \begin{aligned} \mathbf{H} & = \mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)}, \\ \mathbf{O} & = \mathbf{H}\mathbf{W}^{(2)} + \mathbf{b}^{(2)}. \end{aligned} $$ 注意在添加隐藏层之后,模型现在需要跟踪和更新额外的参数。 可我们能从中得到什么好处呢? 你可能会惊讶地发现:在上面定义的模型里,我们没有好处! 
原因很简单:上面的隐藏单元由输入的仿射函数给出, 而输出(softmax操作前)只是隐藏单元的仿射函数。 仿射函数的仿射函数本身就是仿射函数, 但是我们之前的线性模型已经能够表示任何仿射函数。 我们可以证明这一等价性,即对于任意权重值, 我们只需合并隐藏层,便可产生具有参数 $\mathbf{W} = \mathbf{W}^{(1)}\mathbf{W}^{(2)}$ 和$\mathbf{b} = \mathbf{b}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)}$ 的等价单层模型: $$ \mathbf{O} = (\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)})\mathbf{W}^{(2)} + \mathbf{b}^{(2)} = \mathbf{X} \mathbf{W}^{(1)}\mathbf{W}^{(2)} + \mathbf{b}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)} = \mathbf{X} \mathbf{W} + \mathbf{b}. $$ 为了发挥多层架构的潜力, 我们还需要一个额外的关键要素: 在仿射变换之后对每个隐藏单元应用非线性的*激活函数*(activation function)$\sigma$。 激活函数的输出(例如,$\sigma(\cdot)$)被称为*活性值*(activations)。 一般来说,有了激活函数,就不可能再将我们的多层感知机退化成线性模型: $$ \begin{aligned} \mathbf{H} & = \sigma(\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)}), \\ \mathbf{O} & = \mathbf{H}\mathbf{W}^{(2)} + \mathbf{b}^{(2)}.\\ \end{aligned} $$ 由于$\mathbf{X}$中的每一行对应于小批量中的一个样本, 出于记号习惯的考量, 我们定义非线性函数$\sigma$也以按行的方式作用于其输入, 即一次计算一个样本。 我们在 :numref:`subsec_softmax_vectorization`中 以相同的方式使用了softmax符号来表示按行操作。 但是在本节中,我们应用于隐藏层的激活函数通常不仅按行操作,也按元素操作。 这意味着在计算每一层的线性部分之后,我们可以计算每个活性值, 而不需要查看其他隐藏单元所取的值。对于大多数激活函数都是这样。 为了构建更通用的多层感知机, 我们可以继续堆叠这样的隐藏层, 例如$\mathbf{H}^{(1)} = \sigma_1(\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)})$和$\mathbf{H}^{(2)} = \sigma_2(\mathbf{H}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)})$, 一层叠一层,从而产生更有表达能力的模型。 ### 通用近似定理 多层感知机可以通过隐藏神经元,捕捉到输入之间复杂的相互作用, 这些神经元依赖于每个输入的值。 我们可以很容易地设计隐藏节点来执行任意计算。 例如,在一对输入上进行基本逻辑操作,多层感知机是通用近似器。 即使是网络只有一个隐藏层,给定足够的神经元和正确的权重, 我们可以对任意函数建模,尽管实际中学习该函数是很困难的。 你可能认为神经网络有点像C语言。 C语言和任何其他现代编程语言一样,能够表达任何可计算的程序。 但实际上,想出一个符合规范的程序才是最困难的部分。 而且,虽然一个单隐层网络能学习任何函数, 但并不意味着我们应该尝试使用单隐藏层网络来解决所有问题。 事实上,通过使用更深(而不是更广)的网络,我们可以更容易地逼近许多函数。 我们将在后面的章节中进行更细致的讨论。 ## 激活函数 :label:`subsec_activation_functions` *激活函数*(activation function)通过计算加权和并加上偏置来确定神经元是否应该被激活, 它们将输入信号转换为输出的可微运算。 大多数激活函数都是非线性的。 由于激活函数是深度学习的基础,下面(**简要介绍一些常见的激活函数**)。 ``` %matplotlib inline import torch from d2l import torch as d2l ``` ### ReLU函数 最受欢迎的激活函数是*修正线性单元*(Rectified linear unit,*ReLU*), 
因为它实现简单,同时在各种预测任务中表现良好。 [**ReLU提供了一种非常简单的非线性变换**]。 给定元素$x$,ReLU函数被定义为该元素与$0$的最大值: (**$$\operatorname{ReLU}(x) = \max(x, 0).$$**) 通俗地说,ReLU函数通过将相应的活性值设为0,仅保留正元素并丢弃所有负元素。 为了直观感受一下,我们可以画出函数的曲线图。 正如从图中所看到,激活函数是分段线性的。 ``` x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True) y = torch.relu(x) d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5)) ``` 当输入为负时,ReLU函数的导数为0,而当输入为正时,ReLU函数的导数为1。 注意,当输入值精确等于0时,ReLU函数不可导。 在此时,我们默认使用左侧的导数,即当输入为0时导数为0。 我们可以忽略这种情况,因为输入可能永远都不会是0。 这里引用一句古老的谚语,“如果微妙的边界条件很重要,我们很可能是在研究数学而非工程”, 这个观点正好适用于这里。 下面我们绘制ReLU函数的导数。 ``` y.backward(torch.ones_like(x), retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) ``` 使用ReLU的原因是,它求导表现得特别好:要么让参数消失,要么让参数通过。 这使得优化表现的更好,并且ReLU减轻了困扰以往神经网络的梯度消失问题(稍后将详细介绍)。 注意,ReLU函数有许多变体,包括*参数化ReLU*(Parameterized ReLU,*pReLU*) 函数 :cite:`He.Zhang.Ren.ea.2015`。 该变体为ReLU添加了一个线性项,因此即使参数是负的,某些信息仍然可以通过: $$\operatorname{pReLU}(x) = \max(0, x) + \alpha \min(0, x).$$ ### sigmoid函数 [**对于一个定义域在$\mathbb{R}$中的输入, *sigmoid函数*将输入变换为区间(0, 1)上的输出**]。 因此,sigmoid通常称为*挤压函数*(squashing function): 它将范围(-inf, inf)中的任意输入压缩到区间(0, 1)中的某个值: (**$$\operatorname{sigmoid}(x) = \frac{1}{1 + \exp(-x)}.$$**) 在最早的神经网络中,科学家们感兴趣的是对“激发”或“不激发”的生物神经元进行建模。 因此,这一领域的先驱可以一直追溯到人工神经元的发明者麦卡洛克和皮茨,他们专注于阈值单元。 阈值单元在其输入低于某个阈值时取值0,当输入超过阈值时取值1。 当人们的注意力逐渐转移到基于梯度的学习时, sigmoid函数是一个自然的选择,因为它是一个平滑的、可微的阈值单元近似。 当我们想要将输出视作二元分类问题的概率时, sigmoid仍然被广泛用作输出单元上的激活函数 (你可以将sigmoid视为softmax的特例)。 然而,sigmoid在隐藏层中已经较少使用, 它在大部分时候被更简单、更容易训练的ReLU所取代。 在后面关于循环神经网络的章节中,我们将描述利用sigmoid单元来控制时序信息流的架构。 下面,我们绘制sigmoid函数。 注意,当输入接近0时,sigmoid函数接近线性变换。 ``` y = torch.sigmoid(x) d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) ``` sigmoid函数的导数为下面的公式: $$\frac{d}{dx} \operatorname{sigmoid}(x) = \frac{\exp(-x)}{(1 + \exp(-x))^2} = \operatorname{sigmoid}(x)\left(1-\operatorname{sigmoid}(x)\right).$$ sigmoid函数的导数图像如下所示。 注意,当输入为0时,sigmoid函数的导数达到最大值0.25; 而输入在任一方向上越远离0点时,导数越接近0。 ``` # 清除以前的梯度 x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) 
d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) ``` ### tanh函数 与sigmoid函数类似, [**tanh(双曲正切)函数也能将其输入压缩转换到区间(-1, 1)上**]。 tanh函数的公式如下: (**$$\operatorname{tanh}(x) = \frac{1 - \exp(-2x)}{1 + \exp(-2x)}.$$**) 下面我们绘制tanh函数。 注意,当输入在0附近时,tanh函数接近线性变换。 函数的形状类似于sigmoid函数, 不同的是tanh函数关于坐标系原点中心对称。 ``` y = torch.tanh(x) d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5)) ``` tanh函数的导数是: $$\frac{d}{dx} \operatorname{tanh}(x) = 1 - \operatorname{tanh}^2(x).$$ tanh函数的导数图像如下所示。 当输入接近0时,tanh函数的导数接近最大值1。 与我们在sigmoid函数图像中看到的类似, 输入在任一方向上越远离0点,导数越接近0。 ``` # 清除以前的梯度 x.grad.data.zero_() y.backward(torch.ones_like(x),retain_graph=True) d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5)) ``` 总结一下,我们现在了解了如何结合非线性函数来构建具有更强表达能力的多层神经网络架构。 顺便说一句,这些知识已经让你掌握了一个类似于1990年左右深度学习从业者的工具。 在某些方面,你比在20世纪90年代工作的任何人都有优势, 因为你可以利用功能强大的开源深度学习框架,只需几行代码就可以快速构建模型, 而以前训练这些网络需要研究人员编写数千行的C或Fortran代码。 ## 小结 * 多层感知机在输出层和输入层之间增加一个或多个全连接隐藏层,并通过激活函数转换隐藏层的输出。 * 常用的激活函数包括ReLU函数、sigmoid函数和tanh函数。 ## 练习 1. 计算pReLU激活函数的导数。 1. 证明一个仅使用ReLU(或pReLU)的多层感知机构造了一个连续的分段线性函数。 1. 证明$\operatorname{tanh}(x) + 1 = 2 \operatorname{sigmoid}(2x)$。 1. 假设我们有一个非线性单元,将它一次应用于一个小批量的数据。你认为这会导致什么样的问题? [Discussions](https://discuss.d2l.ai/t/1796)
github_jupyter
## Geocoding Tweets This script showcases how we geocoded (e.g. added coordinates) to tweets containing a placename in the Netherlands. For this, we made use of cbsodata and Nominatim from geopy, which uses the OSM database for geocoding strings. ``` # Import needed libraries import pandas as pd import numpy as np import re import geopy from geopy.geocoders import Nominatim import cbsodata # Load cleaned locations from previous script df = pd.read_csv('cleaned_geo_tweets.csv') ``` #### Final check for geocoding In order to be sure that the geolocator will pick up the names from our tweets, we run the tweets through a loop that compares them with an official list of residences (e.g. villages, cities, hamlets, etc.) from [CBS](https://opendata.cbs.nl/statline/portal.html?_la=nl&_catalog=CBS&tableId=84992NED&_theme=238), so that we are sure the geolocater won't crash. ``` # Retrieve metadata from cbsodata metadata = pd.DataFrame(cbsodata.get_meta('84992NED', 'DataProperties')) # Save placenames as dataframe places = pd.DataFrame(cbsodata.get_data('84992NED', select = 'Woonplaatsen')) # Read the places csv file #places = pd.read_csv('Woonplaatsen_in_Nederland.csv',sep = ';') # Make sure the names are in lower case to match our names places['Woonplaatsen'] = places['Woonplaatsen'].str.lower() # Create an empty list for the place names to be added to legit_locs = [] # Create the loop for i in df['location']: for j in places['Woonplaatsen']: if i == j: legit_locs.append(i) else: pass # Create dataframe of location and count fnl_df = pd.DataFrame(legit_locs) # Name the column for clarity fnl_df.columns =['Location'] # Add a count column fnl_df['count'] = fnl_df.groupby('Location')['Location'].transform('count') # Remove the duplicates fnl_df.drop_duplicates(subset=['Location'], keep = 'first', inplace=True) ``` #### Geocoding Now, it's time for the actual geocoding. Be aware that this line takes around 10 minutes to locate all the placenames. 
The code was inspired by [this](https://medium.com/analytics-vidhya/exploring-twitter-data-using-python-part-iii-analyzing-the-data-e883aa340dff) tutorial. Alternatively, the 'tweets_with_location' file is also provided in the next notebook, so the user doesn't have to run the next part. ``` # Inititiate user geolocator = Nominatim(user_agent='twitter-analysis') # note that user_agent is a random name # Convert locations to a list fnl_locs = list(fnl_df.Location) # This line takes about 10 minutes to run! geolocated = list(map(lambda x: [x,geolocator.geocode(x)[1] if geolocator.geocode(x) else None],fnl_locs)) # Check the result geolocated.head(5) # Transform to lat and long geolocated = pd.DataFrame(geolocated) geolocated.columns = ['locat','latlong'] geolocated['lat'] = geolocated.latlong.apply(lambda x: x[0]) geolocated['lon'] = geolocated.latlong.apply(lambda x: x[1]) geolocated.drop('latlong',axis=1, inplace=True) # Procedure to merge the sentiment and spatial analysis tweets_with_location = df.join(geolocated.set_index('locat'), on = 'location') # Export to csv for the final notebook! tweets_with_location.to_csv('tweets_with_location.csv', header=True, index=False) ```
github_jupyter
# Numpy: Documentação : https://numpy.org/doc/stable/user/absolute_beginners.html#numpy-the-absolute-basics-for-beginners ## Instalação: Insira o código abaixo no Anaconda Prompt: <b>pip install numpy</b> https://numpy.org/install/ ## Importação: https://numpy.org/doc/stable/user/absolute_beginners.html#how-to-import-numpy ``` import numpy as np ``` ## O que é um array? Tipos de arrays: ndarrays -> significam arrays com N dimensões<br> 1-D array-> Possui apenas uma dimensão. Será comumente chamado de <b> vetor ou vector </b> <br> 2-D array -> Possui 2 dimensões. Será comumente chamado de <b> matriz ou matrix</b> <br> 3-D ou Mais array -> Possui 3 ou mais dimensões. Será comumente chamado de <b>tensor</b> https://numpy.org/doc/stable/reference/arrays.html#arrays ## Criando um Array: ### np.array() https://numpy.org/doc/stable/reference/generated/numpy.array.html?highlight=numpy%20array#numpy-array ``` a = np.array([[1, 2, 3, 4, 5, 6], [3, 4, 5, 6, 7, 8]]) print(a) print(type(a)) ``` ### np.zeros() https://numpy.org/doc/stable/reference/generated/numpy.zeros.html ``` zero_array = np.zeros(shape = (5, 3, 6)) print(zero_array) ``` ### np.ones() https://numpy.org/doc/stable/reference/generated/numpy.ones.html ``` one_array = np.ones(2) print(one_array) ``` ### no.empty() https://numpy.org/doc/stable/reference/generated/numpy.empty.html ``` vazio = np.empty((3, 4)) print(vazio) ``` ### np.arange() https://numpy.org/doc/stable/reference/generated/numpy.arange.html ``` arr = np.arange(50, 201, 30) print(arr) ``` ### np.linspace() https://numpy.org/doc/stable/reference/generated/numpy.linspace.html ``` linear_array = np.linspace(0, 100, num = 40, retstep = True) print(linear_array) ``` ## Descobrindo o tamanho de um array: Número de dimensões : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.ndim.html <br> Número de items: https://numpy.org/doc/stable/reference/generated/numpy.ndarray.size.html Formato : 
https://numpy.org/doc/stable/reference/generated/numpy.ndarray.shape.html ``` zero_array = np.zeros(shape = (5,3,6)) zero_array # tamanhos das dimensões print(zero_array.shape) # quantidade de elementos print(zero_array.size) # quanditade de dimensões print(zero_array.ndim) ``` ### Mudando o tamanho de um array: https://numpy.org/doc/stable/reference/generated/numpy.reshape.html ### Rankeando um array: https://numpy.org/doc/stable/reference/generated/numpy.sort.html ## Transformando um Vetor (1-D) em uma matrix(2-D) .newaxis:https://numpy.org/doc/stable/reference/constants.html?#numpy.newaxis <br> .expand_dims:https://numpy.org/doc/stable/reference/generated/numpy.expand_dims.html#numpy.expand_dims ``` a = np.array([1, 2, 3]) print(a.ndim) print(a.shape) a2_1 = a[np.newaxis,:] print(a2.shape) print(a2.ndim) print(a2_1) a2_2 = a[:,np.newaxis] print(a2.shape) print(a2.ndim) print(a2_2) print(a2_2[2][0]) ``` ### Concatenando arrays: https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html ``` a = np.array([1, 2, 3]) b = np.array([4, 5, 6]) c = np.concatenate((a,b)) d = np.concatenate((b,a)) print(f"{c}\n{d}") ``` ### Consultando itens de uma array: https://numpy.org/doc/stable/user/absolute_beginners.html#indexing-and-slicing ``` a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) print(a) print("-="*10) maior_8 = a[a>8] print(maior_8) ``` ### Operações com Arrays: Soma : https://numpy.org/doc/stable/reference/generated/numpy.sum.html#numpy.sum <br> Valor mínimo : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.min.html<br> Valor máximo : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.max.html<br> Média : https://numpy.org/doc/stable/reference/generated/numpy.ndarray.mean.html ``` a = np.array([1, 2, 3]) print(a.sum()) print(a.max()) print(a.min()) print(a.mean()) ``` ### Gerando amostras aleatórias: ``` from numpy.random import default_rng rng=default_rng() aleatorio = rng.integers(20, size = (2, 4)) 
print(aleatorio) ``` ## Diferença entre Arrays e Listas: ``` a = np.array([1, 'Thiago', 2, 3, 4, 5, 6, 7, 8]) print(a) print(type(a)) print('-='*25) lista_a = [1, 'Thiago', 2, 3, 4, 5, 6, 7, 8] print(lista_a) print(type(lista_a)) ``` ## Comparando o processamento: ``` from time import process_time lista_a = list(rng.integers(10, 100, 10000000)) lista_b = list(rng.integers(10, 100, 10000000)) # lista_c = lista_a * lista_b # print(lista_c) lista_c = [] t1 = process_time() for i in range(len(lista_a)): lista_c.append(lista_a[i]*lista_b[i]) t2 = process_time() print(t2-t1) a = rng.integers(10, 100, 10000000) b = rng.integers(10, 100, 10000000) t1a = process_time() c = a*b t2a = process_time() print(t2a-t1a) (t2-t1)/(t2a-t1a) import matplotlib.pyplot as plt dados_x = rng.integers(20, size = 30) dados_y = rng.integers(12, size = 30) plt.scatter(x = dados_x, y = dados_y) plt.show() ```
github_jupyter
``` import numpy as np from numpy import loadtxt import pylab as pl from IPython import display from RcTorchPrivate import * from matplotlib import pyplot as plt from scipy.integrate import odeint %matplotlib inline #this method will ensure that the notebook can use multiprocessing on jupyterhub or any other linux based system. try: mp.set_start_method("spawn") except: pass torch.set_default_tensor_type(torch.FloatTensor) %matplotlib inline lineW = 3 lineBoxW=2 font = {'family' : 'normal', 'weight' : 'normal',#'bold', 'size' : 22} #plt.rc('font', **font) #plt.rcParams['text.usetex'] = True #helper functions def pltTr(x,y,clr='cyan', mark='o'): plt.plot(x.detach().numpy(), y.detach().numpy(), marker=mark, color=clr, markersize=8, label='truth', alpha = 0.9) def pltPred(x,y,clr='red', linS='-'): plt.plot(x.detach().numpy(), y.detach().numpy(), color=clr, marker='.', linewidth=2, label='RC') from decimal import Decimal def convert2pd(tensor1, tensor2): pd_ = pd.DataFrame(np.hstack((tensor1.detach().cpu().numpy(), tensor2.detach().cpu().numpy()))) pd_.columns = ["t", "y"] return pd_ '%.2E' % Decimal('40800000000.00000000000000') def param(t,N,y0): f = 1 - torch.exp(-t) f_dot = 1 - f #f = t #f_dot=1 return y0 + f*N #define a reparameterization function def reparam(t, y0 = None, N = None, dN_dt = None, t_only = False): f = 1 - torch.exp(-t) f_dot = 1 - f if t_only: return f, f_dot y = y0 + N*f if dN_dt: ydot = dN_dt * f + f_dot * N else: ydot = None return y, ydot def reparam(t, order = 1): exp_t = torch.exp(-t) derivatives_of_g = [] g = 1 - exp_t #0th derivative derivatives_of_g.append(g) g_dot = 1 - g #first derivative #derivatives_of_g.append(g_dot) # for i in range(order): # if i %2 == 0: # #print("even") # derivatives_of_g.append(g_dot) # else: # #print("odd") # derivatives_of_g.append(-g_dot) # return derivatives_of_g return g, g_dot def force(X, A = 0): return torch.zeros_like(X) q = 0.5 def custom_loss(X , y, ydot, out_weights, f = force, reg = True, ode_coefs = 
None, q = q, init_conds = None, enet_strength = None, enet_alpha = None): #with paramization L = ydot + ode_coefs[0]* y - f(X) + q*y**2 """ if reg: weight_size_sq = torch.mean(torch.square(out_weights)) weight_size_L1 = torch.mean(torch.abs(out_weights)) L_reg = 0.1*(weight_size_sq + weight_size_L1)/2 L = L + L_reg """ L = torch.mean(torch.square(L)) return L def plot_result(esn, xtrain, lams = [1], y0s = [1], plot_gt = True, loglog = False, ode_coefs = None, force_k = 0, fileName=None, q = q,backprop_f = None, time_result = True, solve = None): RC = esn fig = plt.figure(figsize = (8, 6)) t_pow = 0 gts, ys, ydots, ws, bs, Ls = [], [], [], [], [], [] for i, lam in enumerate(lams): y0 = y0s[i] ode_coefs[0] = lam #fit the echo state network train_args = {"burn_in" : int(BURN_IN), "ODE_order" : 1, #"track_in_grad" : self.track_in_grad, "force" : force, "reparam_f" : reparam, #"nl_f" : self.nl_f, "init_conditions" : [float(y0)], "ode_coefs" : ode_coefs, "y" : None, "X" : xtrain.view(-1,1), "q" : q, "nl" : True, } if not i: y, ydot = esn.fit(**train_args, SOLVE = solve) ode_coefs_copy = ode_coefs.copy() states_dict = {"s" : RC.states.clone(), "s1" : RC.states_dot.clone(), "G" : RC.G, "ex" : RC.extended_states.clone(), "sb1": RC.sb1, "sb" : RC.sb } if esn.ODE_order == 2: states_dict["s2"] = RC.states_dot2.clone() states_dict["sb2"] = RC.sb2.clone() t2 = time.perf_counter() else: y, ydot = RC.fit(preloaded_states_dict = states_dict, SOLVE = solve, **train_args) if backprop_f: weight_dict = backprop_f(esn) y, ydot = esn.fit(**train_args, out_weights = weight_dict, SOLVE = False) ode_coefs_copy = ode_coefs.copy() if ode_coefs[0] == "t**2": sp = esn.X**2 t_pow = 2 ode_coefs_copy[0] = sp def ODE_numSolver(y,t, q = q): k = 1 # dydt = -k * y *t**t_pow + force_k*np.sin(t) dydt = -k * y -q*y**2 return dydt y_truth = odeint(ODE_numSolver,y0,np.array(esn.X.cpu().view(-1,))) y_truth = torch.tensor(y_truth) # y_exac = y0*torch.exp(-lam*(esn.X)) if y0==1: extraWidth = 2; color = 'k' 
else: extraWidth=0; color = 'b' #line to ensure that cuda tensors can move to cpu for plotti X = esn.X.cpu().detach() y = y.cpu().detach() y_truth = y_truth.cpu().detach() if not i: plt.plot(X, y,color, linewidth=lineW+extraWidth, label = "pred" ) plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85, label = "gt") else: plt.plot(X, y,color, linewidth=lineW+extraWidth) plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85) ## Formating Figure # Changing spine style ax = plt.gca() for ps in ['top','bottom','left','right']: ax.spines[ps].set_linewidth(lineBoxW) plt.xlabel(r'$t$') plt.ylabel(r'$y(t)$') plt.legend() gts.append(y_truth.cpu()) ys.append(y.cpu()) ydots.append(ydot.cpu()) if backprop_f: Ls.append(weight_dict["loss"]) #Ls.append(esn.backprop_args) bs.append(esn.LinOut.bias.data.cpu()) ws.append(esn.LinOut.weight.data.cpu()) if time_result: return t2, ys, ydots, gts, ws, bs, Ls else: return ys, ydots, gts, ws, bs, Ls # plt.savefig(fileName+"Trajectories",format='png')#, dpi=600,transparent=True) # plt.savefig(fileName+'Trajectories.eps',format='eps') # return residuals def optimize_last_layer(esn, SAVE_AFTER_EPOCHS = 1, epochs = 30000, custom_loss = custom_loss, loss_threshold = 10 ** -8, EPOCHS_TO_TERMINATION = None, f = force, learning_rate = 0.01, plott = True, spikethreshold = 0.25): #define new_x new_X = esn.extended_states.detach() #force detach states_dot esn.states_dot = esn.states_dot.detach().requires_grad_(False) #define criterion criterion = torch.nn.MSELoss() try: assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad except: esn.LinOut.weight.requires_grad_(True) esn.LinOut.bias.requires_grad_(True) #define previous_loss (could be used to do a convergence stop) previous_loss = 0 #define best score so that we can save the best weights best_score = 0 #define the optimizer optimizer = optim.Adam(esn.parameters(), lr = learning_rate) #define the loss history loss_history = [] if plott: #use pl for live plotting fig, ax = 
pl.subplots(1,3, figsize = (16,4)) t = esn.X#.view(*N.shape).detach() g, g_dot = esn.G y0 = esn.init_conds[0] #optimizer = torch.optim.SGD(model.parameters(), lr=100) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5) lrs = [] floss_last = 0 #begin optimization loop for e in range(epochs): optimizer.zero_grad() N = esn.forward( esn.extended_states ) N_dot = esn.calc_Ndot(esn.states_dot) y = y0 + g *N ydot = g_dot * N + g * N_dot assert N.shape == N_dot.shape, f'{N.shape} != {N_dot.shape}' assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad assert False, f'{esn.LinOut.weight}' total_ws = esn.LinOut.weight.shape[0] + 1 weight_size_sq = torch.mean(torch.square(esn.LinOut.weight)) loss = custom_loss(esn.X, y, ydot, esn.LinOut.weight, reg = False, ode_coefs = esn.ode_coefs) loss.backward() optimizer.step() floss = float(loss) loss_history.append(floss) if not e and not best_score: best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() if e > SAVE_AFTER_EPOCHS: if not best_score: if floss <= min(loss_history): best_pred = y.clone() best_pred = y.clone() best_ydot = ydot.clone() best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() best_score = float(loss) else: if floss < best_score: best_pred = y.clone() best_ydot = ydot.clone() best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() best_score = float(loss) if not EPOCHS_TO_TERMINATION: if float(loss) < loss_threshold: EPOCHS_TO_TERMINATION = e + 100 else: if e >= EPOCHS_TO_TERMINATION: backprop_args = {"loss_history" : loss_history, "lr" : learning_rate, "epochs" : epochs } return {"weights": best_weight, "bias" : best_bias, "loss" : backprop_args, "ydot" : best_ydot, "y" : best_pred} if e > 1: if float(np.log(floss_last) - np.log(floss)) > spikethreshold: lrs.append(optimizer.param_groups[0]["lr"]) scheduler.step() for param_group in optimizer.param_groups: print('lr', param_group['lr']) floss_last = 
floss if plott: if e % 1500 == 0: ax[0].clear() logloss_str = 'Log(L) ' + '%.2E' % Decimal((loss).item()) delta_loss = ' delta Log(L) ' + '%.2E' % Decimal((loss-previous_loss).item()) print(logloss_str + ", " + delta_loss) ax[0].plot(N.detach().cpu(), label = "exact") ax[0].set_title(f"Epoch {e}" + ", " + logloss_str) ax[0].set_xlabel("epochs") ax[1].set_title(delta_loss) ax[1].plot(N_dot.detach().cpu()) #ax[0].plot(y_dot.detach(), label = "dy_dx") ax[2].clear() weight_size = str(weight_size_sq.detach().item()) ax[2].set_title("loss history \n and "+ weight_size) ax[2].loglog(loss_history) [ax[i].legend() for i in range(3)] previous_loss = loss.item() #clear the plot outputt and then re-plot display.clear_output(wait=True) display.display(pl.gcf()) backprop_args = {"loss_history" : loss_history, "lr" : learning_rate, "epochs" : epochs } return {"weights": best_weight, "bias" : best_bias, "loss" : backprop_args, "ydot" : best_ydot, "y" : best_pred} #optimized_hyper_params x0,xf, nsteps = 0, 5, 1000 #int(2000 * ratio_up) xtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False) BURN_IN = 500 y0 = 1 ; lam = 1 #the length of xtrain won't matter. Only dt , x0, and xf matter. 
xtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False).view(-1,1) xtrain.shape #q = 0.7 hybrid_hps_q07 = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.005200326335063122, 'spectral_radius': 4.063828945159912, 'regularization': 0.16819202592057847, 'leaking_rate': 0.07071314752101898, 'bias': 0.6888809204101562} #q = 0.5 ######################################################################################## hybrid_hps_q05 = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0003179179463749722, 'spectral_radius': 7.975825786590576, 'regularization': 0.3332787303378571, 'leaking_rate': 0.07119506597518921, 'bias': -0.9424528479576111} ######################################################################################## #q = 0.3 ######################################################################################## exact_hps_q03 = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0020952467703604792, 'spectral_radius': 0.37082985043525696, 'regularization': 0.361264334627276, 'leaking_rate': 0.012962563894689083, 'bias': 0.15055322647094727} another_exact_03_run = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.00010646483429429022, 'spectral_radius': 9.755386352539062, 'regularization': 0.001061326151397624, 'leaking_rate': 0.015667859464883804, 'bias': -0.6486743688583374} # 3000 epochs hybrid_03_hps = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.000876183849077606, 'spectral_radius': 7.2928466796875, 'regularization': 0.6050492589156197, 'leaking_rate': 0.014219114556908607, 'bias': 0.18588018417358398} ######################################################################################## #q = 0.1 ######################################################################################## #y0s = array([-1. 
, -0.25, 0.5 , 1.25]) hybrid_hyper_params = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0001340433236446365, 'spectral_radius': 7.1109442710876465, 'regularization': 0.0040541553015366605, 'leaking_rate': 0.022500092163681984, 'bias': 0.7761751413345337} exact_hyper_params = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.00457819326682001, 'spectral_radius': 4.214494228363037, 'regularization': 672.3718753390342, 'leaking_rate': 0.11203678697347641, 'bias': 0.7799162864685059} ######################################################################################## #esn.fit dRay=0.75 np.arange(-1., 1 + dRay, dRay) y0s = np.arange(-1., 1 + dRay, dRay) ``` dt -2.1 n_nodes 500 connectivity -3.8727548122406006 spectral_radius 7.1109442710876465 regularization -2.392099618911743 leaking_rate 0.022500092163681984 bias 0.7761751413345337 ``` log_vars = ['connectivity', 'llambda', 'llambda2', 'noise', 'regularization', 'dt'] hps = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.0008771738385033052, 'spectral_radius': 3.8187756538391113, 'regularization': 2.6243606290132924, 'leaking_rate': 0.05788800120353699, 'bias': -0.4182356595993042} for key, val in hps.items(): if key in log_vars: print(key, np.log10(val)) else: print(key, val) #declare the bounds dict. See above for which variables are optimized in linear vs logarithmic space. bounds_dict = {"connectivity" : (-4, -0.1), "spectral_radius" : (1.5, 8.5), "n_nodes" : 500, "regularization" : (-2, 2), "leaking_rate" : (0, 0.1), #"input_scaling" : (0, 1), #"feedback_scaling" : (0, 1), "dt" : -2, "bias": (-1,1) } #declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict. 
esn_cv = EchoStateNetworkCV(bounds = bounds_dict, interactive = True, batch_size = 1, cv_samples = 2, initial_samples = 50, #200 subsequence_length = int(xtrain.shape[0] * 0.8), random_seed = 209, success_tolerance = 10, windowsOS =False, validate_fraction = 0.3, ODE_order = 1, length_min = 2 **(-7), esn_burn_in = BURN_IN, log_score = True ) #optimize: opt = False if opt: opt_hps = esn_cv.optimize(y = None, x = xtrain,#.cuda(), reparam_f = reparam, ODE_criterion = custom_loss, init_conditions = [(y0s[0], y0s[-1])], force = force, rounds = 5, ode_coefs = [1, 1], backprop_f = optimize_last_layer, solve = False, q = q, n_outputs = 1, eq_system = False, nonlinear_ode = True) # # new_prop_hps = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.001237975145359088, 'spectral_radius': 5.298933029174805, 'regularization': 18.616127927682236, 'leaking_rate': 0.0048981658183038235, 'bias': -0.40049731731414795} #opt_hps #assert False esn = EchoStateNetwork(**hybrid_hps_q05 , random_state = 209, id_ = 10, dtype = torch.float32) sns.heatmap(esn.LinOut.weight[:,:5].detach()); #1. get the linear trajectories #2. 
do a pure backprop training rez = plot_result(esn, xtrain.cpu(), lams = torch.ones_like(torch.tensor(y0s)),#np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], plot_gt = True, ode_coefs = [1,1], q = q, backprop_f = None,#optimize_last_layer, solve = True) esn.init_conds RC =esn RC.DH1[0:10,:] sns.heatmap(RC.DH2); plt.show(); sns.heatmap(matmul(-esn.DH2, esn.D_A).view(-1,1)) esn.init_conds sns.heatmap(esn.LinOut.weight[0].detach().view(-1,1)) #t2_, ys_, gts_, ws_, bs_, l_trajs = rez #linear_trajs = _, l_trajs, l_trajs_dot, _, _, _, _ = plot_result(esn, xtrain.cpu(), lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], plot_gt = True, ode_coefs = [1,1], q = q, backprop_f = None,#optimize_last_layer, solve = True) esn.ode_coefs esn.DH2.shape sns.heatmap(esn.DH); torch.mean(esn.D_A) sns.heatmap(esn.DH2); plt.show(); torch.mean(esn.D_A) sns.heatmap(esn.LinOut.weight.detach()) sns.heatmap(esn.D_A) assert False t2, ys, ydots, gts, ws, bs, Ls = rez n = 3 plt.loglog(Ls[n]["loss_history"], label = "prop_only") #plt.loglog(h["loss"][n]["loss_history"], label = "hybrid") plt.legend(); assert False import pickle filename = 'bernoulli_q05_hybrid' with open(filename + '_plot_data_.pickle', 'rb') as handle: h = pickle.load(handle) #filename = 'bernoulli_q05_backprop' with open(filename + '_plot_data_.pickle', 'rb') as handle: b = pickle.load(handle) with open(filename + '_plot_data_.pickle', 'rb') as handle: b = pickle.load(handle) n = 3 plt.loglog(b['loss'][n]["loss_history"], color = "blue", label = "backprop_only") plt.loglog(h['loss'][n]["loss_history"], color = "red", label = "hybrid") plt.legend() # for i, key in enumerate(b['loss']): # plt.loglog(key["loss_history"], color = "blue") # for i, key in enumerate(a['loss']): # plt.loglog(key["loss_history"], color = "red") assert False ls import pickle filename = 'bernoulli_q05_linear' #t2, ys, ydots, gts, ws, bs, Ls = rez q05_data = { # "time": esn.X, # "ys" : ys, # "ydots" : ydots, # "gts" : 
gts, # "q": 0.5, # "loss": Ls, "linear_trajectories" : l_trajs, "linear_trajectories_dot" : l_trajs_dot } #"bprop_only_loss" : Ls_bprop} with open(filename + '_plot_data.pickle', 'wb') as handle: pickle.dump(q05_data, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(filename + '_plot_data.pickle', 'rb') as handle: b = pickle.load(handle) b.keys() for i in b['linear_trajectories']: plt.plot(i) import matplotlib.pyplot as plt with open(filename + '_plot_data.pickle', 'rb') as handle: b = pickle.load(handle) b.keys() plt.plot(b["ydots"][0]) import pickle q05 = {"time": esn.X, "hyper_params" : hybrid_hps_q05, "out_weights" : {"weights": ws, "bias": bs}, "burn_in" : BURN_IN, "epochs" : 30000, "learning_rate": 0.0001, "y0s" : y0s, "q" : 0.5} with open(filename + '_reproduce.pickle', 'wb') as handle: pickle.dump(q05, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(filename + '_reproduce.pickle', 'rb') as handle: b = pickle.load(handle) for param in esn.parameters(): #print(param) if param.requires_grad: print(param) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = a) opt_hps plt.plot(esn.states[:,7]) correction = (esn.D_A.T * esn.gH.T @ esn.gH) esn.DH.shape (esn.DH1 + correction).shape %%time esn = EchoStateNetwork(**hybrid_03_hps, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = 0.1) %%time esn = EchoStateNetwork(**hybrid_hyper_params, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = 
torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) #A * torch.sin(X) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = None)#optimize_last_layer) %%time esn = EchoStateNetwork(**exact_hyper_params, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = 0.1) ```
github_jupyter
# Decision Tree Classification ``` # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Importing the dataset dataset = pd.read_csv('Social_Network_Ads.csv') X = dataset.iloc[:, [2, 3]].values y = dataset.iloc[:, 4].values # Splitting the dataset into the Training set and Test set from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # Feature Scaling from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # Fitting Decision Tree Classification to the Training set from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0) classifier.fit(X_train, y_train) # Predicting the Test set results y_pred = classifier.predict(X_test) # Making the Confusion Matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) # Visualising the Training set results from matplotlib.colors import ListedColormap X_set, y_set = X_train, y_train X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01), np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01)) plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Decision Tree Classification (Training set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() # Visualising the Test set results from matplotlib.colors import ListedColormap X_set, y_set = X_test, y_test X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, 
stop = X_set[:, 0].max() + 1, step = 0.01), np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01)) plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.75, cmap = ListedColormap(('red', 'green'))) plt.xlim(X1.min(), X1.max()) plt.ylim(X2.min(), X2.max()) for i, j in enumerate(np.unique(y_set)): plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1], c = ListedColormap(('red', 'green'))(i), label = j) plt.title('Decision Tree Classification (Test set)') plt.xlabel('Age') plt.ylabel('Estimated Salary') plt.legend() plt.show() ```
github_jupyter
# Using TensorNet (Basic) This notebook will demonstrate some of the core functionalities of TensorNet: - Creating and setting up a dataset - Augmenting the dataset - Creating and configuring a model and viewing its summary - Defining an optimizer and a criterion - Setting up callbacks - Training and validating the model - Displaying plots for viewing the change in accuracy during training # Installing Packages ``` !pip install --upgrade --no-cache-dir torch-tensornet ``` # Imports Importing necessary packages and modules ``` %matplotlib inline import matplotlib.pyplot as plt from tensornet.data import CIFAR10 from tensornet.models import mobilenet_v2 from tensornet.models.loss import cross_entropy_loss from tensornet.models.optimizer import sgd from tensornet.utils import initialize_cuda, plot_metric from tensornet.engine.ops import ModelCheckpoint from tensornet.engine.ops.lr_scheduler import reduce_lr_on_plateau ``` ## Set Seed and Get GPU Availability ``` # Initialize CUDA and set random seed cuda, device = initialize_cuda(1) # random seed is set to 1 ``` ## Setup Dataset Downloading and initializing `CIFAR-10` dataset and applying the following augmentations: - Horizontal Flip - Random Rotation - Cutout Augmentation ``` dataset = CIFAR10( train_batch_size=64, val_batch_size=64, cuda=cuda, num_workers=4, horizontal_flip_prob=0.2, rotate_degree=20, cutout_prob=0.3, cutout_dim=(8, 8), ) ``` ## Data Visualization Let's see how our data looks like. This information will help us decide the transformations that can be used on the dataset. 
```
# Fetch data
classes = dataset.classes
sample_data, sample_targets = dataset.data()

# Set number of images to display
num_images = 4

# Display images with labels
fig, axs = plt.subplots(1, 4, figsize=(8, 8))
fig.tight_layout()
for i in range(num_images):
    axs[i].axis('off')
    axs[i].set_title(f'Label: {classes[sample_targets[i]]}')
    axs[i].imshow(sample_data[i])
```

## Training and Validation Dataloaders

This is the final step in data preparation. It sets the dataloader arguments and then creates the dataloaders.

```
# Create train data loader
train_loader = dataset.loader(train=True)

# Create val data loader
val_loader = dataset.loader(train=False)
```

# Model Architecture and Summary

We'll download a pretrained MobileNetV2 model and train it on our dataset using fine-tuning.

```
model = mobilenet_v2(pretrained=True).to(device)  # Create model
model.summary(dataset.image_size)  # Display model summary
```

# Model Training and Validation

- Loss Function: `Cross Entropy Loss`
- Optimizer: `SGD`
- Callbacks: `Model Checkpoint` and `Reduce LR on Plateau`

```
criterion = cross_entropy_loss()  # Create loss function
optimizer = sgd(model)  # Create optimizer with default learning rate

# Create callbacks
checkpoint_path = 'checkpoints'
callbacks = [
    ModelCheckpoint(checkpoint_path, monitor='val_accuracy'),
    reduce_lr_on_plateau(optimizer, factor=0.2, patience=2, min_lr=1e-6),
]

model.fit(
    train_loader,
    optimizer,
    criterion,
    device=device,
    epochs=10,
    val_loader=val_loader,
    callbacks=callbacks,
    metrics=['accuracy'],
)
```

## Result Analysis

Displaying the change in accuracy of the training and the validation set during training

```
plot_metric({
    'Training': model.learner.train_metrics[0]['accuracy'],
    'Validation': model.learner.val_metrics[0]['accuracy']
}, 'Accuracy')
```
github_jupyter
``` # Code source: Sebastian Curi and Andreas Krause. # Python Notebook Commands %matplotlib inline %reload_ext autoreload %load_ext autoreload %autoreload 2 # Numerical Libraries import numpy as np import matplotlib.pyplot as plt from matplotlib import rcParams rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. rcParams['font.size'] = 16 # IPython Libraries import IPython import ipywidgets from IPython.display import display from ipywidgets import interact, interactive, interact_manual # sklearn library import sklearn from sklearn.datasets import make_regression from sklearn.linear_model import Ridge from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline import warnings warnings.filterwarnings('ignore') # Custom Libraries from utilities import plot_helpers ``` # Model Selection In this task we have noisy samples of the function $f(x) = x \sin(x) $, and our objective is to learn it from data (here we're cheating because we already know the function). In this demo we will see how model selection works and how to use K-fold cross-validation. ``` rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. rcParams['font.size'] = 16 # Let's plot the function first (without noise) def f(x): return x * np.sin(x) x_plot = np.linspace(-1, 11, 100) f_plot = f(x_plot) X_plot = x_plot[:, np.newaxis] plot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], 'legend':False, 'legend_loc': 'lower left'} plot_helpers.plot_data(x_plot, f_plot, fig=plt.subplot(111), options=plot_opts) rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. 
rcParams['font.size'] = 16 noise_widget = ipywidgets.FloatSlider(value=1, min=0, max=3, step=0.5, readout_format='.1f', description='Noise level:', style={'description_width': 'initial'}, continuous_update=False) resample_button = ipywidgets.ToggleButton(description="Resample!") degree_widget = ipywidgets.IntSlider(min=1, max=19, step=1, description='Polynomial Degree:', style={'description_width': 'initial'}, continuous_update=False) reg_widget = ipywidgets.Dropdown( options=[0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2], value=0, description='Regularizer:', disabled=False, style={'description_width': 'initial'}, continuous_update=False ) def resample(b, noise): x = 10 * np.random.rand(20) y = f(x) + np.random.normal(size=(20,)) * noise # create matrix versions of these arrays X = x[:, np.newaxis] def change_degree(degree, reg): model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=reg)) model.fit(X, y) fig = plt.figure() lw = 2 plt.plot(x_plot, f_plot, color='cornflowerblue', linewidth=lw, label="Ground Truth") y_plot = model.predict(X_plot) plt.plot(x_plot, y_plot, color='r', linewidth=lw, label="Degree %d" % degree) plot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], 'legend':True, 'legend_loc': 'lower left'} opts = {'marker': 'b*', 'label': 'Training Points'} plot_opts.update(opts) plot_helpers.plot_data(X, y, fig=fig.gca(), options=plot_opts) plt.show() coefs = model._final_estimator.coef_ coefs[0] = model._final_estimator.intercept_ print("Estimated coefficients{}".format(coefs)) interact(change_degree, degree=degree_widget, reg=reg_widget); interact(resample, b=resample_button, noise=noise_widget); ``` # K-Fold Cross-Validation The idea of this method is to split the dataset into K different bins, use K-1 to learn and 1 to validate. Then you can interchange which split you validate on and make statistics on the different errors on each split (such as avg, std, etc). 
``` rcParams['figure.figsize'] = (10, 5) # Change this if figures look ugly. rcParams['font.size'] = 16 folds = 5 N = 50 n = int(N/folds) resample_button = ipywidgets.ToggleButton(description="Resample!") degree_widget = ipywidgets.IntSlider(value=1, min=1, max=19, step=1, description='Polynomial Degree:', style={'description_width': 'initial'}, continuous_update=False) fold_widget = ipywidgets.ToggleButtons(value=1, options=np.arange(1, folds+1), description='Validation fold:', style={'description_width': 'initial'}, continuous_update=False) noise_widget = ipywidgets.FloatSlider(value=1, min=0, max=3, step=0.5, readout_format='.1f', description='Noise level:', style={'description_width': 'initial'}, continuous_update=False) reg_widget = reg_widget = ipywidgets.Dropdown( options=[0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2], value=0, description='Regularizer:', disabled=False, style={'description_width': 'initial'}, continuous_update=False ) def resample(b, noise): xraw = 10 * np.random.rand(N) # rng = np.random.RandomState(0) np.random.shuffle(xraw) #noise=1 x = dict() y = dict() for i in range(folds): x[i] = xraw[n*i:n*(i+1)] y[i] = f(x[i]) + np.random.normal(size=(n,)) * noise def change_degree(degree, reg, fold): X = np.array(()) Y = np.array(()) for i in range(folds): if i == (fold-1): Xval = x[i] Yval = y[i] else: X = np.concatenate((X, x[i])) Y = np.concatenate((Y, y[i])) X = X[:, np.newaxis] Xval = Xval[:, np.newaxis] model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=reg)) model.fit(X, Y) fig = plt.subplot(111) lw = 2 plt.plot(x_plot, f_plot, color='cornflowerblue', linewidth=lw, label="Ground Truth") y_plot = model.predict(X_plot) plt.plot(x_plot, y_plot, color='r', linewidth=lw, label="Degree %d" % degree) opts = {'marker': 'b*', 'label': 'Training Points'} plot_helpers.plot_data(X, Y, fig=fig, options=opts) plot_opts = {'x_label': '$x$', 'y_label': '$y$', 'y_lim': [np.min(f_plot)-3, np.max(f_plot)+3], 'legend':True, 'legend_loc': 
'lower left'} opts = {'marker': 'mX', 'label': 'Validation Points'} plot_opts.update(opts) plot_helpers.plot_data(Xval, Yval, fig=fig, options=plot_opts) plt.show() print("Train. Error: {:.2f}".format(1/X.size * np.linalg.norm(model.predict(X) - Y, 2))) print("Valid. Error: {:.2f}".format(1/Xval.size * np.linalg.norm(model.predict(Xval) - Yval, 2))) interact(change_degree, degree=degree_widget, reg=reg_widget, fold=fold_widget); interact(resample, b=resample_button, noise=noise_widget); ```
github_jupyter
``` #Install bert package for tensorflow v1 !pip install bert-tensorflow==1.0.1 import bert from bert import run_classifier from bert import optimization from bert import tokenization from datetime import datetime import keras from keras import layers from keras.callbacks import ReduceLROnPlateau import numpy as np import pandas as pd import tensorflow as tf import tensorflow_hub as hub from tqdm.notebook import tqdm #adds progress bars to show loop status from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder class Generator(object): """This class consists of functions to convert the training, validation and test datasets into a format acceptable by LSTM model. LSTM takes inputs with fixed width only. But the vector representations of every report are of variable length as different reports have different number of words and thus different number of text splits. Each generator function takes batches of given size, gets the size of the largest input and extends the remaining inputs to the size of the largest, filling them with a special value. This process is along all the data. This way, all batches sequences would have the same length. """ def __init__(self, config): self.config = config def train_generator(self, df): num_sequences = len(df['emb'].to_list()) batch_size = self.config.training.batch_size_train batches_per_epoch = self.config.training.batches_per_epoch_train #make sure that all input data passes throught training assert batch_size * batches_per_epoch == num_sequences num_features= 768 x_list= df['emb'].to_list() y_list = df.label.to_list() # Generate batches while True: for b in range(batches_per_epoch): longest_index = (b + 1) * batch_size - 1 timesteps = len(max(df['emb'].to_list()[:(b + 1) * batch_size][-batch_size:], key=len)) x_train = np.full((batch_size, timesteps, num_features), -99.) 
y_train = np.zeros((batch_size, 1)) for i in range(batch_size): li = b * batch_size + i x_train[i, 0:len(x_list[li]), :] = x_list[li] y_train[i] = y_list[li] yield x_train, y_train def val_generator(self, df): num_sequences = len(df['emb'].to_list()) batch_size = self.config.training.batch_size_val batches_per_epoch = self.config.training.batches_per_epoch_val #make sure that all input data passes throught training assert batch_size * batches_per_epoch == num_sequences num_features= 768 x_list= df['emb'].to_list() y_list = df.label.to_list() # Generate batches while True: for b in range(batches_per_epoch): longest_index = (b + 1) * batch_size - 1 timesteps = len(max(df['emb'].to_list()[:(b + 1) * batch_size][-31:], key=len)) x_train = np.full((batch_size, timesteps, num_features), -99.) y_train = np.zeros((batch_size, 1)) for i in range(batch_size): li = b * batch_size + i x_train[i, 0:len(x_list[li]), :] = x_list[li] y_train[i] = y_list[li] yield x_train, y_train def test_generator(self, df): num_sequences = len(df['emb'].to_list()) batch_size = self.config.training.batch_size_test batches_per_epoch = self.config.training.batches_per_epoch_test #make sure that all input data passes throught training assert batch_size * batches_per_epoch == num_sequences num_features= 768 x_list= df['emb'].to_list() y_list = df.label.to_list() # Generate batches while True: for b in range(batches_per_epoch): longest_index = (b + 1) * batch_size - 1 timesteps = len(max(df['emb'].to_list()[:(b + 1) * batch_size][-31:], key=len)) x_train = np.full((batch_size, timesteps, num_features), -99.) y_train = np.zeros((batch_size, 1)) for i in range(batch_size): li = b * batch_size + i x_train[i, 0:len(x_list[li]), :] = x_list[li] y_train[i] = y_list[li] yield x_train, y_train ```
github_jupyter
``` import tensorflow as tf print(tf.__version__) ``` The Fashion MNIST data is available directly in the tf.keras datasets API. You load it like this: ``` mnist = tf.keras.datasets.fashion_mnist ``` Calling load_data on this object will give you two sets of two lists, these will be the training and testing values for the graphics that contain the clothing items and their labels. ``` (training_images, training_labels), (test_images, test_labels) = mnist.load_data() ``` What does these values look like? Let's print a training image, and a training label to see...Experiment with different indices in the array. For example, also take a look at index 42...that's a a different boot than the one at index 0 ``` import numpy as np np.set_printoptions(linewidth=200) import matplotlib.pyplot as plt plt.imshow(training_images[0]) print(training_labels[0]) print(training_images[0]) ``` You'll notice that all of the values in the number are between 0 and 255. If we are training a neural network, for various reasons it's easier if we treat all values as between 0 and 1, a process called '**normalizing**'...and fortunately in Python it's easy to normalize a list like this without looping. You do it like this: ``` training_images = training_images / 255.0 test_images = test_images / 255.0 ``` Now you might be wondering why there are 2 sets...training and testing -- remember we spoke about this in the intro? The idea is to have 1 set of data for training, and then another set of data...that the model hasn't yet seen...to see how good it would be at classifying values. After all, when you're done, you're going to want to try it out with data that it hadn't previously seen! Let's now design the model. There's quite a few new concepts here, but don't worry, you'll get the hang of them. 
``` model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) ``` **Sequential**: That defines a SEQUENCE of layers in the neural network **Flatten**: Remember earlier where our images were a square, when you printed them out? Flatten just takes that square and turns it into a 1 dimensional set. **Dense**: Adds a layer of neurons Each layer of neurons need an **activation function** to tell them what to do. There's lots of options, but just use these for now. **Relu** effectively means "If X>0 return X, else return 0" -- so what it does it it only passes values 0 or greater to the next layer in the network. **Softmax** takes a set of values, and effectively picks the biggest one, so, for example, if the output of the last layer looks like [0.1, 0.1, 0.05, 0.1, 9.5, 0.1, 0.05, 0.05, 0.05], it saves you from fishing through it looking for the biggest value, and turns it into [0,0,0,0,1,0,0,0,0] -- The goal is to save a lot of coding! The next thing to do, now the model is defined, is to actually build it. You do this by compiling it with an optimizer and loss function as before -- and then you train it by calling **model.fit ** asking it to fit your training data to your training labels -- i.e. have it figure out the relationship between the training data and its actual labels, so in future if you have data that looks like the training data, then it can make a prediction for what that data would look like. ``` model.compile(optimizer = tf.optimizers.Adam(), loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=5) ``` Once it's done training -- you should see an accuracy value at the end of the final epoch. It might look something like 0.9098. This tells you that your neural network is about 91% accurate in classifying the training data. 
I.E., it figured out a pattern match between the image and the labels that worked 91% of the time. Not great, but not bad considering it was only trained for 5 epochs and done quite quickly. But how would it work with unseen data? That's why we have the test images. We can call model.evaluate, and pass in the two sets, and it will report back the loss for each. Let's give it a try: ``` model.evaluate(test_images, test_labels) ``` For me, that returned a accuracy of about .8838, which means it was about 88% accurate. As expected it probably would not do as well with *unseen* data as it did with data it was trained on! As you go through this course, you'll look at ways to improve this. To explore further, try the below exercises: # Exploration Exercises ###Exercise 1: For this first exercise run the below code: It creates a set of classifications for each of the test images, and then prints the first entry in the classifications. The output, after you run it is a list of numbers. Why do you think this is, and what do those numbers represent? ``` test_images.shape classifications = model.predict(test_images) print(classifications[0]) print(classifications.shape) # the picture is given to us # the model predicts the probability according to each label with the index # the index having the highest probability is the predicted label # here, index 9 has the highest probability # thus, it means that the item is most likely to be 9 (ankle boot) ``` Hint: try running print(test_labels[0]) -- and you'll get a 9. Does that help you understand why this list looks the way it does? ``` print(test_labels[0]) ``` ### What does this list represent? 1. It's 10 random meaningless values 2. It's the first 10 classifications that the computer made 3. It's the probability that this item is each of the 10 classes ####Answer: The correct answer is (3) The output of the model is a list of 10 numbers. 
These numbers are a probability that the value being classified is the corresponding value (https://github.com/zalandoresearch/fashion-mnist#labels), i.e. the first value in the list is the probability that the image is of a '0' (T-shirt/top), the next is a '1' (Trouser) etc. Notice that they are all VERY LOW probabilities. For the 9 (Ankle boot), the probability was in the 90's, i.e. the neural network is telling us that it's almost certainly a 9. ### How do you know that this list tells you that the item is an ankle boot? 1. There's not enough information to answer that question 2. The 10th element on the list is the biggest, and the ankle boot is labelled 9 3. The ankle boot is label 9, and there are 0->9 elements in the list ####Answer The correct answer is (2). Both the list and the labels are 0 based, so the ankle boot having label 9 means that it is the 10th of the 10 classes. The list having the 10th element being the highest value means that the Neural Network has predicted that the item it is classifying is most likely an ankle boot ##Exercise 2: Let's now look at the layers in your model. Experiment with different values for the dense layer with 512 neurons. What different results do you get for loss, training time etc? Why do you think that's the case? ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels) , (test_images, test_labels) = mnist.load_data() training_images = training_images/255.0 test_images = test_images/255.0 model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(1024, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(training_images, training_labels, epochs=5) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) ``` ###Question 1. 
Increase to 1024 Neurons -- What's the impact? 1. Training takes longer, but is more accurate 2. Training takes longer, but no impact on accuracy 3. Training takes the same time, but is more accurate ####Answer The correct answer is (1) by adding more Neurons we have to do more calculations, slowing down the process, but in this case they have a good impact -- we do get more accurate. That doesn't mean it's always a case of 'more is better', you can hit the law of diminishing returns very quickly! ##Exercise 3: What would happen if you remove the Flatten() layer. Why do you think that's the case? You get an error about the shape of the data. It may seem vague right now, but it reinforces the rule of thumb that the first layer in your network should be the same shape as your data. Right now our data is 28x28 images, and 28 layers of 28 neurons would be infeasible, so it makes more sense to 'flatten' that 28,28 into a 784x1. Instead of writing all the code to handle that ourselves, we add the Flatten() layer at the beginning, and when the arrays are loaded into the model later, they'll automatically be flattened for us. ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels) , (test_images, test_labels) = mnist.load_data() training_images = training_images/255.0 test_images = test_images/255.0 model = tf.keras.models.Sequential([#tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy') model.fit(training_images, training_labels, epochs=5) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) ``` ##Exercise 4: Consider the final (output) layers. Why are there 10 of them? What would happen if you had a different amount than 10? 
For example, try training the network with 5 You get an error as soon as it finds an unexpected value. Another rule of thumb -- the number of neurons in the last layer should match the number of classes you are classifying for. In this case it's the digits 0-9, so there are 10 of them, hence you should have 10 neurons in your final layer. ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels) , (test_images, test_labels) = mnist.load_data() training_images = training_images/255.0 test_images = test_images/255.0 model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(64, activation=tf.nn.relu), tf.keras.layers.Dense(5, activation=tf.nn.softmax)]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy') model.fit(training_images, training_labels, epochs=5) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) ``` ##Exercise 5: Consider the effects of additional layers in the network. What will happen if you add another layer between the one with 512 and the final layer with 10. Ans: There isn't a significant impact -- because this is relatively simple data. For far more complex data (including color images to be classified as flowers that you'll see in the next lesson), extra layers are often necessary. 
``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels) , (test_images, test_labels) = mnist.load_data() training_images = training_images/255.0 test_images = test_images/255.0 model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['accuracy']) model.fit(training_images, training_labels, epochs=5) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) ``` #Exercise 6: Consider the impact of training for more or less epochs. Why do you think that would be the case? Try 15 epochs -- you'll probably get a model with a much better loss than the one with 5 Try 30 epochs -- you might see the loss value stops decreasing, and sometimes increases. This is a side effect of something called 'overfitting' which you can learn about [somewhere] and it's something you need to keep an eye out for when training neural networks. There's no point in wasting your time training if you aren't improving your loss, right! 
:) ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels) , (test_images, test_labels) = mnist.load_data() training_images = training_images/255.0 test_images = test_images/255.0 model = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy') model.fit(training_images, training_labels, epochs=30) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[34]) print(test_labels[34]) ``` #Exercise 7: Before you trained, you normalized the data, going from values that were 0-255 to values that were 0-1. What would be the impact of removing that? Here's the complete code to give it a try. Why do you think you get different results? ``` import tensorflow as tf print(tf.__version__) mnist = tf.keras.datasets.mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() # training_images=training_images/255.0 # test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy') model.fit(training_images, training_labels, epochs=5) model.evaluate(test_images, test_labels) classifications = model.predict(test_images) print(classifications[0]) print(test_labels[0]) ``` #Exercise 8: Earlier when you trained for extra epochs you had an issue where your loss might change. It might have taken a bit of time for you to wait for the training to do that, and you might have thought 'wouldn't it be nice if I could stop the training when I reach a desired value?' -- i.e. 
95% accuracy might be enough for you, and if you reach that after 3 epochs, why sit around waiting for it to finish a lot more epochs....So how would you fix that? Like any other program...you have callbacks! Let's see them in action... ``` import tensorflow as tf print(tf.__version__) class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('loss')<0.4): print("\nReached 60% accuracy so cancelling training!") self.model.stop_training = True callbacks = myCallback() mnist = tf.keras.datasets.fashion_mnist (training_images, training_labels), (test_images, test_labels) = mnist.load_data() training_images=training_images/255.0 test_images=test_images/255.0 model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy') model.fit(training_images, training_labels, epochs=5, callbacks=[callbacks]) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 # Add parent directory into system path import sys, os sys.path.insert(1, os.path.abspath(os.path.normpath('..'))) from utils.dataset_generator import generate_dataset, ImplicitDataset, TestDataset, SliceDataset import numpy as np from sdf import * import math @sdf3 def gyroid(w = 3.14159, t=0): def f(p): q = w*p x, y, z = (q[:, i] for i in range(3)) return (np.cos(x)*np.sin(y) + np.cos(y)*np.sin(z) + np.cos(z)*np.sin(x) - t) return f generate_dataset(box(1.0) & gyroid(w=math.pi*4, t=0), N_train=100*100*100, N_test=3e6, name='box_1f0_gyroid_4pi', save_dir='../datasets') name = 'box_1f0_gyroid_4pi' train = ImplicitDataset.from_file(f'../datasets/{name}/train.npz', device='cuda') #test = TestDataset(f'../datasets/{name}_test.npz') #slice_dataset = SliceDataset.from_file(f'../datasets/{name}_slice.npz') from utils.dataset_generator import batch_loader for i in batch_loader(train.points, batch_size=10000): print(i.shape) from utils.geometry import Mesh mesh = Mesh(f'../datasets/{name}/raw.stl', doNormalize=True) bv, _ = mesh.bounding_box() np.min(bv, axis=0) print(train) print(test) print(slice_dataset) print(test.random.points.shape) ndim = round(train.sdfs.shape[0]**(1/3)) print(ndim) points = train.points.reshape((ndim, ndim, ndim, 3)) sdfs = train.sdfs.reshape((ndim, ndim, ndim)) true_sdfs = train.true_sdfs.reshape((ndim, ndim, ndim)) dx = points[1,1,1,:] - points[0,0,0,:] #grad = np.linalg.norm(np.array(np.gradient(sdfs, *dx)), axis=0) grad = train.grads.reshape((ndim, ndim, ndim, 3)) norm_grad = np.linalg.norm(grad, axis=3) true_grad = train.true_grads.reshape((ndim, ndim, ndim, 3)) norm_true_grad = np.linalg.norm(true_grad, axis=3) cosine_similarity = np.einsum('ij,ij->i', train.grads, train.true_grads).reshape((ndim,ndim,ndim)) / norm_grad / norm_true_grad slice_z = 20 from utils.visualizer import SDFVisualize visualizer = SDFVisualize() visualizer._plot(sdfs[:, :, slice_z], norm_grad[:, :, slice_z]) 
visualizer._plot(true_sdfs[:, :, slice_z], norm_true_grad[:, :, slice_z]) visualizer._plot(sdfs[:, :, slice_z] - true_sdfs[:, :, slice_z]) visualizer._plot(norm_grad[:, :, slice_z] - norm_true_grad[:, :, slice_z]) visualizer._plot(cosine_similarity[:,:,slice_z]) import matplotlib.pyplot as plt _norm_grad = norm_grad.reshape((ndim**3,)) _norm_true_grad = norm_true_grad.reshape((ndim**3,)) plt.hist(_norm_grad, bins=120) plt.show() plt.hist(_norm_true_grad, bins=120) plt.show() ```
github_jupyter
# Exploring Datasets with Python In this short demo we will analyse a given dataset from 1978, which contains information about politicians having affairs. To analyse it, we will use a Jupyter Notebook, which is basically a REPL++ for Python. Entering a command with shift executes the line and prints the result. ``` 4 + 4 def sum(a, b): return a + b sum(40, 2) import pandas as pd affairs = pd.read_csv('affairs.csv') affairs.head() affairs['sex'].head() affairs['sex'].value_counts() affairs['age'].describe() affairs['age'].max() affairs.describe() affairs[affairs['sex'] == 'female'].head() affairs[affairs['sex'] == 'female'].describe() affairs['below_30'] = affairs['age'] < 30 affairs['below_30'].value_counts() affairs.head() rel_meanings = ['not', 'mildly', 'fairly', 'strongly'] affairs['religious'] = affairs['religious'].apply(lambda x: rel_meanings[min(x, 4)-1]) affairs.head() ``` # Visualize Data To visualize our data, we will use Seaborn, a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics. Let's import it. ``` import seaborn as sns %matplotlib inline sns.set() sns.set_context('talk') ``` Seaborn together with Pandas makes it pretty easy to create charts to analyze our data. We can pass our Dataframes and Series directly into Seaborn methods. We will see how in the following sections. # Univariate Plotting Let's start by visualizing the distribution of the ages of our people. We can achieve this with a simple method called distplot by passing our series of ages as argument. ``` sns.distplot(affairs['age']) sns.distplot(affairs['age'], bins=50, rug=True, kde=False) sns.distplot(affairs['ym'], bins=10, kde=False) ``` The average age of our people is around 32, but most people are married for more than 14 years! # Bivariate Plotting Numbers get even more interesting when we can compare them to other numbers! Let's start comparing the number of years married vs the number of affairs. 
Seaborn provides us with a method called jointplot for this use case. ``` sns.jointplot(affairs['ym'], affairs['nbaffairs']) sns.jointplot(affairs['ym'], affairs['nbaffairs'], kind='reg') sns.jointplot(affairs['ym'], affairs['age'], kind='kde', shade=True) sns.pairplot(affairs.drop('below_30', axis=1), hue='sex', kind='reg') sns.lmplot(x="ym", y="nbaffairs", hue="sex", col="child", row="religious", data=affairs) sns.boxplot(x="sex", y="ym", hue="child", data=affairs); sns.violinplot(x="religious", y="nbaffairs", hue="sex", data=affairs, split=True); affairs.corr() sns.heatmap(affairs.corr(), cmap='coolwarm') ```
github_jupyter
# MSTICpy - Mordor data provider and browser ### Description This notebook provides a guided example of using the Mordor data provider and browser included with MSTICpy. For more information on the Mordor data sets see the [Open Threat Research Forge Mordor GitHub repo](https://github.com/OTRF/mordor) You must have msticpy installed to run this notebook: ``` %pip install --upgrade msticpy ``` MSTICpy versions >= 0.8.5 ### Contents: - Using the Mordor data provider to retrieve data sets - Listing queries - Running a query to retrieve data - Optional parameters - Searching for queries by Mordor property - Mordor Browser ## Using the Data Provider to download datasets Using the data provider you can download and render event data as a pandas DataFrame. > **Note** - Mordor includes both host event data and network capture data.<br> > Although Capture files can be downloaded and unpacked<br> > they currently cannot be populated into a pandas DataFrame. > This is the case for most `network` datasets.<br> > `Host` event data is retrieved and populated into DataFrames. ``` from msticpy.data import QueryProvider mdr_data = QueryProvider("Mordor", save_folder="./mordor") mdr_data.connect() ``` ### List Queries > Note: Many Mordor data entries have multiple data sets, so we see more queries than Mordor entries. (Only first 15 shown) ``` mdr_data.list_queries()[:15] ``` ### Retrieving/querying a data set ``` mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges().head(3) ``` ### Optional parameters The data provider and the query functions support some parameters to control aspects of the query operation. - **use_cached** : bool, optional<br> Try to use locally saved file first, by default True. If you’ve previously downloaded a file, it will use this rather than downloading a new copy. - **save_folder** : str, optional<br> Path to output folder, by default ".". The path that downloaded and extracted files are saved to. 
- **silent** : bool<br> If True, suppress feedback. By default, False. If you specify these when you initialize the data provider, the settings will apply to all queries. ``` mdr_data = QueryProvider("Mordor", save_folder="./mordor") mdr_data.connect() ``` Using these parameters in the query will override the provider settings and defaults for that query. ``` mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges(silent=True, save_folder="./mordor").head(2) ``` ## Getting summary data about a query Call the query function with a single "?" parameter. ``` mdr_data.small.windows.credential_access.host.covenant_dcsync_dcerpc_drsuapi_DsGetNCChanges("?") ``` ### Searching for Queries with QueryProvider.search_queries() Search queries for matching attributes. #### Parameters **search** : str Search string. Substrings separated by commas will be treated as OR terms - e.g. "a, b" == "a" or "b".<br> Substrings separated by "+" will be treated as AND terms - e.g. "a + b" == "a" and "b" #### Returns List of matching query names. ``` mdr_data.search_queries("AWS") mdr_data.search_queries("Empire + T1222") mdr_data.search_queries("Empire + Credential") ``` ## Mordor Browser We've also built a more specialized browser for Mordor data. This uses the metadata in the repository to let you view full details of the dataset. You can also preview the dataset (if it is convertible to a DataFrame). For details of the data shown please see the [Mordor GitHub repo](https://github.com/OTRF/mordor)<br> and the [Threat Hunter Playbook](https://threathunterplaybook.com/introduction.html) ``` from msticpy.data.browsers.mordor_browser import MordorBrowser mdr_browser = MordorBrowser() ``` ### Mordor Browser Details The top scrollable list is a list of the Mordor datasets. Selecting one of these updates the data in the lower half of the browser. 
#### Filter Drop-down To narrow your search you can filter using a text search or filter by Mitre Attack Techniques or Tactics. - The Filter text box uses the same syntax as the provider `search_queries()` function. - Simple text string will find matches for datasets that contain this string - Strings separated by "," are treated as OR terms - i.e. it will match items that contain ANY of the substrings - Strings separated by "+" are treated as AND terms - i.e. it will match items that contain ALL of the substrings - The Mitre Techniques and Tactics lists are multi-select lists. Only items that have techniques and tactics matching the selected items will be show. - Reset Filter button will clear any filtering. #### Main Details Window - title, ID, author, creation date, modification date and description are self-explanatory. - tags can be used for searching - file_paths (see below) - attacks - lists related Mitre Technique and Tactics. The item title is a link to the Mitre page describing the technique or tactic. - notebooks - if there is a notebook in the Threat Hunter Playbook site, a link to it is shown here. (multiple notebooks might be shown) - simulation - raw data listing the steps in the attack (and useful for replaying the attack in a demo environment). - references - links to any external data about the attack. #### File_paths This section allows you to select, download and (in most cases) display the event data relating to the attack. Select a file and click on the Download button. The zipped file is downloaded and extracted. If it is event data, this is converted to a pandas DataFrame and displayed below the rest of the data. The current dataset is available as an attribute of the browser: ``` mdr_browser.current_dataset ``` Datasets that you've downloaded and displayed in this session are also cached in the browser and available in the `mdr_browser.datasets` attribute. 
#### Downloaded files By default files are downloaded and extracted to the current folder. You can change this with the `save_folder` parameter when creating the `MordorBrowser` object. You can also specify the `use_cached` parameter. By default, this is `True`, which causes downloaded files not to be deleted after extraction. These local copies are used if you try to view the same data set again. This also works across sessions. If `use_cache` is set to False, files are deleted immediately after downloading, extracting and populating the DataFrame. ### Using the standard query browser > **Note** - In the `Example` section, ignore the examples of parameters<br> > passed to the query - these are not needed and ignored. ``` mdr_data.browse_queries() ``` ## Remove cached files ``` from pathlib import Path for file in Path("./mordor").glob("*"): file.unlink() Path("./mordor").rmdir() ```
github_jupyter
``` import torch from transformers import MT5ForConditionalGeneration, MT5Config, MT5EncoderModel, MT5Tokenizer, Trainer, TrainingArguments from progeny_tokenizer import TAPETokenizer import numpy as np import math import random import scipy import time import pandas as pd from torch.utils.data import DataLoader, RandomSampler, Dataset, BatchSampler import typing from pathlib import Path import argparse from collections import OrderedDict import pickle import matplotlib.pyplot as plt from tape.metrics import spearmanr before_foldx = False ``` # Analyze 250K gen seqs and prepare for FoldX saved output tsv file to run FoldX inference ``` wt_seq = 'STIEEQAKTFLDKFNHEAEDLFYQSSLASWNYNTNITEENVQNMNNAGDKWSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ' constant_region = 'NTNITEEN' wt_cs_ind = wt_seq.index(constant_region) gen250k_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000.tsv' # gen250k_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000.tsv' gen250k_df = pd.read_table(gen250k_tsv_name) gen250k_df ``` filter out sequences without constant region ``` indices_to_drop = [] dropped_seqs = [] for index, row in gen250k_df.iterrows(): seq = row['MT_seq'] if constant_region not in seq: indices_to_drop.append(index) dropped_seqs.append(seq) else: cs_ind = seq.index(constant_region) if cs_ind != wt_cs_ind: indices_to_drop.append(index) dropped_seqs.append(seq) print(len(indices_to_drop)) print(indices_to_drop) print(dropped_seqs) gen250k_df_dropped_nocon = gen250k_df.drop(indices_to_drop) gen250k_df_dropped_nocon ``` filter out sequences with non-AA tokens ``` rejected_tokens = ["<pad>", "<sep>", "<cls>", "<mask>", "<unk>"] indices_to_drop = [] dropped_seqs = [] for index, row in 
gen250k_df_dropped_nocon.iterrows(): seq = row['MT_seq'] for rejected_token in rejected_tokens: if rejected_token in seq: indices_to_drop.append(index) dropped_seqs.append(seq) break print(len(indices_to_drop)) print(indices_to_drop) print(dropped_seqs) gen250k_df_dropped = gen250k_df_dropped_nocon.drop(indices_to_drop) print(len(gen250k_df_dropped)) gen250k_df_dropped ``` # Filter out sequences that are repeat or in training set ``` input_data_file = 'data/gen_train_data/top_half_ddG/train_ddG.pkl' input_data_df = pd.read_pickle(input_data_file) input_data_df.iloc[0]['MT_seq'] input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq'] input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq'].tolist() train_seq_list = input_data_df['MT_seq'].tolist() train_seq_list len(train_seq_list) ``` Filter out those that are repeat ``` gen250k_df_dropped_norepeat = gen250k_df_dropped[gen250k_df_dropped['repeated_gen'] == False] gen250k_df_dropped_norepeat gen250k_df_dropped_norepeat.iloc[0] ``` Filter out those from the training set ``` gen250k_df_filtered = gen250k_df_dropped_norepeat[gen250k_df_dropped_norepeat['in_train_data_gen'] == False] gen250k_df_filtered gen250k_df_filtered.iloc[0] np.sum(gen250k_df_filtered['repeated_gen']) np.sum(gen250k_df_filtered['in_train_data_gen']) topK_saved = 10000 gen250k_df_filtered = gen250k_df_filtered[:250000] gen250k_df_filtered = gen250k_df_filtered.sort_values(by='latent_head_pred', ascending=True) # gen250k_df_filtered = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True) gen250k_df_filtered_topK = gen250k_df_filtered.iloc[:topK_saved] filtered_LHscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered.tsv' # filtered_LHscored_gen250k_top10K_tsv_name = 
'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Klatentheadfiltered.tsv' disc_latenthead_cor = spearmanr(gen250k_df_filtered_topK['disc_pred'], gen250k_df_filtered_topK['latent_head_pred']) print("disc_latenthead_cor: ", disc_latenthead_cor) gen250k_df_filtered_sorted_disc = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True) gen250k_df_filtered_sorted_disc_topK = gen250k_df_filtered_sorted_disc.iloc[:topK_saved] filtered_Dscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered.tsv' # filtered_Dscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Kdiscfiltered.tsv' all250K_disc_latenthead_cor = spearmanr(gen250k_df_filtered['disc_pred'], gen250k_df_filtered['latent_head_pred']) print("all250K_disc_latenthead_cor: ", all250K_disc_latenthead_cor) ``` # Save top 10K seqs for FoldX Evaluation ``` if before_foldx: gen250k_df_filtered_topK.to_csv(filtered_LHscored_gen250k_top10K_tsv_name, sep="\t", index=False) gen250k_df_filtered_sorted_disc_topK.to_csv(filtered_Dscored_gen250k_top10K_tsv_name, sep="\t", index=False) len(gen250k_df_filtered_topK) df_toplot = gen250k_df_filtered ``` # Analyze hamming distance ``` # Compute hamming distance between MT and WT def hamming_dist(str1, str2): i = 0 count = 0 while(i < len(str1)): if(str1[i] != str2[i]): count += 1 i += 1 return count hamming_dist_list = [] wt_seq = df_toplot.iloc[0]['WT_seq'] for index, row in df_toplot.iterrows(): gen_seq = row['MT_seq'] h_dist = hamming_dist(gen_seq, wt_seq) hamming_dist_list.append(h_dist) print("Hamming distance stats") print("max: ", 
np.max(hamming_dist_list)) print("min: ", np.min(hamming_dist_list)) print("median: ", np.median(hamming_dist_list)) print("mean: ", np.mean(hamming_dist_list)) print("std: ", np.std(hamming_dist_list)) ``` hamming distance for generator training data ``` gen_train_data = 'data/gen_train_data/top_half_ddG/train_ddG.pkl' gen_train_df = pd.read_pickle(gen_train_data) wt_seq = gen_train_df.iloc[0]['WT_seq'] gen_train_hamming_dist_list = [] for index, row in gen_train_df.iterrows(): train_seq = row['MT_seq'] h_dist = hamming_dist(train_seq, wt_seq) gen_train_hamming_dist_list.append(h_dist) plt.figure(figsize=(8,6)) plt.hist(hamming_dist_list, density=True, label='generated', bins=[i for i in range(46)], alpha=0.4) # plt.xlabel("Hamming Distance", size=14) # plt.ylabel("Count", size=14) # plt.title("Hamming Distance from WT seq") plt.hist(gen_train_hamming_dist_list, density=True, label='train_data', bins=[i for i in range(46)], alpha=0.4) plt.xlabel("Hamming Distance", size=14) plt.ylabel("Density", size=14) plt.title("Top 5% Generator") plt.legend(loc='upper left') ``` # Sample for E[min] FoldX Computation ``` gen250k_df_filtered # Get topk seqs num_rounds = 100 # N round_pool_size = 10000 topk = 10 # K round_topk = {} cols_to_sort = ['latent_head_pred'] # cols_to_sort = ['disc_pred', 'latent_head_pred'] foldx_df = None in_count = 0 for col_to_sort in cols_to_sort: print("col_to_sort: ", col_to_sort) round_topk[col_to_sort] = {} for round_ind in range(num_rounds): sampled_rows = gen250k_df_filtered.sample(n=round_pool_size) sorted_sampled_rows = sampled_rows.sort_values(by=col_to_sort, ascending=True)[:topk] topk_rows = sorted_sampled_rows[:topk] round_topk[col_to_sort][round_ind] = topk_rows for round_ind in round_topk[col_to_sort]: round_topk_df = round_topk[col_to_sort][round_ind] if foldx_df is None: foldx_df = round_topk_df else: all_mt = foldx_df['MT_seq'].tolist() for row_ind, row in round_topk_df.iterrows(): if row['MT_seq'] not in all_mt: foldx_df = 
foldx_df.append(row) else: in_count += 1 print("len(foldx_df)+in_count: ", len(foldx_df)+in_count) foldx_df in_count ``` # save E[min] seqs to do FoldX¶ ``` seqsforEmin_dict_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_df.pkl' # seqsforEmin_dict_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_df.pkl' if before_foldx: with open(seqsforEmin_dict_name, 'wb') as f: pickle.dump(round_topk, f) # with open(seqsforEmin_dict_name, 'rb') as f: # b = pickle.load(f) seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_foldx.tsv' # seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_foldx.tsv' if before_foldx: foldx_df.to_csv(seqsforEmin_tsv_name, sep="\t", index=False) ``` # <<===== After Foldx Computation =====>> ``` # foldx_results_name = "path_to_foldx_results" # # foldx_results_name = "foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv" # foldx_results_df = pd.read_table(foldx_results_name) foldx_results_names = [ "foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered/results_full.tsv", # "foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered/results_full.tsv", ] # foldx_results_name = "foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv" 
foldx_results_df = None for foldx_results_name in foldx_results_names: if foldx_results_df is None: foldx_results_df = pd.read_table(foldx_results_name) else: next_foldx_results_df = pd.read_table(foldx_results_name) foldx_results_df = foldx_results_df.append(next_foldx_results_df, ignore_index=True) foldx_results_df # Compute Emin from foldx values rows_to_patch = None Emin_results_dict = {} mean_disc_ddG_cor_results_dict = {} mean_latent_ddG_cor_results_dict = {} for col_to_sort in round_topk: print(col_to_sort) current_score_round_topk = round_topk[col_to_sort] round_min_list = [] round_disc_ddG_cor_list = [] round_latent_ddG_cor_list = [] for round_ind in current_score_round_topk: round_topk_df = current_score_round_topk[round_ind] round_ddG = [] round_disc_pred = [] round_latent_head_pred = [] for row_ind, row in round_topk_df.iterrows(): row_seq = row['MT_seq'] matched_row = foldx_results_df.loc[foldx_results_df['MT_seq'] == row_seq] if len(matched_row) != 1 : # print("matched_row: ", matched_row) if len(matched_row) == 0 : if rows_to_patch is None: rows_to_patch = row else: rows_to_patch.append(row) # raise else: # round_ddG.append(matched_row.iloc[0]['ddG'].to_numpy()[0]) # round_disc_pred.append(matched_row.iloc[0]['disc_pred'].to_numpy()[0]) # round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred'].to_numpy()[0]) round_ddG.append(matched_row.iloc[0]['ddG']) round_disc_pred.append(matched_row.iloc[0]['disc_pred']) round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred']) else: # print("matched_row['ddG'] to_numpy: ", matched_row['ddG'].to_numpy()) # print("matched_row['ddG'] to_numpy 0: ", matched_row['ddG'].to_numpy()[0]) # print("matched_row['ddG']: ", matched_row['ddG']) # print("matched_row['disc_pred']: ", matched_row['disc_pred']) round_ddG.append(matched_row['ddG'].to_numpy()[0]) # ! 
changed to ddG round_disc_pred.append(matched_row['disc_pred'].to_numpy()[0]) round_latent_head_pred.append(matched_row['latent_head_pred'].to_numpy()[0]) # round_ddG.append(matched_row['ddG']) # ! changed to ddG # round_disc_pred.append(matched_row['disc_pred']) # round_latent_head_pred.append(matched_row['latent_head_pred']) # print("len(round_disc_pred): ", len(round_disc_pred)) # print("len(round_ddG): ", len(round_ddG)) # print("round_disc_pred: ", round_disc_pred) # print("round_ddG: ", round_ddG) # print("round_ddG.to_numpy(): ", round_ddG.to_numpy()) round_disc_ddG_cor = spearmanr(round_disc_pred, round_ddG) round_disc_ddG_cor_list.append(round_disc_ddG_cor) round_latent_ddG_cor = spearmanr(round_latent_head_pred, round_ddG) round_latent_ddG_cor_list.append(round_latent_ddG_cor) round_min = np.min(round_ddG) # print("round_ddG: ", round_ddG) # print("round_min: ", round_min) round_min_list.append(round_min) Emin = np.mean(round_min_list) # print("round_min_list: ", round_min_list) # print("Emin: ", Emin) mean_disc_ddG_cor = np.mean(round_disc_ddG_cor_list) mean_latent_ddG_cor = np.mean(round_latent_ddG_cor_list) Emin_results_dict[col_to_sort] = Emin mean_disc_ddG_cor_results_dict[col_to_sort] = mean_disc_ddG_cor mean_latent_ddG_cor_results_dict[col_to_sort] = mean_latent_ddG_cor print(rows_to_patch) ``` # Save Emin Results ``` Emin_results_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_results.txt' # Emin_results_name = 'generated_seqs/baseline_gen/Emin_results/tophalf-basegen_seqsforEmin_results.txt' with open(Emin_results_name, "w") as writer: writer.write("***** E[min] results *****\n") writer.write("seqsforEmin_dict_name: {}\n".format(seqsforEmin_dict_name)) for key in sorted(Emin_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, 
str(Emin_results_dict[key]))) writer.write("***** mean_disc_ddG_cor results *****\n") for key in sorted(mean_disc_ddG_cor_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, str(mean_disc_ddG_cor_results_dict[key]))) writer.write("***** mean_latent_ddG_cor results *****\n") for key in sorted(mean_latent_ddG_cor_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, str(mean_latent_ddG_cor_results_dict[key]))) ```
github_jupyter
``` from util.MicFileTool import MicFile import util.Simulation as Gsim import util.RotRep as Rot import numpy as np import matplotlib.pyplot as plt from scipy import ndimage from scipy import optimize import json import os ``` # Extract the windows around the Bragg Peaks from a small grain ``` a=MicFile("AuxData/Ti7_WithHRM_2ndLoad_z1_.mic.LBFS") # crystal_file = {'material':'Ti7LP15', # 'A':list(2.95*np.array([1,0,0])), # 'B':list(2.95*np.array([np.cos(np.pi*2/3),np.sin(np.pi*2/3),0])), # 'C':list(4.7152*np.array([0,0,1])), # 'atom_pos':[[[1/3.0,2/3.0,1/4.0],22],[[2/3.0,1/3.0,3/4.0],22]]} # with open('Ti7LP15.json','w') as file: # json.dump(crystal_file,file) # det_file = {'name':'test', # 'psizeJ':0.001454, # 'psizeK':0.001454, # 'pnJ':2048, # 'pnK':2048, # 'J':1182.19, # 'K':2026.27, # 'trans_vec':[7.14503,0,0], # 'tilt':Rot.EulerZXZ2Mat(np.array([89.1588,87.5647,0.278594])/180.0*np.pi).tolist()} # with open('det_params.json','w') as file: # json.dump(det_file,file) ############ Experimental Parameters ############ crystal_file = 'Ti7LP15.json' detector_file = 'det_params.json' energy = 51.9957 etalimit=81/180.0*np.pi omegaL,omegaU = 0,180 ################ grain ID and hexomap output file ############# grain_ID = '55_2nd' path = 'Calibration_Files/grain_%s/'%grain_ID grain_pos=np.array([-0.345937, 0.238698, 0]) o_mat=Rot.EulerZXZ2Mat(np.array([97.9141, 90.0041, 259.313])/180.0*np.pi) ################### raw data ####################### raw_data='/mnt/data/sparrow_data/NF-DataSets/2013-07_NF-Strain1/V/Suter_Jul13/NF/Ti7_WithHRM_Under2ndLoad/Ti7_WithHRM_Under2ndLoad__' if 'grain_%s'%grain_ID not in os.listdir('Calibration_Files/'): os.mkdir(path) os.mkdir(path+'Ps_bf/') os.mkdir(path+'Ps_filtered/') # create detector object Det1=Gsim.Detector(param_file=detector_file) #create crystal object crystal_str=Gsim.CrystalStr(cryst_file=crystal_file) crystal_str.getRecipVec() crystal_str.getGs(13) Ps,Gs,Info=Gsim.GetProjectedVertex(Det1,crystal_str,o_mat,etalimit, 
grain_pos,getPeaksInfo=True, omegaL=omegaL,omegaU=omegaU,energy=energy) # extract window around the Bragg peak on an omega frame def fetch(ii,pks,fn,offset=0,dx=100,dy=50,verbo=False,more=False,pnx=2048,pny=2048,omega_step=20): omegid=int((180-pks[ii,2])*omega_step)+offset if omegid<0: omegid+=3600 if omegid>=3600: omegid-=3600 I=plt.imread(fn+'{0:06d}.tif'.format(omegid)) x1=int((pny-1-pks[ii,0])-dx) y1=int(pks[ii,1]-dy) if verbo: print('y=',pks[ii,1]) print('x=',pks[ii,0]) x1=max(0,x1) y1=max(0,y1) x2=x1+2*dx y2=y1+2*dy x2=min(x2,pnx) y2=min(y2,pny) if more: return I[y1:y2,x1:x2],(x1,x2,y1,y2,omegid) return I[y1:y2,x1:x2] pks=Ps dx = 150 dy = 80 for ii in range(len(pks)): allpks=[] alllims=[] totoffset=0 f,axis=plt.subplots(9,5) i=0 j=0 for offset in range(totoffset-22,totoffset+23): Im,limits=fetch(ii,pks,raw_data,offset,dx=dx,dy=dy,more=True) if i==9: j+=1 i=0 axis[i,j].imshow(Im,vmin=0,vmax=30) i+=1 allpks.append(Im) alllims.append(limits) f.subplots_adjust(wspace=0,hspace=0) f.savefig(path+'Ps_bf/{0:d}.png'.format(ii),dpi=200,bbox_inches='tight') plt.close(f) allpks=np.array(allpks) alllims=np.array(alllims) np.save(path+'Ps_bf/Im{0:d}'.format(ii),allpks) np.save(path+'Ps_bf/limit{0:d}'.format(ii),alllims) Nfile=len(pks) Im=[] flucThresh=4 for ii in range(Nfile): Im.append(np.load(path+'Ps_bf/Im{:d}.npy'.format(ii))) Im[ii]=Im[ii]-np.median(Im[ii],axis=0) #substract the median mask=Im[ii]>flucThresh Im[ii]=mask*Im[ii] #make all pixel that below the fluctuation to be zero from scipy.signal import convolve2d mykernel=np.array([[1,1,1],[1,-1,1],[1,1,1]]) # remove hot spot (whose value is higher than the sum of 8 neighbors) for ii in range(Nfile): for jj in range(45): mask=convolve2d(Im[ii][jj],mykernel,mode='same')>0 Im[ii][jj]*=mask mykernel2=np.array([[1,2,1],[2,4,2],[1,2,1]])/16.0 # Smoothing for ii in range(Nfile): for jj in range(45): Im[ii][jj]=convolve2d(Im[ii][jj],mykernel2,mode='same') for ii in range(Nfile): 
np.save(path+'Ps_filtered/Im{:d}'.format(ii),Im[ii].astype('uint16')) idx = 2 a = np.load(path+f'Ps_filtered/Im{idx}.npy') plt.imshow(a[41,:,:]) plt.show() maxes = a.sum(axis=(1,2)) np.where(maxes==maxes.max()) # manually write down the IDs of "good" peaks for calibration goodidx=np.array([2,4,5,7,8,9,10,12,15,18, 19,24,25,26,27,28,29,30, 31,32,33,34,35,37,38,39, 40,41,43,46,49,50,51,52, 53,54,55,56,57,60,63,64, 65,66,67,69,70,71,72,73, 74,76,77,78,79,81,82,83, 84,85,86,87,88,89,90,91, 92,93,94,95, 36,44,45,62,68,80]) ``` # Find the Center of Mass of each Bragg Peak ``` # choose one of following two methods to find the center of mass of each "good" Bragg peak. # can be used even the peak persists on several omega frames def getCenter1(Im,Omeg1,Omeg2,lower=100,upper=2000): blobs, _ = ndimage.label(Im[Omeg1:Omeg2+1]) _,size=np.unique(blobs,return_counts=True) blobID = np.where((size>lower)*(size<upper))[0] if len(blobID)==1: blobID=blobID[0] else: print('need manual pick') return co,cy,cx = ndimage.measurements.center_of_mass(Im[Omeg1:Omeg2+1],blobs,blobID) return co,cy,cx # can be only used on single omega frame def getCenter2(Im,Omeg,dx=15,dy=7): Py,Px=ndimage.measurements.maximum_position(Im[Omeg]) labels=np.zeros(Im[Omeg].shape,dtype=int) labels[Py-dy:Py+dy+1,Px-dx:Px+dx+1]=1 cy,cx = ndimage.measurements.center_of_mass(Im[Omeg],labels=labels,index=1) return Py,Px,cy,cx # %matplotlib notebook tmp=np.load(path+'Ps_filtered/Im88.npy') bOmeg=21 eOmeg=22 co,cy,cx = getCenter1(tmp,bOmeg,eOmeg,lower=100,upper=1000) print("({:.2f}, {:.2f}, {:.2f})".format(co+bOmeg,cx,cy)) plt.imshow(np.sum(tmp[bOmeg:eOmeg+1],axis=0)) plt.scatter(cx,cy,c='k') plt.show() # %matplotlib notebook tmp=np.load(path+'Ps_filtered/Im94.npy') Omeg=21 Py,Px,cy,cx = getCenter2(tmp,Omeg,dx=15,dy=5) print("({:.2f}, {:.2f}, {:.2f})".format(Omeg,cx,cy)) plt.imshow(tmp[Omeg]) plt.scatter(cx,cy,c='k') plt.show() ``` # Write down all the center of mass for the good Peaks ``` # Write down the center of mass 
of each Bragg Peak, in the order of goodidx center_of_mass = np.array([(28.79, 161.38, 36.47), (20, 141.56, 51.82), (20.00, 136.09, 52.67), (28.00, 152.87, 54.98), (31.00, 150.29, 56.65), (19.86, 141.39, 56.96), (21.00, 135.42, 55.26), (27.47, 147.29, 57.41), (19.33, 139.94, 59.89), (26.00, 152.22, 58.66), (26.00, 152.82, 60.23), (21.00, 140.49, 63.34), (22.00, 77.09, 63.10), (26.61, 153.50, 67.04), (22.00, 141.62, 66.31), (22.00, 22.54, 66.19), (26.00, 149.67, 67.16), (26.55, 146.24, 67.16), (31.40, 144.80, 69.36), (20.42, 142.44, 68.36), (21.48, 140.41, 68.70), (21.91, 138.86, 68.54), (22.00, 68.60, 68.84), (25.00, 147.25, 68.22), (25.00, 146.93, 68.76), (19.00, 153.14, 71.91), (25.00, 148.87, 68.36), (19.68, 146.27, 72.12), (21.53, 78.33, 70.98), (26.00, 152.47, 72.68), (22.00, 68.98, 70.95), (25.00, 148.49, 73.62), (25.00, 148.11, 74.45), (25.67, 150.38, 75.97), (23.00, 144.00, 72.79), (22.93, 141.36, 73.26), (23.22, 140.70, 74.06), (23.00, 60.33, 73.96), (24.74, 144.38, 74.19), (23.00, 142.72, 75.71), (24.28, 146.32, 74.34), (21.00, 74.10, 78.07), (24.00, 149.06, 74.65), (22.00, 115.61, 77.83), (22.00, 53.23, 76.99), (23.00, 13.56, 74.23), (24.00, 146.26, 77.74), (23.00, 148.08, 75.22), (23.00, 143.69, 75.62), (25.00, 145.35, 77.68), (24.00, 143.52, 77.78), (24.00, 142.50, 77.13), (23.00, 142.20, 77.28), (23.00, 142.62, 77.41), (23.00, 66.54, 77.65), (24.00, 141.78, 77.94), (24.00, 142.46, 78.02), (23.00, 146.43, 77.95), (23.00, 148.30, 78.59), (23.00, 145.70, 78.51), (23.00, 145.49, 78.34), (24.00, 146.21, 78.10), (24.00, 146.66, 78.50), (24.48, 148.78, 79.36), (22.00, 150.51, 75.82), (23.00, 148.37, 76.01), (22.00, 103.28, 76.59), (24.00, 147.77, 78.74), (23.00, 148.90, 76.78), (23.00, 145.66, 76.77), (26.00, 148.64, 67.44), (24.37, 152.98, 69.57), (25.00, 152.38, 71.98), (25.00, 144.92, 74.90), (25.00, 151.22, 76.78), (24.81, 142.24, 77.70)]) # np.save('center_of_mass.npy',center_of_mass) imgN = len(goodidx) LimH = np.empty((imgN,5),dtype=np.int32) good_Gs 
= Gs[goodidx] whichOmega = np.empty(imgN,dtype=np.int32) for ii in range(imgN): limit=np.load(path+'Ps_bf/limit{0:d}.npy'.format(goodidx[ii])) LimH[ii,:]=limit[0] if Info[goodidx[ii]]['WhichOmega']=='b': whichOmega[ii] = 2 else: whichOmega[ii] = 1 absCOM=np.empty(center_of_mass.shape) for ii in range(len(absCOM)): absCOM[ii,1]=LimH[ii,2]+center_of_mass[ii,2] absCOM[ii,0]=2047-(LimH[ii,0]+center_of_mass[ii,1]) absCOM[ii,2]=(LimH[ii,4]+center_of_mass[ii,0]) if absCOM[ii,2] >=3600: absCOM[ii,2] -= 3600 absCOM[ii,2] = 180-absCOM[ii,2]*0.05 ``` # Start Calibration ``` def GetVertex(Det1,Gs,Omegas,orien,etalimit,grainpos,bIdx=True,omegaL=-90,omegaU=90,energy=50): Peaks=[] rotatedG=orien.dot(Gs.T).T for ii in range(len(rotatedG)): g1=rotatedG[ii] res=Gsim.frankie_angles_from_g(g1,verbo=False,energy=energy) if Omegas[ii]==1: omega=res['omega_a']/180.0*np.pi newgrainx=np.cos(omega)*grainpos[0]-np.sin(omega)*grainpos[1] newgrainy=np.cos(omega)*grainpos[1]+np.sin(omega)*grainpos[0] idx=Det1.IntersectionIdx(np.array([newgrainx,newgrainy,0]),res['2Theta'],res['eta'],bIdx ,checkBoundary=False ) Peaks.append([idx[0],idx[1],res['omega_a']]) else: omega=res['omega_b']/180.0*np.pi newgrainx=np.cos(omega)*grainpos[0]-np.sin(omega)*grainpos[1] newgrainy=np.cos(omega)*grainpos[1]+np.sin(omega)*grainpos[0] idx=Det1.IntersectionIdx(np.array([newgrainx,newgrainy,0]),res['2Theta'],-res['eta'],bIdx ,checkBoundary=False ) Peaks.append([idx[0],idx[1],res['omega_b']]) Peaks=np.array(Peaks) return Peaks pars={'J':0,'K':0,'L':0,'tilt':(0,0,0),'x':0,'y':0,'distortion':((0,0,0),(0,0,0),(0,0,0))} DetDefault=Gsim.Detector(psizeJ=0.001454, psizeK=0.001454) def SimP(x): DetDefault.Reset() pars['J']=x[0]+1182.19 pars['K']=x[1]+2026.27 pars['L']=x[2]*10**(-3)+7.14503 pars['tilt']=Rot.EulerZXZ2Mat((x[3:6]+np.array([89.1588,87.5647,0.278594]))/180.0*np.pi) pars['x']=x[6]*10**(-3)-0.345937 pars['y']=x[7]*10**(-3)+0.238698 pars['distortion']=x[8:17].reshape((3,3))*10**(-3)+np.eye(3) 
DetDefault.Move(pars['J'],pars['K'],np.array([pars['L'],0,0]),pars['tilt']) pos=np.array([pars['x'], pars['y'], 0]) Ps=GetVertex(DetDefault, good_Gs, whichOmega, pars['distortion'], etalimit, pos, bIdx=False, omegaL=0,omegaU=180,energy=energy) return Ps def CostFunc(x): Ps = SimP(x) weights=np.array((1,5,100)) tmp=np.sum(((Ps-absCOM)*weights)**2,axis=0) return np.sum(tmp) res=optimize.minimize(CostFunc,np.zeros(17) ,bounds=[(-5,5),(-5,2),(-100,50)]+3*[(-0.3,3)]+2*[(-10,20)]+9*[(-5,10)] ) print(res) ``` # Check the position of simulated Bragg peaks after calibration ``` newPs=SimP(res['x']) oldPs=SimP(np.zeros(17)) fig,ax=plt.subplots(1,2,figsize=(15,4)) ax[0].hist(oldPs[:,2]-absCOM[:,2],label='before calibration',bins=np.arange(-0.2,0.35,0.05),alpha=0.5) ax[0].hist(newPs[:,2]-absCOM[:,2],label='after calibration',bins=np.arange(-0.2,0.35,0.05),alpha=0.5) ax[0].legend(loc='upper right', fontsize=15) ax[0].set_xlabel(r'$\Omega$ difference $(^\circ)$', fontsize=20) ax[1].scatter(oldPs[:,0]-absCOM[:,0],oldPs[:,1]-absCOM[:,1],label='before calibration') ax[1].scatter(newPs[:,0]-absCOM[:,0],newPs[:,1]-absCOM[:,1],label='after calibration',alpha=0.5) ax[1].set_xlabel('horizontal difference (pixels)', fontsize=20) ax[1].set_ylabel('vertical difference (pixels)', fontsize=20) ax[1].legend(loc='upper right', fontsize=15) ax[0].tick_params(axis='both', which='major', labelsize=20) ax[1].tick_params(axis='both', which='major', labelsize=20) plt.savefig('calibration.png',dpi=100,bbox_inches='tight') plt.show() ``` # Check the position of simulated Bragg peaks after calibration: Image ``` x= res['x'] pars={} pars['J']=x[0]+1182.19 pars['K']=x[1]+2026.27 pars['L']=x[2]*10**(-3)+7.14503 pars['tilt']=Rot.EulerZXZ2Mat((x[3:6]+np.array([89.1588,87.5647,0.278594]))/180.0*np.pi) pars['x']=x[6]*10**(-3)-0.345937 pars['y']=x[7]*10**(-3)+0.238698 pars['distortion']=x[8:17].reshape((3,3))*10**(-3)+np.eye(3) DetDefault=Gsim.Detector(psizeJ=0.001454, psizeK=0.001454) DetDefault.Reset() 
DetDefault.Move(pars['J'],pars['K'],np.array([pars['L'],0,0]),pars['tilt']) pos=np.array([pars['x'], pars['y'], 0]) Ps_new,Gs_new,Info_new=Gsim.GetProjectedVertex(DetDefault,crystal_str, pars['distortion'].dot(o_mat), etalimit,pos,getPeaksInfo=True, omegaL=omegaL,omegaU=omegaU,energy=energy) print(pars) # %matplotlib notebook # ii is the image ID ii=86 f,axis=plt.subplots() omegid=int(round((180-Ps_new[ii,2])*20)) if omegid<0: omegid+=3600 if omegid>=3600: omegid-=3600 I=plt.imread(raw_data+'{0:06d}.tif'.format(omegid)) axis.imshow(I,vmax=40) axis.scatter(2047-Ps_new[ii,0],Ps_new[ii,1],c='r') plt.show() ```
github_jupyter
# Part 1 - 2D mesh tallies So far we have seen that neutron and photon interactions can be tallied on surfaces or cells, but what if we want to tally neutron behaviour throughout a geometry? (rather than the integrated neutron behaviour over a surface or cell). A mesh tally allows a visual inspection of the neutron behaviour spatially throughout the geometry. The geometry is subdivided into many rectangles and the neutron behaviour is recorded (tallied) by the simulation in each of the small rectangles. This can form a 2D slice of the neutron interactions throughout the model. This notebook allows users to create a simple geometry from a few different materials and plot the results of a 2D regular mesh tally applied to the geometry. ``` from IPython.display import HTML HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/KYIsDjip1nQ" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') ``` This code block defines the model geometry, materials, neutron source and regular mesh tally. Run the cell to see the model geometry. Observe how a 2D mesh is achieved by creating a 3D mesh with a thickness of one mesh cell in one dimension. 
``` import openmc import matplotlib.pyplot as plt # MATERIALS # creates two materials, one is a neutron multiplier (lead) and the other a tritium breeder (lithium) mats = openmc.Materials() breeder_material = openmc.Material(name="breeder") breeder_material.add_element('Li', 1, percent_type='ao') breeder_material.set_density('g/cm3', 2.0) multiplier_material = openmc.Material(name="multiplier") multiplier_material.add_element('Pb', 1, percent_type='ao') multiplier_material.set_density('g/cm3', 11.0) mats = [breeder_material, multiplier_material] # GEOMETRY # surfaces sph1 = openmc.Sphere(r=50) sph2 = openmc.Sphere(r=90, boundary_type='vacuum') plane1 = openmc.XPlane(20) # cells breeder_cell = openmc.Cell(region=+sph1 & -sph2 & -plane1) breeder_cell.fill = breeder_material multiplier_cell = openmc.Cell(region=+sph1 & -sph2 & +plane1) multiplier_cell.fill = multiplier_material inner_vacuum_cell = openmc.Cell(region=-sph1) universe = openmc.Universe(cells=[inner_vacuum_cell, breeder_cell, multiplier_cell]) geom = openmc.Geometry(universe) # SETTINGS # Instantiate a Settings object sett = openmc.Settings() sett.batches = 100 sett.inactive = 0 sett.particles = 50 sett.particle = "neutron" sett.run_mode = 'fixed source' # creates a 14MeV point source source = openmc.Source() source.space = openmc.stats.Point((0, 0, 0)) source.angle = openmc.stats.Isotropic() source.energy = openmc.stats.Discrete([14e6], [1]) sett.source = source # Create mesh which will be used for tally mesh = openmc.RegularMesh() mesh_height = 100 # number of cells in the X and Z dimensions mesh_width = mesh_height mesh.dimension = [mesh_width, 1, mesh_height] # only 1 cell in the Y dimension mesh.lower_left = [-200, -200, -200] # physical limits (corners) of the mesh mesh.upper_right = [200, 200, 200] tallies = openmc.Tallies() # Create mesh filter for tally mesh_filter = openmc.MeshFilter(mesh) mesh_tally = openmc.Tally(name='tallies_on_mesh') mesh_tally.filters = [mesh_filter] mesh_tally.scores = 
['flux', 'absorption', '(n,2n)'] # change flux to absorption tallies.append(mesh_tally) # combines the geometry, materials, settings and tallies to create a neutronics model model = openmc.model.Model(geom, mats, sett, tallies) plt.show(universe.plot(width=(180, 180), basis='xz')) ``` The next code block performs the simulation which tallies neutron flux on the mesh, and loads the results for inspection. ``` # deletes old files !rm summary.h5 !rm statepoint.*.h5 # runs the simulation output_filename = model.run() # open the results file results = openmc.StatePoint(output_filename) ``` This code block filters the results to show the neutron flux recorded by the mesh tally. ``` # access the flux tally my_tally = results.get_tally(scores=['flux']) my_slice = my_tally.get_slice(scores=['flux']) my_slice.mean.shape = (mesh_width, mesh_height) fig = plt.subplot() plt.show(fig.imshow(my_slice.mean)) # notice that neutrons are produced and emitted isotropically from a point source. # There is a slight increase in flux within the neutron multiplier. ``` This code block filters the results to show the neutron absorption recorded by the mesh tally. ``` # access the absorption tally my_tally = results.get_tally(scores=['absorption']) my_slice = my_tally.get_slice(scores=['absorption']) my_slice.mean.shape = (mesh_width, mesh_height) fig = plt.subplot() plt.show(fig.imshow(my_slice.mean)) # notice that neutrons are being absorpted on the left hand side of the model ``` This code block filters the results to show the neutron multiplication recorded by the mesh tally. 
```
# access the neutron multiplication tally
my_tally = results.get_tally(scores=['(n,2n)'])
my_slice = my_tally.get_slice(scores=['(n,2n)'])
my_slice.mean.shape = (mesh_width, mesh_height)

fig = plt.subplot()
plt.show(fig.imshow(my_slice.mean))

# notice that neutrons are being multiplied on the right hand side of the model

# Bonus information

# The 2D mesh tally is currently integrating all interactions along the
# collapsed mesh dimension (y), as the mesh above is a single cell thick in y.
# The diagrams are therefore showing the xz plane with all interactions in the
# y direction summed together.
# However one can also change the mesh to take a central slice with a 1 cm
# thickness in the following way.
# The tally takes a little longer to converge as fewer neutrons are interacting
# in the tally region.

# Create mesh which will be used for tally
mesh = openmc.RegularMesh()
mesh_height = 100
mesh_width = mesh_height
mesh.dimension = [mesh_width, 1, mesh_height]  # only one entry in the Y direction
mesh.lower_left = [-200, -0.5, -200]  # Y thickness is now smaller
mesh.upper_right = [200, 0.5, 200]  # Y thickness is now smaller
```

**Learning Outcomes for Part 1:**

- Mesh tallies can be used to visualise neutron interactions spatially throughout geometry.
github_jupyter
# Tutorial: Computing with shapes of landmarks in Kendall shape spaces In this tutorial, we show how to use geomstats to perform a shape data analysis. Specifically, we aim to study the difference between two groups of data: - optical nerve heads that correspond to normal eyes, - optical nerve heads that correspond to glaucoma eyes. We wish to investigate if there is a difference in these two groups, and if this difference is a difference in sizes of the optical nerve heads, or a difference in shapes (where the size has been quotiented out). <img src="figures/optic_nerves.png" /> ## Set up ``` import os import sys import warnings sys.path.append(os.path.dirname(os.getcwd())) warnings.filterwarnings('ignore') %matplotlib inline import matplotlib.colors as colors import matplotlib.patches as mpatches import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection import geomstats.backend as gs import geomstats.datasets.utils as data_utils from geomstats.geometry.pre_shape import PreShapeSpace, KendallShapeMetric ``` We import the dataset of the optical nerve heads from 22 images of Rhesus monkeys’ eyes (11 monkeys), available in [[PE2015]](#References). For each monkey, an experimental glaucoma was introduced in one eye, while the second eye was kept as control. One seeks to observe differences between the glaucoma and the control eyes. On each image, 5 anatomical landmarks were recorded: - 1st landmark: superior aspect of the retina, - 2nd landmark: side of the retina closest to the temporal bone of the skull, - 3rd landmark: nose side of the retina, - 4th landmark: inferior point, - 5th landmark: optical nerve head deepest point. Label 0 refers to a normal eye, and Label 1 to an eye with glaucoma. 
``` nerves, labels, monkeys = data_utils.load_optical_nerves() print(nerves.shape) print(labels) print(monkeys) ``` We extract the landmarks' sets corresponding to the two eyes' nerves of the first monkey, with their corresponding labels. ``` two_nerves = nerves[monkeys==0] print(two_nerves.shape) two_labels = labels[monkeys==0] print(two_labels) label_to_str = {0: 'Normal nerve', 1: 'Glaucoma nerve'} label_to_color = {0: (102/255, 178/255, 255/255, 1.), 1: (255/255, 178/255, 102/255, 1.)} fig = plt.figure() ax = Axes3D(fig) ax.set_xlim((2000, 4000)) ax.set_ylim((1000, 5000)) ax.set_zlim((-600, 200)) for nerve, label in zip(two_nerves, two_labels): x = nerve[:, 0] y = nerve[:, 1] z = nerve[:, 2] verts = [list(zip(x,y,z))] poly = Poly3DCollection(verts, alpha=0.5) color = label_to_color[int(label)] poly.set_color(colors.rgb2hex(color)) poly.set_edgecolor('k') ax.add_collection3d(poly) patch_0 = mpatches.Patch(color=label_to_color[0], label=label_to_str[0], alpha=0.5) patch_1 = mpatches.Patch(color=label_to_color[1], label=label_to_str[1], alpha=0.5) plt.legend(handles=[patch_0, patch_1], prop={'size': 14}) plt.show() ``` We first try to detect if there are two groups of optical nerve heads, based on the 3D coordinates of the landmarks sets. ``` from geomstats.geometry.euclidean import EuclideanMetric nerves_vec = nerves.reshape(22, -1) eucl_metric = EuclideanMetric(nerves_vec.shape[-1]) eucl_dist = eucl_metric.dist_pairwise(nerves_vec) plt.figure() plt.imshow(eucl_dist); ``` We do not see any two clear clusters. We want to investigate if there is a difference between these two groups of shapes - normal nerve versus glaucoma nerve - or if the main difference is merely relative to the global size of the landmarks' sets. 
```
m_ambient = 3
k_landmarks = 5

preshape = PreShapeSpace(m_ambient=m_ambient, k_landmarks=k_landmarks)
matrices_metric = preshape.embedding_metric

sizes = matrices_metric.norm(preshape.center(nerves))

plt.figure(figsize=(6, 4))
for label, col in label_to_color.items():
    label_sizes = sizes[labels==label]
    plt.hist(label_sizes, color=col, label=label_to_str[label], alpha=0.5, bins=10)
    plt.axvline(gs.mean(label_sizes), color=col)
plt.legend(fontsize=14)
plt.title('Sizes of optical nerves', fontsize=14);
```

The vertical lines represent the sample mean of each group (normal/glaucoma).

```
plt.figure(figsize=(6, 4))
plt.hist(sizes[labels==1] - sizes[labels==0], alpha=0.5)
plt.axvline(0, color='black')
plt.title('Difference in size of optical nerve between glaucoma and normal eyes', fontsize=14);
```

We perform a hypothesis test, testing if the two samples of sizes have the same average. We use the t-test for related samples, since the sample elements are paired: two eyes for each monkey.

```
from scipy import stats

signif_level = 0.05

tstat, pvalue = stats.ttest_rel(sizes[labels==0], sizes[labels==1])
print(pvalue < signif_level)
```

There is a significant difference in optical nerve sizes between the glaucoma and normal eyes.

We want to investigate if there is a difference in shapes, where the size component has been quotiented out.

We project the data to the Kendall pre-shape space, which:
- centers the nerve landmark sets so that they share the same barycenter,
- normalizes the sizes of the landmarks' sets to 1.

```
nerves_preshape = preshape.projection(nerves)
print(nerves_preshape.shape)
print(preshape.belongs(nerves_preshape))
print(gs.isclose(matrices_metric.norm(nerves_preshape), 1.))
```

In order to quotient out the 3D orientation component, we align the landmark sets in the preshape space.
``` base_point = nerves_preshape[0] nerves_shape = preshape.align(point=nerves_preshape, base_point=base_point) ``` The Kendall metric is a Riemannian metric that takes this alignment into account. It corresponds to the metric of the Kendall shape space, which is the manifold defined as the preshape space quotient by the action of the rotation in m_ambient dimensions, here in 3 dimensions. ``` kendall_metric = KendallShapeMetric(m_ambient=m_ambient, k_landmarks=k_landmarks) ``` We can use it to perform a tangent PCA in the Kendall shape space, and determine if we see a difference in the shapes of the optical nerves. ``` from geomstats.learning.pca import TangentPCA tpca = TangentPCA(kendall_metric) tpca.fit(nerves_shape) plt.plot( tpca.explained_variance_ratio_) plt.xlabel("Number of principal tangent components", size=14) plt.ylabel("Fraction of explained variance", size=14); ``` Two principal components already describe around 60% of the variance. We plot the data projected in the tangent space defined by these two principal components. ``` X = tpca.transform(nerves_shape) plt.figure(figsize=(12, 12)) for label, col in label_to_color.items(): mask = labels == label plt.scatter(X[mask, 0], X[mask, 1], color=col, s=100, label=label_to_str[label]); plt.legend(fontsize=14); for label, x, y in zip(monkeys, X[:, 0], X[:, 1]): plt.annotate( label, xy=(x, y), xytext=(-20, 20), textcoords='offset points', ha='right', va='bottom', bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0')) plt.show() ``` The indices represent the monkeys' indices. In contrast to the above study focusing on the optical nerves' sizes, visual inspection does not reveal any clusters between the glaucoma and normal optical nerves' shapes. We also do not see any obvious pattern between the two optical nerves of the same monkey. 
This shows that the difference between the optical nerve heads mainly resides in the over sizes of the optical nerves. ``` dist_pairwise = kendall_metric.dist_pairwise(nerves_shape) print(dist_pairwise .shape) plt.figure() plt.imshow(dist_pairwise); ``` We try a agglomerative hierarchical clustering to investigate if we can cluster in the Kendall shape space. ``` from geomstats.learning.agglomerative_hierarchical_clustering import AgglomerativeHierarchicalClustering clustering = AgglomerativeHierarchicalClustering(distance='precomputed', n_clusters=2) clustering.fit(dist_pairwise) predicted_labels = clustering.labels_ print('True labels:', labels) print('Predicted labels:', predicted_labels) accuracy = gs.sum(labels==predicted_labels) / len(labels) print(f'Accuracy: {accuracy:.2f}') ``` The accuracy is barely above the accuracy of a random classifier, that would assign 0 or 1 with probably 0.5 to each of the shapes. This confirms that the difference that exists between the two groups is mostly due to the landmarks' set size and not their shapes. ## References .. [PE2015] Patrangenaru and L. Ellingson. Nonparametric Statistics on Manifolds and Their Applications to Object Data, 2015. https://doi.org/10.1201/b18969
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Pandas DataFrame to Fairness Indicators Case Study <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_Pandas_Case_Study"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/fairness-indicators/tree/master/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/fairness-indicators/g3doc/tutorials/Fairness_Indicators_Pandas_Case_Study.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Case Study Overview In this case study we will apply [TensorFlow Model Analysis](https://www.tensorflow.org/tfx/model_analysis/get_started) and [Fairness Indicators](https://www.tensorflow.org/tfx/guide/fairness_indicators) to evaluate data stored as a Pandas DataFrame, where each row 
contains ground truth labels, various features, and a model prediction. We will show how this workflow can be used to spot potential fairness concerns, independent of the framework one used to construct and train the model. As in this case study, we can analyze the results from any machine learning framework (e.g. TensorFlow, JAX, etc) once they are converted to a Pandas DataFrame. For this exercise, we will leverage the Deep Neural Network (DNN) model that was developed in the [Shape Constraints for Ethics with Tensorflow Lattice](https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/shape_constraints_for_ethics.ipynb#scrollTo=uc0VwsT5nvQi) case study using the Law School Admissions dataset from the Law School Admissions Council (LSAC). This classifier attempts to predict whether or not a student will pass the bar, based on their Law School Admission Test (LSAT) score and undergraduate GPA. ## LSAC Dataset The dataset used within this case study was originally collected for a study called '[LSAC National Longitudinal Bar Passage Study. LSAC Research Report Series](https://eric.ed.gov/?id=ED469370)' by Linda Wightman in 1998. The dataset is currently hosted [here](http://www.seaphe.org/databases.php). * **dnn_bar_pass_prediction**: The LSAT prediction from the DNN model. * **gender**: Gender of the student. * **lsat**: LSAT score received by the student. * **pass_bar**: Ground truth label indicating whether or not the student eventually passed the bar. * **race**: Race of the student. * **ugpa**: A student's undergraduate GPA. 
``` !pip install -q -U pip==20.2 !pip install -q -U \ tensorflow-model-analysis==0.39.0 \ tensorflow-data-validation==1.8.0 \ tfx-bsl==1.8.0 ``` ## Importing required packages: ``` import os import tempfile import pandas as pd import six.moves.urllib as urllib import pprint import tensorflow_model_analysis as tfma from google.protobuf import text_format import tensorflow as tf tf.compat.v1.enable_v2_behavior() ``` ## Download the data and explore the initial dataset. ``` # Download the LSAT dataset and setup the required filepaths. _DATA_ROOT = tempfile.mkdtemp(prefix='lsat-data') _DATA_PATH = 'https://storage.googleapis.com/lawschool_dataset/bar_pass_prediction.csv' _DATA_FILEPATH = os.path.join(_DATA_ROOT, 'bar_pass_prediction.csv') data = urllib.request.urlopen(_DATA_PATH) _LSAT_DF = pd.read_csv(data) # To simpliy the case study, we will only use the columns that will be used for # our model. _COLUMN_NAMES = [ 'dnn_bar_pass_prediction', 'gender', 'lsat', 'pass_bar', 'race1', 'ugpa', ] _LSAT_DF.dropna() _LSAT_DF['gender'] = _LSAT_DF['gender'].astype(str) _LSAT_DF['race1'] = _LSAT_DF['race1'].astype(str) _LSAT_DF = _LSAT_DF[_COLUMN_NAMES] _LSAT_DF.head() ``` ## Configure Fairness Indicators. There are several parameters that you’ll need to take into account when using Fairness Indicators with a DataFrame * Your input DataFrame must contain a prediction column and label column from your model. By default Fairness Indicators will look for a prediction column called `prediction` and a label column called `label` within your DataFrame. * If either of these values are not found a KeyError will be raised. * In addition to a DataFrame, you’ll also need to include an `eval_config` that should include the metrics to compute, slices to compute the metrics on, and the column names for example labels and predictions. * `metrics_specs` will set the metrics to compute. 
The `FairnessIndicators` metric will be required to render the fairness metrics and you can see a list of additional optional metrics [here](https://www.tensorflow.org/tfx/model_analysis/metrics). * `slicing_specs` is an optional slicing parameter to specify what feature you’re interested in investigating. Within this case study race1 is used, however you can also set this value to another feature (for example gender in the context of this DataFrame). If `slicing_specs` is not provided all features will be included. * If your DataFrame includes a label or prediction column that is different from the default `prediction` or `label`, you can configure the `label_key` and `prediction_key` to a new value. * If `output_path` is not specified a temporary directory will be created. ``` # Specify Fairness Indicators in eval_config. eval_config = text_format.Parse(""" model_specs { prediction_key: 'dnn_bar_pass_prediction', label_key: 'pass_bar' } metrics_specs { metrics {class_name: "AUC"} metrics { class_name: "FairnessIndicators" config: '{"thresholds": [0.50, 0.90]}' } } slicing_specs { feature_keys: 'race1' } slicing_specs {} """, tfma.EvalConfig()) # Run TensorFlow Model Analysis. eval_result = tfma.analyze_raw_data( data=_LSAT_DF, eval_config=eval_config, output_path=_DATA_ROOT) ``` ## Explore model performance with Fairness Indicators. After running Fairness Indicators, we can visualize different metrics that we selected to analyze our models performance. Within this case study we’ve included Fairness Indicators and arbitrarily picked AUC. When we first look at the overall AUC for each race slice we can see a slight discrepancy in model performance, but nothing that is arguably alarming. 
* **Asian**: 0.58 * **Black**: 0.58 * **Hispanic**: 0.58 * **Other**: 0.64 * **White**: 0.6 However, when we look at the false negative rates split by race, our model again incorrectly predicts the likelihood of a user passing the bar at different rates and, this time, does so by a lot. * **Asian**: 0.01 * **Black**: 0.05 * **Hispanic**: 0.02 * **Other**: 0.01 * **White**: 0.01 Most notably the difference between Black and White students is about 380%, meaning that our model is nearly 4x more likely to incorrectly predict that a black student will not pass the bar, than a whilte student. If we were to continue with this effort, a practitioner could use these results as a signal that they should spend more time ensuring that their model works well for people from all backgrounds. ``` # Render Fairness Indicators. tfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_result) ``` # tfma.EvalResult The [`eval_result`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult) object, rendered above in `render_fairness_indicator()`, has its own API that can be used to read TFMA results into your programs. ## [`get_slice_names()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_slice_names) and [`get_metric_names()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metric_names) To get the evaluated slices and metrics, you can use the respective functions. ``` pp = pprint.PrettyPrinter() print("Slices:") pp.pprint(eval_result.get_slice_names()) print("\nMetrics:") pp.pprint(eval_result.get_metric_names()) ``` ## [`get_metrics_for_slice()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metrics_for_slice) and [`get_metrics_for_all_slices()`](https://www.tensorflow.org/tfx/model_analysis/api_docs/python/tfma/EvalResult#get_metrics_for_all_slices) If you want to get the metrics for a particular slice, you can use `get_metrics_for_slice()`. 
It returns a dictionary mapping metric names to [metric values](https://github.com/tensorflow/model-analysis/blob/cdb6790dcd7a37c82afb493859b3ef4898963fee/tensorflow_model_analysis/proto/metrics_for_slice.proto#L194). ``` baseline_slice = () black_slice = (('race1', 'black'),) print("Baseline metric values:") pp.pprint(eval_result.get_metrics_for_slice(baseline_slice)) print("Black metric values:") pp.pprint(eval_result.get_metrics_for_slice(black_slice)) ``` If you want to get the metrics for all slices, `get_metrics_for_all_slices()` returns a dictionary mapping each slice to the corresponding `get_metrics_for_slices(slice)`. ``` pp.pprint(eval_result.get_metrics_for_all_slices()) ``` ## Conclusion Within this case study we imported a dataset into a Pandas DataFrame that we then analyzed with Fairness Indicators. Understanding the results of your model and underlying data is an important step in ensuring your model doesn't reflect harmful bias. In the context of this case study we examined the the LSAC dataset and how predictions from this data could be impacted by a students race. The concept of “what is unfair and what is fair have been introduced in multiple disciplines for well over 50 years, including in education, hiring, and machine learning.”<sup>1</sup> Fairness Indicator is a tool to help mitigate fairness concerns in your machine learning model. For more information on using Fairness Indicators and resources to learn more about fairness concerns see [here](https://www.tensorflow.org/responsible_ai/fairness_indicators/guide). --- 1. Hutchinson, B., Mitchell, M. (2018). 50 Years of Test (Un)fairness: Lessons for Machine Learning. https://arxiv.org/abs/1811.10104 ## Appendix Below are a few functions to help convert ML models to Pandas DataFrame. ``` # TensorFlow Estimator to Pandas DataFrame: # _X_VALUE = # X value of binary estimator. # _Y_VALUE = # Y value of binary estimator. # _GROUND_TRUTH_LABEL = # Ground truth value of binary estimator. 
def _get_predicted_probabilities(estimator, input_df, get_input_fn): predictions = estimator.predict( input_fn=get_input_fn(input_df=input_df, num_epochs=1)) return [prediction['probabilities'][1] for prediction in predictions] def _get_input_fn_law(input_df, num_epochs, batch_size=None): return tf.compat.v1.estimator.inputs.pandas_input_fn( x=input_df[[_X_VALUE, _Y_VALUE]], y=input_df[_GROUND_TRUTH_LABEL], num_epochs=num_epochs, batch_size=batch_size or len(input_df), shuffle=False) def estimator_to_dataframe(estimator, input_df, num_keypoints=20): x = np.linspace(min(input_df[_X_VALUE]), max(input_df[_X_VALUE]), num_keypoints) y = np.linspace(min(input_df[_Y_VALUE]), max(input_df[_Y_VALUE]), num_keypoints) x_grid, y_grid = np.meshgrid(x, y) positions = np.vstack([x_grid.ravel(), y_grid.ravel()]) plot_df = pd.DataFrame(positions.T, columns=[_X_VALUE, _Y_VALUE]) plot_df[_GROUND_TRUTH_LABEL] = np.ones(len(plot_df)) predictions = _get_predicted_probabilities( estimator=estimator, input_df=plot_df, get_input_fn=_get_input_fn_law) return pd.DataFrame( data=np.array(np.reshape(predictions, x_grid.shape)).flatten()) ```
github_jupyter
<a href="https://cognitiveclass.ai/"> <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center"> </a> <h1>String Operations</h1> <p><strong>Welcome!</strong> This notebook will teach you about the string operations in the Python Programming Language. By the end of this notebook, you'll know the basics string operations in Python, including indexing, escape sequences and operations.</p> <div class="alert alert-block alert-info" style="margin-top: 20px"> <a href="https://cocl.us/topNotebooksPython101Coursera"> <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/TopAd.png" width="750" align="center"> </a> </div> <h2>Table of Contents</h2> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ul> <li> <a href="#strings">What are Strings?</a> </li> <li> <a href="#index">Indexing</a> <ul> <li><a href="neg">Negative Indexing</a></li> <li><a href="slice">Slicing</a></li> <li><a href="stride">Stride</a></li> <li><a href="concat">Concatenate Strings</a></li> </ul> </li> <li> <a href="#escape">Escape Sequences</a> </li> <li> <a href="#operations">String Operations</a> </li> <li> <a href="#quiz">Quiz on Strings</a> </li> </ul> <p> Estimated time needed: <strong>15 min</strong> </p> </div> <hr> <h2 id="strings">What are Strings?</h2> The following example shows a string contained within 2 quotation marks: ``` # Use quotation marks for defining string "Michael Jackson" ``` We can also use single quotation marks: ``` # Use single quotation marks for defining string 'Michael Jackson' ``` A string can be a combination of spaces and digits: ``` # Digitals and spaces in string '1 2 3 4 5 6 ' ``` A string can also be a combination of special characters : ``` # Special characters in string '@#2_#]&*^%$' ``` We can print our string using the print statement: ``` # Print the string print("hello!") ``` We can bind or assign a string to 
another variable: ``` # Assign string to variable Name = "Michael Jack" Name ``` <hr> <h2 id="index">Indexing</h2> It is helpful to think of a string as an ordered sequence. Each element in the sequence can be accessed using an index represented by the array of numbers: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsIndex.png" width="600" align="center" /> The first index can be accessed as follows: <hr/> <div class="alert alert-success alertsuccess" style="margin-top: 20px"> [Tip]: Because indexing starts at 0, it means the first index is on the index 0. </div> <hr/> ``` # Print the first element in the string print(Name[0]) ``` We can access index 6: ``` # Print the element on index 6 in the string print(Name[6]) ``` Moreover, we can access the 13th index: ``` # Print the element on the 13th index in the string print(Name[13]) ``` <h3 id="neg">Negative Indexing</h3> We can also use negative indexing with strings: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsNeg.png" width="600" align="center" /> Negative index can help us to count the element from the end of the string. 
The last element is given by the index -1: ``` # Print the last element in the string print(Name[-1]) ``` The first element can be obtained by index -15: ``` # Print the first element in the string print(Name[-15]) ``` We can find the number of characters in a string by using <code>len</code>, short for length: ``` # Find the length of string len("Michael Jackson") ``` <h3 id="slice">Slicing</h3> We can obtain multiple characters from a string using slicing, we can obtain the 0 to 4th and 8th to the 12th element: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsSlice.png" width="600" align="center" /> <hr/> <div class="alert alert-success alertsuccess" style="margin-top: 20px"> [Tip]: When taking the slice, the first number means the index (start at 0), and the second number means the length from the index to the last element you want (start at 1) </div> <hr/> ``` # Take the slice on variable Name with only index 0 to index 3 Name[0:4] and Name [8:13] # Take the slice on variable Name with only index 8 to index 11 Name[8:12] ``` <h3 id="stride">Stride</h3> We can also input a stride value as follows, with the '2' indicating that we are selecting every second variable: <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsStride.png" width="600" align="center" /> ``` # Get every second element. The elments on index 1, 3, 5 ... Name[::2] ``` We can also incorporate slicing with the stride. 
In this case, we select the first five elements and then use the stride: ``` # Get every second element in the range from index 0 to index 4 Name[0:5:2] ``` <h3 id="concat">Concatenate Strings</h3> We can concatenate or combine strings by using the addition symbols, and the result is a new string that is a combination of both: ``` # Concatenate two strings Statement = Name + "is the best" Statement ``` To replicate values of a string we simply multiply the string by the number of times we would like to replicate it. In this case, the number is three. The result is a new string, and this new string consists of three copies of the original string: ``` # Print the string for 3 times 3 * "Michael Jackson" ``` You can create a new string by setting it to the original variable. Concatenated with a new string, the result is a new string that changes from Michael Jackson to “Michael Jackson is the best". ``` # Concatenate strings Name = "Michael Jackson" Name = Name + " is the best" Name ``` <hr> <h2 id="escape">Escape Sequences</h2> Back slashes represent the beginning of escape sequences. Escape sequences represent strings that may be difficult to input. For example, back slash "n" represents a new line. The output is given by a new line after the back slash "n" is encountered: ``` # New line escape sequence print(" Michael Jackson \n is the best" ) ``` Similarly, back slash "t" represents a tab: ``` # Tab escape sequence print(" Michael Jackson \t is the best" ) ``` If you want to place a back slash in your string, use a double back slash: ``` # Include back slash in string print(" Michael Jackson \\ is the best" ) ``` We can also place an "r" before the string to display the backslash: ``` # r will tell python that string will be display as raw string print(r" Michael Jackson \ is the best" ) ``` <hr> <h2 id="operations">String Operations</h2> There are many string operation methods in Python that can be used to manipulate the data. 
We are going to use some basic string operations on the data. Let's try with the method <code>upper</code>; this method converts lower case characters to upper case characters: ``` # Convert all the characters in string to upper case A = "Thriller is the sixth studio album" print("before upper:", A) B = A.upper() print("After upper:", B) ``` The method <code>replace</code> replaces a segment of the string, i.e. a substring with a new string. We input the part of the string we would like to change. The second argument is what we would like to exchange the segment with, and the result is a new string with the segment changed: ``` # Replace the old substring with the new target substring is the segment has been found in the string A = "Michael Jackson is the best" B = A.replace('Michael', 'Janet') B ``` The method <code>find</code> finds a sub-string. The argument is the substring you would like to find, and the output is the first index of the sequence. We can find the sub-string <code>jack</code> or <code>el<code>. <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%201/Images/StringsFind.png" width="600" align="center" /> ``` # Find the substring in the string. Only the index of the first elment of substring in string will be the output Name = "Michael Jackson" Name.find('el') # Find the substring in the string. Name.find('Jack') ``` If the sub-string is not in the string then the output is a negative one. For example, the string 'Jasdfasdasdf' is not a substring: ``` # If cannot find the substring in the string Name.find('Jasdfasdasdf') ``` <hr> <h2 id="quiz">Quiz on Strings</h2> What is the value of the variable <code>A</code> after the following code is executed? ``` # Write your code below and press Shift+Enter to execute A = "1" ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: "1" --> What is the value of the variable <code>B</code> after the following code is executed? 
``` # Write your code below and press Shift+Enter to execute B = "2" ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: "2" --> What is the value of the variable <code>C</code> after the following code is executed? ``` # Write your code below and press Shift+Enter to execute C = A + B ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: "12" --> <hr> Consider the variable <code>D</code> use slicing to print out the first three elements: ``` # Write your code below and press Shift+Enter to execute D = "ABCDEFG" ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: print(D[:3]) # or print(D[0:3]) --> <hr> Use a stride value of 2 to print out every second character of the string <code>E</code>: ``` # Write your code below and press Shift+Enter to execute E = 'clocrkr1e1c1t' ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: print(E[::2]) --> <hr> Print out a backslash: ``` # Write your code below and press Shift+Enter to execute ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: print("\\") or print(r" \ ") --> <hr> Convert the variable <code>F</code> to uppercase: ``` # Write your code below and press Shift+Enter to execute F = "You are wrong" ``` Double-click <b>here</b> for the solution. <!-- Your answer is below: F.upper() --> <hr> Consider the variable <code>G</code>, and find the first index of the sub-string <code>snow</code>: ``` # Write your code below and press Shift+Enter to execute G = "Mary had a little lamb Little lamb, little lamb Mary had a little lamb \ Its fleece was white as snow And everywhere that Mary went Mary went, Mary went \ Everywhere that Mary went The lamb was sure to go" ``` Double-click __here__ for the solution. 
<!-- Your answer is below: G.find("snow") --> In the variable <code>G</code>, replace the sub-string <code>Mary</code> with <code>Bob</code>: ``` # Write your code below and press Shift+Enter to execute ``` Double-click __here__ for the solution. <!-- Your answer is below: G.replace("Mary", "Bob") --> <hr> <h2>The last exercise!</h2> <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. <hr> <div class="alert alert-block alert-info" style="margin-top: 20px"> <h2>Get IBM Watson Studio free of charge!</h2> <p><a href="https://cocl.us/bottemNotebooksPython101Coursera"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p> </div> <h3>About the Authors:</h3> <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p> Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a> <hr> <p>Copyright &copy; 2018 IBM Developer Skills Network. 
This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
github_jupyter
``` from fastai.text import * from fastai.tabular import * path = Path('') data = pd.read_csv('good_small_dataset.csv', engine='python') data.head() df = data.dropna() df.to_csv('good_small_dataset_drop_missing.csv') data_lm = TextLMDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', text_cols = 'content', label_cols = 'type') data_lm.save('data_lm_export.pkl') data_clas = TextClasDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', vocab=data_lm.train_ds.vocab, text_cols = 'content', label_cols = 'type',bs=16) data_clas.save('data_clas_export.pkl') from fastai.text import * data_lm = load_data('NLP/', 'data_lm_export.pkl') data_clas = load_data('', 'data_clas_export.pkl') learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5) learn.save('initial') learn.fit_one_cycle(1, 1e-2) learn.save('initial') learn.unfreeze() learn.fit_one_cycle(1, 1e-3) learn.save_encoder('ft_enc') learn.save('ft_encoder_model') learn.predict("The President today spoke on", n_words=10) learn.predict("Kim Kardashian released a new photo depicting her doing", n_words=6) learn.predict("World War Three has begun between", n_words=10) learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5); learn.load_encoder('ft_enc') learn.load('good_model_epoc_2'); learn.summary() data_clas.show_batch() learn.fit_one_cycle(1, 1e-2) learn.save('good_model') learn.freeze_to(-2) learn.fit_one_cycle(1, slice(5e-3/2., 5e-3)) learn.save('good_model_epoc_2') learn.unfreeze() learn.fit_one_cycle(1, slice(2e-3/100, 2e-3)) learn.save('good_model_epoc_3') # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized 
from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. 
Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. 
Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. 
His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") # BrightBert again learn.predict("The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,” Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.” Trump’s 'tougher direction” statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,” according to Rosemary Jenks, policy director at the Center for Immigration Studies. 
'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,” partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children” are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight” amnesty and cheap labor bill. The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. 
The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,” one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’” But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,” said a February letter from union President Chris Crane.") # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. 
It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. 
Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. 
His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") # BrightBert again learn.predict("The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,' Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.' Trump’s 'tougher direction' statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,' according to Rosemary Jenks, policy director at the Center for Immigration Studies. 
'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,' partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children' are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight' amnesty and cheap labor bill. The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. 
The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,' one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’' But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,' said a February letter from union President Chris Crane.") # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. 
It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. 
Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. 
His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") #Pseudoscience learn.predict("Have you ever clicked on a link like 'What does your favorite animal say about you?' wondering what your love of hedgehogs reveals about your psyche? Or filled out a personality assessment to gain new understanding into whether you’re an introverted or extroverted 'type'? People love turning to these kinds of personality quizzes and tests on the hunt for deep insights into themselves. People tend to believe they have a 'true' and revealing self hidden somewhere deep within, so it’s natural that assessments claiming to unveil it will be appealing.As psychologists, we noticed something striking about assessments that claim to uncover people’s 'true type.' Many of the questions are poorly constructed – their wording can be ambiguous and they often contain forced choices between options that are not opposites. This can be true of BuzzFeed-type quizzes as well as more seemingly sober assessments.On the other hand, assessments created by trained personality psychologists use questions that are more straightforward to interpret. The most notable example is probably the well-respected Big Five Inventory. Rather than sorting people into 'types,' it scores people on the established psychological dimensions of openness to new experience, conscientiousness, extroversion, agreeableness and neuroticism. This simplicity is by design; psychology researchers know that the more respondents struggle to understand the question, the worse the question is.But the lack of rigor in 'type' assessments turns out to be a feature, not a bug, for the general public. What makes tests less valid can ironically make them more interesting. 
Since most people aren’t trained to think about psychology in a scientifically rigorous way, it stands to reason they also won’t be great at evaluating those assessments. We recently conducted series of studies to investigate how consumers view these tests. When people try to answer these harder questions, do they think to themselves 'This question is poorly written'? Or instead do they focus on its difficulty and think 'This question’s deep'? Our results suggest that a desire for deep insight can lead to deep confusion.Confusing difficult for deepIn our first study, we showed people items from both the Big Five and from the Keirsey Temperament Sorter (KTS), a popular 'type' assessment that contains many questions we suspected people find comparatively difficult. Our participants rated each item in two ways. First, they rated difficulty. That is, how confusing and ambiguous did they find it? Second, what was its perceived 'depth'? In other words, to what extent did they feel the item seemed to be getting at something hidden deep in the unconscious?Sure enough, not only were these perceptions correlated, the KTS was seen as both more difficult and deeper. In follow-up studies, we experimentally manipulated difficulty. In one study, we modified Big Five items to make them harder to answer like the KTS items, and again we found that participants rated the more difficult versions as 'deeper.'We also noticed that some personality assessments seem to derive their intrigue from having seemingly nothing to do with personality at all. Take one BuzzFeed quiz, for example, that asks about which colors people associate with abstract concepts like letters and days of the week and then outputs 'the true age of your soul.' Even if people trust BuzzFeed more for entertainment than psychological truths, perhaps they are actually on board with the idea that these difficult, abstract decisions do reveal some deep insights. 
In fact, that is the entire idea behind classically problematic measures such as the Rorschach, or 'ink blot,' test.In two studies inspired by that BuzzFeed quiz, we found exactly that. We gave people items from purported 'personality assessment' checklists. In one study, we assigned half the participants to the 'difficult' condition, wherein the assessment items required them to choose which of two colors they associated with abstract concepts, like the letter 'M.' In the 'easier' condition, respondents were still required to rate colors on how much they associated them with those abstract concepts, but they more simply rated one color at a time instead of choosing between two.Again, participants rated the difficult version as deeper. Seemingly, the sillier the assessment, the better people think it can read the hidden self.Intuition may steer you wrongOne of the implications of this research is that people are going to have a hard time leaving behind the bad ideas baked into popular yet unscientific personality assessments. The most notable example is the Myers-Briggs Type Indicator, which infamously remains quite popular while doing a fairly poor job of assessing personality, due to longstanding issues with the assessment itself and the long-discredited Jungian theory behind it. Our findings suggest that Myers-Briggs-like assessments that have largely been debunked by experts might persist in part because their formats overlap quite well with people’s intuitions about what will best access the “true self.”People’s intuitions do them no favors here. Intuitions often undermine scientific thinking on topics like physics and biology. Psychology is no different. People arbitrarily divide parts of themselves into “true” and superficial components and seem all too willing to believe in tests that claim to definitively make those distinctions. 
But the idea of a “true self” doesn’t really work as a scientific concept.Some people might be stuck in a self-reinforcing yet unproductive line of thought: Personality assessments can cause confusion. That confusion in turn overlaps with intuitions of how they think their deep psychology works, and then they tell themselves the confusion is profound. So intuitions about psychology might be especially pernicious. Following them too closely could lead you to know less about yourself, not more.", thresh=0.5) learn.predict("PETALUMA, CA — An incident in which a white man was reportedly beaten in downtown Petaluma by a group of suspects the victim described as four or five black men is being investigated as a hate crime and an assault, the Petaluma Police Department said Tuesday in a news release.Petaluma police Lt. Ed Crosby said officers immediately responded at 9:03 p.m. Saturday, March 9 to the intersection of Mary Street at Petaluma Boulevard North to a woman's report that her domestic partner, a 60-year-old white man, had just been attacked.The lieutenant said when officers arrived they found the victim on the ground suffering from numerous facial injuries.The man was rushed to Santa Rosa Memorial Hospital where according to police, he stayed two days. 
Injuries to the victim were confirmed as a fractured left eye socket, a broken nose and other abrasions to his face including facial swelling, Crosby said.The couple told police that the night of the incident they had just finished eating dinner at a restaurant on Petaluma Boulevard North and were walking westbound toward their car, which was parked on Mary Street, when they passed a group of several African-American men who looked to be in their 20s, standing around a four-door, emerald green Honda Civic.The couple said they did not interact with the group and were continuing on their way when one of the men by the green Honda 'hurled profanity at the victim and referred to his [the victim's] race,' Crosby said.'The victim turned around and saw one of the males rushing at him, swinging his arms,' Crosby said.'The victim grabbed the advancing male, brought him to the ground, and pinned him,' Crosby said. 'In response, the other males by the green Honda repeatedly kicked the victim in the face before getting into the green Honda and fleeing the scene.'Petaluma police are asking anyone with information about the incident to contact or leave a message for Petaluma Police Department Officer Ron Flores by calling 707-778-4372.The victim and his female companion were not able to give many descriptive details about the suspects, the lieutenant said, and thus far, officers' efforts in canvassing the downtown area for any witnesses or video footage that would help identify the suspects have not been successful.The green Honda was missing a front license plate; the back license plate may possibly include the numbers 611, according to police.", thresh=.5) learn.data.classes ```
github_jupyter
```
# Inverse modeling of log-conductivity (Y) from sparse Y and head (u) observations.
# Compares three estimators on the same reference field: plain GP regression (GPR),
# the PICKLE constrained-KL least-squares estimate, and a MAP estimate with H1
# regularization. NOTE(review): relies on project-local packages `sdfs` and `ckli`
# (not in view) — behavior of their classes is assumed, not verified here.
import paths
import yaml
import os
import copy
import numpy as np
import numpy.random as npr
import scipy.optimize as spo
import scipy.linalg as spl
from matplotlib import pyplot as plt, collections as mc, patches as mpatches, cm, ticker
from sdfs.geom_mrst import GeomMRST
from sdfs.bc_mrst import BCMRST
from sdfs.darcy import DarcyExp
from sdfs.tpfa import TPFA
from sdfs.dasa import DASAExpLM, DASAExpLMWithFlux
from time import perf_counter
import ckli.mapest as mapest
import ckli.ckliest_l2reg as ckliest
import h5py
import GPy

plt.rc('text', usetex=True)
plt.rc('image', cmap='plasma')

def plot_patch(patches, values, fig, ax, points, title, cmin=None, cmax=None, cb=False):
    """Draw a cell-patch field on `ax`, colored by `values`, clipped to [cmin, cmax].

    `points`, if given, is overplotted as small black dots (observation locations).
    When `cb` is True a colorbar is attached. Returns the PatchCollection so the
    caller can build its own colorbar. `title` is currently unused (set_title is
    commented out below).
    """
    p = mc.PatchCollection(patches, cmap=cm.jet)
    p.set_array(values)
    p.set_clim([cmin, cmax])
    ax.add_collection(p)
    if points is not None:
        ax.plot(*points, 'ko', markersize=0.5)
    ax.set_aspect('equal')
    ax.axis('off')
    ax.autoscale(tight=True)
    #ax.set_title(title)
    if cb:
        fig.colorbar(p, ax=ax)
    return p

# Parameters
seed = 0
num_trials = 1
res_fac = 1
resolution = '1x'
resolution_fine = '16x'
NYobs = 100            # number of Y (log-conductivity) observations
NYlearn = NYobs
NYrefobs = 50          # observations used to build the reference field file
NYxi = 1000            # number of KL modes retained for Y
Nuxi = 1000            # number of KL modes retained for u
Nens = 5000            # Monte Carlo ensemble size for the u statistics
beta_ckli = 1e1
Ygamma_ckli = 1e-4
ugamma_ckli = 1e-4
gamma_map = 1e-6
std_dev_ref = 1.0      # initial kernel std dev (re-optimized by GPy below)
cor_len_ref = 0.1      # initial kernel correlation length (re-optimized below)
Neumann_sd = 0         # std dev of Neumann BC randomization; 0 selects the no-flux branch
lsq_method = 'trf'

# Input/output locations; all data files are keyed by resolution and observation counts.
data_path = '../data/'
results_path = '../results/'
figures_path = '../figures/'
geom_filename = data_path + f'geom/geom_{resolution}.mat'
geom_fine_filename = data_path + f'geom/geom_{resolution_fine}.mat'
bc_filename = data_path + f'bc/bc_{resolution}.mat'
conduct_filename = data_path + f'RF2/conduct_log_RF2_{NYrefobs}_{resolution}.mat'
well_cells_filename = data_path + f'well_cells/well_cells_{resolution}.mat'
yobs_filename = data_path + f'yobs/yobs_{NYobs}_{resolution}.npy'
yobs_fine_filename = data_path + f'yobs/yobs_{NYobs}_{resolution_fine}.npy'
ref = f"Yref=RF2_{NYrefobs}_{resolution}"
Yfac = 7.0 # Rescaling factor for log-conductivity. Must be applied to Yref and the BCs

# Problem setup: geometry, boundary conditions (rescaled by Yfac), and Darcy solver.
geom = GeomMRST(geom_filename)
bc = BCMRST(geom, bc_filename)
bc.rescale('N', Yfac)
prob = DarcyExp(TPFA(geom, bc), None)
Nc = geom.cells.num
Ninf = geom.faces.num_interior
print(f'Ninf = {Ninf}, Nc = {Nc}')
# One matplotlib polygon per grid cell, used by plot_patch.
patches = [mpatches.Polygon(v, closed=True) for v in geom.nodes.coords.T[geom.cells.nodes.T, :]]

# Observations
rs = npr.RandomState(seed)
# Read stochastic model from GPML output
with h5py.File(conduct_filename, 'r') as f:
    Yref = f.get('mu')[:].ravel() - Yfac    # reference field shifted by Yfac
    xrefYobs = f.get('xYobs')[:]
# Reference head field from the Darcy solve with (optionally randomized) Neumann BCs.
uref = prob.randomize_bc('N', Neumann_sd).solve(Yref)
# u observations
with h5py.File(well_cells_filename, 'r') as f:
    iuobs = f.get('well_cells')[:].ravel() - 1   # MATLAB 1-based -> 0-based indices
uobs = uref[iuobs]
Nuobs = iuobs.size

# Plot and save the (unshifted) reference field with observation wells.
fig, ax = plt.subplots(figsize=(4, 4))
p = plot_patch(patches, Yref + Yfac, fig, ax, xrefYobs, 'Yref', 0, 12)
cbar = fig.colorbar(p, ax=ax)
cbar.ax.tick_params(labelsize='30')
cbar.locator = ticker.MaxNLocator(nbins=7)
cbar.update_ticks()
fig.tight_layout()
fig.savefig(figures_path + f'ref/Yref_{ref}.pdf', dpi=300)

# Error metrics: relative L2 and absolute infinity norm against the reference.
rl2e = lambda yest, yref : spl.norm(yest - yref, 2) / spl.norm(yref, 2)
infe = lambda yest, yref : spl.norm(yest - yref, np.inf)

# Y-observation cell indices: reuse a cached set, downsample a fine-grid set,
# or draw a fresh random set — whichever is available first.
if os.path.exists(yobs_filename):
    print(f"iYobs set read from file {yobs_filename}")
    iYobs = np.load(yobs_filename)
elif os.path.exists(yobs_fine_filename):
    print(f"iYobs set read from file {yobs_fine_filename} and randomly selected nearby cell")
    iYobs_fine = np.load(yobs_fine_filename)
    geom_fine = GeomMRST(geom_fine_filename)
    iYobs = np.array([geom.anyCellsWithin(geom_fine.nodes.coords.T[geom_fine.cells.nodes.T[iYobs_fine[t]]]) for t in range(num_trials)])
    np.save(yobs_filename, iYobs)
else:
    print(f"iYobs set randomly generated and saved to {yobs_filename}")
    iYobs = np.array([np.sort(rs.choice(Nc, NYobs, replace=False)) for _ in range(num_trials)])
    np.save(yobs_filename, iYobs)
print(f"{iYobs.shape=}")
print(iYobs)

# Experiment tag used in every output filename below.
exp = f'NY={NYobs}_Nu={iuobs.size}_{NYlearn=}_{Nuxi=}_{NYxi=}_beta={beta_ckli}_gamma={ugamma_ckli}_Neumann_sd={Neumann_sd}_{lsq_method=}_h1reg_{ref}'
print(exp)

# Per-trial result arrays. Column conventions:
#   timings: 0=GPR, 1=Monte Carlo, 2=eigendecomposition, 3=PICKLE, 4=MAP (5 unused)
#   rel/abs_errors: 0=GPR, 1=PICKLE, 2=MAP (3 unused)
timings = np.zeros((num_trials, 6))
nfevs = np.zeros((num_trials, 3), dtype=int)
rel_errors = np.zeros((num_trials, 4))
abs_errors = np.zeros((num_trials, 4))
Yobs = np.zeros((num_trials, NYobs))
Ypred = np.zeros((num_trials, Nc))
CYpred = np.zeros((num_trials, Nc, Nc))
umean = np.zeros((num_trials, Nc))
Cu = np.zeros((num_trials, Nc, Nc))
upred = np.zeros((num_trials, Nc))
Cupred = np.zeros((num_trials, Nc, Nc))
PsiY = np.zeros((num_trials, Nc, NYxi))
LambdaY = np.zeros((num_trials, NYxi))
Psiu = np.zeros((num_trials, Nc, Nuxi))
Lambdau = np.zeros((num_trials, Nuxi))
Yxi = np.zeros((num_trials, NYxi))
uxi = np.zeros((num_trials, Nuxi))
Yest = np.zeros((num_trials, Nc))
uest = np.zeros((num_trials, Nc))
Yest_MAPH1 = np.zeros((num_trials, Nc))
if Neumann_sd != 0:
    # Extra flux unknowns only exist when the Neumann BCs are randomized.
    Nq = np.count_nonzero(bc.kind == 'N')
    q_MAPH1 = np.zeros((num_trials, Nq))

# --- Stage 1: GP regression of Y from its observations (Matern-5/2 kernel,
# hyperparameters optimized by GPy, then noiseless prediction on all centroids).
for t in range(num_trials):
    Yobs[t] = Yref[iYobs[t]]
    ts = perf_counter()
    klearn = GPy.kern.sde_Matern52(input_dim=2, variance=std_dev_ref**2, lengthscale=cor_len_ref)
    mYlearn = GPy.models.GPRegression(geom.cells.centroids[:, iYobs[t]].T, Yobs[t, :, None], klearn, noise_var=np.sqrt(np.finfo(float).eps))
    mYlearn.optimize(messages=True, ipython_notebook=False)
    print(f"{klearn.lengthscale.values[0]=}")
    print(f"{np.sqrt(klearn.variance.values[0])=}")
    # Re-fit with the learned kernel, then predict mean and full covariance.
    mYref = GPy.models.GPRegression(geom.cells.centroids[:, iYobs[t]].T, Yobs[t, :, None], mYlearn.kern, noise_var=np.sqrt(np.finfo(float).eps))
    Ypred[t], CYpred[t] = (lambda x, y : (x.ravel(), y))(*mYref.predict_noiseless(geom.cells.centroids.T, full_cov=True))
    timings[t, 0] = perf_counter() - ts
print(f"GPR: {timings[:, 0]} s")

# --- Stage 2: Monte Carlo GP model for u, conditioned on the u observations.
for t in range(num_trials):
    # Compute GP model for u
    ts = perf_counter()
    umean[t], Cu[t] = ckliest.smc_gp(Ypred[t], CYpred[t], Nens, copy.deepcopy(prob), rs, randomize_bc=True, randomize_scale=Neumann_sd)
    upred[t], Cupred[t] = ckliest.gpr(umean[t], Cu[t], uobs, iuobs)
    timings[t, 1] = perf_counter() - ts
print(f"Monte Carlo: {timings[:, 1]} s")

# PICKLE models
Ym = Ypred
CYm = CYpred
um = umean #or change to upred
Cum = Cu #or change to Cupred

# GPR-only baseline errors (column 0).
rel_errors[:, 0] = np.array([rl2e(Ym[t], Yref) for t in range(num_trials)])
abs_errors[:, 0] = np.array([infe(Ym[t], Yref) for t in range(num_trials)])
print(f"GPR\tRelative error: {rel_errors[:, 0]}")
print(f"GPR\tInfinity error: {abs_errors[:, 0]}")

# --- Stage 3: truncated KL expansions of the Y and u covariances.
for t in range(num_trials):
    ts = perf_counter()
    PsiY[t], LambdaY[t] = ckliest.KL_via_eigh(CYm[t], NYxi)
    Psiu[t], Lambdau[t] = ckliest.KL_via_eigh(Cum[t], Nuxi)
    timings[t, 2] = perf_counter() - ts
print(f"eigendecomposition: {timings[:, 2]} s")

# PICKLE estimate
# ssv: cells excluded when Neumann BCs are randomized (those touching 'N' half-faces).
ssv = None if Neumann_sd == 0 else np.delete(np.arange(Nc), np.unique(geom.cells.to_hf[2*geom.faces.num_interior:][bc.kind == 'N']))
for t in range(num_trials):
    res = ckliest.LeastSqRes(NYxi, Ym[t], PsiY[t], Nuxi, um[t], Psiu[t], prob, ugamma_ckli, Ygamma_ckli, res_fac, iuobs, uobs, iYobs[t], Yobs[t], beta_ckli, ssv=ssv)
    x0 = np.zeros(Nuxi + NYxi)
    ts = perf_counter()
    sol = spo.least_squares(res.val, x0, jac=res.jac, method=lsq_method, verbose=2)
    ckli_status = sol.status
    timings[t, 3] = perf_counter() - ts
    nfevs[t, 0] = sol.nfev
    print(f'CKLI optimality: {sol.optimality : g}')
    # Solution vector is [u coefficients | Y coefficients]; map back through the KL bases.
    uxi[t] = sol.x[:Nuxi]
    Yxi[t] = sol.x[Nuxi:]
    uest[t] = um[t] + Psiu[t] @ uxi[t]
    Yest[t] = Ym[t] + PsiY[t] @ Yxi[t]
rel_errors[:, 1] = np.array([rl2e(Yest[t], Yref) for t in range(num_trials)])
abs_errors[:, 1] = np.array([infe(Yest[t], Yref) for t in range(num_trials)])
print(f"PICKLE: {timings[:, 3]} s")
print(f"PICKLE\trelative L2 error: {rel_errors[:, 1]}")
print(f"PICKLE\tabsolute Infinity error: {abs_errors[:, 1]}")

# MAP H1 estimate
Lreg = mapest.compute_Lreg(geom)
for t in range(num_trials):
    if Neumann_sd == 0:
        # Fixed BCs: unknowns are the Nc cell values of Y only.
        loss = mapest.LossVec(Nc, Nc, iuobs, uobs, iYobs[t], Yobs[t], gamma_map, Lreg) # H1 regularization
        dasa = DASAExpLM(loss.val, loss.grad_u, loss.grad_Y, prob.solve, prob.residual_sens_u, prob.residual_sens_Y)
        ts = perf_counter()
        sol = spo.least_squares(dasa.obj, np.zeros(Nc), jac=dasa.grad, method=lsq_method, verbose=2)
        Yest_MAPH1[t] = sol.x
    else:
        # Randomized BCs: jointly estimate Y (Nc values) and boundary fluxes q (Nq values).
        loss = mapest.LossVecWithFlux(Nc, Nc, Nq, iuobs, uobs, iYobs[t], Yobs[t], gamma_map, Lreg) # H1 regularization
        dasa = DASAExpLMWithFlux(Nc, loss.val, loss.grad_u, loss.grad_p, prob.solve, prob.residual_sens_u, prob.residual_sens_p)
        ts = perf_counter()
        sol = spo.least_squares(dasa.obj, np.zeros(Nc + Nq), jac=dasa.grad, method=lsq_method, verbose=2)
        Yest_MAPH1[t] = sol.x[:Nc]
        q_MAPH1[t] = sol.x[Nc:]
    MAP_status = sol.status
    timings[t, 4] = perf_counter() - ts
    nfevs[t, 1] = sol.nfev
    print(f'MAP status: {MAP_status}, message: {sol.message}')
rel_errors[:, 2] = np.array([rl2e(Yest_MAPH1[t], Yref) for t in range(num_trials)])
abs_errors[:, 2] = np.array([infe(Yest_MAPH1[t], Yref) for t in range(num_trials)])
print(f"MAP: {timings[:, 4]} s")
print(f"MAP\trelative L2 error: {rel_errors[:, 2]}")
print(f"MAP\tabsolute infinity error: {abs_errors[:, 2]}")

# Persist everything keyed by the experiment tag.
np.savetxt(results_path + f'iYobs/iYobs_{exp}.txt', iYobs.astype(int), fmt='%i')
np.savetxt(results_path + f'timings/timings_{exp}.txt', timings)
np.savetxt(results_path + f'nfevs/nfevs_{exp}.txt', nfevs.astype(int), fmt='%i')
np.savetxt(results_path + f'rel_errors/rel_errors_{exp}.txt', rel_errors)
np.savetxt(results_path + f'abs_errors/abs_errors_{exp}.txt', abs_errors)
# NOTE(review): this writes the reference field Yref under the 'YGPR' name —
# presumably Ypred (the GPR estimate) was intended; confirm before relying on it.
np.savetxt(results_path + f'YGPR/YGPR_{exp}.txt', Yref)
np.savetxt(results_path + f'YPICKLE/YPICKLE_{exp}.txt', Yest)
np.savetxt(results_path + f'YMAP/YMAP_{exp}.txt', Yest_MAPH1)
```
github_jupyter
# Sentiment Classification & How To "Frame Problems" for a Neural Network by Andrew Trask - **Twitter**: @iamtrask - **Blog**: http://iamtrask.github.io ### What You Should Already Know - neural networks, forward and back-propagation - stochastic gradient descent - mean squared error - and train/test splits ### Where to Get Help if You Need it - Re-watch previous Udacity Lectures - Leverage the recommended Course Reading Material - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) (Check inside your classroom for a discount code) - Shoot me a tweet @iamtrask ### Tutorial Outline: - Intro: The Importance of "Framing a Problem" (this lesson) - [Curate a Dataset](#lesson_1) - [Developing a "Predictive Theory"](#lesson_2) - [**PROJECT 1**: Quick Theory Validation](#project_1) - [Transforming Text to Numbers](#lesson_3) - [**PROJECT 2**: Creating the Input/Output Data](#project_2) - Putting it all together in a Neural Network (video only - nothing in notebook) - [**PROJECT 3**: Building our Neural Network](#project_3) - [Understanding Neural Noise](#lesson_4) - [**PROJECT 4**: Making Learning Faster by Reducing Noise](#project_4) - [Analyzing Inefficiencies in our Network](#lesson_5) - [**PROJECT 5**: Making our Network Train and Run Faster](#project_5) - [Further Noise Reduction](#lesson_6) - [**PROJECT 6**: Reducing Noise by Strategically Reducing the Vocabulary](#project_6) - [Analysis: What's going on in the weights?](#lesson_7) # Lesson: Curate a Dataset<a id='lesson_1'></a> ``` def pretty_print_review_and_label(i): print(labels[i] + "\t:\t" + reviews[i][:80] + "...") g = open('reviews.txt','r') # What we know! reviews = list(map(lambda x:x[:-1],g.readlines())) g.close() g = open('labels.txt','r') # What we WANT to know! labels = list(map(lambda x:x[:-1].upper(),g.readlines())) g.close() ``` **Note:** The data in `reviews.txt` we're using has already been preprocessed a bit and contains only lower case characters. 
If we were working from raw data, where we didn't know it was all lower case, we would want to add a step here to convert it. That's so we treat different variations of the same word, like `The`, `the`, and `THE`, all the same way. ``` len(reviews) reviews[0] labels[0] ``` # Lesson: Develop a Predictive Theory<a id='lesson_2'></a> ``` print("labels.txt \t : \t reviews.txt\n") pretty_print_review_and_label(2137) pretty_print_review_and_label(12816) pretty_print_review_and_label(6267) pretty_print_review_and_label(21934) pretty_print_review_and_label(5297) pretty_print_review_and_label(4998) ``` # Project 1: Quick Theory Validation<a id='project_1'></a> There are multiple ways to implement these projects, but in order to get your code closer to what Andrew shows in his solutions, we've provided some hints and starter code throughout this notebook. You'll find the [Counter](https://docs.python.org/2/library/collections.html#collections.Counter) class to be useful in this exercise, as well as the [numpy](https://docs.scipy.org/doc/numpy/reference/) library. ``` from collections import Counter import numpy as np ``` We'll create three `Counter` objects, one for words from postive reviews, one for words from negative reviews, and one for all the words. ``` # Create three Counter objects to store positive, negative and total counts positive_counts = Counter() negative_counts = Counter() total_counts = Counter() ``` **TODO:** Examine all the reviews. For each word in a positive review, increase the count for that word in both your positive counter and the total words counter; likewise, for each word in a negative review, increase the count for that word in both your negative counter and the total words counter. **Note:** Throughout these projects, you should use `split(' ')` to divide a piece of text (such as a review) into individual words. If you use `split()` instead, you'll get slightly different results than what the videos and solutions show. 
```
# Loop over all the words in all the reviews and increment the counts
# in the appropriate counter objects
for review, label in zip(reviews, labels):
    # positive reviews feed positive_counts; everything else feeds negative_counts
    label_counter = positive_counts if label == 'POSITIVE' else negative_counts
    for word in review.split(" "):
        label_counter[word] += 1
        total_counts[word] += 1
```

Run the following two cells to list the words used in positive reviews and negative reviews, respectively, ordered from most to least commonly used.

```
# Examine the counts of the most common words in positive reviews
positive_counts.most_common()

# Examine the counts of the most common words in negative reviews
negative_counts.most_common()
```

As you can see, common words like "the" appear very often in both positive and negative reviews. Instead of finding the most common words in positive or negative reviews, what you really want are the words found in positive reviews more often than in negative reviews, and vice versa. To accomplish this, you'll need to calculate the **ratios** of word usage between positive and negative reviews. **TODO:** Check all the words you've seen and calculate the ratio of positive to negative uses and store that ratio in `pos_neg_ratios`. >Hint: the positive-to-negative ratio for a given word can be calculated with `positive_counts[word] / float(negative_counts[word]+1)`. Notice the `+1` in the denominator – that ensures we don't divide by zero for words that are only seen in positive reviews.
```
pos_neg_ratios = Counter()

# Calculate the ratios of positive and negative uses of the most common words
# Consider words to be "common" if they've been used at least 100 times
for term, cnt in total_counts.most_common():
    if cnt <= 100:
        continue
    pos_neg_ratios[term] = positive_counts[term] / float(negative_counts[term] + 1)
```

Examine the ratios you've calculated for a few words:

```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```
Those values aren't easy to compare for a couple of reasons:

* Right now, 1 is considered neutral, but the absolute value of the positive-to-negative ratios of very positive words is larger than the absolute value of the ratios for the very negative words. So there is no way to directly compare two numbers and see if one word conveys the same magnitude of positive sentiment as another word conveys negative sentiment. So we should center all the values around neutral so the absolute value from neutral of the positive-to-negative ratio for a word would indicate how much sentiment (positive or negative) that word conveys.
* When comparing absolute values it's easier to do that around zero than one.

To fix these issues, we'll convert all of our ratios to new values using logarithms. **TODO:** Go through all the ratios you calculated and convert them to logarithms. (i.e. use `np.log(ratio)`) In the end, extremely positive and extremely negative words will have positive-to-negative ratios with similar magnitudes but opposite signs.

```
# Convert ratios to logs
for word,ratio in pos_neg_ratios.most_common():
    pos_neg_ratios[word] = np.log(ratio)
```

**NOTE:** In the video, Andrew uses the following formulas for the previous cell:
> * For any positive words, convert the ratio using `np.log(ratio)`
> * For any negative words, convert the ratio using `-np.log(1/(ratio + 0.01))`

These won't give you the exact same results as the simpler code we show in this notebook, but the values will be similar. In case that second equation looks strange, here's what it's doing: First, it divides one by a very small number, which will produce a larger positive number. Then, it takes the `log` of that, which produces numbers similar to the ones for the positive words. Finally, it negates the values by adding that minus sign up front.
The results are extremely positive and extremely negative words having positive-to-negative ratios with similar magnitudes but opposite signs, just like when we use `np.log(ratio)`. Examine the new ratios you've calculated for the same words from before:

```
print("Pos-to-neg ratio for 'the' = {}".format(pos_neg_ratios["the"]))
print("Pos-to-neg ratio for 'amazing' = {}".format(pos_neg_ratios["amazing"]))
print("Pos-to-neg ratio for 'terrible' = {}".format(pos_neg_ratios["terrible"]))
```

If everything worked, now you should see neutral words with values close to zero. In this case, "the" is near zero but slightly positive, so it was probably used in more positive reviews than negative reviews. But look at "amazing"'s ratio - it's above `1`, showing it is clearly a word with positive sentiment. And "terrible" has a similar score, but in the opposite direction, so it's below `-1`. It's now clear that both of these words are associated with specific, opposing sentiments. Now run the following cells to see more ratios. The first cell displays all the words, ordered by how associated they are with positive reviews. (Your notebook will most likely truncate the output so you won't actually see *all* the words in the list.) The second cell displays the 30 words most associated with negative reviews by reversing the order of the first list and then looking at the first 30 words. (If you want the second cell to display all the words, ordered by how associated they are with negative reviews, you could just write `reversed(pos_neg_ratios.most_common())`.) You should continue to see values similar to the earlier ones we checked – neutral words will be close to `0`, words will get more positive as their ratios approach and go above `1`, and words will get more negative as their ratios approach and go below `-1`. That's why we decided to use the logs instead of the raw ratios.
``` # words most frequently seen in a review with a "POSITIVE" label pos_neg_ratios.most_common() # words most frequently seen in a review with a "NEGATIVE" label list(reversed(pos_neg_ratios.most_common()))[0:30] # Note: Above is the code Andrew uses in his solution video, # so we've included it here to avoid confusion. # If you explore the documentation for the Counter class, # you will see you could also find the 30 least common # words like this: pos_neg_ratios.most_common()[:-31:-1] ``` # End of Project 1. ## Watch the next video to continue with Andrew's next lesson. # Transforming Text into Numbers<a id='lesson_3'></a> ``` from IPython.display import Image review = "This was a horrible, terrible movie." Image(filename='sentiment_network.png') review = "The movie was excellent" Image(filename='sentiment_network_pos.png') ``` # Project 2: Creating the Input/Output Data<a id='project_2'></a> **TODO:** Create a [set](https://docs.python.org/3/tutorial/datastructures.html#sets) named `vocab` that contains every word in the vocabulary. ``` vocab = set(total_counts.keys()) ``` Run the following cell to check your vocabulary size. If everything worked correctly, it should print **74074** ``` vocab_size = len(vocab) print(vocab_size) ``` Take a look at the following image. It represents the layers of the neural network you'll be building throughout this notebook. `layer_0` is the input layer, `layer_1` is a hidden layer, and `layer_2` is the output layer. ``` from IPython.display import Image Image(filename='sentiment_network_2.png') ``` **TODO:** Create a numpy array called `layer_0` and initialize it to all zeros. You will find the [zeros](https://docs.scipy.org/doc/numpy/reference/generated/numpy.zeros.html) function particularly helpful here. Be sure you create `layer_0` as a 2-dimensional matrix with 1 row and `vocab_size` columns. ``` layer_0 = np.zeros((1,vocab_size)) ``` Run the following cell. 
It should display `(1, 74074)`

```
layer_0.shape

from IPython.display import Image
Image(filename='sentiment_network.png')
```

`layer_0` contains one entry for every word in the vocabulary, as shown in the above image. We need to make sure we know the index of each word, so run the following cell to create a lookup table that stores the index of every word.

```
# Create a dictionary of words in the vocabulary mapped to index positions
# (to be used in layer_0)
word2index = {}
for i, word in enumerate(vocab):
    word2index[word] = i

# display the map of words to indices
word2index
```

**TODO:** Complete the implementation of `update_input_layer`. It should count how many times each word is used in the given review, and then store those counts at the appropriate indices inside `layer_0`.

```
def update_input_layer(review):
    """ Modify the global layer_0 to represent the vector form of review.
    The element at a given index of layer_0 should represent
    how many times the given word occurs in the review.
    Args:
        review(string) - the string of the review
    Returns:
        None
    """
    global layer_0

    # clear out previous state by resetting the layer to all 0s
    layer_0 *= 0

    # tally every word in the review, then write each count into the
    # layer_0 slot that word2index assigns to that word
    for word, count in Counter(review.split(" ")).items():
        layer_0[0][word2index[word]] = count
```

Run the following cell to test updating the input layer with the first review. The indices assigned may not be the same as in the solution, but hopefully you'll see some non-zero values in `layer_0`.

```
update_input_layer(reviews[0])
layer_0
```

**TODO:** Complete the implementation of `get_target_for_label`. It should return `0` or `1`, depending on whether the given label is `NEGATIVE` or `POSITIVE`, respectively.

```
def get_target_for_label(label):
    """Convert a label to `0` or `1`.
    Args:
        label(string) - Either "POSITIVE" or "NEGATIVE".
    Returns:
        `0` or `1`.
    """
    return 1 if label == 'POSITIVE' else 0
```

Run the following two cells.
They should print out`'POSITIVE'` and `1`, respectively. ``` labels[0] get_target_for_label(labels[0]) ``` Run the following two cells. They should print out `'NEGATIVE'` and `0`, respectively. ``` labels[1] get_target_for_label(labels[1]) ``` # End of Project 2 solution. ## Watch the next video to continue with Andrew's next lesson. # Project 3: Building a Neural Network<a id='project_3'></a> **TODO:** We've included the framework of a class called `SentimentNetork`. Implement all of the items marked `TODO` in the code. These include doing the following: - Create a basic neural network much like the networks you've seen in earlier lessons and in Project 1, with an input layer, a hidden layer, and an output layer. - Do **not** add a non-linearity in the hidden layer. That is, do not use an activation function when calculating the hidden layer outputs. - Re-use the code from earlier in this notebook to create the training data (see `TODO`s in the code) - Implement the `pre_process_data` function to create the vocabulary for our training data generating functions - Ensure `train` trains over the entire corpus ### Where to Get Help if You Need it - Re-watch previous week's Udacity Lectures - Chapters 3-5 - [Grokking Deep Learning](https://www.manning.com/books/grokking-deep-learning) - (Check inside your classroom for a discount code) ``` import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the 
reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. 
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) # The input layer, a two-dimensional matrix with shape 1 x input_nodes self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): # NOTE: This if-check was not in the version of this method created in Project 2, # and it appears in Andrew's Project 3 solution without explanation. # It simply ensures the word is actually a key in word2index before # accessing it, which is important because accessing an invalid key # with raise an exception in Python. This allows us to ignore unknown # words encountered in new reviews. if(word in self.word2index.keys()): self.layer_0[0][self.word2index[word]] += 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - 
self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. """ # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. 
for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE" ``` Run the following cell to create a `SentimentNetwork` that will train on all but the last 1000 reviews (we're saving those for testing). Here we use a learning rate of `0.1`. ``` mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1) ``` Run the following cell to test the network's performance against the last 1000 reviews (the ones we held out from our training set). **We have not trained the model yet, so the results should be about 50% as it will just be guessing and there are only two possible values to choose from.** ``` mlp.test(reviews[-1000:],labels[-1000:]) ``` Run the following cell to actually train the network. During training, it will display the model's accuracy repeatedly as it trains so you can see how well it's doing. ``` mlp.train(reviews[:-1000],labels[:-1000]) ``` That most likely didn't train very well. 
Part of the reason may be because the learning rate is too high. Run the following cell to recreate the network with a smaller learning rate, `0.01`, and then train the new network. ``` mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01) mlp.train(reviews[:-1000],labels[:-1000]) ``` That probably wasn't much different. Run the following cell to recreate the network one more time with an even smaller learning rate, `0.001`, and then train the new network. ``` mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001) mlp.train(reviews[:-1000],labels[:-1000]) ``` With a learning rate of `0.001`, the network should finally have started to improve during training. It's still not very good, but it shows that this solution has potential. We will improve it in the next lesson. # End of Project 3. ## Watch the next video to continue with Andrew's next lesson. # Understanding Neural Noise<a id='lesson_4'></a> ``` from IPython.display import Image Image(filename='sentiment_network.png') def update_input_layer(review): global layer_0 # clear out previous state, reset the layer to be all 0s layer_0 *= 0 for word in review.split(" "): layer_0[0][word2index[word]] += 1 update_input_layer(reviews[0]) layer_0 review_counter = Counter() for word in reviews[0].split(" "): review_counter[word] += 1 review_counter.most_common() ``` # Project 4: Reducing Noise in Our Input Data<a id='project_4'></a> **TODO:** Attempt to reduce the noise in the input data like Andrew did in the previous video. Specifically, do the following: * Copy the `SentimentNetwork` class you created earlier into the following cell. * Modify `update_input_layer` so it does not count how many times each word is used, but rather just stores whether or not a word was used. 
The following code is the same as the previous project, with project-specific changes marked with `"New for Project 4"` ``` import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. 
self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) # The input layer, a two-dimensional matrix with shape 1 x input_nodes self.layer_0 = np.zeros((1,input_nodes)) def update_input_layer(self,review): # clear out previous state, reset the layer to be all 0s self.layer_0 *= 0 for word in review.split(" "): # NOTE: This if-check was not in the version of this method created in Project 2, # and it appears in Andrew's Project 3 solution without explanation. # It simply ensures the word is actually a key in word2index before # accessing it, which is important because accessing an invalid key # with raise an exception in Python. This allows us to ignore unknown # words encountered in new reviews. 
if(word in self.word2index.keys()): ## New for Project 4: changed to set to 1 instead of add 1 self.layer_0[0][self.word2index[word]] = 1 def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) def train(self, training_reviews, training_labels): # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### # Input Layer self.update_input_layer(review) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. 
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. """ # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. 
elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. # Input Layer self.update_input_layer(review.lower()) # Hidden layer layer_1 = self.layer_0.dot(self.weights_0_1) # Output layer layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE" ``` Run the following cell to recreate the network and train it. Notice we've gone back to the higher learning rate of `0.1`. ``` mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1) mlp.train(reviews[:-1000],labels[:-1000]) mlp.test(reviews[-1000:],labels[-1000:]) ``` # End of Project 4 solution. ## Watch the next video to continue with Andrew's next lesson. 
# Analyzing Inefficiencies in our Network<a id='lesson_5'></a> ``` Image(filename='sentiment_network_sparse.png') layer_0 = np.zeros(10) layer_0 layer_0[4] = 1 layer_0[9] = 1 layer_0 weights_0_1 = np.random.randn(10,5) layer_0.dot(weights_0_1) indices = [4,9] layer_1 = np.zeros(5) for index in indices: layer_1 += (1 * weights_0_1[index]) layer_1 Image(filename='sentiment_network_sparse_2.png') layer_1 = np.zeros(5) for index in indices: layer_1 += (weights_0_1[index]) layer_1 ``` # Project 5: Making our Network More Efficient<a id='project_5'></a> **TODO:** Make the `SentimentNetwork` class more efficient by eliminating unnecessary multiplications and additions that occur during forward and backward propagation. To do that, you can do the following: * Copy the `SentimentNetwork` class from the previous project into the following cell. * Remove the `update_input_layer` function - you will not need it in this version. * Modify `init_network`: >* You no longer need a separate input layer, so remove any mention of `self.layer_0` >* You will be dealing with the old hidden layer more directly, so create `self.layer_1`, a two-dimensional matrix with shape 1 x hidden_nodes, with all values initialized to zero * Modify `train`: >* Change the name of the input parameter `training_reviews` to `training_reviews_raw`. This will help with the next step. >* At the beginning of the function, you'll want to preprocess your reviews to convert them to a list of indices (from `word2index`) that are actually used in the review. This is equivalent to what you saw in the video when Andrew set specific indices to 1. Your code should create a local `list` variable named `training_reviews` that should contain a `list` for each review in `training_reviews_raw`. Those lists should contain the indices for words found in the review. >* Remove call to `update_input_layer` >* Use `self`'s `layer_1` instead of a local `layer_1` object. 
>* In the forward pass, replace the code that updates `layer_1` with new logic that only adds the weights for the indices used in the review. >* When updating `weights_0_1`, only update the individual weights that were used in the forward pass. * Modify `run`: >* Remove call to `update_input_layer` >* Use `self`'s `layer_1` instead of a local `layer_1` object. >* Much like you did in `train`, you will need to pre-process the `review` so you can work with word indices, then update `layer_1` by adding weights for the indices used in the review. The following code is the same as the previous project, with project-specific changes marked with `"New for Project 5"` ``` import time import sys import numpy as np # Encapsulate our neural network in a class class SentimentNetwork: def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1): """Create a SentimenNetwork with the given settings Args: reviews(list) - List of reviews used for training labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training self.pre_process_data(reviews, labels) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. 
self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) def pre_process_data(self, reviews, labels): # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. 
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) ## New for Project 5: Removed self.layer_0; added self.layer_1 # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes self.layer_1 = np.zeros((1,hidden_nodes)) ## New for Project 5: Removed update_input_layer function def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) ## New for Project 5: changed name of first parameter form 'training_reviews' # to 'training_reviews_raw' def train(self, training_reviews_raw, training_labels): ## New for Project 5: pre-process training reviews so we can deal # directly with the indices of non-zero inputs training_reviews = list() for review in training_reviews_raw: indices = set() for word in review.split(" "): if(word in self.word2index.keys()): indices.add(self.word2index[word]) training_reviews.append(list(indices)) # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### ## New for Project 5: Removed call to 'update_input_layer' function # because 'layer_0' is no longer used # Hidden layer ## New for Project 5: Add in only the weights for non-zero items self.layer_1 *= 0 for index in review: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1' 
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1' self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step ## New for Project 5: Only update the weights that were used in the forward pass for index in review: self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. 
""" # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. ## New for Project 5: Removed call to update_input_layer function # because layer_0 is no longer used # Hidden layer ## New for Project 5: Identify the indices used in the review and then add # just those weights to layer_1 self.layer_1 *= 0 unique_indices = set() for word in review.lower().split(" "): if word in self.word2index.keys(): unique_indices.add(self.word2index[word]) for index in unique_indices: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use self.layer_1 instead of local layer_1 layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE" ``` Run the following cell to recreate the network and train it once again. 
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
```

That should have trained much better than the earlier attempts. Run the following cell to test your model with 1000 predictions.

```
mlp.test(reviews[-1000:],labels[-1000:])
```

# End of Project 5 solution.
## Watch the next video to continue with Andrew's next lesson.

# Further Noise Reduction<a id='lesson_6'></a>

```
Image(filename='sentiment_network_sparse_2.png')

# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()

# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]

from bokeh.models import ColumnDataSource, LabelSet
from bokeh.plotting import figure, show, output_file
from bokeh.io import output_notebook
output_notebook()

# NOTE: the deprecated 'normed' keyword was removed from these np.histogram
# calls. Passing both 'density' and 'normed' raises an error, and 'normed'
# was removed entirely in NumPy 1.24; 'density=True' gives the same result.
hist, edges = np.histogram(list(map(lambda x:x[1],pos_neg_ratios.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save", toolbar_location="above",
           title="Word Positive/Negative Affinity Distribution")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)

frequency_frequency = Counter()

for word, cnt in total_counts.most_common():
    frequency_frequency[cnt] += 1

hist, edges = np.histogram(list(map(lambda x:x[1],frequency_frequency.most_common())), density=True, bins=100)

p = figure(tools="pan,wheel_zoom,reset,save", toolbar_location="above",
           title="The frequency distribution of the words in our corpus")
p.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], line_color="#555555")
show(p)
```

# Project 6: Reducing Noise by Strategically Reducing the Vocabulary<a id='project_6'></a>

**TODO:** Improve `SentimentNetwork`'s performance by reducing more noise in the vocabulary. Specifically, do the following:
* Copy the `SentimentNetwork` class from the previous project into the following cell.
* Modify `pre_process_data`:
>* Add two additional parameters: `min_count` and `polarity_cutoff`
>* Calculate the positive-to-negative ratios of words used in the reviews. (You can use code you've written elsewhere in the notebook, but we are moving it into the class like we did with other helper code earlier.)
>* Andrew's solution only calculates a positive-to-negative ratio for words that occur at least 50 times. This keeps the network from attributing too much sentiment to rarer words. You can choose to add this to your solution if you would like.
>* Change so words are only added to the vocabulary if they occur in the vocabulary more than `min_count` times.
>* Change so words are only added to the vocabulary if the absolute value of their positive-to-negative ratio is at least `polarity_cutoff`
* Modify `__init__`:
>* Add the same two parameters (`min_count` and `polarity_cutoff`) and use them when you call `pre_process_data`

The following code is the same as the previous project, with project-specific changes marked with `"New for Project 6"`

```
import time
import sys
import numpy as np

# Encapsulate our neural network in a class
class SentimentNetwork:
    ## New for Project 6: added min_count and polarity_cutoff parameters
    def __init__(self, reviews,labels,min_count = 10,polarity_cutoff = 0.1,hidden_nodes = 10, learning_rate = 0.1):
        """Create a SentimenNetwork with the given settings
        Args:
            reviews(list) - List of reviews used for training
            labels(list) - List of POSITIVE/NEGATIVE labels associated with the given reviews
            min_count(int) - Words should only be added to the vocabulary if they occur more than this many times
            polarity_cutoff(float) - The absolute value of a word's positive-to-negative ratio must be at least this big to be considered.
hidden_nodes(int) - Number of nodes to create in the hidden layer learning_rate(float) - Learning rate to use while training """ # Assign a seed to our random number generator to ensure we get # reproducable results during development np.random.seed(1) # process the reviews and their associated labels so that everything # is ready for training ## New for Project 6: added min_count and polarity_cutoff arguments to pre_process_data call self.pre_process_data(reviews, labels, polarity_cutoff, min_count) # Build the network to have the number of hidden nodes and the learning rate that # were passed into this initializer. Make the same number of input nodes as # there are vocabulary words and create a single output node. self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate) ## New for Project 6: added min_count and polarity_cutoff parameters def pre_process_data(self, reviews, labels, polarity_cutoff, min_count): ## ---------------------------------------- ## New for Project 6: Calculate positive-to-negative ratios for words before # building vocabulary # positive_counts = Counter() negative_counts = Counter() total_counts = Counter() for i in range(len(reviews)): if(labels[i] == 'POSITIVE'): for word in reviews[i].split(" "): positive_counts[word] += 1 total_counts[word] += 1 else: for word in reviews[i].split(" "): negative_counts[word] += 1 total_counts[word] += 1 pos_neg_ratios = Counter() for term,cnt in list(total_counts.most_common()): if(cnt >= 50): pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1) pos_neg_ratios[term] = pos_neg_ratio for word,ratio in pos_neg_ratios.most_common(): if(ratio > 1): pos_neg_ratios[word] = np.log(ratio) else: pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01))) # ## end New for Project 6 ## ---------------------------------------- # populate review_vocab with all of the words in the given reviews review_vocab = set() for review in reviews: for word in review.split(" "): ## New for Project 6: 
only add words that occur at least min_count times # and for words with pos/neg ratios, only add words # that meet the polarity_cutoff if(total_counts[word] > min_count): if(word in pos_neg_ratios.keys()): if((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)): review_vocab.add(word) else: review_vocab.add(word) # Convert the vocabulary set to a list so we can access words via indices self.review_vocab = list(review_vocab) # populate label_vocab with all of the words in the given labels. label_vocab = set() for label in labels: label_vocab.add(label) # Convert the label vocabulary set to a list so we can access labels via indices self.label_vocab = list(label_vocab) # Store the sizes of the review and label vocabularies. self.review_vocab_size = len(self.review_vocab) self.label_vocab_size = len(self.label_vocab) # Create a dictionary of words in the vocabulary mapped to index positions self.word2index = {} for i, word in enumerate(self.review_vocab): self.word2index[word] = i # Create a dictionary of labels mapped to index positions self.label2index = {} for i, label in enumerate(self.label_vocab): self.label2index[label] = i def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Store the learning rate self.learning_rate = learning_rate # Initialize weights # These are the weights between the input layer and the hidden layer. self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes)) # These are the weights between the hidden layer and the output layer. 
self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) ## New for Project 5: Removed self.layer_0; added self.layer_1 # The input layer, a two-dimensional matrix with shape 1 x hidden_nodes self.layer_1 = np.zeros((1,hidden_nodes)) ## New for Project 5: Removed update_input_layer function def get_target_for_label(self,label): if(label == 'POSITIVE'): return 1 else: return 0 def sigmoid(self,x): return 1 / (1 + np.exp(-x)) def sigmoid_output_2_derivative(self,output): return output * (1 - output) ## New for Project 5: changed name of first parameter form 'training_reviews' # to 'training_reviews_raw' def train(self, training_reviews_raw, training_labels): ## New for Project 5: pre-process training reviews so we can deal # directly with the indices of non-zero inputs training_reviews = list() for review in training_reviews_raw: indices = set() for word in review.split(" "): if(word in self.word2index.keys()): indices.add(self.word2index[word]) training_reviews.append(list(indices)) # make sure out we have a matching number of reviews and labels assert(len(training_reviews) == len(training_labels)) # Keep track of correct predictions to display accuracy during training correct_so_far = 0 # Remember when we started for printing time statistics start = time.time() # loop through all the given reviews and run a forward and backward pass, # updating weights for every item for i in range(len(training_reviews)): # Get the next review and its correct label review = training_reviews[i] label = training_labels[i] #### Implement the forward pass here #### ### Forward pass ### ## New for Project 5: Removed call to 'update_input_layer' function # because 'layer_0' is no longer used # Hidden layer ## New for Project 5: Add in only the weights for non-zero items self.layer_1 *= 0 for index in review: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use 'self.layer_1' instead of 'local layer_1' 
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) #### Implement the backward pass here #### ### Backward pass ### # Output error layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output. layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2) # Backpropagated error layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error # Update the weights ## New for Project 5: changed to use 'self.layer_1' instead of local 'layer_1' self.weights_1_2 -= self.layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step ## New for Project 5: Only update the weights that were used in the forward pass for index in review: self.weights_0_1[index] -= layer_1_delta[0] * self.learning_rate # update input-to-hidden weights with gradient descent step # Keep track of correct predictions. if(layer_2 >= 0.5 and label == 'POSITIVE'): correct_so_far += 1 elif(layer_2 < 0.5 and label == 'NEGATIVE'): correct_so_far += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the training process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) \ + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%") if(i % 2500 == 0): print("") def test(self, testing_reviews, testing_labels): """ Attempts to predict the labels for the given testing_reviews, and uses the test_labels to calculate the accuracy of those predictions. 
""" # keep track of how many correct predictions we make correct = 0 # we'll time how many predictions per second we make start = time.time() # Loop through each of the given reviews and call run to predict # its label. for i in range(len(testing_reviews)): pred = self.run(testing_reviews[i]) if(pred == testing_labels[i]): correct += 1 # For debug purposes, print out our prediction accuracy and speed # throughout the prediction process. elapsed_time = float(time.time() - start) reviews_per_second = i / elapsed_time if elapsed_time > 0 else 0 sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \ + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \ + " #Correct:" + str(correct) + " #Tested:" + str(i+1) \ + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%") def run(self, review): """ Returns a POSITIVE or NEGATIVE prediction for the given review. """ # Run a forward pass through the network, like in the "train" function. ## New for Project 5: Removed call to update_input_layer function # because layer_0 is no longer used # Hidden layer ## New for Project 5: Identify the indices used in the review and then add # just those weights to layer_1 self.layer_1 *= 0 unique_indices = set() for word in review.lower().split(" "): if word in self.word2index.keys(): unique_indices.add(self.word2index[word]) for index in unique_indices: self.layer_1 += self.weights_0_1[index] # Output layer ## New for Project 5: changed to use self.layer_1 instead of local layer_1 layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2)) # Return POSITIVE for values above greater-than-or-equal-to 0.5 in the output layer; # return NEGATIVE for other values if(layer_2[0] >= 0.5): return "POSITIVE" else: return "NEGATIVE" ``` Run the following cell to train your network with a small polarity cutoff. 
```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.05,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```

And run the following cell to test its performance.

```
mlp.test(reviews[-1000:],labels[-1000:])
```

Run the following cell to train your network with a much larger polarity cutoff.

```
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=20,polarity_cutoff=0.8,learning_rate=0.01)
mlp.train(reviews[:-1000],labels[:-1000])
```

And run the following cell to test its performance.

```
mlp.test(reviews[-1000:],labels[-1000:])
```

# End of Project 6 solution.
## Watch the next video to continue with Andrew's next lesson.

# Analysis: What's Going on in the Weights?<a id='lesson_7'></a>

```
mlp_full = SentimentNetwork(reviews[:-1000],labels[:-1000],min_count=0,polarity_cutoff=0,learning_rate=0.01)
mlp_full.train(reviews[:-1000],labels[:-1000])

Image(filename='sentiment_network_sparse.png')

def get_most_similar_words(focus = "horrible"):
    most_similar = Counter()
    for word in mlp_full.word2index.keys():
        most_similar[word] = np.dot(mlp_full.weights_0_1[mlp_full.word2index[word]],mlp_full.weights_0_1[mlp_full.word2index[focus]])
    return most_similar.most_common()

get_most_similar_words("excellent")
get_most_similar_words("terrible")

import matplotlib.colors as colors

words_to_visualize = list()
for word, ratio in pos_neg_ratios.most_common(500):
    if(word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)

for word, ratio in list(reversed(pos_neg_ratios.most_common()))[0:500]:
    if(word in mlp_full.word2index.keys()):
        words_to_visualize.append(word)

pos = 0
neg = 0

colors_list = list()
vectors_list = list()
for word in words_to_visualize:
    if word in pos_neg_ratios.keys():
        vectors_list.append(mlp_full.weights_0_1[mlp_full.word2index[word]])
        if(pos_neg_ratios[word] > 0):
            pos+=1
            colors_list.append("#00ff00")
        else:
            neg+=1
            colors_list.append("#000000")

from sklearn.manifold import TSNE
tsne =
TSNE(n_components=2, random_state=0) words_top_ted_tsne = tsne.fit_transform(vectors_list) p = figure(tools="pan,wheel_zoom,reset,save", toolbar_location="above", title="vector T-SNE for most polarized words") source = ColumnDataSource(data=dict(x1=words_top_ted_tsne[:,0], x2=words_top_ted_tsne[:,1], names=words_to_visualize, color=colors_list)) p.scatter(x="x1", y="x2", size=8, source=source, fill_color="color") word_labels = LabelSet(x="x1", y="x2", text="names", y_offset=6, text_font_size="8pt", text_color="#555555", source=source, text_align='center') p.add_layout(word_labels) show(p) # green indicates positive words, black indicates negative words ```
github_jupyter
# **CSE 7324 Lab 3: Extending Logistic Regression** ### *Thomas Adams, Suleiman Hijazeen, Nancy Le and Andrew Whigham* ------ ### **1. Preparation and Overview** ------ #### 1.1 Business Understanding --- Austin Animal Center is the largest no-kill shelter in the United States and provides shelter to more than 16,000 animals each year. As a no-kill shelter they refuse to euthanize any animal unless the animal has a terminal medical issue and is in pain or if the animal is a danger to the public or to the shelter staff. Although the shelter’s primary goal is to find ‘forever homes’ for each and every animal that comes through their doors, many animals end up staying in the shelter for a long time if they are not considered as desirable for adoption as other animals. In addition to adopting out animals, the Austin Animal Center partners with various other rescues and animal sanctuaries to try to find homes for their animals. The average annual cost per animal at the Austin Animal Center is approximately $715 [3] and with many animals staying at the facility for long periods of time, some for several years, the cost can add up quickly. The shelter has fixed financial support via legislation to cover costs for staffing the shelters and a few grants to cover veterinary staff and services, but the shelter primarily relies on donations to provide for food, bedding and toys for the animals. The shelter must try to minimize costs associated with each animal and try to have the animals leave the shelter through adoption or transfer to a sanctuary as quickly as possible. The Austin Animal Center keeps track of each animal that comes through their doors and keeps a record of the animal’s outcome; that is whether they were adopted, transferred to a partner shelter or sanctuary or one of many other outcomes. 
If the shelter could predict an animal’s outcome based on the animal’s characteristics, they could be much more efficient with having animals leave the shelter by knowing which animals they should be able to adopt out and which animals they should transfer to other shelters or sanctuaries. This added efficiency would result in the shelter’s ability to take in more animals which in return would lower the average cost per animal. This lab examines the Austin Animal Center animal outcome data set to specifically look at cats and the outcome of each cat and attempts to build an accurate model of predicting the outcome. If accurate, this model could serve the Austin Animal Center as well as other cities that are looking at issuing a no-kill ordinance for their shelters. #### 1.2 Data Preparation --- ``` # dependencies import pandas as pd import numpy as np import missingno as msno import matplotlib.pyplot as plt import re from sklearn.model_selection import train_test_split from textwrap import wrap from sklearn.preprocessing import StandardScaler import warnings warnings.filterwarnings("ignore") import math %matplotlib inline # import data shelter_outcomes = pd.read_csv("C:/Users/w47518657u/OneDrive/SMU Spring 2019/CSE 7318/Labs/Lab Three/aac_shelter_outcomes.csv") # filter animal type for just cats cats = shelter_outcomes[shelter_outcomes['animal_type'] == 'Cat'] #print(cats.head()) # remove age_upon_outcome and recalculate to standard units (days) age = cats.loc[:,['datetime', 'date_of_birth']] # convert to datetime age.loc[:,'datetime'] = pd.to_datetime(age['datetime']) age.loc[:,'date_of_birth'] = pd.to_datetime(age['date_of_birth']) # calculate cat age in days cats.loc[:,'age'] = (age.loc[:,'datetime'] - age.loc[:,'date_of_birth']).dt.days # get dob info cats['dob_month'] = age.loc[:, 'date_of_birth'].dt.month cats['dob_day'] = age.loc[:, 'date_of_birth'].dt.day cats['dob_dayofweek'] = age.loc[:, 'date_of_birth'].dt.dayofweek # get month from datetime 
cats['month'] = age.loc[:,'datetime'].dt.month # get day of month cats['day'] = age.loc[:,'datetime'].dt.day # get day of week cats['dayofweek'] = age.loc[:, 'datetime'].dt.dayofweek # get hour of day cats['hour'] = age.loc[:, 'datetime'].dt.hour # get quarter cats['quarter'] = age.loc[:, 'datetime'].dt.quarter # clean up breed attribute # get breed attribute for processing # convert to lowercase, remove mix and strip whitespace # remove space in 'medium hair' to match 'longhair' and 'shorthair' # split on either space or '/' breed = cats.loc[:, 'breed'].str.lower().str.replace('mix', '').str.replace('medium hair', 'mediumhair').str.strip().str.split('/', expand=True) cats['breed'] = breed[0] cats['breed1'] = breed[1] # clean up color attribute # convert to lowercase # strip spaces # split on '/' color = cats.loc[:, 'color'].str.lower().str.strip().str.split('/', expand=True) cats['color'] = color[0] cats['color1'] = color[1] # clean up sex_upon_outcome sex = cats['sex_upon_outcome'].str.lower().str.strip().str.split(' ', expand=True) sex[0].replace('spayed', True, inplace=True) sex[0].replace('neutered', True, inplace=True) sex[0].replace('intact', False, inplace=True) sex[1].replace(np.nan, 'unknown', inplace=True) cats['spayed_neutered'] = sex[0] cats['sex'] = sex[1] # add in domesticated attribute cats['domestic'] = np.where(cats['breed'].str.contains('domestic'), 1, 0) # combine outcome and outcome subtype into a single attribute cats['outcome_subtype'] = cats['outcome_subtype'].str.lower().str.replace(' ', '-').fillna('unknown') cats['outcome_type'] = cats['outcome_type'].str.lower().str.replace(' ', '-').fillna('unknown') cats['outcome'] = cats['outcome_type'] + '_' + cats['outcome_subtype'] # drop unnecessary columns cats.drop(columns=['animal_id', 'name', 'animal_type', 'age_upon_outcome', 'date_of_birth', 'datetime', 'monthyear', 'sex_upon_outcome', 'outcome_subtype', 'outcome_type'], inplace=True) #print(cats['outcome'].value_counts()) cats.head() ``` 
Not all information included in this data set is necessary to the targeted prediction of outcome type. Some animals that were adopted were returned to the shelter as runaways before being returned to their owners. These instances have no impact on trying to predict outcome and will be removed from the data set.

#### 1.3 Data Description
---
```
print("Default datatypes of shelter cat outcomes:\n")
print(cats.dtypes)
print("\nBelow is a description of the attributes in the cats dataframe:\n")
```
Attribute | Description | Scale | Datatype
--- | --- | --- | ---
Breed | Primary breed of the cat | Nominal | Object
Color | Primary color of the cat | Nominal | Object
Age | Age of cat in days | Ordinal | int64
DOB_Month | Date of birth month (1-12) for the cat | Ordinal | int64
DOB_Day | Date of birth day (1-31) for the cat | Ordinal | int64
DOB_DayOfWeek | Date of birth day of week (1-7) for the cat | Ordinal | int64
Month | Month (1-12) of the outcome | Ordinal | int64
Day | Day of month (1-31) of the outcome | Ordinal | int64
DayOfWeek | Day of week (1-7) of the outcome | Ordinal | int64
Hour | Hour during the day (0-23) of the outcome | Ordinal | int64
Quarter | Quarter during the year (1-4) of the outcome | Ordinal | int64
Breed1 | Secondary breed of the cat | Nominal | Object
Color1 | Secondary color of the cat | Nominal | Object
Spayed_Neutered | Is the cat spayed/neutered or not | Nominal | bool
Sex | Sex of the cat | Nominal | bool
Domestic | Is the cat domesticated | Nominal | bool
Outcome | The outcome of the animal | Nominal | Object
```
print('Below is a listing of the target classes and their distributions:')
cats['outcome'].value_counts()
```
Each class has a different count; a low count per class decreases the accuracy and the efficiency of the logistic regression method used, so all classes with a low count were not taken into account when training the classifier.
```
# examine missing data
msno.matrix(cats)
```
Since the missing data shows that breed1 will
have little impact on the prediction since there are only two records that have a value, it will be removed from the data set. The missing data in color1 should be handled when one hot encoding is performed on it. #### 1.4 One hot encoding of data and splitting into training and testing sets ``` cats.drop(columns=['breed1'], inplace=True) # Breed, Color, Color1, Spayed_Netured and Sex attributes need to be one hot encoded cats_ohe = pd.get_dummies(cats, columns=['breed', 'color', 'color1', 'spayed_neutered', 'sex']) cats_ohe.head() out_t={'euthanasia_suffering' : 0, 'died_in-kennel' : 0, 'return-to-owner_unknown' : 0, 'transfer_partner' : 1, 'euthanasia_at-vet' : 2, 'adoption_foster' : 3, 'died_in-foster' : 0, 'transfer_scrp' : 4, 'euthanasia_medical' : 0, 'transfer_snr' : 0, 'died_enroute' : 0, 'rto-adopt_unknown' : 0, 'missing_in-foster' : 0, 'adoption_offsite' : 0, 'adoption_unknown' :5,'euthanasia_rabies-risk' : 0, 'unknown_unknown' : 0, 'adoption_barn' : 0, 'died_unknown' : 0, 'died_in-surgery' : 0, 'euthanasia_aggressive' : 0, 'euthanasia_unknown' : 0, 'missing_unknown' : 0, 'missing_in-kennel' : 0, 'missing_possible-theft' : 0, 'died_at-vet' : 0, 'disposal_unknown' : 0, 'euthanasia_underage' : 0, 'transfer_barn' : 0} #output is converted from string to catogries 0 to 5 represent each output # separate outcome from data outcome = cats_ohe['outcome'] cats_ohe.drop(columns=['outcome']) print(cats_ohe.head()) # split the data X_train, X_test, y_train, y_test = train_test_split(cats_ohe, outcome, test_size=0.2, random_state=0) X_train.drop(columns=['outcome'], inplace=True) y_train = [out_t[item] for item in y_train] #print(X_train.shape, X_test.shape, y_train.shape, y_test.shape) ``` One hot encoding is used for the cat breed, color, spayed/neutered and sex attributes to convert the categorical variables into a form that should play nicer with logistic regression. 
Although spayed_neutered and sex are essentially boolean attributes, they had to be converted because there were many unknown values in each. The data is split with an 80/20 train/test ratio using the train_test_split function in the cross validation functions in Skikit Learn's cross validation package. Although this was an easy method to split the data into training and test sets, it was not a good way to split the data for this dataset. As shown above, the target distribution is skewed and some targets have very few instances. It would have been better to select an 80/20 ratio for each class. ### **2. Modeling** ------ ``` import numpy as np class BinaryLogisticRegressionBase: # private: def __init__(self, eta, iterations, C,reg): self.eta = eta self.iters = iterations self.C=C self.reg=reg # internally we will store the weights as self.w_ to keep with sklearn conventions def __str__(self): return 'Base Binary Logistic Regression Object, Not Trainable' # convenience, private and static: @staticmethod def _sigmoid(theta): return 1/(1+np.exp(-theta)) @staticmethod def _add_bias(X): return np.hstack((np.ones((X.shape[0],1)),X)) # add bias term # public: def predict_proba(self,X,add_bias=True): # add bias term if requested Xb = self._add_bias(X) if add_bias else X return self._sigmoid(Xb @ self.w_) # return the probability y=1 def predict(self,X): return (self.predict_proba(X)>0.5) #return the actual prediction # inherit from base class class BinaryLogisticRegression(BinaryLogisticRegressionBase): #private: def __str__(self): if(hasattr(self,'w_')): return 'Binary Logistic Regression Object with coefficients:\n'+ str(self.w_) # is we have trained the object else: return 'Untrained Binary Logistic Regression Object' def _get_gradient(self,X,y): # programming \sum_i (yi-g(xi))xi gradient = np.zeros(self.w_.shape) # set gradient to zero for (xi,yi) in zip(X,y): # the actual update inside of sum gradi = (yi - self.predict_proba(xi,add_bias=False))*xi # reshape to be 
column vector and add to gradient gradient += gradi.reshape(self.w_.shape) return gradient/float(len(y)) # public: def fit(self, X, y): Xb = self._add_bias(X) # add bias term num_samples, num_features = Xb.shape self.w_ = np.zeros((num_features,1)) # init weight vector to zeros # for as many as the max iterations for _ in range(self.iters): gradient = self._get_gradient(Xb,y) self.w_ += gradient*self.eta # multiply by learning rate import numpy as np from scipy.special import expit class VectorBinaryLogisticRegression(BinaryLogisticRegression): # inherit from our previous class to get same functionality @staticmethod def _sigmoid(theta): # increase stability, redefine sigmoid operation return expit(theta) #1/(1+np.exp(-theta)) # but overwrite the gradient calculation def _get_gradient(self,X,y): ydiff = y-self.predict_proba(X,add_bias=False).ravel() # get y difference gradient = np.mean(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through gradient = gradient.reshape(self.w_.shape) if self.reg=='L2': gradient[1:] += -2 * self.w_[1:] * self.C if self.reg=='L1': gradient[1:] += -self.C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w) if self.reg=='L1L2': gradient[1:] += -self.C-2 * self.w_[1:] * self.C if self.reg=='none': gradient[1:] return gradient from scipy.optimize import minimize_scalar import copy class LineSearchLogisticRegression(VectorBinaryLogisticRegression): # define custom line search for problem @staticmethod def objective_function(eta,X,y,w,grad,C=0.001): wnew = w - grad*eta g = expit(X @ wnew) return -np.sum(np.log(g[y==1]))-np.sum(np.log(1-g[y==0])) + C*sum(wnew**2) def fit(self, X, y): Xb = self._add_bias(X) # add bias term num_samples, num_features = Xb.shape self.w_ = np.zeros((num_features,1)) # init weight vector to zeros # for as many as the max iterations for _ in range(self.iters): gradient = -self._get_gradient(Xb,y) # minimization inopposite direction # do line search in gradient direction, 
using scipy function opts = {'maxiter':self.iters/50} # unclear exactly what this should be res = minimize_scalar(self.objective_function, # objective function to optimize bounds=(self.eta/1000,self.eta*10), #bounds to optimize args=(Xb,y,self.w_,gradient,0.001), # additional argument for objective function method='bounded', # bounded optimization for speed options=opts) # set max iterations eta = res.x # get optimal learning rate self.w_ -= gradient*eta # set new function values # subtract to minimize class StochasticLogisticRegression(BinaryLogisticRegression): # stochastic gradient calculation def _get_gradient(self,X,y): idx = int(np.random.rand()*len(y)) # grab random instance ydiff = y[idx]-self.predict_proba(X[idx],add_bias=False) # get y difference (now scalar) gradient = X[idx] * ydiff[:,np.newaxis] # make ydiff a column vector and multiply through gradient = gradient.reshape(self.w_.shape) if self.reg=='L2': gradient[1:] += -2 * self.w_[1:] * self.C if self.reg=='L1': gradient[1:] += -self.C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w) if self.reg=='L1L2': gradient[1:] += -self.C-(2 * self.w_[1:] * self.C) if self.reg=='none': gradient[1:] return gradient from scipy.optimize import fmin_bfgs class BFGSBinaryLogisticRegression(BinaryLogisticRegression): @staticmethod def objective_function(w,X,y,C,reg): g = expit(X @ w) return -np.sum(np.log(g[y==1]))-np.sum(np.log(1-g[y==0])) + C*sum(w**2) #-np.sum(y*np.log(g)+(1-y)*np.log(1-g)) @staticmethod def objective_gradient(w,X,y,C,reg): g = expit(X @ w) ydiff = y-g # get y difference gradient = np.mean(X * ydiff[:,np.newaxis], axis=0) gradient = gradient.reshape(w.shape) if reg=='L2': gradient[1:] += -2 * w[1:] * C if reg=='L1': gradient[1:] += - C # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w) if reg=='L1L2': gradient[1:] +=(-2 * w[1:] * C) - C if reg=='none': gradient[1:] return -gradient # just overwrite fit function def fit(self, X, y): Xb = self._add_bias(X) # add bias 
term num_samples, num_features = Xb.shape self.w_ = fmin_bfgs(self.objective_function, # what to optimize np.zeros((num_features,1)), # starting point fprime=self.objective_gradient, # gradient function args=(Xb,y,self.C,self.reg), # extra args for gradient and objective function gtol=1e-03, # stopping criteria for gradient, |v_k| maxiter=self.iters, # stopping criteria iterations disp=False) self.w_ = self.w_.reshape((num_features,1)) from numpy.linalg import pinv class HessianBinaryLogisticRegression(BinaryLogisticRegression): # just overwrite gradient function def _get_gradient(self,X,y): g = self.predict_proba(X,add_bias=False).ravel() # get sigmoid value for all classes hessian = X.T @ np.diag(g*(1-g)) @ X - 2 * self.C # calculate the hessian ydiff = y-g # get y difference gradient = np.sum(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through gradient = gradient.reshape(self.w_.shape) if self.reg=='L2': gradient[1:] += -2 * self.w_[1:] * self.C if self.reg=='L1': # the deravtiv of C*abs(W), which should be dx(abs(w))= w/abs(w) gradient[1:] += -self.C if self.reg=='L1L2': gradient[1:] += -self.C-2 * self.w_[1:] * self.C if self.reg=='none': gradient[1:] return pinv(hessian) @ gradient from scipy.optimize import minimize_scalar import copy class LogisticRegression: def __init__(self, eta, iterations,solver='leaner', C=0.001,reg='L2'): self.eta = eta self.iters = iterations self.slv = solver self.C=C self.reg=reg # internally we will store the weights as self.w_ to keep with sklearn conventions def __str__(self): if(hasattr(self,'w_')): return 'MultiClass Logistic Regression Object with coefficients:\n'+ str(self.w_) # is we have trained the object else: return 'Untrained MultiClass Logistic Regression Object' def fit(self,X,y): num_samples, num_features = X.shape self.unique_ = np.sort(np.unique(y)) # get each unique class value num_unique_classes = len(self.unique_) self.classifiers_ = [] # will fill this array with binary 
classifiers
        # One-vs-rest scheme: fit one binary classifier per unique class
        # label, using the optimizer named in self.slv.
        for i,yval in enumerate(self.unique_): # for each unique value
            y_binary = (y==yval) # create a binary problem
            # train the binary classifier for this class
            if self.slv=='stochastic':
                slr = StochasticLogisticRegression(self.eta,self.iters,self.C,self.reg)
                slr.fit(X,y_binary)
                self.classifiers_.append(slr)
            if self.slv=='steepest':
                mls=LineSearchLogisticRegression(self.eta,self.iters,self.C,self.reg)
                mls.fit(X,y_binary)
                self.classifiers_.append(mls)
            if self.slv=='leaner':
                # BUG(review): the inherited __init__ signature is
                # (eta, iterations, C, reg), but only three arguments are
                # passed here, so self.reg lands in the C slot and reg is
                # left missing -- this branch should raise a TypeError.
                # Confirm and pass self.C as the third argument.
                blr = VectorBinaryLogisticRegression(self.eta,self.iters,self.reg)
                blr.fit(X,y_binary)
                self.classifiers_.append(blr)
            if self.slv=='BFGS':
                bfgslr = BFGSBinaryLogisticRegression(self.eta,self.iters,self.C,self.reg)
                bfgslr.fit(X,y_binary)
                self.classifiers_.append(bfgslr)
            if self.slv=='newton':
                newt = HessianBinaryLogisticRegression(self.eta,self.iters,self.C,self.reg)
                newt.fit(X,y_binary)
                self.classifiers_.append(newt)
            # add the trained classifier to the list
        # save all the weights into one matrix, separate column for each class
        self.w_ = np.hstack([x.w_ for x in self.classifiers_]).T

    def predict_proba(self,X):
        """Return an (n_samples, n_classes) matrix of one-vs-rest
        probabilities, with columns ordered like self.unique_."""
        probs = []
        for blr in self.classifiers_:
            probs.append(blr.predict_proba(X)) # get probability for each classifier
        return np.hstack(probs) # make into single matrix

    def predict(self,X):
        """Return the column index of the most probable class.

        Note: this is an index into self.unique_, not necessarily the
        original label value (they coincide here because y was mapped to
        0..5 before fitting).
        """
        return np.argmax(self.predict_proba(X),axis=1) # take argmax along row
```
We chose some initial parameters to try on based on professor's suggestions. After several trials, we decided to use these parameters because they did not require much time consumption and they provided the best results among trials. Using trial and error to obtain optimized parameters for classification does not seem to be "data snooping" of the negative kind in the typical sense. Data snooping is the "misuse of data analysis to find patterns in data that can be presented as statistically significant when in fact there is no real underlying effect."
(https://en.wikipedia.org/wiki/Data_dredging) In our case, we are pursuing optimal parameters that enable the highest accuracy classification possible. No matter what methods we use to obtain a model that accurately predicts classes for training data, it does not matter if the same is not also true for new, untrained data. If our classification algorithm is negatively impacted by our tweaking of parameters for new instances of data, then the tweaking of these parameters would be unjustified. However, since our goal is only to build the best classification tool possible, any means that improve that capability would be permissible, including adjusting parameters such as the regularization term - assuming they contribute to an increase in overall classification performance for untrained data. ``` %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.01,1000,'stochastic',1,'L1') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) stoc1=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) #y_target_ar=y_train_b.transfer_partner.values #y_target_ar=y_train_b_v x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.01,1000,'stochastic',.0001,'L1L2') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) stoc2=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.1,50,'steepest',10,'L2') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) 
steep=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.1,50,'steepest',.0001,'L2') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) steep1=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.1,10,'BFGS',.0001,'L2') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) BFGS1=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.1,10,'BFGS',10,'L2') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) BFGS2=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.1,3,'newton',.0001,'L1') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) newton1=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) %%time from sklearn.metrics import accuracy_score x_train_ar=X_train.values y_target_ar=np.asarray(y_train) x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar) lr = LogisticRegression(.01,3,'newton',.0001,'L1') lr.fit(x_train_ar,y_target_ar) print(lr) yhat = lr.predict(x_train_ar) 
newton2=accuracy_score(y_target_ar,yhat) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) import numpy as np import matplotlib.pyplot as plt from textwrap import wrap para=['1-ata=.01,iter=1000, type= stochastic, C=.0001,L1L2 ','2-ata=.1,iter=1000, type= stochastic, C=.01,L1L2 ','ata=.1,iter=50, type= steepest, C=10,L2 ','ata=.1,iter=50, type= steepest, C=.0001,L2 ','ata=.1,iter=10,type=BFGS,C=.001,L2','ata=.1,iter=10,type=BFGS,C=10,L2','ata=.01, iter=3, type=newton, C=.0001, L2','ata=.01,iter=10,type=newton,C=.0001,L1' ] acc=[stoc1,stoc2,steep,steep1,BFGS1, BFGS2,newton1,newton2] plt.subplots(figsize=(17, 7)) x=[0,1,2,3,4,5,6,7] z=np.polyfit(x, acc, 1) labels = [ '\n'.join(wrap(l, 18)) for l in para ] labels = [ '\n'.join(wrap(l, 18)) for l in para ] plt.xlabel('Optimization', fontweight='bold') plt.ylabel('Accuracy', fontweight='bold') p = np.poly1d(z) plt.bar(labels,acc) ``` Since the time consumption for newton technique is long (over 2 minutes) and newton technique is not the best optimization technique for logistic regression, we decided not to include it in the following graph. 
```
import numpy as np
import matplotlib.pyplot as plt
from textwrap import wrap
# set width of bar
barWidth = 0.45
# set height of bar
bars1 = [stoc1,stoc2,steep,steep1,BFGS1, BFGS2]
bars2 = [.013,.0129,.27,.21,.05,.32]
# Set position of bar on X axis
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
#r3 = [x + barWidth for x in r2]
plt.subplots(figsize=(17, 7))
# Make the plot
plt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='accuracy')
plt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='time (m)')
plt.plot(x,p(x),color='black')
# Add xticks on the middle of the group bars
plt.xlabel('Optimization', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(bars1))], labels)
# Create legend & Show graphic
plt.legend()
plt.show()
```
### Observations:

1- The figures above show the accuracy and time consumption for the various optimization techniques; BFGS with 10 iterations and L2 regularization has the highest accuracy with a low delay.

2- Stochastic gradient descent has different accuracy values for different runs with the same parameters, which means the logistic regression got stuck in a local minimum depending on the initial value of the gradient and the direction.

3- Steepest descent and most of the other optimization techniques returned a better accuracy for a lower C, which means stronger regularization prevents overfitting the data and in return gives better performance.

4- BFGS did better than the Hessian (Newton) method in terms of accuracy and time consumption, which means that the Hessian approach is highly computational, especially in calculating the second-order derivatives and inverting the Hessian matrix.

5- The output classification had a count per class, shown in the figure above in the data understanding section; all the low-count outcomes have been given the same class because there is not enough data to train a classifier for each.

6- The data is preprocessed such that its distribution will have a mean value of 0 and a standard deviation of 1.
Given the distribution of the data, each value in the dataset will have the sample mean value subtracted, and then divided by the standard deviation of the whole dataset. Based on the observations above, the best logistic regression optimization technique is BFGS. So we will use BFGS to compare with LBFGS from sklearn.
```
%%time
from sklearn.linear_model import LogisticRegression
lr_sk = LogisticRegression(solver='lbfgs',n_jobs=2,C=.0001, max_iter=10)
x_train_ar=X_train.values
y_target_ar=np.asarray(y_train)
x_train_ar = StandardScaler().fit(x_train_ar).transform(x_train_ar)
lr_sk.fit(x_train_ar,y_target_ar)
print(np.hstack((lr_sk.intercept_[:,np.newaxis],lr_sk.coef_)))
yhat = lr_sk.predict(x_train_ar)
newtsk=accuracy_score(y_target_ar,yhat)
print('Accuracy of: ',accuracy_score(y_target_ar,yhat))

para=['SK learn, iter=10,type=lbfgs,C=.0001, L2','ata=.1,iter=10,type=BFGS,C=.0001,L2' ]
x=[0,1]
acc=[ newtsk,BFGS1]
time=[1.5, 1.6]
plt.subplots(figsize=(10, 7))
z=np.polyfit(x, acc, 1)
labels = [ '\n'.join(wrap(l, 18)) for l in para ]
plt.bar(labels,acc)
#ax.bar(labels,time,width=0.2,color='g')
plt.xlabel('Optimization', fontweight='bold')
plt.ylabel('Accuracy', fontweight='bold')
p = np.poly1d(z)
plt.plot(x,1.01*p(x),color='black')
```
The BFGS and steepest descent optimization techniques resulted in better accuracy than scikit-learn's LBFGS for the same number of iterations, as shown by the black trend line. In the next plot, time will be introduced.
```
import numpy as np
import matplotlib.pyplot as plt
# set width of bar
barWidth = 0.25
# set height of bar
bars1 = [newtsk, BFGS1]
bars2 = [.23,.05]
#bars3 = [29, 3, 24, 25, 17]
# Set position of bar on X axis
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
#r3 = [x + barWidth for x in r2]
plt.subplots(figsize=(10, 7))
# Make the plot
plt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='accuracy')
plt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='time (m)')
plt.plot(x,p(x),color='black')
# Add xticks on the middle of the group bars
plt.xlabel('Optimization', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(bars1))], labels)
# Create legend & Show graphic
plt.legend()
plt.show()
```
### **3. Deployment**
------
Among all the techniques we tested above, BFGS is the best optimization technique for logistic regression. Comparing BFGS to LBFGS from scikit-learn, we see that BFGS has higher accuracy with lower time consumption (2.99 s vs. 13.9 s). In our opinion, the best method to use is the BFGS method, since it produces the highest accuracy with low time consumption.

Type Markdown and LaTeX: 𝛼2

### **4.
Optimization Using Mean Squared Error**
------
```
%%time
# from last time, our logistic regression algorithm is given by (including everything we previously had):
class BinaryLogisticRegressionForMSE:
    # NOTE(review): despite the "ForMSE" name, _get_gradient below is the
    # usual cross-entropy (logistic) gradient; only the line-search subclass
    # defined after this cell actually optimizes a mean-squared-error
    # objective.
    def __init__(self, eta, iterations=1, C=0.001):
        self.eta = eta          # learning rate
        self.iters = iterations # number of gradient steps
        self.C = C              # L2 regularization strength
    # internally we will store the weights as self.w_ to keep with sklearn conventions
    def __str__(self):
        if(hasattr(self,'w_')):
            return 'Binary Logistic Regression Object with coefficients:\n'+ str(self.w_) # is we have trained the object
        else:
            return 'Untrained Binary Logistic Regression Object'
    # convenience, private:
    @staticmethod
    def _add_bias(X):
        return np.hstack((np.ones((X.shape[0],1)),X)) # add bias term
    @staticmethod
    def _sigmoid(theta):
        # increase stability, redefine sigmoid operation
        return expit(theta) #1/(1+np.exp(-theta))
    # vectorized gradient calculation with regularization using L2 Norm
    def _get_gradient(self,X,y):
        ydiff = y-self.predict_proba(X,add_bias=False).ravel() # get y difference
        gradient = np.mean(X * ydiff[:,np.newaxis], axis=0) # make ydiff a column vector and multiply through
        gradient = gradient.reshape(self.w_.shape)
        gradient[1:] += -2 * self.w_[1:] * self.C # L2 penalty on all weights except the bias
        return gradient
    # public:
    def predict_proba(self,X,add_bias=True):
        # add bias term if requested
        Xb = self._add_bias(X) if add_bias else X
        return self._sigmoid(Xb @ self.w_) # return the probability y=1
    def predict(self,X):
        return (self.predict_proba(X)>0.5) #return the actual prediction
    def fit(self, X, y):
        """Gradient-ascent fit: w_ starts at zero and takes self.iters steps."""
        Xb = self._add_bias(X) # add bias term
        num_samples, num_features = Xb.shape
        self.w_ = np.zeros((num_features,1)) # init weight vector to zeros
        # for as many as the max iterations
        for _ in range(self.iters):
            gradient = self._get_gradient(Xb,y)
            self.w_ += gradient*self.eta # multiply by learning rate

# NOTE(review): y_train holds multi-class labels (0-5, see the out_t mapping
# earlier), but this classifier is binary and thresholds a single
# probability -- presumably this cell is only a timing/baseline run; confirm.
blr = BinaryLogisticRegressionForMSE(eta=0.1,iterations=500,C=0.001)
x_train_ar=X_train.values
y_target_ar=np.asarray(y_train)
blr.fit(x_train_ar,y_target_ar)
print(blr)
yhat =
blr.predict(x_train_ar) print('Accuracy of: ',accuracy_score(y_target_ar,yhat)) class LineSearchLogisticRegressionWithMSE(BinaryLogisticRegressionForMSE): # define custom line search for problem @staticmethod def objective_function(eta,X,y,w,grad,C=0.001): wnew = w - grad * eta # subtract grad*eta.. from class # 02.21.19 - 10.m4v timestamp: 23:00 yhat = (1/(1+np.exp(-X @ wnew))) >0.5 return np.mean((y-yhat)**2) + C*np.mean(wnew**2) # add regularization term, don't subtract.. from class # 02.21.19 - 10.m4v timestamp: 17:40 def fit(self, X, y): Xb = self._add_bias(X) # add bias term num_samples, num_features = Xb.shape self.w_ = np.zeros((num_features,1)) # init weight vector to zeros # for as many as the max iterations for _ in range(self.iters): gradient = -self._get_gradient(Xb,y) # minimization inopposite direction # do line search in gradient direction, using scipy function opts = {'maxiter':self.iters} # unclear exactly what this should be res = minimize_scalar(self.objective_function, # objective function to optimize bounds=(self.eta/1000,self.eta*10), #bounds to optimize args=(Xb,y,self.w_,gradient,0.001), # additional argument for objective function method='bounded', # bounded optimization for speed options=opts) # set max iterations eta = res.x # get optimal learning rate self.w_ -= gradient*eta # set new function values # subtract to minimize ``` ### **5. References** ------ 1. Austin Animal Center Shelter Outcomes (Kaggle) https://www.kaggle.com/aaronschlegel/austin-animal-center-shelter-outcomes-and/version/1#aac_shelter_outcomes.csv 2. Austin Animal Center. (n.d.). Retrieved March 10, 2019, from http://www.austintexas.gov/department/aac 3. Hawes, Sloane; Ikizler, Devrim; Loughney, Katy; Tedeschi, Philip; and Morris, Kevin, "Legislating Components of a Humane City: The Economic Impacts of the Austin, Texas "No Kill" Resolution (City of Austin Resolution 20091105-040)" (2017). Animal Law and Legislation. 1. https://animalstudiesrepository.org/anilleg/1
github_jupyter
# Rock-paper-scissors against a webcam gesture classifier (Teachable Machine
# keras model), consolidated from the notebook's experimental cells.
#
# Bugs fixed relative to the notebook:
#   * ``playerScore = score[0]`` / ``pcScore = score[1]`` stored the VALUES
#     0 and 0, so ``score[playerScore]`` and ``score[pcScore]`` both hit
#     index 0 — player is now always score[0], computer score[1].
#   * ``score[...] +1`` were no-op expressions, and ``score[0] =+ 1`` /
#     ``score[1] =- 1`` assigned +1/-1 instead of incrementing (``+=``).
#   * ``randomGuess`` / ``userPrediction`` assigned local variables and
#     returned nothing, losing the guesses — they now return them.
#   * the stray ``else: prediction[0][3] > 0.5`` expression is replaced by
#     a plain else-branch.
#   * a draw printed "You Lose!" — ties are now reported and leave the
#     score unchanged.
#   * scoring ran once per camera frame; it now runs once per round.
# cv2/keras are imported lazily so the pure game logic can run (and be
# tested) without a camera or the model file.

import time
import random

# score[0] = player, score[1] = computer
score = [0, 0]
PLAYER_IDX = 0
PC_IDX = 1

# module-level defaults, as in the notebook cells
playerGuess = "Blank"
pcGuess = "Blank"
playing = True

# winning (player, computer) combinations
_WINS = {("Rock", "Scissors"), ("Scissors", "Paper"), ("Paper", "Rock")}


def countdown(seconds):
    """Print a countdown, one number per second."""
    while seconds:
        print(seconds)
        time.sleep(1)
        seconds -= 1


def randomGuess():
    """Return (and announce) the computer's random choice.

    Keeps the notebook's (slightly biased) thresholds:
    Rock 30%, Scissors 30%, Paper 40%.
    """
    number = random.random()
    if number < 0.3:
        pc = "Rock"
    elif number > 0.6:
        pc = "Paper"
    else:
        pc = "Scissors"
    print("I played ", pc)
    return pc


def userPrediction(pred=None):
    """Map the model's softmax output to a gesture name and return it.

    ``pred`` is the (1, 4) array from ``model.predict``; class order is
    assumed to be [Rock, Paper, Scissors, Nothing] — TODO confirm against
    the Teachable Machine labels file.
    """
    if pred is None:
        pred = prediction   # fall back to the notebook-global, as before
    if pred[0][0] > 0.5:
        guess = "Rock"
    elif pred[0][1] > 0.5:
        guess = "Paper"
    elif pred[0][2] > 0.5:
        guess = "Scissors"
    else:
        # nothing recognised confidently (includes the 'nothing' class)
        guess = "Waiting..."
    print("You played ", guess)
    return guess


def winCondition(player_guess=None, pc_guess=None):
    """Score one round; return 'win', 'lose' or 'tie'.

    Falls back to the module-level guesses when called without arguments
    (the notebook's original behaviour). Keeps the notebook's scoring
    scheme: the round winner gains a point and the loser drops one.
    """
    if player_guess is None:
        player_guess = playerGuess
    if pc_guess is None:
        pc_guess = pcGuess
    if player_guess == pc_guess:
        print("It's a tie!")
        return "tie"
    if (player_guess, pc_guess) in _WINS:
        print("You Win!")
        score[PLAYER_IDX] += 1
        score[PC_IDX] -= 1
        return "win"
    print("You Lose!")
    score[PLAYER_IDX] -= 1
    score[PC_IDX] += 1
    return "lose"


def webcam():
    """Show the webcam feed and classify gestures until 'q' is pressed."""
    import cv2
    import numpy as np
    from keras.models import load_model

    model = load_model('keras_model.h5')
    cap = cv2.VideoCapture(0)
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    try:
        while True:
            ret, frame = cap.read()
            resized = cv2.resize(frame, (224, 224),
                                 interpolation=cv2.INTER_AREA)
            # Teachable Machine models expect inputs scaled to [-1, 1]
            data[0] = (resized.astype(np.float32) / 127.0) - 1
            pred = model.predict(data)
            cv2.imshow('frame', frame)
            userPrediction(pred)
            # Press q to close the window
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # release the capture device and close the window even on error
        cap.release()
        cv2.destroyAllWindows()


def game():
    """Play one round: countdown, read a gesture, score it."""
    import cv2
    import numpy as np
    from keras.models import load_model

    model = load_model('keras_model.h5')
    cap = cv2.VideoCapture(0)
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)

    name = input("Whats your name? ")
    print(name, "vs computer")
    time.sleep(1)
    print("Ready? Ok")
    time.sleep(1)
    countdown(3)
    pc = randomGuess()
    try:
        while True:
            ret, frame = cap.read()
            resized = cv2.resize(frame, (224, 224),
                                 interpolation=cv2.INTER_AREA)
            data[0] = (resized.astype(np.float32) / 127.0) - 1
            pred = model.predict(data)
            cv2.imshow('frame', frame)
            player = userPrediction(pred)
            if player != "Waiting...":
                # score exactly once per round, not once per frame
                winCondition(player, pc)
                print(name, "Score: ", score[PLAYER_IDX])
                print("Computer Score: ", score[PC_IDX])
                break
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    game()
    print("end")
github_jupyter
# Network-intrusion detection on the CSE-CIC-IDS2018 daily CSVs: an
# autoencoder compresses the 78 flow features to 18 dimensions, then a small
# 1-D CNN classifies the encodings as Benign (0) or attack (1).
#
# FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24, so
# ``astype(np.float)`` crashes on modern NumPy — use the builtin ``float``.

import os
import numpy as np
import itertools
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import confusion_matrix


def binarize_labels(labels):
    """Return 0 for 'Benign' flows and 1 for every attack label."""
    return [0 if lab == "Benign" else 1 for lab in labels]


# Day 1 (21-02) and day 2 (15-02); rows 0-1 are repeated header rows.
df15 = pd.read_csv("../Dataset/21-02-2018.csv", low_memory=False)
df15 = df15.drop([0, 1])

df16Aux = pd.read_csv("../Dataset/15-02-2018.csv", low_memory=False)
df16Aux = df16Aux.drop([0, 1])

# Reorder the second day's columns to match the first day's.
listOrd = df15.columns.tolist()
df16 = pd.DataFrame()
for colu in listOrd:
    df16[colu] = df16Aux[colu]
df16Aux = None   # release memory

input_label15 = np.array(df15.loc[:, df15.columns != "Label"]).astype(float)
output_label15 = binarize_labels(np.array(df15["Label"]))

input_label16 = np.array(df16.loc[:, df16.columns != "Label"]).astype(float)
output_label16 = binarize_labels(np.array(df16["Label"]))

# The autoencoder trains on both days combined.
dfAE = pd.concat([df15, df16])
input_labelAE = np.array(dfAE.loc[:, dfAE.columns != "Label"]).astype(float)
output_labelAE = binarize_labels(np.array(dfAE["Label"]))

dfAE = None
df15 = None
df16 = None

# Scale every feature to [0, 1] using statistics of the combined data.
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(input_labelAE)
input_label15 = scaler.transform(input_label15)
input_label16 = scaler.transform(input_label16)
input_labelAE = scaler.transform(input_labelAE)

input_labelAE, output_labelAE = shuffle(input_labelAE, output_labelAE)
input_label15, output_label15 = shuffle(input_label15, output_label15)
input_label16, output_label16 = shuffle(input_label16, output_label16)

# ## AutoEncoder: 78 -> 64 -> 36 -> 18 -> 36 -> 64 -> 78
# (input and target are both the feature matrix: reconstruction task)
inp_train, inp_test, out_train, out_test = train_test_split(
    input_labelAE, input_labelAE, test_size=0.2)

input_model = keras.layers.Input(shape=(78,))
enc = keras.layers.Dense(units=64, activation="relu", use_bias=True)(input_model)
enc = keras.layers.Dense(units=36, activation="relu", use_bias=True)(enc)
enc = keras.layers.Dense(units=18, activation="relu")(enc)
dec = keras.layers.Dense(units=36, activation="relu", use_bias=True)(enc)
dec = keras.layers.Dense(units=64, activation="relu", use_bias=True)(dec)
dec = keras.layers.Dense(units=78, activation="relu", use_bias=True)(dec)

auto_encoder = keras.Model(input_model, dec)
encoder = keras.Model(input_model, enc)

# Stand-alone decoder that reuses the (trained) last three layers.
decoder_input = keras.layers.Input(shape=(18,))
decoder_layer = auto_encoder.layers[-3](decoder_input)
decoder_layer = auto_encoder.layers[-2](decoder_layer)
decoder_layer = auto_encoder.layers[-1](decoder_layer)
decoder = keras.Model(decoder_input, decoder_layer)

auto_encoder.compile(optimizer=keras.optimizers.Adam(learning_rate=0.00025),
                     loss="mean_squared_error", metrics=['accuracy'])
train = auto_encoder.fit(x=np.array(inp_train), y=np.array(out_train),
                         validation_split=0.1, epochs=10, verbose=1,
                         shuffle=True)

# Mean reconstruction error on the held-out 20%.
predict = auto_encoder.predict(inp_test)
losses = keras.losses.mean_squared_error(out_test, predict).numpy()
print(np.mean(losses))

# release the large intermediates before encoding
inp_train = None
out_train = None
input_labelAE = None

# Replace the raw features with their 18-d encodings, shaped as 1-D signals
# for the Conv1D classifier below.
input_label15 = encoder.predict(input_label15).reshape(len(input_label15), 18, 1)
input_label16 = encoder.predict(input_label16).reshape(len(input_label16), 18, 1)

# ## Classificador: train on day 1, evaluate on day 2.
model = keras.Sequential([
    keras.layers.Conv1D(filters=16, input_shape=(18, 1), kernel_size=3,
                        padding="same", activation="relu", use_bias=True),
    keras.layers.MaxPool1D(pool_size=3),
    keras.layers.Conv1D(filters=8, kernel_size=3, padding="same",
                        activation="relu", use_bias=True),
    keras.layers.MaxPool1D(pool_size=3),
    keras.layers.Flatten(),
    keras.layers.Dense(units=2, activation="softmax")
])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.00025),
              loss="sparse_categorical_crossentropy", metrics=['accuracy'])
model.fit(x=np.array(input_label15), y=np.array(output_label15),
          validation_split=0.1, epochs=10, shuffle=True, verbose=1)

res = [np.argmax(resu) for resu in model.predict(input_label16)]
cm = confusion_matrix(
    y_true=np.array(output_label16).reshape(len(output_label16)),
    y_pred=np.array(res))


def plot_confusion_matrix(cm, classes, normaliza=False,
                          title="Confusion matrix", cmap=plt.cm.Blues):
    """Render a confusion matrix with per-cell labels.

    ``normaliza`` (sic — name kept for call compatibility) normalizes each
    row to fractions before printing/plotting.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normaliza:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print("Confusion matrix, without normalization")
    print(cm)

    # colour each cell label for contrast against the heat map
    thresh = cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j], horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')


labels = ["Benign", "Dos"]
plot_confusion_matrix(cm=cm, classes=labels, title="Dos IDS")

# ROC / AUC of the hard predictions on day 2.
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score

output_label16 = np.array(output_label16).reshape(len(output_label16))
res = np.array(res)
fpr, tpr, _ = roc_curve(output_label16, res)
auc = roc_auc_score(output_label16, res)
plt.plot(fpr, tpr, label="auc=" + str(auc))
plt.legend(loc=4)
plt.show()
github_jupyter
Taken from fastai NLP "8-translation-transformer" FastText embeddings: https://fasttext.cc/docs/en/crawl-vectors.html ``` from fastai2.text.all import * from fastai2.callback.all import * from fastai2.basics import * import seaborn as sns from einops import rearrange import gc import csv path = Path('../data/irish/crosslang') path.ls(), path ``` ### Load saved dataset ``` df=pd.read_csv(path/'paracrawl_cleaned_en-ga.csv') print(len(df)) df.head() sns.distplot(df['ga_len'].values) print(f'Median length is: {np.median(df["ga_len"])}') import seaborn as sns sns.distplot(df['en_len'].values) print(f'Median length is: {np.median(df["en_len"])}') ``` ### Pre-processing **Remove long texts to make things easier** ``` # Word count 90th percentile np.percentile([o for o in df.en_len.values], 90), np.percentile([o for o in df.ga_len.values], 90) print(f'Removing {len(df.query("en_len > 60"))} EN samples where len was > 60') print(len(df)) df=df[~df.index.isin(df.query("en_len > 60").index)] print(len(df)) print(f'Removing {len(df.query("ga_len > 60"))} FR samples where len was > 60') print(len(df)) df=df[~df.index.isin(df.query("ga_len > 60").index)] print(len(df)) sns.distplot(df['en_len'].values), np.median(df['en_len']) ``` **Lowercase everything** ``` df['en'] = df['en'].apply(lambda x:x.lower()) df['ga'] = df['ga'].apply(lambda x:x.lower()) ``` Rules used as part of tokenization ``` proc_rules=defaults.text_proc_rules[:-1] + [partial(lowercase, add_eos=True)] proc_rules ``` ### Get Dataloaders Load vocab to speed up data loading ``` splits = ColSplitter()(df) tfms = [[Tokenizer.from_df(text_cols='en' , rules=proc_rules), attrgetter("text"), Numericalize(max_vocab=20000)], [Tokenizer.from_df(text_cols='ga', lang='ga', rules=proc_rules), attrgetter("text"), Numericalize(max_vocab=20000)]] dl = partial(SortedDL, shuffle=True) dsets = Datasets(df, tfms, splits=splits, dl_type=dl) # en_vocab=[] # ga_vocab=[] # with open('paracrawl_vocab_en.csv', newline='') as csvfile: # 
v_reader = csv.reader(csvfile, delimiter=',') # for row in v_reader: # en_vocab.append(row[0]) # with open('paracrawl_vocab_ga.csv', newline='') as csvfile: # v_reader = csv.reader(csvfile, delimiter=',') # for row in v_reader: # ga_vocab.append(row[0]) #len(en_vocab), len(ga_vocab), en_vocab[:10], ga_vocab[:10] len(dsets), splits, len(dsets[2][0]), len(dsets[2][1]), dsets[2] bs,sl = 48, 108 dls = dsets.dataloaders(bs=bs, seq_len=sl, before_batch=partial(pad_input, pad_fields=[0,1])) dls.show_batch() ``` Save vocab to speed up data loading ``` with open('paracrawl_vocab_en_v0.2_exp1.csv', 'w', newline='') as csvfile: v_writer = csv.writer(csvfile, delimiter=',') for l in dls.vocab[0]: v_writer.writerow([l]) with open('paracrawl_vocab_ga_v0.2_exp1.csv', 'w', newline='') as csvfile: v_writer = csv.writer(csvfile, delimiter=',') for l in dls.vocab[1]: v_writer.writerow([l]) len(dls.train_ds)+len(dls.valid_ds), len(dls.train), len(dls.valid) print(f'Vocab lengths are : {len(dls.vocab[0]), len(dls.vocab[1])}') o=dls.one_batch(); o[0].size(), o[1].size(), o ``` ## Transformer model ``` class PositionalEncoding(nn.Module): "Encode the position with a sinusoid." def __init__(self, d): super().__init__() self.register_buffer('freq', 1 / (10000 ** (torch.arange(0., d, 2.)/d))) def forward(self, pos): inp = torch.ger(pos, self.freq) enc = torch.cat([inp.sin(), inp.cos()], dim=-1) return enc class TransformerEmbedding(nn.Module): "Embedding + positional encoding + dropout" def __init__(self, vocab_sz, emb_sz, inp_p=0.): super().__init__() self.emb_sz = emb_sz self.embed = Embedding(vocab_sz, emb_sz) self.pos_enc = PositionalEncoding(emb_sz) self.drop = nn.Dropout(inp_p) def forward(self, inp): pos = torch.arange(0, inp.size(1), device=inp.device).float() return self.drop(self.embed(inp) * math.sqrt(self.emb_sz) + self.pos_enc(pos)) ``` ## PyTorch Transformer Simple Note: [src/tgt/memory]_mask should be filled with float(‘-inf’) for the masked positions and float(0.0) else. 
# These masks ensure that predictions for position i depend only on the
# unmasked positions j and are applied identically for each sequence in a
# batch. [src/tgt/memory]_key_padding_mask should be a ByteTensor where True
# values are positions that should be masked with float('-inf') and False
# values will be unchanged. This mask ensures that no information will be
# taken from position i if it is masked, and has a separate mask for each
# sequence in a batch.
#   attn mask        -> filled with -inf
#   key_padding mask -> filled with True
#
# ### pt_Transformer

class pt_Transformer(Module):
    """fastai-friendly wrapper around torch.nn.Transformer.

    Inputs/outputs are batch-first; torch.nn.Transformer is sequence-first,
    so embeddings are permuted on the way in and the logits permuted back.
    """

    def __init__(self, src_vcbsz, trg_vcbsz, n_enc_layers=6, n_dec_layers=6,
                 n_heads=8, d_model=256, d_head=32, d_inner=1024, p=0.1,
                 bias=True, scale=True, double_drop=True, pad_idx=1):
        self.pad_idx = pad_idx
        self.enc_tfmr_emb = TransformerEmbedding(src_vcbsz, d_model, p)
        self.dec_tfmr_emb = TransformerEmbedding(trg_vcbsz, d_model, 0.)
        self.final = nn.Linear(d_model, trg_vcbsz)
        # weight tying with the decoder embedding (left disabled in the
        # notebook):
        # self.final.weight = self.dec_tfmr_emb.embed.weight
        self.transformer_model = torch.nn.Transformer(
            d_model=d_model, nhead=n_heads,
            num_encoder_layers=n_enc_layers, num_decoder_layers=n_dec_layers,
            dim_feedforward=d_inner, dropout=p, activation='relu',
            custom_encoder=None, custom_decoder=None)

    def forward(self, src, trg, src_mask=None, tgt_mask=None,
                memory_mask=None, src_key_padding_mask=None,
                tgt_key_padding_mask=None, memory_key_padding_mask=None):
        enc_emb, dec_emb = self.enc_tfmr_emb(src), self.dec_tfmr_emb(trg)
        # NOTE: the mask arguments supplied by the caller are ignored and
        # rebuilt here (kept from the notebook).  FIX: create them on the
        # input's device instead of hard-coding .cuda(), so CPU runs work
        # too (identical behaviour for CUDA inputs).
        src_mask = self.transformer_model.generate_square_subsequent_mask(
            src.size(1)).to(src.device)
        trg_mask = self.transformer_model.generate_square_subsequent_mask(
            trg.size(1)).to(trg.device)
        dec_out = self.transformer_model(
            enc_emb.permute(1, 0, 2), dec_emb.permute(1, 0, 2),
            src_mask=src_mask, tgt_mask=trg_mask, memory_mask=None,
            src_key_padding_mask=None, tgt_key_padding_mask=None,
            memory_key_padding_mask=None)
        out = self.final(dec_out)
        return out.permute(1, 0, 2)

# ### Metric

class CorpusBLEUMetric(Metric):
    """Corpus-level BLEU (up to 4-grams) accumulated over the validation set."""

    def __init__(self, vocab_sz=5000, axis=-1):
        self.pred_len, self.targ_len = 0, 0
        self.corrects, self.counts = [0] * 4, [0] * 4
        self.axis, self.vocab_sz = axis, vocab_sz

    def reset(self):
        self.pred_len, self.targ_len = 0, 0
        self.corrects, self.counts = [0] * 4, [0] * 4

    class NGram():
        """Hashable n-gram wrapper so Counter can count token windows."""

        def __init__(self, ngram, max_n=5000):
            self.ngram, self.max_n = ngram, max_n

        def __eq__(self, other):
            if len(self.ngram) != len(other.ngram):
                return False
            return np.all(np.array(self.ngram) == np.array(other.ngram))

        def __hash__(self):
            # interpret the n-gram as digits of a base-max_n number
            return int(sum([o * self.max_n ** i
                            for i, o in enumerate(self.ngram)]))

    def get_grams(self, x, n, max_n=5000):
        """All n-grams of token sequence x (the tokens themselves for n=1)."""
        return x if n == 1 else [self.NGram(x[i:i + n], max_n=max_n)
                                 for i in range(len(x) - n + 1)]

    def get_correct_ngrams(self, pred, targ, n, max_n=5000):
        """Clipped matching n-gram count and total predicted n-grams."""
        pred_grams = self.get_grams(pred, n, max_n=max_n)
        targ_grams = self.get_grams(targ, n, max_n=max_n)
        pred_cnt, targ_cnt = Counter(pred_grams), Counter(targ_grams)
        return (sum([min(c, targ_cnt[g]) for g, c in pred_cnt.items()]),
                len(pred_grams))

    def accumulate(self, learn):
        last_output = learn.pred.argmax(dim=self.axis)
        last_target = learn.y
        for pred, targ in zip(last_output.cpu().numpy(),
                              last_target.cpu().numpy()):
            self.pred_len += len(pred)
            self.targ_len += len(targ)
            for i in range(4):
                c, t = self.get_correct_ngrams(pred, targ, i + 1,
                                               max_n=self.vocab_sz)
                self.corrects[i] += c
                self.counts[i] += t

    @property
    def value(self):
        # FIX: the original tested `self.counts == 0`, which compares a list
        # to an int and is always False — guard against empty accumulation
        # and zero denominators explicitly.
        if self.pred_len == 0 or 0 in self.counts:
            return None
        precs = [c / t for c, t in zip(self.corrects, self.counts)]
        len_penalty = (exp(1 - self.targ_len / self.pred_len)
                       if self.pred_len < self.targ_len else 1)
        return len_penalty * ((precs[0] * precs[1] * precs[2] * precs[3])
                              ** 0.25)

# ### Callbacks
# #### Present Input and Target in a single tuple

class CombineInputOutputCallback(Callback):
    '''Callback to combine the input and target text into self.xb'''

    def __init__(self):
        pass

    def begin_batch(self):
        # the model's forward takes (src, trg), so pass the target along too
        self.learn.xb = (self.xb[0], self.yb[0])

# Shifting and masking of y, from the Annotated Transformer:
# "We also modify the self-attention sub-layer in the decoder stack to
# prevent positions from attending to subsequent positions. This masking,
# combined with fact that the output embeddings are offset by one position,
# ensures that the predictions for position i can depend only on the known
# outputs at positions less than i."
#
# #### Target shift/offset explained
# (from @bentrevett's "pytorch-seq2seq" tutorials:)
# As we want our model to predict the <eos> token but not have it be an
# input into our model we simply slice the <eos> token off the end of the
# sequence. Thus:
#     trg      = [sos, x_1, x_2, x_3, eos]
#     trg[:-1] = [sos, x_1, x_2, x_3]
# where x_i denotes an ACTUAL target sequence element.
We then feed this into the model to get a predicted sequence that should hopefully predict the <eos> token: $$\begin{align*} \text{output} &= [y_1, y_2, y_3, eos] \end{align*}$$ $y_i$ denotes **predicted** target sequence element. We then calculate our loss using the original trg tensor with the <sos> token sliced off the front, leaving the <eos> token: $$\begin{align*} \text{output} &= [y_1, y_2, y_3, eos]\\ \text{trg[1:]} &= [x_1, x_2, x_3, eos] \end{align*}$$ We then calculate our losses and update our parameters as is standard. We don't want to punish the model for not translating the 'sos' token, but we do need it to predict/define the end of the sentence. **RemoveEOSCallback** Cut the *EOS* token from the **output_x** presented to the model, as we are trying to predict the next word. Therefore we don't want the model to try anything after the *EOS* token. So the last token given to the model will be the token before *EOS*. This callback modifies the second element of our learn.xb (which is the *copied* yb). But this should also ignore padding, as otherwise we'll be just cutting the last padding token and not the EOS ``` class RemoveEOSCallback(Callback): ''' Shift the target presented to the model during training to remove the "eos" token as we don't want the model to learn to translate EOS. When it sees EOS.
In practice we actually mask the EOS token as due to batching the last token will often be a <pad> token, not EOS ''' def __init__(self, eos_idx): self.eos_idx=eos_idx def begin_batch(self): eos_mask=(self.learn.xb[1]!=self.eos_idx) sz=torch.tensor(self.learn.xb[1].size()) sz[1]=sz[1]-1 self.learn.xb = (self.learn.xb[0], self.learn.xb[1][eos_mask].view((sz[0],sz[1]))) ``` **LossTargetShiftCallback:** Shift the target shown to the loss to exclude the "eos" token, as translating "bos" is not part of our language translation objective ``` class LossTargetShiftCallback(Callback): ''' Shift the target shown to the loss to exclude the "bos" token as the first token we want predicted should be an actual word, not the "bos" token (as we have already given the model "bos" ) ''' def __init__(self): pass def after_pred(self): self.learn.yb = (self.learn.yb[0][:,1:],) ``` ### Model Transformer size from Annotated Transformer: N=6, d_model=512, d_ff=2048, h=8 ``` pad_idx=1 assert dls.vocab[1][pad_idx] == 'xxpad' n_x_vocab, n_y_vocab = len(dls.vocab[0]), len(dls.vocab[1]) d_model=512 n_heads=8 #12 d_inner=2048 #1024 #model = Transformer(n_x_vocab, n_y_vocab, d_model=d_model, n_heads=n_heads, pad_idx=pad_idx) model=pt_Transformer(src_vcbsz=n_x_vocab, trg_vcbsz=n_y_vocab, d_model=d_model, d_inner=d_inner) model ``` Kaiming_Normal works terrribly, at least if you apply it to everything except LayerNorm... DistilBERT works ok Could try xavier: ``` def initialize_weights(m): if hasattr(m, 'weight') and m.weight.dim() > 1: nn.init.xavier_uniform_(m.weight.data) model.apply(initialize_weights); ``` **DistilBERT initialisation** ``` # DistilERT HF init weights https://github.com/huggingface/transformers/blob/31e67dd19f1b3fe2bc9a13f86d814f3f7bba48e4/src/transformers/modeling_distilbert.py def distil_apply_leaf(m, f): "Apply `f` to children of `m`." 
c = m.children() if isinstance(m, nn.Module): f(m) for l in c: apply_leaf(l,f) def _distilbert_init_weights(module): """ Initialize the weights. """ if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=0.02) #std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=0.02) #self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() distil_apply_leaf(model, _distilbert_init_weights) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') ``` ### Learner ``` cbs = [CombineInputOutputCallback, RemoveEOSCallback(eos_idx=3), LossTargetShiftCallback] pad_idx=1 assert dls.vocab[1][pad_idx] == 'xxpad' loss_func = CrossEntropyLossFlat(ignore_index=pad_idx) learn = Learner(dls, model, metrics=[accuracy, Perplexity(), CorpusBLEUMetric(vocab_sz=n_y_vocab)], cbs=cbs, loss_func=loss_func) #learn.load('paracrawl_en_ga_5e_5e-4') ``` # Training ``` learn.lr_find() learn.fit_one_cycle(5, 5e-4, div=5) learn.recorder.plot_loss() learn.save('paracrawl_en_ga_5e_5e-4_v0.2_exp1') ``` ## 5e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) 
generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## v0.1 - 5e Run ``` learn.fit_one_cycle(5, 5e-4, div=5) learn.save('paracrawl_en_ga_5e_5e-4') learn.export(fname='paracrawl_en_ga_5e_5e-4_learner.pkl') ``` ## Generation ``` def generate(model, sentence, vocab): #model = torch.load('output/transformer.pth') # lang_model = spacy.load('en') # with open('data/processed/en/freq_list.pkl', 'rb') as f: # en_freq_list = pickle.load(f) # with open('data/processed/fr/freq_list.pkl', 'rb') as f: # fr_freq_list = pickle.load(f) #sentence = input('Please enter your english sentence: ') #sentence = tokenize(sentence, en_freq_list, lang_model) model=model.eval() sentence=learn.dls.tokenizer[0][1].encodes(sentence) sentence=learn.dls.numericalize[0].encodes(sentence) translated_sentence = [2] # xxbos #translated_sentence = [fr_freq_list['[SOS]']] i = 0 while int(translated_sentence[-1]) != 3 and i < 75: # xxeos #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15: #output = forward_model(model, sentence, translated_sentence).to('cuda') output = forward_model(model, sentence, translated_sentence).cuda() values, indices = torch.topk(output, 5) translated_sentence.append(int(indices[-1][0])) i+=1 detok_translated_sentence=detokenize(translated_sentence, vocab) print(' '.join(detok_translated_sentence)) def forward_model(model, src, tgt): src = torch.as_tensor(src).unsqueeze(0).long().cuda() tgt = torch.as_tensor(tgt).unsqueeze(0).cuda() tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda() output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None) #return output.squeeze(0).to('cpu') return output.squeeze(0).detach() # def 
tokenize(sentence, freq_list, lang_model): # punctuation = ['(', ')', ':', '"', ' '] # sentence = sentence.lower() # sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation] # return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence] def detokenize(sentence, vocab): #freq_list = {v: k for k, v in freq_list.items()} return [vocab[token] for token in sentence] #return [freq_list[token] for token in sentence] # def detokenize(sentence, freq_list): # freq_list = {v: k for k, v in freq_list.items()} # return [freq_list[token] for token in sentence] def gen_nopeek_mask(length): mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h') mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask ``` ## 5e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## 20e Run ``` # 20e, added shuffle to sorteddl, PT Transformer, distilbert init, Adam, distilbert init # CONCLUSION: learn.fit_one_cycle(20, 5e-4, div=5) 
learn.save('paracrawl_en_ga_20e_5e-4') learn.export(fname='paracrawl_en_ga_20e_5e-4_learner.pkl') ``` ## Generation ``` def generate(model, sentence, vocab): #model = torch.load('output/transformer.pth') # lang_model = spacy.load('en') # with open('data/processed/en/freq_list.pkl', 'rb') as f: # en_freq_list = pickle.load(f) # with open('data/processed/fr/freq_list.pkl', 'rb') as f: # fr_freq_list = pickle.load(f) #sentence = input('Please enter your english sentence: ') #sentence = tokenize(sentence, en_freq_list, lang_model) model=model.eval() sentence=learn.dls.tokenizer[0][1].encodes(sentence) sentence=learn.dls.numericalize[0].encodes(sentence) translated_sentence = [2] # xxbos #translated_sentence = [fr_freq_list['[SOS]']] i = 0 while int(translated_sentence[-1]) != 3 and i < 75: # xxeos #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15: #output = forward_model(model, sentence, translated_sentence).to('cuda') output = forward_model(model, sentence, translated_sentence).cuda() values, indices = torch.topk(output, 5) translated_sentence.append(int(indices[-1][0])) i+=1 detok_translated_sentence=detokenize(translated_sentence, vocab) print(' '.join(detok_translated_sentence)) def forward_model(model, src, tgt): src = torch.as_tensor(src).unsqueeze(0).long().cuda() tgt = torch.as_tensor(tgt).unsqueeze(0).cuda() tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda() output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None) #return output.squeeze(0).to('cpu') return output.squeeze(0).detach() # def tokenize(sentence, freq_list, lang_model): # punctuation = ['(', ')', ':', '"', ' '] # sentence = sentence.lower() # sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation] # return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence] def detokenize(sentence, vocab): #freq_list = {v: k for k, v in 
freq_list.items()} return [vocab[token] for token in sentence] #return [freq_list[token] for token in sentence] # def detokenize(sentence, freq_list): # freq_list = {v: k for k, v in freq_list.items()} # return [freq_list[token] for token in sentence] def gen_nopeek_mask(length): mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h') mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask ``` ## 20e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## Alternative generation ``` # https://forums.fast.ai/t/fastai-v2-text/53529/334 from fastai2.text.all import * defaults.device = torch.device('cpu') path = Path('.') learner = load_learner("./export.pkl") f = open("/tmp/test.txt", "r") test_file_contents = f.read() _, _, losses = learner.predict(test_file_contents) cats = [learner.dls.categorize.decode(i) for i in range(len(losses))] predictions = sorted( zip(cats, map(float, losses)), key=lambda p: p[1], reverse=True ) print(predictions) # OR items = pd.read_csv("/tmp/test.txt", sep = '\t') test_dl 
= learner.dls.test_dl(items.values) learner.get_preds(dl=test_dl, with_decoded=False) ```
github_jupyter
# Met Office UKV high-resolution atmosphere model data :::{eval-rst} :opticon:`tag` :badge:`Urban,badge-primary` :badge:`Sensors,badge-secondary` ::: ## Context ### Purpose To load, plot, regrid and extract an urban region from the UKV gridded model data using the [Iris package](https://scitools-iris.readthedocs.io/en/stable/). ### Sensor description Met Office UKV model data is fairly high resolution (approximately 1 km horizontal) and available over the whole of the UK for a variety of atmospheric variables at surface and pressure levels. A selection of variables has been made openly available as part of the Met Office contribution to the COVID 19 modelling effort. A selection of variables at hourly and daily frequency in NetCDF format can be obtained from [this landing page](https://metdatasa.blob.core.windows.net/covid19-response-non-commercial/README.html). This notebook uses a single sample data file for 1.5 m temperature included with the notebook. ### Highlights * Data for the whole UK is loaded and plotted using Iris * Data is regridded to a geographic projection * A region over London is extracted ### Contributions #### Notebook * Samantha V. Adams (author), Met Office Informatics Lab, [@svadams](https://github.com/svadams) * Alejandro Coca-Castro (reviewer), The Alan Turing Institute, [@acocac](https://github.com/acocac), 01/10/21 (latest revision) #### Dataset originator/creator * Met Office Informatics Lab (creator) * Microsoft (support) * European Regional Development Fund (support) #### Dataset authors * Met Office #### Dataset documentation ```{bibliography} :style: plain :list: bullet :filter: topic % "urban-sensors-climate_ukv" ``` :::{note} Note this data should be used only for non-commercial purposes. 
::: ## Install and load libraries ``` #!conda install -c conda-forge iris import os import iris import iris.analysis import iris.plot as iplt from iris.coords import DimCoord from iris.coord_systems import GeogCS from iris.cube import Cube from iris.fileformats.pp import EARTH_RADIUS import urllib.request import numpy as np import matplotlib.pyplot as plt %matplotlib inline ``` ## Set project structure ``` notebook_folder = '../sensors/urban-sensors-climate_ukv' if not os.path.exists(notebook_folder): os.makedirs(notebook_folder) ``` ## Retrieve and load a sample data file ``` filepath = 'https://metdatasa.blob.core.windows.net/covid19-response-non-commercial/metoffice_ukv_daily/t1o5m_mean/' filename = 'ukv_daily_t1o5m_mean_20150801.nc' urllib.request.urlretrieve(filepath+filename, os.path.join(notebook_folder, filename)) air_temp = iris.load_cube(os.path.join(notebook_folder, filename)) air_temp.coord('grid_latitude').guess_bounds() air_temp.coord('grid_longitude').guess_bounds() ``` ## Visualisation Here we use the Iris wrapper to matplotlib pyplot to plot the gridded data with added gridlines and coastline. ``` plt.figure(figsize=(30, 10)) iplt.pcolormesh(air_temp) plt.title("UKV Air temperature", fontsize="xx-large") cbar = plt.colorbar() cbar.set_label('Temperature (' + str(air_temp.units) + ')') ax = plt.gca() ax.coastlines(resolution="50m") ax.gridlines() ``` ## Regridding from Azimuthal equal-area projection to geographic ### Create a target cube with a lat-lon coord system for regrid It is filled with random data so we can plot it to check it looks correct. 
``` latitude = DimCoord(np.linspace(48.5, 59.5, 1222), standard_name='latitude', coord_system = GeogCS(EARTH_RADIUS), units='degrees') longitude = DimCoord(np.linspace(-10.5, 2.0, 1389), standard_name='longitude', coord_system = GeogCS(EARTH_RADIUS), units='degrees') global_cube = Cube(np.random.uniform(low=0.0, high=1.0, size=(1222, 1389)), dim_coords_and_dims=[(latitude, 0), (longitude, 1)]) global_cube.coord('latitude').guess_bounds() global_cube.coord('longitude').guess_bounds() plt.figure(figsize=(30, 10)) iplt.pcolormesh(global_cube) plt.title("Target global cube", fontsize="xx-large") ax = plt.gca() ax.coastlines(resolution="50m") ax.gridlines() ``` ### Perform the regridding from source data cube to target cube ``` # Note we need to use extrapolation masking in case regridded source data is actually smaller # than the target cube extents global_air_temp = air_temp.regrid(global_cube, iris.analysis.Linear(extrapolation_mode="mask")) ``` ### Plot the regridded data to check it is correct ``` plt.figure(figsize=(30, 10)) iplt.pcolormesh(global_air_temp) plt.title("UKV Air temperature on a global grid", fontsize="xx-large") cbar = plt.colorbar() cbar.set_label('Temperature (' + str(global_air_temp.units) + ')') ax = plt.gca() ax.coastlines(resolution="50m") ax.gridlines() ``` ## Extract the London Region ### Use the Iris Intersection method and supply the region lat-lon bounds ``` min_lon = -0.52 min_lat = 51.3 max_lon = 0.3 max_lat = 51.7 air_temp_london = global_air_temp.intersection(longitude=(min_lon, max_lon), latitude=(min_lat, max_lat)) ``` ### Plot the results ``` plt.figure(figsize=(20, 5)) iplt.pcolormesh(air_temp_london) plt.title("UKV Air temperature for london", fontsize="xx-large") cbar = plt.colorbar() cbar.set_label('Temperature (' + str(air_temp_london.units) + ')') ax = plt.gca() ax.coastlines(resolution="50m") ax.gridlines() plt.show() ``` ### Save as a new NetCDF file ``` iris.save(air_temp_london, 
os.path.join(notebook_folder,'ukv_london_sample.nc')) ``` ## Summary This notebook has demonstrated the use of the Iris package to easily load, plot and manipulate gridded environmental NetCDF data. ## Version * Notebook: commit <mark>[74bbb54](https://github.com/acocac/environmental-ai-book/commits/master/book/urban/sensors/urban-sensors-climate_ukv.ipynb)</mark>
github_jupyter
ERROR: type should be string, got "https://www.kaggle.com/zhangyang/bldcv0708091?scriptVersionId=16901388\n\n# params\n\n```\ndbg = False\nif dbg:\n dbgsz = 500\n\nPRFX = 'CV070816' \np_o = f'../output/{PRFX}'\nSEED = 111\n\nBS = 128\nSZ = 224\nFP16 = False\n```\n\n# setup\n\n```\nimport fastai\nprint('fastai.__version__: ', fastai.__version__)\n\nimport random \nimport numpy as np\nimport torch\nimport os\n\ndef set_torch_seed(seed=SEED):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n \n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) \n torch.backends.cudnn.deterministic = True \n torch.backends.cudnn.benchmark = False\n\nset_torch_seed()\n\nfrom fastai import *\nfrom fastai.vision import *\nfrom fastai.callbacks import *\n\nimport scipy as sp\nfrom sklearn.metrics import cohen_kappa_score\n\ndef quadratic_weighted_kappa(y1, y2):\n return cohen_kappa_score(y1, y2, weights='quadratic')\n```\n\n# preprocess\n\n```\nimg2grd = []\n\np = '../input/aptos2019-blindness-detection'\npp = Path(p)\ntrain = pd.read_csv(pp/'train.csv')\ntest = pd.read_csv(pp/'test.csv')\nlen_blnd = len(train)\nlen_blnd_test = len(test)\n\nimg2grd_blnd = [(f'{p}/train_images/{o[0]}.png',o[1]) for o in train.values]\n\nlen_blnd, len_blnd_test\nimg2grd += img2grd_blnd\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\np = '../input/diabetic-retinopathy-detection'\npp = Path(p)\n\ntrain=pd.read_csv(pp/'trainLabels.csv')\nimg2grd_diab_train=[(f'../input/diabetic-retinopathy-detection/train_images/{o[0]}.jpeg',o[1]) for o in train.values]\nimg2grd += img2grd_diab_train\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\n\n# test=pd.read_csv(pp/'retinopathy_solution.csv')\n# img2grd_diab_test=[(f'../input/diabetic-retinopathy-detection/test_images/{o[0]}.jpeg',o[1]) for o in test.values]\n# img2grd += img2grd_diab_test\n# 
display(len(img2grd))\n# display(Counter(o[1] for o in img2grd).most_common())\np = '../input/IDRID/B. Disease Grading'\npp = Path(p)\n\ntrain=pd.read_csv(pp/'2. Groundtruths/a. IDRiD_Disease Grading_Training Labels.csv')\nimg2grd_idrid_train=[(f'../input/IDRID/B. Disease Grading/1. Original Images/a. Training Set/{o[0]}.jpg',o[1]) for o in train.values]\nimg2grd += img2grd_idrid_train\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\n\ntest=pd.read_csv(pp/'2. Groundtruths/b. IDRiD_Disease Grading_Testing Labels.csv')\nimg2grd_idrid_test=[(f'../input/IDRID/B. Disease Grading/1. Original Images/b. Testing Set/{o[0]}.jpg',o[1]) for o in test.values]\nimg2grd += img2grd_idrid_test\ndisplay(len(img2grd))\ndisplay(Counter(o[1] for o in img2grd).most_common())\nif not np.all([Path(o[0]).exists() for o in img2grd]): print('Some files are missing!!!')\ndf = pd.DataFrame(img2grd)\ndf.columns = ['fnm', 'target']\n\ndf.shape\nset_torch_seed()\nidx_blnd_train = np.where(df.fnm.str.contains('aptos2019-blindness-detection/train_images'))[0]\nidx_val = np.random.choice(idx_blnd_train, int(len_blnd*0.50), replace=False)\ndf['is_val']=False\ndf.loc[idx_val, 'is_val']=True\n\nif dbg:\n df=df.head(dbgsz)\n```\n\n# dataset\n\n```\n%%time\ntfms = get_transforms()\n\ndef get_data(sz, bs):\n src = (ImageList.from_df(df=df,path='./',cols='fnm') \n .split_from_df(col='is_val') \n .label_from_df(cols='target', \n label_cls=FloatList)\n )\n\n data= (src.transform(tfms, size=sz) #Data augmentation\n .databunch(bs=bs) #DataBunch\n .normalize(imagenet_stats) #Normalize \n )\n return data\n\nbs = BS \nsz = SZ\nset_torch_seed()\ndata = get_data(sz, bs)\n```\n\n# model\n\n```\n%%time\n# Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth\n\n# Making pretrained weights work without needing to find the default filename\nif not os.path.exists('/tmp/.cache/torch/checkpoints/'):\n 
os.makedirs('/tmp/.cache/torch/checkpoints/')\n!cp '../input/pytorch-vision-pretrained-models/resnet50-19c8e357.pth' '/tmp/.cache/torch/checkpoints/resnet50-19c8e357.pth'\nlearn = cnn_learner(data, \n base_arch = models.resnet50, \n path=p_o)\nlearn.loss = MSELossFlat\n\nif FP16: learn = learn.to_fp16()\nlearn.freeze()\nlen(learn.data.train_dl)\n%%time\nlearn.lr_find(start_lr=1e-4)\nlearn.recorder.plot(suggestion=True)\nset_torch_seed()\nlearn.fit_one_cycle(15, max_lr=5e-3, callbacks=[SaveModelCallback(learn, name='bestmodel_frozen')])\nlearn.recorder.plot_losses()\n# learn.recorder.plot_metrics()\nlearn.save('mdl-frozen')\n!nvidia-smi\nlearn.unfreeze()\n%%time\nlearn.lr_find()\nlearn.recorder.plot(suggestion=True)\nset_torch_seed()\nlearn.fit_one_cycle(10, max_lr=slice(5e-7,5e-5), callbacks=[SaveModelCallback(learn, name='bestmodel_finetune')])\nlearn.recorder.plot_losses()\n# learn.recorder.plot_metrics()\nlearn.save('mdl')\n!nvidia-smi\n```\n\n# validate and thresholding\n\n```\nlearn = learn.to_fp32()\n%%time\nset_torch_seed()\npreds_val, y_val = learn.get_preds(ds_type=DatasetType.Valid)\npreds_val = preds_val.numpy().squeeze()\ny_val= y_val.numpy()\nnp.save(f'{p_o}/preds_val.npy', preds_val)\nnp.save(f'{p_o}/y_val.npy', y_val)\n# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/88773#latest-515044\n# We used OptimizedRounder given by hocop1. 
https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\n# put numerical value to one of bins\ndef to_bins(x, borders):\n for i in range(len(borders)):\n if x <= borders[i]:\n return i\n return len(borders)\n\nclass Hocop1OptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _loss(self, coef, X, y, idx):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n ll = -quadratic_weighted_kappa(y, X_p)\n return ll\n\n def fit(self, X, y):\n coef = [1.5, 2.0, 2.5, 3.0]\n golden1 = 0.618\n golden2 = 1 - golden1\n ab_start = [(1, 2), (1.5, 2.5), (2, 3), (2.5, 3.5)]\n for it1 in range(10):\n for idx in range(4):\n # golden section search\n a, b = ab_start[idx]\n # calc losses\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n for it in range(20):\n # choose value\n if la > lb:\n a = b - (b - a) * golden1\n coef[idx] = a\n la = self._loss(coef, X, y, idx)\n else:\n b = b - (b - a) * golden2\n coef[idx] = b\n lb = self._loss(coef, X, y, idx)\n self.coef_ = {'x': coef}\n\n def predict(self, X, coef):\n X_p = np.array([to_bins(pred, coef) for pred in X])\n return X_p\n\n def coefficients(self):\n return self.coef_['x']\n# https://www.kaggle.com/c/petfinder-adoption-prediction/discussion/76107#480970\nclass AbhishekOptimizedRounder(object):\n def __init__(self):\n self.coef_ = 0\n\n def _kappa_loss(self, coef, X, y):\n X_p = np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n\n ll = quadratic_weighted_kappa(y, X_p)\n return -ll\n\n def fit(self, X, y):\n loss_partial = partial(self._kappa_loss, X=X, y=y)\n initial_coef = [0.5, 1.5, 2.5, 3.5]\n self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')\n\n def predict(self, X, coef):\n X_p = 
np.copy(X)\n for i, pred in enumerate(X_p):\n if pred < coef[0]:\n X_p[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n X_p[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n X_p[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n X_p[i] = 3\n else:\n X_p[i] = 4\n return X_p\n\n def coefficients(self):\n return self.coef_['x']\ndef bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds\noptnm2coefs = {'simple': [0.5, 1.5, 2.5, 3.5]}\n%%time\nset_torch_seed()\noptR = Hocop1OptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['hocop1'] = optR.coefficients()\n%%time\nset_torch_seed()\noptR = AbhishekOptimizedRounder()\noptR.fit(preds_val, y_val)\noptnm2coefs['abhishek'] = optR.coefficients()\noptnm2coefs\noptnm2preds_val_grd = {k: bucket(preds_val, coef) for k,coef in optnm2coefs.items()}\noptnm2qwk = {k: quadratic_weighted_kappa(y_val, preds) for k,preds in optnm2preds_val_grd.items()}\noptnm2qwk\nCounter(y_val).most_common()\npreds_val_grd = optnm2preds_val_grd['simple'].squeeze()\npreds_val_grd.mean()\nCounter(preds_val_grd).most_common()\nlist(zip(preds_val_grd, y_val))[:10]\n(preds_val_grd== y_val.squeeze()).mean()\npickle.dump(optnm2qwk, open(f'{p_o}/optnm2qwk.p', 'wb'))\npickle.dump(optnm2preds_val_grd, open(f'{p_o}/optnm2preds_val_grd.p', 'wb'))\npickle.dump(optnm2coefs, open(f'{p_o}/optnm2coefs.p', 'wb'))\n```\n\n# testing\n\n```\ndf_test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')\ndf_test.head()\nlearn.data.add_test(\n ImageList.from_df(df_test,\n '../input/aptos2019-blindness-detection',\n folder='test_images',\n suffix='.png'))\n%%time\nset_torch_seed()\npreds_tst, _ = 
learn.get_preds(ds_type=DatasetType.Test)\npreds_tst = preds_tst.numpy().squeeze()\nnp.save(f'{p_o}/preds_tst.npy', preds_tst)\ndef bucket(preds_raw, coef = [0.5, 1.5, 2.5, 3.5]):\n preds = np.zeros(preds_raw.shape)\n for i, pred in enumerate(preds_raw):\n if pred < coef[0]:\n preds[i] = 0\n elif pred >= coef[0] and pred < coef[1]:\n preds[i] = 1\n elif pred >= coef[1] and pred < coef[2]:\n preds[i] = 2\n elif pred >= coef[2] and pred < coef[3]:\n preds[i] = 3\n else:\n preds[i] = 4\n return preds\ncoef = optnm2coefs['simple']\npreds_tst_grd = bucket(preds_tst, coef)\npreds_tst_grd.squeeze()\nCounter(preds_tst_grd.squeeze()).most_common()\n```\n\n## submit\n\n```\nsubm = pd.read_csv(\"../input/aptos2019-blindness-detection/test.csv\")\nsubm['diagnosis'] = preds_tst_grd.squeeze().astype(int)\nsubm.head()\nsubm.diagnosis.value_counts()\nsubm.to_csv(f\"{p_o}/submission.csv\", index=False)\n```\n\n"
github_jupyter
# Deep Markov Model ## Introduction We're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step. Since music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables. One particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks. The specific model we're going to implement is based on the following reference: [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; Rahul G. Krishnan, Uri Shalit, David Sontag Please note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models. 
We've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together PyTorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work. ## The Model A convenient way to describe the high-level structure of the model is with a graphical model. Here, we've rolled out the model assuming that the sequence of observations is of length three: $\{{\bf x}_1, {\bf x}_2, {\bf x}_3\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\{{\bf z}_1, {\bf z}_2, {\bf z}_3\}$. The figure encodes the structure of the model. The corresponding joint distribution is $$p({\bf x}_{123} , {\bf z}_{123})=p({\bf x}_1|{\bf z}_1)p({\bf x}_2|{\bf z}_2)p({\bf x}_3|{\bf z}_3)p({\bf z}_1)p({\bf z}_2|{\bf z}_1)p({\bf z}_3|{\bf z}_2)$$ Conditioned on ${\bf z}_t$, each observation ${\bf x}_t$ is independent of the other observations. This can be read off from the fact that each ${\bf x}_t$ only depends on the corresponding latent ${\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\bf z}_t$, when conditioned on the previous latent ${\bf z}_{t-1}$, is independent of all previous latents $\{ {\bf z}_{t-2}, {\bf z}_{t-3}, ...\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\bf z}_{t}$. We will assume that the observation likelihoods, i.e. 
the probability distributions $p({{\bf x}_t}|{{\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous. The solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\bf z}_{t}$ to depend on ${\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics. Some additional notes: - we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics - note the parameter ${\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\bf z}_1)$ for the first time step, where there are no previous latents to condition on. ### The Gated Transition and the Emitter Without further ado, let's start writing some code. We first define the two PyTorch Modules that correspond to the black squares in Fig. 1. 
First the emission function: ```python class Emitter(nn.Module): """ Parameterizes the bernoulli observation likelihood p(x_t | z_t) """ def __init__(self, input_dim, z_dim, emission_dim): super().__init__() # initialize the three linear transformations used in the neural network self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim) self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim) self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim) # initialize the two non-linearities used in the neural network self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, z_t): """ Given the latent z at a particular time step t we return the vector of probabilities `ps` that parameterizes the bernoulli distribution p(x_t|z_t) """ h1 = self.relu(self.lin_z_to_hidden(z_t)) h2 = self.relu(self.lin_hidden_to_hidden(h1)) ps = self.sigmoid(self.lin_hidden_to_input(h2)) return ps ``` In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using. The forward call defines the computational flow of the function. We take in the latent ${\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\bf z}_{t}$). Now we define the gated transition function: ```python class GatedTransition(nn.Module): """ Parameterizes the gaussian latent transition probability p(z_t | z_{t-1}) See section 5 in the reference for comparison. 
""" def __init__(self, z_dim, transition_dim): super().__init__() # initialize the six linear transformations used in the neural network self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim) self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim) self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim) self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim) self.lin_sig = nn.Linear(z_dim, z_dim) self.lin_z_to_loc = nn.Linear(z_dim, z_dim) # modify the default initialization of lin_z_to_loc # so that it's starts out as the identity function self.lin_z_to_loc.weight.data = torch.eye(z_dim) self.lin_z_to_loc.bias.data = torch.zeros(z_dim) # initialize the three non-linearities used in the neural network self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.softplus = nn.Softplus() def forward(self, z_t_1): """ Given the latent z_{t-1} corresponding to the time step t-1 we return the mean and scale vectors that parameterize the (diagonal) gaussian distribution p(z_t | z_{t-1}) """ # compute the gating function _gate = self.relu(self.lin_gate_z_to_hidden(z_t_1)) gate = self.sigmoid(self.lin_gate_hidden_to_z(_gate)) # compute the 'proposed mean' _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1)) proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean) # assemble the actual mean used to sample z_t, which mixes # a linear transformation of z_{t-1} with the proposed mean # modulated by the gating function loc = (1 - gate) * self.lin_z_to_loc(z_t_1) + gate * proposed_mean # compute the scale used to sample z_t, using the proposed # mean from above as input. the softplus ensures that scale is positive scale = self.softplus(self.lin_sig(self.relu(proposed_mean))) # return loc, scale which can be fed into Normal return loc, scale ``` This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. 
First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `loc`, and the (square root) covariance `scale`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `loc` is a sum of two terms, only one of which depends non-linearly on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear). ### Model - a Pyro Stochastic Function So far everything we've done is pure PyTorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. To do this we introduce a callable `model()` that contains the Pyro primitive `pyro.sample`. The `sample` statements will be used to specify the joint distribution over the latents ${\bf z}_{1:T}$. Additionally, the `obs` argument can be used with the `sample` statements to specify how the observations ${\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic: ```python def model(...): z_prev = self.z_0 # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next two lines of code sample z_t ~ p(z_t | z_{t-1}). 
# first compute the parameters of the diagonal gaussian # distribution p(z_t | z_{t-1}) z_loc, z_scale = self.trans(z_prev) # then sample z_t according to dist.Normal(z_loc, z_scale) z_t = pyro.sample("z_%d" % t, dist.Normal(z_loc, z_scale)) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.sample("obs_x_%d" % t, dist.Bernoulli(emission_probs_t), obs=mini_batch[:, t - 1, :]) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t ``` The first thing we need to do is sample ${\bf z}_1$. Once we've sampled ${\bf z}_1$, we can sample ${\bf z}_2 \sim p({\bf z}_2|{\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_loc` and `z_scale` that define the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. Note that each random variable `z_t` is assigned a unique name by the user. Once we've sampled ${\bf z}_t$ at a given time step, we need to observe the datapoint ${\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. Together with the argument `dist.Bernoulli()` in the `sample` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\bf x}_t$: `mini_batch[:, t - 1, :]` using the `obs` argument to `sample`. This fully specifies our model and encapsulates it in a callable that can be passed to Pyro. 
Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass. ```python def model(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all PyTorch (sub)modules with pyro # this needs to happen in both the model and guide pyro.module("dmm", self) # set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1}) z_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0)) # we enclose all the sample statements in the model in a plate. # this marks that each datapoint is conditionally independent of the others with pyro.plate("z_minibatch", len(mini_batch)): # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next chunk of code samples z_t ~ p(z_t | z_{t-1}) # note that (both here and elsewhere) we use poutine.scale to take care # of KL annealing. we use the mask() method to deal with raggedness # in the observed data (i.e. different sequences in the mini-batch # have different lengths) # first compute the parameters of the diagonal gaussian # distribution p(z_t | z_{t-1}) z_loc, z_scale = self.trans(z_prev) # then sample z_t according to dist.Normal(z_loc, z_scale). # note that we use the reshape method so that the univariate # Normal distribution is treated as a multivariate Normal # distribution with a diagonal covariance. 
with poutine.scale(None, annealing_factor): z_t = pyro.sample("z_%d" % t, dist.Normal(z_loc, z_scale) .mask(mini_batch_mask[:, t - 1:t]) .to_event(1)) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.sample("obs_x_%d" % t, dist.Bernoulli(emission_probs_t) .mask(mini_batch_mask[:, t - 1:t]) .to_event(1), obs=mini_batch[:, t - 1, :]) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t ``` The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. `mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization). This is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent. This vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations. So the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. 
Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ to the `mask` method (which acts on the distribution that needs masking). Finally, the line `pyro.module("dmm", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for the `sample` statement, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. We leave a discussion of the KL annealing factor for later. ## Inference At this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.ipynb) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro). ### Guide The purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\bf z}_{1:T}|{\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back. Suppose our dataset $\mathcal{D}$ consists of $N$ sequences $\{ {\bf x}_{1:T_1}^1, {\bf x}_{1:T_2}^2, ..., {\bf x}_{1:T_N}^N \}$. 
Then the posterior we're actually interested in is given by $p({\bf z}_{1:T_1}^1, {\bf z}_{1:T_2}^2, ..., {\bf z}_{1:T_N}^N | \mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](svi_part_ii.ipynb)). #### Aside: Amortization This works as follows. Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\bf x}_{1:T})$ and work with a variational distribution that has the form $\prod_{n=1}^N q({\bf z}_{1:T_n}^n | f({\bf x}_{1:T_n}^n))$. The function $f(\cdot)$&mdash;which basically maps a given observed sequence to a set of variational parameters tailored to that sequence&mdash;will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters. So our task is to construct the function $f(\cdot)$. Since in our case we need to support variable-length sequences, it's only natural that $f(\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\cdot)$ in detail, let's look at a computational graph that encodes the basic structure: <p> At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\{ {\bf h}_1, {\bf h}_2,{\bf h}_3\}$. Note that this computation is done _before_ we sample any latent variables. 
Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the conditional distribution $q({\bf z}_t | {\bf z}_{t-1}, {\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\bf z}_{1:T}$ in the guide is such that we sample ${\bf z}_t$ forward in time.) In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\bf z}_0^{\rm{q}}$. #### Aside: Guide Structure Why do we setup the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\bf z}_t |...)$ depends on two things: - the latent ${\bf z}_{t-1}$ from the previous time step; and - the observations ${\bf x}_{t:T}$, i.e. the current observation together with all future observations We are free to make other choices; all that is required is that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\bf x}_{1:t-1}$ is encapsulated by ${\bf z}_{t-1}$. We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model. Let's look at the component parts in detail. First, the `Combiner` module: ```python class Combiner(nn.Module): """ Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block of the guide (i.e. the variational distribution). 
The dependence on x_{t:T} is through the hidden state of the RNN (see the pytorch module `rnn` below) """ def __init__(self, z_dim, rnn_dim): super().__init__() # initialize the three linear transformations used in the neural network self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim) self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim) self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim) # initialize the two non-linearities used in the neural network self.tanh = nn.Tanh() self.softplus = nn.Softplus() def forward(self, z_t_1, h_rnn): """ Given the latent z at at a particular time step t-1 as well as the hidden state of the RNN h(x_{t:T}) we return the mean and scale vectors that parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T}) """ # combine the rnn hidden state with a transformed version of z_t_1 h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn) # use the combined hidden state to compute the mean used to sample z_t loc = self.lin_hidden_to_loc(h_combined) # use the combined hidden state to compute the scale used to sample z_t scale = self.softplus(self.lin_hidden_to_scale(h_combined)) # return loc, scale which can be fed into Normal return loc, scale ``` This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs. Apart from the RNN, we now have all the ingredients we need to construct our guide distribution. Happily, PyTorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. Let's instead jump right into the definition of the stochastic function `guide()`. 
```python def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all PyTorch (sub)modules with pyro pyro.module("dmm", self) # if on gpu we need the fully broadcast view of the rnn initial state # to be in contiguous gpu memory h_0_contig = self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous() # push the observed x's through the rnn; # rnn_output contains the hidden state at each time step rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig) # reverse the time-ordering in the hidden state and un-pack it rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths) # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...) z_prev = self.z_q_0.expand(mini_batch.size(0), self.z_q_0.size(0)) # we enclose all the sample statements in the guide in a plate. # this marks that each datapoint is conditionally independent of the others. with pyro.plate("z_minibatch", len(mini_batch)): # sample the latents z one time step at a time for t in range(1, T_max + 1): # the next two lines assemble the distribution q(z_t | z_{t-1}, x_{t:T}) z_loc, z_scale = self.combiner(z_prev, rnn_output[:, t - 1, :]) z_dist = dist.Normal(z_loc, z_scale) # sample z_t from the distribution z_dist with pyro.poutine.scale(None, annealing_factor): z_t = pyro.sample("z_%d" % t, z_dist.mask(mini_batch_mask[:, t - 1:t]) .to_event(1)) # the latent sampled at this time step will be conditioned # upon in the next time step so keep track of it z_prev = z_t ``` The high-level structure of `guide()` is very similar to `model()`. First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. 
Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `sample` statements with the `obs` keyword). Finally, note that the names of the latent variables in the guide exactly match those in the model. This is how Pyro knows to correctly align random variables. The RNN logic should be familiar to PyTorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a PyTorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. This helper function also unpacks the `rnn_output` so that it is no longer in the form of a PyTorch `rnn.pack_padded_sequence`. ## Packaging the Model and Guide as a PyTorch Module At this juncture, we're ready to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single PyTorch Module. This is generally good practice, especially for larger models. 
```python class DMM(nn.Module): """ This PyTorch Module encapsulates the model as well as the variational distribution (the guide) for the Deep Markov Model """ def __init__(self, input_dim=88, z_dim=100, emission_dim=100, transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0, num_iafs=0, iaf_dim=50, use_cuda=False): super().__init__() # instantiate pytorch modules used in the model and guide below self.emitter = Emitter(input_dim, z_dim, emission_dim) self.trans = GatedTransition(z_dim, transition_dim) self.combiner = Combiner(z_dim, rnn_dim) self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu', batch_first=True, bidirectional=False, num_layers=1, dropout=rnn_dropout_rate) # define a (trainable) parameters z_0 and z_q_0 that help define # the probability distributions p(z_1) and q(z_1) # (since for t = 1 there are no previous latents to condition on) self.z_0 = nn.Parameter(torch.zeros(z_dim)) self.z_q_0 = nn.Parameter(torch.zeros(z_dim)) # define a (trainable) parameter for the initial hidden state of the rnn self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim)) self.use_cuda = use_cuda # if on gpu cuda-ize all pytorch (sub)modules if use_cuda: self.cuda() # the model p(x_{1:T} | z_{1:T}) p(z_{1:T}) def model(...): # ... as above ... # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution) def guide(...): # ... as above ... ``` Since we've already gone over `model` and `guide`, our focus here is on the constructor. First we instantiate the four PyTorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN. Next we define PyTorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\bf z_0$. 
The important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, PyTorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module("dmm", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory. ## Stochastic Variational Inference With our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. First we instantiate an instance of `DMM` and setup an optimizer. ```python # instantiate the dmm dmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim, args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda) # setup optimizer adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2), "clip_norm": args.clip_norm, "lrd": args.lr_decay, "weight_decay": args.weight_decay} optimizer = ClippedAdam(adam_params) ``` Here we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm. ```python # setup inference algorithm svi = SVI(dmm.model, dmm.guide, optimizer, Trace_ELBO()) ``` The inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\log p(\mathcal{D})$. 
As we take gradient steps that maximize the ELBO, we move our guide $q(\cdot)$ closer to the exact posterior. The argument `Trace_ELBO()` constructs a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.) Assuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling ```python svi.step(mini_batch, ...) ``` That's all there is to it! Well, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro. ## The Black Magic of Optimization Actually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space&mdash;a hard problem&mdash;for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons: - the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined). - our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates - data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that). 
- given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial The upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be. Before we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior): $\rm{ELBO} = \mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ This latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one: $\mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \rm{annealing\_factor} \times \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ The idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule. In terms of code, to scale the log likelihoods by the appropriate annealing factor we enclose each of the latent sample statements in the model and guide with a `pyro.poutine.scale` context. 
Finally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. Support for analytic KL divergences in Pyro is something we plan to add in the future. ## Data Loading, Training, and Evaluation First we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps. ```python jsb_file_loc = "./data/jsb_processed.pkl" data = pickle.load(open(jsb_file_loc, "rb")) training_seq_lengths = data['train']['sequence_lengths'] training_data_sequences = data['train']['sequences'] test_seq_lengths = data['test']['sequence_lengths'] test_data_sequences = data['test']['sequences'] val_seq_lengths = data['valid']['sequence_lengths'] val_data_sequences = data['valid']['sequences'] N_train_data = len(training_seq_lengths) N_train_time_slices = np.sum(training_seq_lengths) N_mini_batches = int(N_train_data / args.mini_batch_size + int(N_train_data % args.mini_batch_size > 0)) ``` For this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. 
Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step: ```python def process_minibatch(epoch, which_mini_batch, shuffled_indices): if args.annealing_epochs > 0 and epoch < args.annealing_epochs: # compute the KL annealing factor appropriate # for the current mini-batch in the current epoch min_af = args.minimum_annealing_factor annealing_factor = min_af + (1.0 - min_af) * \ (float(which_mini_batch + epoch * N_mini_batches + 1) / float(args.annealing_epochs * N_mini_batches)) else: # by default the KL annealing factor is unity annealing_factor = 1.0 # compute which sequences in the training set we should grab mini_batch_start = (which_mini_batch * args.mini_batch_size) mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data]) mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end] # grab the fully prepped mini-batch using the helper function in the data loader mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \ = poly.get_mini_batch(mini_batch_indices, training_data_sequences, training_seq_lengths, cuda=args.cuda) # do an actual gradient step loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor) # keep track of the training loss return loss ``` We first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`. 
This helper function takes care of a number of different things: - it sorts each mini-batch by sequence length - it calls another helper function to get a copy of the mini-batch in reversed temporal order - it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN - it cuda-izes all tensors if we're on a GPU - it calls another helper function to get an appropriate 0/1 mask for the mini-batch We then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. Finally, we return a float which is a noisy estimate of the loss for that mini-batch. We now have all the ingredients required for the main bit of our training loop: ```python times = [time.time()] for epoch in range(args.num_epochs): # accumulator for our estimate of the negative log likelihood # (or rather -elbo) for this epoch epoch_nll = 0.0 # prepare mini-batch subsampling indices for this epoch shuffled_indices = np.arange(N_train_data) np.random.shuffle(shuffled_indices) # process each mini-batch; this is where we take gradient steps for which_mini_batch in range(N_mini_batches): epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices) # report training diagnostics times.append(time.time()) epoch_time = times[-1] - times[-2] log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" % (epoch, epoch_nll / N_train_time_slices, epoch_time)) ``` At the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]). 
## Evaluation This training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a less noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows: ```python val_loss = svi.evaluate_loss(val_batch, ..., num_particles=5) ``` This, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow: ```python # package repeated copies of val/test data for faster evaluation # (i.e. set us up for vectorization) def rep(x): return np.repeat(x, n_eval_samples, axis=0) # get the validation/test data ready for the dmm: pack into sequences, etc. 
val_seq_lengths = rep(val_seq_lengths) test_seq_lengths = rep(test_seq_lengths) val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch( np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences), val_seq_lengths, cuda=args.cuda) test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = \ poly.get_mini_batch(np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences), test_seq_lengths, cuda=args.cuda) ``` With the test and validation data now fully prepped, we define the helper function that does the evaluation: ```python def do_evaluation(): # put the RNN into evaluation mode (i.e. turn off drop-out if applicable) dmm.rnn.eval() # compute the validation and test loss val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths) / np.sum(val_seq_lengths) test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths) / np.sum(test_seq_lengths) # put the RNN back into training mode (i.e. turn on drop-out if applicable) dmm.rnn.train() return val_nll, test_nll ``` We simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see [the source code](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py) for details. ## Results Let's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) they report a normalized negative log likelihood (NLL) of `6.93` on the testset (lower is better$)^{\S}$. This is to be compared to our result of `6.87`. 
These numbers are very much in the same ball park, which is reassuring. It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer). In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. On a GeForce GTX 1080, 5000 epochs takes about 20 hours. | `num_iafs` | test NLL | |---|---| | `0` | `6.87` | | `1` | `6.82` | | `2` | `6.80` | Finally, we also report results for guides with normalizing flows in the mix (details to be found in the next section). ${ \S\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different. ## Bells, whistles, and other improvements ### Inverse Autoregressive Flows One of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). **This will only cost us four additional lines of code!** First, in the `DMM` constructor we add ```python iafs = [AffineAutoregressive(AutoRegressiveNN(z_dim, [iaf_dim])) for _ in range(num_iafs)] self.iafs = nn.ModuleList(iafs) ``` This instantiates `num_iafs` many bijective transforms of the `AffineAutoregressive` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. 
Next, in the guide we add the lines ```python if self.iafs.__len__() > 0: z_dist = TransformedDistribution(z_dist, self.iafs) ``` Here we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila! ### Checkpointing If we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter). In Pyro, the parameters can all be found in the `ParamStore`. However, PyTorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. In the case that we have `AffineAutoregressive`'s in the loop, this is in fact the only option at our disposal. This is because the `AffineAutoregressive` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly. To save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analogous. 
So our entire logic for saving and loading checkpoints only takes a few lines: ```python # saves the model and optimizer states to disk def save_checkpoint(): log("saving model to %s..." % args.save_model) torch.save(dmm.state_dict(), args.save_model) log("saving optimizer states to %s..." % args.save_opt) optimizer.save(args.save_opt) log("done saving model and optimizer checkpoints to disk.") # loads the model and optimizer states from disk def load_checkpoint(): assert exists(args.load_opt) and exists(args.load_model), \
        "--load-model and/or --load-opt misspecified" log("loading model from %s..." % args.load_model) dmm.load_state_dict(torch.load(args.load_model)) log("loading optimizer states from %s..." % args.load_opt) optimizer.load(args.load_opt) log("done loading model and optimizer states.") ``` ## Some final comments A deep markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over? Not at all! The beauty of probabilistic programming is that it enables&mdash;and encourages&mdash;modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models. See the complete code on [Github](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py). ## References [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; Rahul G. 
Krishnan, Uri Shalit, David Sontag [2] `Variational Inference with Normalizing Flows`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Danilo Jimenez Rezende, Shakir Mohamed [3] `Improving Variational Inference with Inverse Autoregressive Flow`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling [4] `MADE: Masked Autoencoder for Distribution Estimation`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle [5] `Modeling Temporal Dependencies in High-Dimensional Sequences:` <br />&nbsp;&nbsp;&nbsp;&nbsp; `Application to Polyphonic Music Generation and Transcription`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Boulanger-Lewandowski, N., Bengio, Y. and Vincent, P.
github_jupyter
``` %matplotlib inline import os # import wfdb as wf # le module n'est pas reconnu import numpy as np import pandas as pd # from pandas.compat import StringIO # pour pouvoir lire fichiers anotations mais ne fonctionne pas import seaborn as sns import matplotlib.pyplot as plt from sklearn import linear_model from sklearn import preprocessing from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split from mpl_toolkits.mplot3d import Axes3D import scipy from scipy import ndimage from scipy import signal from pylab import * import time import math from copy import deepcopy #from datasets import mitdb as dm #from biosppy.signals import ecg df_normal =pd.read_csv('ptbdb_normal.csv', sep=',', header = None) df_normal.head(12) df_normal.describe() #len(df_normal) df_abnormal =pd.read_csv('ptbdb_abnormal.csv', sep=',', header = None) df_abnormal.info() df_abnormal.describe() df_abnormal.max(axis=0) #plt.figure(figsize = (20,16)) #sns.barplot(df_normal.max(axis=0)) df_abnorm2 = df_abnormal.copy() df_norm2.head() if len(df_abnorm2.columns) == 188 : df_abnorm2.drop(df_norm2.iloc[:,0:1],1,inplace=True) # on supprime des cols en début de tableau df_abnorm2.max(axis=0) plt.figure(figsize = (10,6)) plt.hist(df_abnorm2.max(axis=0), bins = 100) plt.title('Distribution des valeurs max sur un battement') #sns.barplot(x=df_norm2.max(axis=1), data=df_norm2) df_abnorm2.max(axis=1) maxValueIndex = df_abnorm2.idxmax(axis = 1) maxValueIndex.min() plt.figure(figsize = (10,6)) plt.hist(maxValueIndex, bins = 200) plt.title("Distribution du RR interval") # on trace le signal si le max est plus petit # exemples de battements anormaux sns.set_style('whitegrid') plt.figure(figsize = (20,8)) plt.plot(df_abnorm2.iloc[0, 0:187], color = 'red') plt.xlabel('Temps') plt.title('Exemple de batements cardiqaues de patients malades') plt.legend() plt.show() plt.figure(figsize = (20,8)) range(len(df_abnorm2)) 
#sns.relplot(x='g', y='pf', kind='line', data=df[df['g']<50]) ; j=0 for i in range(len(df_norm2)) : if (maxValueIndex.iloc[i] < 50) & (j<10) : j+=1 print(maxValueIndex.iloc[i]) plt.figure(figsize = (10,6)) plt.plot(df_abnorm2.iloc[i, 0:187], color = 'red') plt.xlabel('Temps') plt.title('Exemple de battements cardiaques de patients malades') plt.legend() plt.show() j=0 for i in range(len(df_abnorm2)) : if (maxValueIndex.iloc[i] > 50) & (maxValueIndex.iloc[i] < 150) & (j<10) : j+=1 print(maxValueIndex.iloc[i]) plt.figure(figsize = (10,6)) plt.plot(df_abnorm2.iloc[i, 0:187], color = 'red') plt.xlabel('Temps') plt.title('Exemple de battements cardiaques de patients malades') plt.legend() plt.show() j=0 for i in range(len(df_norm2)) : if (maxValueIndex.iloc[i] > 150) & (j<10) : j+=1 print(maxValueIndex.iloc[i]) plt.figure(figsize = (10,6)) plt.plot(df_norm2.iloc[i, 0:187], color = 'red') plt.xlabel('Temps') plt.title('Exemple de battements cardiaques de patients malades') plt.legend() plt.show() ```
github_jupyter
``` !pip install --upgrade language-check import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer,_preprocess,TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel,cosine_similarity from nltk.stem.snowball import SnowballStemmer from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import * from sklearn.model_selection import train_test_split data = pd.read_csv(r'data/medical_data.csv',low_memory=False) data = data.drop_duplicates().reset_index().drop('index',axis = 1) data punctuation='["\'?,\.]' # I will replace all these punctuation with '' abbr_dict={ "what's":"what is", "what're":"what are", "where's":"where is", "where're":"where are", "i'm":"i am", "we're":"we are", "it's":"it is", "that's":"that is", "there's":"there is", "there're":"there are", "i've":"i have", "who've":"who have", "would've":"would have", "not've":"not have", "i'll":"i will", "it'll":"it will", "isn't":"is not", "wasn't":"was not", "aren't":"are not", "weren't":"were not", "can't":"can not", "couldn't":"could not", "don't":"do not", "didn't":"did not", "shouldn't":"should not", "wouldn't":"would not", "doesn't":"does not", "haven't":"have not", "hasn't":"has not", "hadn't":"had not", "won't":"will not", punctuation:'', '\s+':' ', # replace multi space with one single space } def process_data(data): # Convert to lower case data.Phrase=data.Phrase.str.lower() data.Prompt=data.Prompt.str.lower() # convert to string data.Phrase=data.Phrase.astype(str) data.Prompt=data.Prompt.astype(str) # replace abbreviations data.replace(abbr_dict,regex=True,inplace=True) #apply stemming stemmer = SnowballStemmer("english") data['stemmed_phrase'] = data['Phrase'].apply(lambda x : ' '.join([stemmer.stem(y) for y in x.split()])) display(data.head(10)) return data data = process_data(data) d2 = data[['stemmed_phrase','Prompt']] d2.to_csv('data/trial_data.csv') ailments = 
data['Prompt'].unique() dict_ail = {} # for a in ailments: # dict_ail[a] = 0 for k in data.index: name = data['Prompt'][k] dict_ail[name] = dict_ail.get(name,0) + 1 ailment_dict = {} for i,k in enumerate(dict_ail.keys()): ailment_dict[i] = k plt.figure(figsize = (18,8)) plt.title("Ailment Frequencies",fontsize=35) plt.barh(color = 'Red',y=[i for i in range(len(list(ailments)))], width = list(dict_ail.values()),tick_label = list(dict_ail.keys())) plt.tight_layout() Cv = CountVectorizer(stop_words='english',ngram_range = (1,3), max_df=0.7) transformed_count = Cv.fit_transform(data['stemmed_phrase']) TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7) transformed_idf = TfIdf.fit_transform(data['stemmed_phrase']) input_text = ['I am experiencing pain in the leg from the past two days'] trial = TfIdf.transform(input_text) trial ``` ## Flow - Get the text input from the patient - This text input is processed first by the vectorizer and made into a list of frequeny counts using the learned vocabulary from the data provided - Now this list is passed into a model which generates the probabilities of which ailment does that sentence phrase correspond to - The final returned phrases are evaluated and the phrases having the least levenshtein distance are used for predictions - The two of the highest probability ailments are returned to the doctor with a wrapper sentence ## Output Tensor - We have a 25 element output vector which is the result from the model ``` ailment_dict ``` ## Input Tensor ``` # the query is first processed and made into lower case query = "From past few weeks feeling sad" def process_query(query): # Change to lower query = query.lower() # Removed abbreviations res = '' # print(query.split()) for k in query.split(): if k in abbr_dict: print(abbr_dict[k]) res+=' ' + abbr_dict[k] else: res+=' ' + k stemmer = SnowballStemmer('english') res = ' '.join([stemmer.stem(y) for y in res.split()]) return res print("Example query: ") 
print("Final query:",process_query(query)) processed = process_query(query) query =[processed] res = TfIdf.transform(query) sim = cosine_similarity(res,transformed_idf) res = list(np.argsort(sim))[0] res = res[::-1][:3] for k in res: print(data.loc[k]['Prompt']) def get_prediction(query): print("Query is :",query) processed = process_query(query) query = [processed] print("Processed :",query) res = TfIdf.transform(query) sim = cosine_similarity(res,transformed_idf) res = list(np.argsort(sim))[0] res = res[::-1][:20] print(sim[0][res[0]],sim[0][res[1]]) ailment =[] # let's find most similar sentences and then see # use levenshtein distance after you have got the result for k in res[:1]: ailment.append(data.loc[k]['Prompt']) print("Results :") return ailment ``` ## To - Do - Use document distance after you find the sentences to evaluate the best possible match for your query ``` for q in data['stemmed_phrase'][500:]: print(get_prediction(q)) ``` ## Use random forest ``` model = RandomForestClassifier(n_estimators=100,min_samples_leaf=2,bootstrap=True) ``` ## Generate Data first - First make a transformed matrix and associate each of the sentences with a numeric row and each prompt with a numeric dictionary value ``` data[:3] TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7) X = TfIdf.fit_transform(data['stemmed_phrase']).toarray() ``` ## Generate the Y - Generate the class data ``` ailment_dict # ailment_dict ailment = {} for i,j in ailment_dict.items(): ailment[j] = i print(ailment) Y = data['Prompt'].map(ailment) Y ``` ## Got X and Y - Split in training and validation sets ``` X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size = 0.8, random_state = 43, shuffle = True) model.fit(X_train,Y_train) y_preds = model.predict(X_test) correct,incorrect =0,0 for k,i in zip(y_preds,Y_test): if(k==i): correct+=1 else: incorrect+=1 correct incorrect score =[] for est in range(10,50): model = 
RandomForestClassifier(n_estimators=est,min_samples_leaf=2) model.fit(X_train,Y_train) s = model.score(X_test,Y_test) score.append(s) plt.figure(figsize= (15,7)) plt.title("Accuracy of classification",fontsize=17) plt.xlabel("Number of estimators",fontsize = 14) plt.ylabel("Percentage",fontsize = 14) plt.plot([i for i in range(10,50)],score,color= 'red') ``` ## Now - Whenever you get a query, you need to transform it according to your vocabulary and then predict the class and then return the predicted class from model ``` def process_query(query): # Change to lower query = query.lower() # Removed abbreviations res = '' # print(query.split()) for k in query.split(): if k in abbr_dict: print(abbr_dict[k]) res+=' ' + abbr_dict[k] else: res+=' ' + k stemmer = SnowballStemmer('english') res = ' '.join([stemmer.stem(y) for y in res.split()]) return res # suppose I have the best model model = RandomForestClassifier(n_estimators=33,min_samples_leaf=2,bootstrap=True,max_features=300) model.fit(X_train,Y_train) for i,j in zip(X_test,Y_test): query = data.iloc['Phrase'][i] print("Query :",query) print("Original :",ailment_dict[j]) query = process_query(query) query = [query] #now transform the document according to the vectorizer query = TfIdf.transform(query) # now predict it pred = model.predict_proba(query) res = list(np.argsort(pred))[0] res = res[::-1][:3] for k in res: print(ailment_dict[k],end=',') print() ``` ## KNN ``` score =[] for est in range(3,40): model = KNeighborsClassifier(n_neighbors=est,metric='minkowski') model.fit(X_train,Y_train) s = model.score(X_test,Y_test) score.append(s) plt.figure(figsize= (15,7)) plt.title("Accuracy of classification",fontsize=17) plt.xlabel("Number of estimators",fontsize = 14) plt.ylabel("Percentage",fontsize = 14) plt.plot([i for i in range(3,40)],score,color= 'red') p = pd.DataFrame([[1,2],[2,3]],columns=['a','b']) p p = p.append([{'a':1,'b':23}],ignore_index=True) p.append([{'a':1,'b':2223}],ignore_index=True) ```
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Basic training loops <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/basic_training_loops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models. TensorFlow also includes the [tf.Keras API](https://www.tensorflow.org/guide/keras/overview), a high-level neural network API that provides useful abstractions to reduce boilerplate. 
However, in this guide, you will use basic classes. ## Setup ``` import tensorflow as tf ``` ## Solving machine learning problems Solving a machine learning problem usually consists of the following steps: - Obtain training data. - Define the model. - Define a loss function. - Run through the training data, calculating loss from the ideal value - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data. - Evaluate your results. For illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias). This is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regression#Simple_and_multiple_linear_regression). ## Data Supervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input. Each input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. In supervised training, the output (or value you'd like to predict) is also a tensor. Here is some data synthesized by adding Gaussian (Normal) noise to points along a line. ``` # The actual line TRUE_W = 3.0 TRUE_B = 2.0 NUM_EXAMPLES = 1000 # A vector of random x values x = tf.random.normal(shape=[NUM_EXAMPLES]) # Generate some noise noise = tf.random.normal(shape=[NUM_EXAMPLES]) # Calculate y y = x * TRUE_W + TRUE_B + noise # Plot all the data import matplotlib.pyplot as plt plt.scatter(x, y, c="b") plt.show() ``` Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. 
Given how small this dataset is, you can treat the entire dataset as a single batch. ## Define the model Use `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details. Use `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved. Here, you define both *w* and *b* as variables. ``` class MyModel(tf.Module): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.w * x + self.b model = MyModel() # List the variables tf.modules's built-in variable aggregation. print("Variables:", model.variables) # Verify the model works assert model(3.0).numpy() == 15.0 ``` The initial variables are set here in a fixed way, but Keras comes with any of a number of [initalizers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras. ### Define a loss function A loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. 
Define the standard L2 loss, also known as the "mean squared" error: ``` # This computes a single loss value for an entire batch def loss(target_y, predicted_y): return tf.reduce_mean(tf.square(target_y - predicted_y)) ``` Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue: ``` plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(y, model(x)).numpy()) ``` ### Define a training loop The training loop consists of repeatedly doing three tasks in order: * Sending a batch of inputs through the model to generate outputs * Calculating the loss by comparing the outputs to the output (or label) * Using gradient tape to find the gradients * Optimizing the variables with those gradients For this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`): ``` # Given a callable model, inputs, outputs, and a learning rate... def train(model, x, y, learning_rate): with tf.GradientTape() as t: # Trainable variables are automatically tracked by GradientTape current_loss = loss(y, model(x)) # Use GradientTape to calculate the gradients with respect to W and b dw, db = t.gradient(current_loss, [model.w, model.b]) # Subtract the gradient scaled by the learning rate model.w.assign_sub(learning_rate * dw) model.b.assign_sub(learning_rate * db) ``` For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve. 
``` model = MyModel() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) # Define a training loop def training_loop(model, x, y): for epoch in epochs: # Update the model with the single giant batch train(model, x, y, learning_rate=0.1) # Track this before I update Ws.append(model.w.numpy()) bs.append(model.b.numpy()) current_loss = loss(y, model(x)) print("Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f" % (epoch, Ws[-1], bs[-1], current_loss)) print("Starting: W=%1.2f b=%1.2f, loss=%2.5f" % (model.w, model.b, loss(y, model(x)))) # Do the training training_loop(model, x, y) # Plot it plt.plot(epochs, Ws, "r", epochs, bs, "b") plt.plot([TRUE_W] * len(epochs), "r--", [TRUE_B] * len(epochs), "b--") plt.legend(["W", "b", "True W", "True b"]) plt.show() # Visualize how the trained model performs plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(model(x), y).numpy()) ``` ## The same solution, but with Keras It's useful to contrast the code above with the equivalent in Keras. Defining the model looks exactly the same if you subclass `tf.keras.Model`. Remember that Keras models inherit ultimately from module. ``` class MyModelKeras(tf.keras.Model): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def call(self, x): return self.w * x + self.b keras_model = MyModelKeras() # Reuse the training loop with a Keras model training_loop(keras_model, x, y) # You can also save a checkpoint using Keras's built-in support keras_model.save_weights("my_checkpoint") ``` Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops. 
If you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them. ``` keras_model = MyModelKeras() # compile sets the training parameters keras_model.compile( # By default, fit() uses tf.function(). You can # turn that off for debugging, but it is on now. run_eagerly=False, # Using a built-in optimizer, configuring as an object optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), # Keras comes with built-in MSE error # However, you could use the loss function # defined above loss=tf.keras.losses.mean_squared_error, ) ``` Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32. In this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch of size 1000. ``` print(x.shape[0]) keras_model.fit(x, y, epochs=10, batch_size=1000) ``` Note that Keras prints out the loss after training, not before, so the first loss appears lower, but otherwise this shows essentially the same training performance. ## Next steps In this guide, you have seen how to use the core classes of tensors, variables, modules, and gradient tape to build and train a model, and further how those ideas map to Keras. This is, however, an extremely simple problem. For a more practical introduction, see [Custom training walkthrough](../tutorials/customization/custom_training_walkthrough.ipynb). For more on using built-in Keras training loops, see [this guide](https://www.tensorflow.org/guide/keras/train_and_evaluate). For more on training loops and Keras, see [this guide](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch). 
For writing custom distributed training loops, see [this guide](distributed_training.ipynb#using_tfdistributestrategy_with_basic_training_loops_loops).
github_jupyter
# Python Dictionary Python dictionary is a collection of key:value pairs. Each key:value pair maps the key to its associated value. A dictionary is a collection that is ordered, changeable, and does not allow duplicates. ``` # empty dictionary d = {} type(d) d2 = {'name':'John', 'last_name':'Doe', 'age':30} d2 d2['name'] # accesing items student_name = d2['name'] student_name # Other way: using get student_name = d2.get('name') student_name d2['last_name'] d2['age'] # Change value d2['age'] = 33 d2 # check if key exists 'name' in d2 'middle_name' in d2 ``` **Important**: dictionaries are accessed by key, not by the position of the items. It does not make sense to slice a dictionary. ``` d2['name':'last_name'] # This will raise an error ``` ## Python methods for working with dictionaries **len()**: lenght of the dictionary ``` len(d2) ``` **items()**: Returns a list of tuples containing each key, value pair ``` d2.items() ``` **keys()**: Returns a list containing the dictionary's keys ``` d2.keys() ``` **values()**: Returns a list of all the values in the dictionary ``` d2.values() ``` **Adding items** It is done by using a new key and assigning a value to it. ``` d2['weight'] = 65 d2 ``` **update()**: Updates the dictionary with the specified key:value pairs ``` d2.update({'height':5.8}) d2 ``` **pop()**: removes the item with specified key name ``` d2.pop('weight') d2 ``` **popitem()**: Removes the last inserted key:value pair ``` d2.popitem() ``` You cannot copy a dictionary simply by typing dict2 = dict1, because: dict2 will only be a reference to dict1, and changes made in dict1 will automatically also be made in dict2. 
If you want to copy the dict (which is rare), you have to do so explicitly with one of these two options: ``` d3 = dict(d2) d3 ``` **copy()**: makes a copy of a dictionary ``` d3 = d2.copy() d3 ``` **clear()**: empties the dictionary ``` d3.clear() d3 d2 ``` **del**: removes the item with the specified key name ``` del d2['name'] d2 ``` **del** can also delete the dictionary completely ``` del d2 d2 # This will raise an error ``` ### Nested Dictionaries ``` child1 = { 'name':'Hazel', 'year': 2001, 'gender':'F' } child2 = { 'name':'Helen', 'year': 2003, 'gender':'F' } child3 = { 'name':'Abel', 'year': 2006, 'gender':'M' } child4 = { 'name':'Diana', 'year': 2012, 'gender':'F' } child1 child1['name'] family = { 'child1':child1, 'child2':child2, 'child3':child3, 'child4':child4 } family ``` Accessing to 'Diana' using family dictionary: ``` family['child4'] family['child4']['name'] ```
github_jupyter
# Classifying Fashion-MNIST Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world. <img src='assets/fashion-mnist-sprite.png' width=500px> In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this. First off, let's load the dataset through torchvision. ``` import torch from torchvision import datasets, transforms import helper # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]) # Download and load the training data trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Download and load the test data testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True) ``` Here we can see one of the images. ``` image, label = next(iter(trainloader)) helper.imshow(image[0,:]); ``` ## Building the network Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. 
We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.

```
# TODO: Define your network architecture here
from torch import nn
import torch.nn.functional as F

class Network(nn.Module):
    """Fully-connected classifier for 28x28 Fashion-MNIST images.

    Architecture: 784 -> 256 -> 128 -> 64 -> 10 with ReLU between the
    hidden layers and a log-softmax over the 10 class scores, so the
    forward pass returns log-probabilities.
    """

    def __init__(self):
        super().__init__()
        # Three progressively narrower hidden layers.
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        # Output layer: one unit per clothing class.
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        # Flatten each image to a 784-element vector, keeping the batch dim.
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        # Log-probabilities over the 10 classes.
        x = F.log_softmax(self.out(x), dim=1)
        return x
```

# Train the network

Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).

Then write the training code. Remember the training pass is a fairly straightforward process:

* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights

By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
``` # TODO: Create the network, define the criterion and optimizer from torch import optim model = Network() criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.003) # TODO: Train the network here epochs = 5 for epoch in range(epochs): running_loss = 0 for images, labels in trainloader: logits = model(images) optimizer.zero_grad() loss = criterion(logits, labels) loss.backward() optimizer.step() running_loss += loss.item() else: print(f'training loss: {running_loss}') %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper # Test out your network! dataiter = iter(testloader) images, labels = dataiter.next() img = images[0] # Convert 2D image to 1D vector img = img.resize_(1, 784) # TODO: Calculate the class probabilities (softmax) for img ps = torch.exp(model(img)) # Plot the image and probabilities helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion') ```
github_jupyter
``` import os import numpy as np import pandas as pd import json import pickle from scipy import sparse import scipy.io dataset_name = 'dblp' data_path = os.path.join('../dataset/raw/{}'.format(dataset_name)) citations = [] incomming = {} for i in range(4): fn = os.path.join(data_path, 'dblp-ref-{}.json'.format(i)) with open(fn) as in_fn: for line in in_fn: paper = json.loads(line.strip()) citations.append(paper) if 'references' in paper: for ref_id in paper['references']: if ref_id in incomming: incomming[ref_id].append(paper['id']) else: incomming[ref_id] = [paper['id']] df = pd.DataFrame(citations) is_first_line = True conferences = {} with open('../dataset/clean/dblp/venue_info.tsv') as in_csv: for line in in_csv: tokens = line.strip().split('\t') if is_first_line: #print(tokens) is_first_line = False else: conf_name = tokens[0] labels = [int(num_str) for num_str in tokens[2].split(',')] labels = [n-2 for n in labels if n > 1] # remove the first label (signal processing has too many documents) conferences[conf_name] = {'name': conf_name, 'label': labels} #conferences[conf_name] = {'name': conf_name, } max_labels = np.max([np.max(val['label']) for key, val in conferences.items()]) min_labels = np.min([np.min(val['label']) for key, val in conferences.items()]) num_labels = max_labels - min_labels + 1 print('label min:{} max:{} total:{}'.format(min_labels, max_labels, num_labels)) # remove any row that is not present in the selected venues def is_selected_venue(row): return (row in conferences) print("num paper (before): {}".format(len(df))) df = df[df.venue.apply(is_selected_venue)] print("num paper (after): {}".format(len(df))) cut_off_years = 2016 df_train = df[df.year < cut_off_years] df_test = df[df.year >= cut_off_years] num_trains = len(df_train) num_tests = len(df_test) print("num trains: {} num tests: {} ratio: {:.4f}".format(num_trains, num_tests, num_tests / num_trains)) #venue_count = df_train.groupby('venue').count().sort_values(['abstract'], 
ascending=False).abstract def assign_labels(venue): label_list = conferences[venue]['label'] return np.sum(np.eye(num_labels)[label_list], axis=0).astype(np.int) df_train = df_train.copy() df_train['label'] = df_train.venue.apply(assign_labels) df_train.set_index('id', inplace=True) # set paper as the row index df_test = df_test.copy() df_test['label'] = df_test.venue.apply(assign_labels) df_test.set_index('id', inplace=True) # set paper as the row index num_train_doc_per_labels = np.sum(np.array(list(df_train.label)), axis=0) num_test_doc_per_labels = np.sum(np.array(list(df_test.label)), axis=0) print(num_train_doc_per_labels) print(num_test_doc_per_labels) # remove any row that does not have abstract, title, paperId, or venue print("num paper = {}".format(len(df_train))) df_train.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True) print("num paper = {}".format(len(df_train))) # This method adds incoming edges to each node as well as removing any edge that points outside the train set def createEdges(row): if row.references is not np.nan: outgoing_edges = [r for r in row.references if r in df_train.index] else: outgoing_edges = [] if row.name in incomming: incomming_edges = [r for r in incomming[row.name] if r in df_train.index] else: incomming_edges = [] return outgoing_edges + incomming_edges df_train['links'] = df_train.apply(createEdges, axis=1) # Remove any row that has no link print("num paper = {}".format(len(df_train))) df_train = df_train[df_train.links.apply(len) > 0] print("num paper = {}".format(len(df_train))) # There must be no train nodes that references to non-train nodes def count_invalid_edges(refs): return len([r for r in refs if r not in df_train.index]) assert(len(df_train[df_train.links.apply(count_invalid_edges) > 0]) == 0) global_id_2_train_id = {node_id: idx for idx, node_id in enumerate(df_train.index)} def convert_2_train_id(ref): return [global_id_2_train_id[r] for r in ref] train_edges = 
df_train.links.apply(convert_2_train_id) train_graph = {} for node_id, value in train_edges.iteritems(): train_graph[global_id_2_train_id[node_id]] = value print('num train: {}'.format(len(train_graph))) ``` # Process Test Data ``` # remove any row that does not have abstract, title, paperId, or venue print("num paper = {}".format(len(df_test))) df_test.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True) print("num paper = {}".format(len(df_test))) # This method adds incoming edges to each node as well as removing any edge that points outside the train set def createEdges(row): if row.references is not np.nan: outgoing_edges = [r for r in row.references if r in df_train.index] else: outgoing_edges = [] if row.name in incomming: incomming_edges = [r for r in incomming[row.name] if r in df_train.index] else: incomming_edges = [] return outgoing_edges + incomming_edges df_test['links'] = df_test.apply(createEdges, axis=1) # Remove any row that has no link print("num paper = {}".format(len(df_test))) df_test = df_test[df_test.links.apply(len) > 0] print("num paper = {}".format(len(df_test))) # There must be no train nodes that references to non-train nodes def count_invalid_edges(refs): return len([r for r in refs if r not in df_train.index]) assert(len(df_test[df_test.links.apply(count_invalid_edges) > 0]) == 0) global_id_2_test_id = {node_id: idx for idx, node_id in enumerate(df_test.index)} # each link MUST point to the train nodes test_edges = df_test.links.apply(convert_2_train_id) test_graph = {} for node_id, value in test_edges.iteritems(): test_graph[global_id_2_test_id[node_id]] = value print('num test: {}'.format(len(test_graph))) ``` # Save Graph Data ``` data_path = '../dataset/clean/dblp' save_fn = os.path.join(data_path, 'ind.{}.train.graph.pk'.format(dataset_name)) pickle.dump(train_graph, open(save_fn, 'wb')) print('save graph data to {}'.format(save_fn)) save_fn = os.path.join(data_path, 
'ind.{}.test.graph.pk'.format(dataset_name)) pickle.dump(test_graph, open(save_fn, 'wb')) print('save graph data to {}'.format(save_fn)) ``` # Process contents ``` from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(stop_words='english', max_df=0.8, min_df=5, sublinear_tf=True, max_features=10000) train_feas = vectorizer.fit_transform(list(df_train.abstract)) print(np.nonzero(np.sum(train_feas, axis=1))[0].shape) test_feas = vectorizer.transform(list(df_test.abstract)) print(np.nonzero(np.sum(test_feas, axis=1))[0].shape) gnd_train = sparse.csr_matrix(np.array(list(df_train.label))) gnd_test = sparse.csr_matrix(np.array(list(df_test.label))) assert(train_feas.shape[1] == test_feas.shape[1]) assert(gnd_train.shape[1] == gnd_test.shape[1]) assert(train_feas.shape[0] == gnd_train.shape[0]) assert(test_feas.shape[0] == gnd_test.shape[0]) data_path = '../dataset/clean/dblp' save_fn = os.path.join(data_path, 'ind.{}.mat'.format(dataset_name)) scipy.io.savemat(save_fn, mdict={'train': train_feas, 'test': test_feas, 'cv': test_feas, 'gnd_train': gnd_train, 'gnd_test': gnd_test, 'gnd_cv': gnd_test}) print('save data to {}'.format(save_fn)) ``` # Convert to dataframe with the format as doc_id, bow, label, and neighbors ``` # create a connection matrix n_train = train_feas.shape[0] row = [] col = [] for doc_id in train_graph: row += [doc_id] * len(train_graph[doc_id]) col += train_graph[doc_id] data = [1] * len(row) train_connections = sparse.csr_matrix((data, (row, col)), shape=(n_train, n_train)) n_test = test_feas.shape[0] row = [] col = [] for doc_id in test_graph: row += [doc_id] * len(test_graph[doc_id]) col += test_graph[doc_id] data = [1] * len(row) test_connections = sparse.csr_matrix((data, (row, col)), shape=(n_test, n_train)) # test graph points to train graph from tqdm import tqdm save_dir = os.path.join('../dataset/clean', dataset_name) ########################################################################################## 
train = [] for doc_id in tqdm(train_graph): doc = {'doc_id': doc_id, 'bow': train_feas[doc_id], 'label': gnd_train[doc_id], 'neighbors': train_connections[doc_id]} train.append(doc) train_df = pd.DataFrame.from_dict(train) train_df.set_index('doc_id', inplace=True) fn = os.path.join(save_dir, '{}.train.pkl'.format(dataset_name)) train_df.to_pickle(fn) ########################################################################################## test = [] for doc_id in tqdm(test_graph): doc = {'doc_id': doc_id, 'bow': test_feas[doc_id], 'label': gnd_test[doc_id], 'neighbors': test_connections[doc_id]} test.append(doc) test_df = pd.DataFrame.from_dict(test) test_df.set_index('doc_id', inplace=True) fn = os.path.join(save_dir, '{}.test.pkl'.format(dataset_name)) test_df.to_pickle(fn) ```
github_jupyter
# Introduction to PyCaret - An open source low-code ML library ## This notebook consists 2 parts - Classification part using Titanic DataSet - Regression part using House Price Regression DataSet ![](https://pycaret.org/wp-content/uploads/2020/03/Divi93_43.png) You can reach pycaret website and documentation from https://pycaret.org PyCaret is an open source, low-code machine learning library in Python that allows you to go from preparing your data to deploying your model within seconds in your choice of notebook environment. PyCaret being a low-code library makes you more productive. With less time spent coding, you and your team can now focus on business problems. PyCaret is simple and easy to use machine learning library that will help you to perform end-to-end ML experiments with less lines of code. PyCaret is a business ready solution. It allows you to do prototyping quickly and efficiently from your choice of notebook environment. # let's install pycaret ! ``` !pip install pycaret ``` # Part 1 Classification ![](https://www.sciencealert.com/images/articles/processed/titanic-1_1024.jpg) # We start by loading the libraries ``` import numpy as np import pandas as pd ``` # Read our files ``` train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv') sub = pd.read_csv('../input/titanic/gender_submission.csv') ``` # Import whole classification ``` from pycaret.classification import * ``` # let's see what we're dealing with ``` train.head() train.info() ``` # Set up our dataset (preprocessing) ``` clf1 = setup(data = train, target = 'Survived', numeric_imputation = 'mean', categorical_features = ['Sex','Embarked'], ignore_features = ['Name','Ticket','Cabin'], silent = True) #quite intuitive isn't it ? ``` # Compare the models ``` compare_models() ``` # let's create a Light GBM Model ``` lgbm = create_model('lightgbm') ``` # Let's tune it! 
``` tuned_lightgbm = tune_model('lightgbm') ``` # Learning Curve ``` plot_model(estimator = tuned_lightgbm, plot = 'learning') ``` # AUC Curve ``` plot_model(estimator = tuned_lightgbm, plot = 'auc') ``` # Confusion Matrix ``` plot_model(estimator = tuned_lightgbm, plot = 'confusion_matrix') ``` # Feature Importance ``` plot_model(estimator = tuned_lightgbm, plot = 'feature') ``` # whole thing! ``` evaluate_model(tuned_lightgbm) ``` # Interpretation ``` interpret_model(tuned_lightgbm) ``` # Predictions ``` predict_model(tuned_lightgbm, data=test) predictions = predict_model(tuned_lightgbm, data=test) predictions.head() sub['Survived'] = round(predictions['Score']).astype(int) sub.to_csv('submission.csv',index=False) sub.head() ``` # Extra: Blending made easy! ``` logr = create_model('lr'); xgb = create_model('xgboost'); #blending 3 models blend = blend_models(estimator_list=[tuned_lightgbm,logr,xgb]) ``` # Part2 - Regression ![](https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSYeyNpaoAW-3rFX9-ORmiJ-uLAAswYBRhszs2QzllV7MCfFPvk&usqp=CAU) # Import Whole Regression ``` from pycaret.regression import * ``` # let's see the data ``` train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') sample= pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv') train.head() train.info() ``` # Set up our dataset (preprocessing) ``` reg = setup(data = train, target = 'SalePrice', numeric_imputation = 'mean', categorical_features = ['MSZoning','Exterior1st','Exterior2nd','KitchenQual','Functional','SaleType', 'Street','LotShape','LandContour','LotConfig','LandSlope','Neighborhood', 'Condition1','Condition2','BldgType','HouseStyle','RoofStyle','RoofMatl', 'MasVnrType','ExterQual','ExterCond','Foundation','BsmtQual','BsmtCond', 'BsmtExposure','BsmtFinType1','BsmtFinType2','Heating','HeatingQC','CentralAir', 
'Electrical','GarageType','GarageFinish','GarageQual','GarageCond','PavedDrive', 'SaleCondition'] , ignore_features = ['Alley','PoolQC','MiscFeature','Fence','FireplaceQu','Utilities'], normalize = True, silent = True) ``` # let's compare different regression models! ``` compare_models() ``` # let's do CatBoost ``` cb = create_model('catboost') ``` # gotta tune it ``` tuned_cb = tune_model('catboost') ``` # SHAP Values (impact on model output) ``` interpret_model(tuned_cb) predictions = predict_model(tuned_cb, data = test) sample['SalePrice'] = predictions['Label'] sample.to_csv('submission_house_price.csv',index=False) sample.head() ``` # thank you very much for checking my notebook!
github_jupyter
# 2. Coding Style ## 2.1 Whitespace In Python, whitespace is used to structure code. Whitespace is important, so you have to be careful with how you use it. I'll be showing some examples, but don't worry about the code just yet. I just want you to know the use of whitespace. We'll tackle more about the code you'll see below a little later. ##### Avoid whitespace in the following situations: - Immediately inside the parentheses, brackets, or braces ``` # Yes: spam(ham[1], {eggs: 2}) # No: spam( ham[ 1 ], { eggs: 2 } ) ``` - Immediately before a comma, semicolon, or colon: ``` # Yes: if x == 4: print x, y; x, y = y, x # No: if x == 4 : print x , y ; x , y = y , x ``` - Immediately before the open parenthesis that starts an indexing or slicing. ``` # Yes: dct['key'] = lst[index] # No: dct ['key'] = lst [index] ``` - More than one space around an assignment (or other) operator to align it with another. ``` x = 1 y = 2 long_variable = 3 x = 1 y = 2 long_variable = 3 ``` For other rules, please click [here](https://docs.python.org/3/reference/lexical_analysis.html) ## 2.2 Indentation Python provides no braces to indicate blocks of code for class and function definitions or flow control. Python programs get structured through indentation, i.e. code blocks are defined by their indentation. For example: ``` if True: print("True") else: print("False") ``` <b> Note:</b> Use 4 spaces per indentation level. So what would happen if the indentation is incorrect? Run the code below to find out! ``` def food(): eggs = 12 return eggs print(food()) ``` <b>```IndentationError: expected an indented block```</b> We will get this error every time our indentation is off. Make sure to indent the code with four spaces like this: ``` def food(): eggs = 12 return eggs print(food()) ``` ## 2.3 Comments Comments in Python are used to explain what the code does.
### 2.3.1 Single-line comments Single-line comments begin with the hash character (<b> ```#```</b> ) and are terminated by the end of the line. Python ignores all text that comes after the # to the end of the line; it is not part of the command. For example: ``` # This is a single line comment ``` ### 2.3.2 Multi-line comments Comments spanning more than one line are achieved by inserting a multi-line string (with <b>```"""```</b> or <b>```'''```</b> as the delimiter on each end) that is not used in assignment or otherwise evaluated, but sits in between other statements. They are meant as documentation for anyone reading the code. ``` ''' This is a multi-line comment ''' """ This is also a multi-line comment """ ``` ## 2.4 Single-line Statements From the term itself, these are statements within a single line. For example: ``` my_list = ['item1', 'item2', 'item3'] ``` ## 2.5 Multi-line Statements Statements in Python typically end with a new line. Python does, however, allow the use of the line continuation character (\) to denote that the line should continue. For example: ``` total = item_one + \ item_two + \ item_three ``` Statements contained within the ```[ ]```, ```{ }```, or ```( )``` do not need to use the line continuation character. For example: ``` my_list = [ 'item1', 'item2', ] ``` ## 2.6 PEP 8 PEP 8 is the official style guide for Python. For the PEP 8 official documentation, please check this [link](https://www.python.org/dev/peps/pep-0008/)
github_jupyter
## Boxplot plots _______ tg: @misha_grol and anna.petrovskaia@skoltech.ru Boxplots for features based on DEM and NDVI ``` # Uncomment for Google colab # !pip install maxvolpy # !pip install clhs # !git clone https://github.com/EDSEL-skoltech/maxvol_sampling # %cd maxvol_sampling/ import csv import seaborn as sns import argparse import numpy as np import osgeo.gdal as gdal import os import pandas as pd import matplotlib.cm as cm import matplotlib.pyplot as plt from numpy import genfromtxt import gdal import xarray as xr import clhs as cl from scipy.spatial import ConvexHull, convex_hull_plot_2d from scipy.spatial import voronoi_plot_2d, Voronoi from scipy.spatial import distance from scipy.stats import entropy from scipy.special import kl_div from scipy.stats import ks_2samp from scipy.stats import wasserstein_distance %matplotlib inline from src.util import MaxVolSampling # Uncoment "Times New Roman" and "science" stule plt if you have it # plt.rcParams["font.family"] = "Times New Roman" plt.rcParams.update({'font.size': 16}) #use science style for plots # plt.style.use(['science', 'grid']) plt.rcParams['xtick.labelsize'] = 15 plt.rcParams['ytick.labelsize'] = 20 ``` ## Interpolation plots ``` import matplotlib.pyplot as plt from matplotlib import gridspec from tqdm.notebook import tqdm from scipy.stats import ks_2samp dict_for_dict_wasserstein = {} csv_file_to_process = './src/data_v0.csv' df_name = list(pd.read_csv(csv_file_to_process, sep=',').columns) soil_parameters = df_name path_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/' np.random.seed(42) units = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] interpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/')) path 
= './experiments/cLHS_million_steps' for index, file in enumerate(interpolation_files): list_to_test_zeros = [] print('Parameter:', file) df_for_plots = pd.DataFrame(columns=['Sampling', 'Points', 'Value']) dict_for_parameter = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_wasserstein = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_plots = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} number_of_points = [10,15,20,25,30] from itertools import compress list_of_cLHS_million_runs = sorted(os.listdir('./experiments/cLHS_million_steps')) selection = ['NDVI' in name for name in list_of_cLHS_million_runs] cLHS_points_files = list(compress(list_of_cLHS_million_runs, selection)) for num_points, csv_file in zip(number_of_points, cLHS_points_files): dict_for_parameter['cLHS'][num_points] = np.genfromtxt(os.path.join(path, csv_file),delimiter=',', dtype=int) SAR = MaxVolSampling() SAR.soil_feature = soil_parameters[index] SAR.num_of_points = num_points SAR.soil_data = pd.read_csv('./src/data_v0.csv', sep=',') SAR.path_to_file_with_indices = None SAR.wd = './DEM_files/' SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, file) _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None) SAR.original_soil_data(SAR.soil_feature) #data from interpolation interpolation_map = SAR.interpolation_array #Points selection by MAXVOL MAXVOL = interpolation_map[SAR.i_am_maxvol_function()] print for value in MAXVOL: df_for_plots.loc[len(df_for_plots)]=['MAXVOL', num_points, value] cLHS = interpolation_map[dict_for_parameter['cLHS'][num_points]] for value in cLHS: df_for_plots.loc[len(df_for_plots)]=['cLHS', num_points, value] RANDOM = interpolation_map[SAR.i_am_random()] for value in RANDOM: df_for_plots.loc[len(df_for_plots)]=['Random', num_points, value] #original distribution df_original = pd.DataFrame(data={'Points':[51]*len(SAR.original_data), 'Value':SAR.original_data}) fig = plt.figure(figsize=(18,18)) gs = gridspec.GridSpec(4, 5, wspace=.25) ax_1 = fig.add_subplot(gs[:,:4]) 
ax_2 = fig.add_subplot(gs[:,4]) sns.boxplot(ax = ax_1, x="Points", y="Value", hue="Sampling", palette=["#1F77B4", "#2CA02C", "#FF7F0E"], data=df_for_plots, width=0.8) sns.boxplot(ax = ax_2, x='Points', y="Value", palette=["#CCCCCC"], data=df_original, width=0.25) fig.set_figwidth(16) fig.set_figheight(7) ax_2.set_xticklabels([]) ax_2.set_ylabel('') ax_2.set_xlabel('') ax_2.grid(True) ax_1.set_xticklabels([]) ax_1.set_xlabel('') ax_1.set_ylabel(units[index], fontsize = 17) ax_1.axhline(np.quantile(SAR.original_data, 0.25), color='grey', linestyle='--',zorder=0) ax_1.axhline(np.quantile(SAR.original_data, 0.50), color='grey', linestyle='--',zorder=0) ax_1.axhline(np.quantile(SAR.original_data, 0.75), color='grey', linestyle='--',zorder=0) ax_1.get_shared_y_axes().join(ax_1, ax_2) ax_1.get_legend().remove() ax_1.grid(True) ax_2.set_yticklabels([]) # plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.svg') # plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.png', dpi=300) plt.show() # break ``` ## Plots of Wasserstein distance evolution ``` fig, ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)) = plt.subplots(nrows=5, ncols=2, sharex=True,figsize=(18, 25)) names_for_plots = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %', 'Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa', 'Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C', 'Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] path = './experiments/cLHS_10_000/exp_fem_poins/npy_files/' files_with_points = os.listdir(path) range_files_allocation=[] for file in files_with_points: range_files_allocation.append(np.load(os.path.join(path,file), allow_pickle=True)[None]) res = np.load(os.path.join(path,file), allow_pickle=True) dict_for_indices = {'MAXVOL':[], 'cLHS':[], 'Random':[]} from collections 
import ChainMap for sampling in [*range_files_allocation[0][0].keys()]: loc_list = [dict(loc_dict[0][sampling]) for loc_dict in range_files_allocation] dict_for_indices[sampling] = dict(ChainMap(*loc_list)) n = 0 number_of_points = range(7,31) csv_file_to_process = './src/data_v0.csv' for row in ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)): for col in row: # COMPUTE WASSERSTEIN DISTANCE df_name = list(pd.read_csv(csv_file_to_process, sep=',').columns) soil_parameters = df_name path_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/' np.random.seed(42) units = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] interpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/')) print('Parameter:', interpolation_files[n]) dict_for_plots = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_new_maxvol = {'MAXVOL_NEW': {}} for points in number_of_points: SAR = MaxVolSampling() SAR.soil_feature = soil_parameters[n] SAR.num_of_points = points SAR.soil_data = pd.read_csv(csv_file_to_process, sep=',') SAR.path_to_file_with_indices = None SAR.wd = './DEM_files//' SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, interpolation_files[n]) _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None) SAR.original_soil_data(SAR.soil_feature) interpolation_map = SAR.interpolation_array[::-1] MAXVOL_ = interpolation_map[SAR.i_am_maxvol_function()] # List to iterate over 100 realization of cLHS and Random cLHS_ = [interpolation_map[dict_for_indices['cLHS'][points][i]] for i in range(100)] Random_ = [interpolation_map[dict_for_indices['Random'][points][i]] for i in range(100)] dict_for_plots['MAXVOL'][points] = wasserstein_distance(SAR.original_data, MAXVOL_) 
dict_for_plots['cLHS'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in cLHS_] dict_for_plots['Random'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in Random_] quantile_lower_random = np.array([np.quantile(dict_for_plots['Random'][i], .10) for i in number_of_points]) quantile_upper_random = np.array([np.quantile(dict_for_plots['Random'][i], .90) for i in number_of_points]) median_random = np.array([np.median(dict_for_plots['Random'][i]) for i in number_of_points]) quantile_lower_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .10) for i in number_of_points]) quantile_upper_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .90) for i in number_of_points]) median_cLHS = np.array([np.median(dict_for_plots['cLHS'][i]) for i in number_of_points]) col.plot(number_of_points, [*dict_for_plots['MAXVOL'].values()], '-.',label='Maxvol',linewidth=4,markersize=10 ) col.plot(number_of_points, median_random, label='Random median',linewidth=3,markersize=10 ) col.plot(number_of_points, median_cLHS,'--',label='cLHS median',linewidth=3,markersize=14) col.fill_between(number_of_points, quantile_lower_random, quantile_upper_random , alpha=0.1, color='orange', label='CI Random') col.fill_between(number_of_points, quantile_lower_cLHS, quantile_upper_cLHS , alpha=0.1, color='green', label='CI cLHS') col.set_xlim(min(number_of_points), max(number_of_points)) # col.set_xticks(number_of_points) col.set_title(names_for_plots[n]) col.grid(True) col.set(ylabel="Wasserstein distance") if n==8 or n==9: col.set(xlabel="Number of points for sampling", ylabel="Wasserstein distance") # plt.show() n+=1 # plt.legend() # plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/wasserstein_disctance_IQR.png', dpi=300) # plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/nwasserstein_disctance_IQR.svg') ```
github_jupyter
Doc title: **Amazon Advertising Targeting Report** Article notes: Data came from 'Reports/Advertising Reports/Sponsored Products/Targeting Report' @Amazon Seller Central. 文章备注:亚马逊后台广告目标投放报告分析 Last modified date: 2019-12-05 16:33:04 ``` # 引入pandas数据分析模块 import pandas as pd # 数据范例:美国站,月度数据 workdf = pd.read_excel('data/amz_ads_target_us_201911.xlsx', usecols=['广告活动名称', '广告组名称', '匹配类型', '投放', '展现量', '点击量', '花费', '7天总订单数(#)', '7天总销售额(¥)', '7天总销售量(#)']) ``` # 广告组数据排序(以销量为取值标准) ``` # Manipulating data and column names. impr_df = workdf.groupby(['广告活动名称', '广告组名称']).sum().sort_values(by=['7天总销售量(#)'], ascending=False).head(20) impr_df['总销售额'] = impr_df['7天总销售额(¥)'] impr_df['总销售量'] = impr_df['7天总销售量(#)'] impr_df = impr_df[['展现量', '点击量', '花费', '总销售额', '总销售量']] impr_df['点击率'] = round((impr_df['点击量'] / impr_df['展现量'] * 100), 2) impr_df['卖出1件商品的平均花费'] = round(impr_df['花费'] / impr_df['总销售量'], 2) impr_df.sort_values(by=['卖出1件商品的平均花费']) ``` 此表解决的问题为: - 找出销量最高的广告组。 - 找出最具性价比的广告组。(卖出1件商品花费越小,则性价比越高) # 投放数据排序(以展现量为取值标准) ``` # Manipulating data and column names. target_df = workdf.groupby(['广告组名称', '匹配类型', '投放']).sum().sort_values(by=['展现量'], ascending=False).head(20) target_df['总销售额'] = target_df['7天总销售额(¥)'] target_df['总销售量'] = target_df['7天总销售量(#)'] target_df = target_df[['展现量', '点击量', '花费', '总销售额', '总销售量']] target_df['性价比(总销售额/花费)'] = round(target_df['总销售额'] / target_df['花费'], 2) target_df.sort_values(by=['性价比(总销售额/花费)'], ascending=False) ``` 此表解决的问题为: - 找出最具性价比的投放项目。 # 性价比最高的Top 10广告组与全部广告组数据对比 *注:在某一项目组每投入1美元带来的销售额回报越高,则其性价比越高。* ## Top 10广告组 ``` # Top 10 Ads. 
top10_df = workdf.groupby(['广告组名称']).sum().sort_values(by=['7天总销售量(#)'], ascending=False) top10_df['总销售额'] = top10_df['7天总销售额(¥)'] top10_df['性价比'] = round(top10_df['总销售额'] / top10_df['花费'], 2) top10_df = top10_df[['展现量', '点击量', '花费', '总销售额', '性价比']].sort_values(by=['性价比'], ascending=False).head(10) print('Top 10 广告组\n') top10_df ``` ## 数据对比 ``` total_ds = workdf.sum() top10_ds = top10_df.sum() print('结论:') print('\nTop 10广告组花费为:{0:.2f}美元; 全部广告组花费为:{1:.2f}美元; Top 10广告组占比:{2:.2f}%'.format(top10_ds['花费'], total_ds['花费'], top10_ds['花费'] / total_ds['花费'] * 100)) print('\nTop 10广告组共销售:{0:.2f}美元; 全部广告组共销售:{1:.2f}美元; Top 10广告组占比:{2:.2f}%'.format(top10_ds['总销售额'], total_ds['7天总销售额(¥)'], top10_ds['总销售额'] / total_ds['7天总销售额(¥)'] * 100)) ``` # 产品广告活动订单率情况 ``` target_df = workdf.groupby(['广告活动名称']).sum().head(20) target_df['总订单量'] = target_df['7天总订单数(#)'] target_df['订单率'] = round(target_df['总订单量'] / target_df['点击量'] * 100, 2) target_df[['展现量', '点击量', '总订单量', '订单率']].sort_values(by=['订单率'], ascending=False).fillna(0) ``` *注:订单率 = 订单量 / 点击量* 此表解决的问题为: - 了解产品广告活动的订单率情况。品牌广告的相关情况可见 **[amz_ads_brand.ipynb](amz_ads_brand.ipynb)** 的相关部分说明。 # 产品广告组订单率情况 ``` target_df = workdf.groupby(['广告组名称']).sum().head(20) target_df['总订单量'] = target_df['7天总订单数(#)'] target_df['订单率'] = round(target_df['总订单量'] / target_df['点击量'] * 100, 2) target_df[['展现量', '点击量', '总订单量', '订单率']].sort_values(by=['订单率'], ascending=False).fillna(0) ``` *注:订单率 = 订单量 / 点击量* 此表解决的问题为: - 了解产品广告组的订单率情况。 **[返回目录](amz_ads_catalog.ipynb)**
github_jupyter
``` from tensorflow.python.keras import backend as K from tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input from tensorflow.python.keras.preprocessing import image from tensorflow.python.keras.layers import Conv2D, GlobalAveragePooling2D, Input, Dropout, Dense from tensorflow.python.keras.utils import to_categorical from tensorflow.python.keras.models import Model from tensorflow.python.keras.datasets import cifar10 from tensorflow.python.keras.callbacks import Callback, TensorBoard from tensorflow.python.keras.backend import set_session from tensorflow.python.keras.models import load_model import tensorflow as tf from sklearn.model_selection import train_test_split from tqdm import tqdm from collections import defaultdict from matplotlib.pyplot import imshow from PIL import Image import datetime import numpy as np import os, glob, io import base64 %matplotlib inline sess = tf.Session() graph = tf.get_default_graph() set_session(sess) batch_size = 32 test_train_split = 0.2 max_epoch = 1 dropout_prob = 0.3 shape = (224, 224) train_size_per_label = 500 test_size_per_label = 100 test_train_split=0.2 image_path = "/Users/adammenges/Development/notebooks/basicClassifier/houses_120px_classes" def resize(arr, shape): return np.array(Image.fromarray(arr).resize(shape)) def decode_img(msg): # msg = msg[msg.find(b"<plain_txt_msg:img>")+len(b"<plain_txt_msg:img>"): # msg.find(b"<!plain_txt_msg>")] msg = base64.b64decode(msg) buf = io.BytesIO(msg) img = Image.open(buf) return img def preprocess(arr, shape=(224, 224)): arr = np.array([resize(arr[i], shape) for i in range(0, len(arr))]).astype('float32') arr = preprocess_input(arr) return arr def get_local_images(): classes = os.listdir(image_path) input_arr = [] target_labels = [] for class_idx in range(len(classes)): paths = glob.glob(os.path.join(image_path, classes[class_idx]) + "/*.png") for img_path in tqdm(paths, desc=f'Processing label {classes[class_idx]}: '): img = image.load_img(img_path, 
target_size=(224, 224)) x = image.img_to_array(img) # x = np.expand_dims(x, axis=0) x = preprocess_input(x) target_labels.append(class_idx) input_arr.append(x) X_train, X_test, y_train, y_test = train_test_split(input_arr, target_labels, test_size=test_train_split) X_train = np.array(X_train) X_test = np.array(X_test) y_train = np.array(y_train) y_test = np.array(y_test) return X_train, X_test, y_train, y_test, classes def get_cifar10(): (input_train, out_train), (input_test, out_test) = cifar10.load_data() return input_train, input_test, out_train, out_test, range(10) def get_resnet50(shape=(224, 224, 3)): return ResNet50(weights='imagenet', include_top=False, input_shape=shape) def restrain_data(input_train, out_train, input_test, out_test, num_class, num_train, num_test, shape=(224, 224)): train_dict = defaultdict(list) test_dict = defaultdict(list) [train_dict[out_train[idx][0]].append(input_train[idx]) for idx in range(input_train.shape[0])] [test_dict[out_test[idx][0]].append(input_test[idx]) for idx in range(input_test.shape[0])] restrain_class = range(num_class) restrain_train = [[train_dict[i][idx], i] for idx in range(num_train) for i in restrain_class] restrain_test = [[test_dict[i][idx], i] for idx in range(num_test) for i in restrain_class] rand_train_idx = np.random.choice(num_train * num_class, num_train * num_class) rand_test_idx = np.random.choice(num_test * num_class, num_test * num_class) i_train = np.array([restrain_train[idx][0] for idx in rand_train_idx]) o_train = np.array([[restrain_train[idx][1]] for idx in rand_train_idx]) i_test = np.array([restrain_test[idx][0] for idx in rand_test_idx]) o_test = np.array([[restrain_test[idx][1]] for idx in rand_test_idx]) i_train = preprocess(i_train, shape=shape) i_test = preprocess(i_test, shape=shape) return i_train, i_test, o_train, o_test, restrain_class input_train, input_test, out_train, out_test, classes = get_local_images() input_test.shape x = get_cifar10() x[0].shape # input_train, 
input_test, out_train, out_test, classes = restrain_data( # input_train, # out_train, # input_test, # out_test, # len(classes), # train_size_per_label, # test_size_per_label) # input_train = preprocess(input_train, shape=shape) # input_test = preprocess(input_test, shape=shape) total_train_steps = len(input_train) // batch_size out_train = to_categorical(out_train, len(classes)) out_test = to_categorical(out_test, len(classes)) def batch_generator(x, y, batch_size=32): while True: for step in range(len(x) // batch_size): yield x[step*batch_size:(step+1)*batch_size, ...], y[step*batch_size:(step+1)*batch_size, ...] class RecordAccuracy(Callback): def on_epoch_begin(self, epoch, logs=None): print(f'Running epoch {epoch}. Total {total_train_steps} batches') def on_batch_end(self, batch, logs=None): loss = logs['loss'] if not batch % 10: print(f'Running batch {batch}: train loss - {loss}') def on_epoch_end(self, epoch, logs=None): loss = logs["loss"] val_acc = logs["val_acc"] print(f'Epoch {epoch}: train loss - {loss}. test accuracy - {val_acc}') def freeze_layers(model, layer_num): for layer in model.layers[:layer_num]: layer.trainable = False def train_layers(model, layer_num): for layer in model.layers[layer_num:]: layer.trainable = True resnet50 = get_resnet50(shape=shape + (3,)) bottleneck_train_features = resnet50.predict(input_train) bottleneck_test_features = resnet50.predict(input_test) in_layer = Input(shape=(bottleneck_train_features.shape[1:])) x = Conv2D(filters=100, kernel_size=2)(in_layer) x = Dropout(0.4)(x) x = GlobalAveragePooling2D()(x) x = Dropout(0.3)(x) predictions = Dense(len(classes), activation='softmax')(x) model = Model(inputs=in_layer, outputs=predictions) model.summary() ``` ## Train the model! And now it's time to train the model! 
``` model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc']) model.fit_generator(batch_generator(bottleneck_train_features, out_train), steps_per_epoch=len(bottleneck_train_features) // batch_size, validation_data=(bottleneck_test_features, out_test), verbose=2, epochs=max_epoch, callbacks=[RecordAccuracy(), TensorBoard()]) ``` # Server Okay now let's host a server for grasshopper ``` print(model.predict(resnet50.predict(np.array([input_test[0]])))) print(classes) print('----') print(input_test[0].shape) print(list(zip(model.predict(resnet50.predict(np.array([input_test[0]])))[0], classes))) out_test[0] from flask import Flask from flask import request app = Flask(__name__) @app.route('/predict', methods=['POST']) #GET requests will be blocked def hello_world(): req_data = request.get_json() img = req_data['image'] img = decode_img(img).resize((224,224)).convert('RGB') img = image.img_to_array(img) x = preprocess_input(img) print('----') print(x.shape) print('----') global sess global graph with graph.as_default(): set_session(sess) pred = model.predict(resnet50.predict(np.array([x])))[0] pred = [str(f) for f in pred] prediction = list(zip(pred, classes)) print('prediction') print(prediction) return { 'prediction': prediction } app.run(debug=True, use_reloader=False) ```
github_jupyter
``` import csv import numpy as np import tensorflow as tf from sklearn.model_selection import train_test_split RANDOM_SEED = 42 ``` # 各パス指定 ``` dataset = 'model/point_history_classifier/point_history_allkeypoints.csv' model_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.hdf5' ``` # 分類数設定 ``` NUM_CLASSES = 3 # point_history_classifier_label_allkeypoints の分類数 ``` # 入力長 ``` TIME_STEPS = 16 # 時系列 NUM_KEYPOINTS = 21 # 全点数 DIMENSION = 2 * NUM_KEYPOINTS # [x,y]2要素 * 全点数 ``` # 学習データ読み込み ``` # "座標"のデータセット X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (TIME_STEPS * DIMENSION) + 1))) # usecols=: どの列(縦列)を読み取るのか,ここでは1列目のindex列を抜かして入力長分を指定 # "index番号"のデータセット usecols=(0)は最初の縦列指定 y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0)) X_train, X_test, y_train, y_test = train_test_split( X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED) # Xはデータの実体,yはindex番号 #これでcsvデータのうち4分の3が練習用trainにX(実体)とy(番号)を振り分けられながら入り、 # 本番testにも残り4分の1が同様に入った -> trainには 16 * 2 * 21 個の要素持ったデータの列がある ``` # モデル構築 ``` use_lstm = False model = None if use_lstm: model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )), tf.keras.layers.Reshape((TIME_STEPS, DIMENSION), input_shape=(TIME_STEPS * DIMENSION, )), tf.keras.layers.Dropout(0.2), tf.keras.layers.LSTM(16, input_shape=[TIME_STEPS, DIMENSION]), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(NUM_CLASSES, activation='softmax') ]) else: model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(24, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(NUM_CLASSES, activation='softmax') ]) model.summary() # tf.keras.utils.plot_model(model, show_shapes=True) # モデルチェックポイントのコールバック cp_callback = 
<h3>ONNXモデルへの変換(追加Cell)</h3> <h5>save_modelの2つ目の引数がモデルのファイル名になります</h5>
tf.keras.models.load_model(model_save_path) tflite_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.tflite' # モデルを変換(量子化 converter = tf.lite.TFLiteConverter.from_keras_model(model) # converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) converter.optimizations = [tf.lite.Optimize.DEFAULT] tflite_quantized_model = converter.convert() open(tflite_save_path, 'wb').write(tflite_quantized_model) ``` # 推論テスト ``` interpreter = tf.lite.Interpreter(model_path=tflite_save_path) interpreter.allocate_tensors() # 入出力テンソルを取得 input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() print(input_details) interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]])) %%time # 推論実施 interpreter.invoke() tflite_results = interpreter.get_tensor(output_details[0]['index']) print(np.squeeze(tflite_results)) print(np.argmax(np.squeeze(tflite_results))) ```
github_jupyter
# Shor's algorithm, fully classical implementation

```
%matplotlib inline
import random
import math
import itertools

def period_finding_classical(a,N):
    """Return the multiplicative order r of a modulo N, i.e. the smallest
    r > 0 with a**r == 1 (mod N).

    This is an inefficient classical stand-in for the quantum period-finding
    subroutine: f(0) = a**0 (mod N) = 1, so we find the first x greater than 0
    for which f(x) is also 1.  Caller must ensure gcd(a, N) == 1, otherwise
    no such r exists and this loops forever.
    """
    for r in itertools.count(start=1):
        if (a**r) % N == 1:
            return r

def shors_algorithm_classical(N):
    """Factor N into two factors using the classical skeleton of Shor's
    algorithm (random a, period finding, gcd extraction).

    Returns a tuple (first_factor, second_factor) with
    first_factor * second_factor == N.  Loops until a non-trivial split is
    found, so N should be composite (a prime would loop indefinitely, apart
    from lucky trivial draws).
    """
    assert(N>0)
    assert(int(N)==N)
    while True:
        a=random.randint(0,N-1)
        # BUG FIX: a == 0 gives gcd(0, N) == N, so the branch below would
        # return the trivial "factorization" (N, 1).  Redraw instead.
        # (N == 1 still works: gcd(0, 1) == 1 and the N == 1 test fires.)
        if a==0 and N!=1:
            continue
        g=math.gcd(a,N)
        if g!=1 or N==1:
            # Lucky draw: a shares a factor with N, no period finding needed.
            first_factor=g
            second_factor=int(N/g)
            return first_factor,second_factor
        else:
            r=period_finding_classical(a,N)
            if r % 2 != 0:
                # Odd period: a**(r/2) is not an integer power, retry.
                continue
            elif a**(int(r/2)) % N == -1 % N:
                # a**(r/2) == -1 (mod N) yields only trivial gcds, retry.
                continue
            else:
                first_factor=math.gcd(a**int(r/2)+1,N)
                second_factor=math.gcd(a**int(r/2)-1,N)
                if first_factor==N or second_factor==N:
                    # Trivial split, retry with a fresh a.
                    continue
                return first_factor,second_factor

# Testing it out. Note because of the probabilistic nature of the algorithm, different factors and different ordering is possible
shors_algorithm_classical(15)

shors_algorithm_classical(91)
```

# Shor's algorithm, working on a quantum implementation

## The following code will help give intuition for how to design a quantum circuit to do modular multiplication

```
def U_a_modN(a,N,binary=False):
    """
    a and N are decimal
    This algorithm returns U_a where:
    U_a is a modular multiplication operator map from |x> to |ax mod N>
    If binary is set to True, the mapping is given in binary instead of in decimal notation.
    """
    # l[i-1] holds a*i mod N for i in 1..N-1.
    l=[]
    for i in range(1,N):
        l+=[a*i%N]
    # Collect the cycles of the permutation x -> a*x mod N.
    res=set()
    for i in range(1,N):
        mp=[i]
        end=i
        nxt=i-1
        while l[nxt]!=end:
            mp+=[l[nxt]]
            nxt=l[nxt]-1
        res.add(tuple(mp))
    # Deduplicate cycles that contain the same elements (rotations of each other).
    final_res=[]
    for item in res:
        dup=False
        for final_item in final_res:
            if set(item) == set(final_item):
                dup=True
        if not dup:
            final_res+=[item]
    if not binary:
        return final_res
    else:
        final_res_bin=[]
        for mapping in final_res:
            final_res_bin+=[tuple(['{0:06b}'.format(decimal) for decimal in mapping])]
        return final_res_bin

print(U_a_modN(8,35))
print(U_a_modN(8,35,binary=True))
```

# This code implements modular multiplication by 2 mod 15

```
import qiskit
import matplotlib
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, QISKitError
from qiskit.tools.visualization import circuit_drawer
from qiskit.extensions.standard import cx, cswap
from qiskit import Aer
from qiskit import IBMQ

# Authenticate an account and add for use during this session. Replace string
# argument with your private token.
IBMQ.enable_account("INSERT_YOUR_API_TOKEN_HERE")

def mult_2mod15_quantum(qr,qc):
    """Append gates to qc implementing |x> -> |2x mod 15> on qubits 0..3 of qr.

    Multiplication by 2 mod 15 is a cyclic shift of the four bits, realized
    here as three qubit swaps, each built from three CNOTs.
    """
    # Swap 0th qubit and 3rd qubit
    qc.cx(qr[0],qr[3])
    qc.cx(qr[3],qr[0])
    qc.cx(qr[0],qr[3])
    # Swap 0th qubit and 1st qubit
    qc.cx(qr[1],qr[0])
    qc.cx(qr[0],qr[1])
    qc.cx(qr[1],qr[0])
    # Swap 1st qubit and 2nd qubit
    qc.cx(qr[1],qr[2])
    qc.cx(qr[2],qr[1])
    qc.cx(qr[1],qr[2])

def mult_2mod15_quantum_test(x):
    """Encode x, run the multiplication circuit on the simulator, and return
    (x, decoded_result) for comparison against the classical computation."""
    qr = QuantumRegister(4)
    cr = ClassicalRegister(4)
    qc = QuantumCircuit(qr,cr)
    # input: qubit 0 carries the most significant bit of x
    x_bin='{0:04b}'.format(x)
    for i,b in enumerate(x_bin):
        if int(b):
            qc.x(qr[i])
    # run circuit
    mult_2mod15_quantum(qr,qc)
    # measure results
    for i in range(4):
        qc.measure(qr[i],cr[i])
    import time
    from qiskit.tools.visualization import plot_histogram
    backend=Aer.get_backend('qasm_simulator')
    shots=50
    job_exp = qiskit.execute(qc, backend=backend)
    result = job_exp.result()
    final=result.get_counts(qc)
    # Decode: mirrors the encoding above (qubit 0 is the MSB), using qiskit's
    # bit-string key ordering from get_counts.
    result_in_order=list(final.keys())[0]
    dec=0
    for i,b in enumerate(result_in_order):
        if int(b):
            dec+=2**i
    return (x,dec)

def mult_2mod15_classical_test(x):
    """Classical reference: return (x, 2*x mod 15)."""
    return (x,2*x%15)

# testing!
for i in range(1,15):
    quantum=mult_2mod15_quantum_test(i)
    classical=mult_2mod15_classical_test(i)
    if quantum!=classical:
        print(quantum,classical)
```

## This code makes the previous an operation controlled by a control qubit

```
def controlled_mult_2mod15_quantum(qr,qc,control_qubit):
    """
    Controlled quantum circuit for multiplication by 2 mod 15.
    Note: the control qubit should be at an index greater than 3,
    and qubits 0,1,2,3 are reserved for circuit operations
    """
    # Each 3-CNOT swap of the uncontrolled version becomes one Fredkin (cswap).
    # Swap 0th qubit and 3rd qubit
    qc.cswap(control_qubit,qr[0],qr[3])
    # Swap 0th qubit and 1st qubit
    qc.cswap(control_qubit,qr[1],qr[0])
    # Swap 1st qubit and 2nd qubit
    qc.cswap(control_qubit,qr[1],qr[2])
```

# This code performs the entire Shor's algorithm subroutine for multiplication by 2 mod 15

```
import math
def shors_subroutine_period_2mod15(qr,qc,cr):
    """Build the semiclassical-QFT period-finding subroutine for a = 2, N = 15,
    measuring three phase bits into cr[0..2]."""
    qc.x(qr[0])
    qc.h(qr[4])
    qc.h(qr[4])
    qc.measure(qr[4],cr[0])
    qc.h(qr[5])
    qc.cx(qr[5],qr[0])
    qc.cx(qr[5],qr[2])
    # NOTE(review): `cr[0] == 1` is evaluated once at circuit-construction
    # time, not per shot; for measurement-conditioned gates qiskit uses
    # `.c_if(...)`. As written these conditional rotations are likely never
    # appended — confirm against the qiskit version this was run on.
    if cr[0] == 1:
        qc.u1(math.pi/2,qr[4]) #pi/2 is 90 degrees in radians
    qc.h(qr[5])
    qc.measure(qr[5],cr[1])
    qc.h(qr[6])
    controlled_mult_2mod15_quantum(qr,qc,qr[6])
    if cr[1] == 1:
        qc.u1(math.pi/2,qr[6]) # pi/2 is 90 degrees in radians
    if cr[0] == 1:
        qc.u1(math.pi/4,qr[6]) #pi/4 is 45 degrees in radians
    qc.h(qr[6])
    qc.measure(qr[6],cr[2])
```

# This code will help us read out the results from our quantum Shor's subroutine.
First, implementing the code to compute the period from the output of the quantum computation: ``` # see https://arxiv.org/pdf/quant-ph/0010034.pdf for more details (convergence relations on page 11) import math def continued_fraction(xi,max_steps=100): # stop_after is cutoff for algorithm, for debugging """ This function computes the continued fraction expansion of input xi per the recurrance relations on page 11 of https://arxiv.org/pdf/quant-ph/0010034.pdf """ #a and xi initial all_as=[] all_xis=[] a_0=math.floor(xi) xi_0=xi-a_0 all_as+=[a_0] all_xis+=[xi_0] # p and q initial all_ps=[] all_qs=[] p_0=all_as[0] q_0=1 all_ps+=[p_0] all_qs+=[q_0] xi_n=xi_0 while not numpy.isclose(xi_n,0,atol=1e-7): if len(all_as)>=max_steps: print("Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps"%max_steps) break # computing a and xi a_nplus1=math.floor(1/xi_n) xi_nplus1=1/xi_n-a_nplus1 all_as+=[a_nplus1] all_xis+=[xi_nplus1] xi_n=xi_nplus1 # computing p and q n=len(all_as)-1 if n==1: p_1=all_as[1]*all_as[0]+1 q_1=all_as[1] all_ps+=[p_1] all_qs+=[q_1] else: p_n=all_as[n]*all_ps[n-1]+all_ps[n-2] q_n=all_as[n]*all_qs[n-1]+all_qs[n-2] all_ps+=[p_n] all_qs+=[q_n] return all_ps,all_qs,all_as,all_xis import numpy def test_continued_fraction(): """ Testing the continued fraction see https://arxiv.org/pdf/quant-ph/0010034.pdf, step 2.5 chart page 20 NOTE: I believe there is a mistake in this chart at the last row, and that n should range as in my code below their chart is missing one line. Please contact me if you find differently! 
""" xi=13453/16384 all_ps,all_qs,all_as,all_xis=continued_fraction(xi) ## step 2.5 chart in https://arxiv.org/pdf/quant-ph/0010034.pdf page 20 #n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] #a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,3] #p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,13453] #q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,16384] ## what I find instead: n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,2,1] p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,9726,13453] q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,11845,16384] for tup in [("ns",range(len(all_ps)),range(len(n_13453_16384))), ("as",all_as,a_n_13453_16384), ("ps",all_ps,p_n_13453_16384), ("qs",all_qs,q_n_13453_16384), ]: if not numpy.array_equal(tup[1],tup[2]): print(tup[0]) print("act:",tup[1]) print("exp:",tup[2]) print() from IPython.display import display, Math def pretty_print_continued_fraction(results,raw_latex=False): all_ps,all_qs,all_as,all_xis=results for i,vals in enumerate(zip(all_ps,all_qs,all_as,all_xis)): p,q,a,xi=vals if raw_latex: print(r'\frac{p_%d}{q_%d}=\frac{%d}{%d}'%(i,i,p,q)) else: display(Math(r'$\frac{p_%d}{q_%d}=\frac{%d}{%d}$'%(i,i,p,q))) test_continued_fraction() #pretty_print_continued_fraction(continued_fraction(5/8),raw_latex=True) #pretty_print_continued_fraction(continued_fraction(0/8)) pretty_print_continued_fraction(continued_fraction(6/8)) ``` # Next we will integrate the check for whether we have found the period into the continued fraction code, so that we can stop computing the continued fraction as soon as we've found the period ``` import math def period_from_quantum_measurement(quantum_measurement, number_qubits, a_shor, N_shor, max_steps=100): # stop_after is cutoff for algorithm, for debugging """ This function computes the continued fraction expansion of input xi per the recurrance relations on page 11 of 
https://arxiv.org/pdf/quant-ph/0010034.pdf

    a_shor is the random number chosen as part of Shor's algorithm
    N_shor is the number Shor's algorithm is trying to factor

    Returns the period r (the first continued-fraction denominator q with
    a_shor**q == 1 mod N_shor), or None if no such q is found before the
    expansion terminates or max_steps is reached.
    """
    xi=quantum_measurement/2**number_qubits
    #a and xi initial
    all_as=[]
    all_xis=[]
    a_0=math.floor(xi)
    xi_0=xi-a_0
    all_as+=[a_0]
    all_xis+=[xi_0]
    # p and q initial
    all_ps=[]
    all_qs=[]
    p_0=all_as[0]
    q_0=1
    all_ps+=[p_0]
    all_qs+=[q_0]
    xi_n=xi_0
    while not numpy.isclose(xi_n,0,atol=1e-7):
        if len(all_as)>=max_steps:
            print("Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps"%max_steps)
            break
        # computing a and xi
        a_nplus1=math.floor(1/xi_n)
        xi_nplus1=1/xi_n-a_nplus1
        all_as+=[a_nplus1]
        all_xis+=[xi_nplus1]
        xi_n=xi_nplus1
        # computing p and q
        n=len(all_as)-1
        if n==1:
            p_1=all_as[1]*all_as[0]+1
            q_1=all_as[1]
            all_ps+=[p_1]
            all_qs+=[q_1]
        else:
            p_n=all_as[n]*all_ps[n-1]+all_ps[n-2]
            q_n=all_as[n]*all_qs[n-1]+all_qs[n-2]
            all_ps+=[p_n]
            all_qs+=[q_n]
        # check the q to see if it is our answer (note with this we skip the first q, as a trivial case)
        if a_shor**all_qs[-1]%N_shor == 1 % N_shor:
            return all_qs[-1]

period_from_quantum_measurement(13453,14,3,91) #should return, for example 6 per page 20 of https://arxiv.org/pdf/quant-ph/0010034.pdf

# Testing this:
import qiskit
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister

def binary_string_to_decimal(s):
    """Convert a binary string (most significant bit first, as in qiskit
    get_counts keys) to its decimal value.

    BUG FIX: the original used the bit's *value* as the exponent
    (dec += 2**int(i)), which returned 2 * (number of set bits) instead of
    the decimal value.  Use the bit's *position* from the right instead.
    """
    dec=0
    for position,bit in enumerate(s[::-1]):
        if int(bit):
            dec+=2**position
    return dec

def run_shors_subroutine_period2_mod15():
    """Run the a=2, N=15 quantum subroutine once on the simulator and return
    the period extracted from the measurement (or None on a bad shot)."""
    qr = QuantumRegister(7)
    cr = ClassicalRegister(3)
    qc = QuantumCircuit(qr,cr)
    # initialize x to be a superposition of all possible r quibit values
    #for i in range(4):
    #    qc.h(qr[i])
    # run circuit (which includes measurement steps)
    shors_subroutine_period_2mod15(qr,qc,cr)
    import time
    from qiskit.tools.visualization import plot_histogram
    backend=Aer.get_backend('qasm_simulator')
    job_exp = qiskit.execute(qc, backend=backend,shots=1)
    result = job_exp.result()
    final=result.get_counts(qc)
    # convert final result to decimal
    measurement=binary_string_to_decimal(list(final.keys())[0])
    period_r=period_from_quantum_measurement(measurement,3,2,15)
    return period_r

print(run_shors_subroutine_period2_mod15())
```

# The last thing to do will be to implement the full Shor's algorithm and check if the r is correct by plugging it in, getting factors and checking results. If not, rerun the algorithm.

```
def period_finding_quantum(a,N):
    """Quantum period finding, implemented only for the worked example
    a = 2, N = 15; raises for any other pair.

    # for the sake of example we will not implement this algorithm in full generality
    # rather, we will create an example with one specific a and one specific N
    # extension work could be done to impl
    """
    if a==2 and N==15:
        return run_shors_subroutine_period2_mod15()
    else:
        raise Exception("Not implemented for N=%d, a=%d" % (N,a))

def shors_algorithm_quantum(N,fixed_a=None):
    """Full Shor's algorithm using the quantum period-finding subroutine.

    N       -- the (composite) integer to factor
    fixed_a -- optional: force a specific a instead of choosing randomly
               (a falsy fixed_a, including 0, falls back to random choice)

    Returns (first_factor, second_factor) with product N.
    """
    assert(N>0)
    assert(int(N)==N)
    while True:
        if not fixed_a:
            a=random.randint(0,N-1)
            # BUG FIX: a == 0 gives gcd(0, N) == N, so the gcd branch below
            # would return the trivial "factorization" (N, 1).  Redraw.
            # (N == 1 still works via gcd(0, 1) == 1 and the N == 1 test.)
            if a==0 and N!=1:
                continue
        else:
            a=fixed_a
        g=math.gcd(a,N)
        if g!=1 or N==1:
            # Lucky draw: a shares a factor with N, no period finding needed.
            first_factor=g
            second_factor=int(N/g)
            return first_factor,second_factor
        else:
            r=period_finding_quantum(a,N)
            if not r:
                # Subroutine failed to extract a period on this shot; retry.
                continue
            if r % 2 != 0:
                continue
            elif a**(int(r/2)) % N == -1 % N:
                continue
            else:
                first_factor=math.gcd(a**int(r/2)+1,N)
                second_factor=math.gcd(a**int(r/2)-1,N)
                if first_factor==N or second_factor==N:
                    continue
                if first_factor*second_factor!=N: # checking our work
                    continue
                return first_factor,second_factor

# Here's our final result
shors_algorithm_quantum(15,fixed_a=2)

# Now trying it out to see how the algorithm would function if we let it choose a given random a:
for a in range(15):
    # Here's the result for a given a:
    try:
        print("randomly chosen a=%d would result in %s"%(a,shors_algorithm_quantum(15,fixed_a=a)))
    except:
        print("FINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=%d at this stage"%a)
```
github_jupyter
``` import random
{'x':'col_x','y':'col_y','z':'col_z'}) df df = df.fillna(0) df df = df.astype(int) df df = df.drop([1,3,4], axis=0) df zero_df = pd.DataFrame(np.zeros((2,3)),columns=['col_x','col_y','col_z']) zero_df df = pd.concat([df, zero_df], axis=0) df df = df.sort_values('col_x', axis=0) df df = df.astype(int) df.to_csv('D:\RESEARCH\Dasarnya\MATH\Step_4_Statistics and Calculus Python\My Source Code\Work_2\output.csv', index=False) df = pd.DataFrame({'x':[1,2,-1],'y':[-3,6,5],'z':[1,3,2]}) df df['x_squared'] = df['x'].apply(lambda x: x**2) df def parity_str(x): if x%2 == 0: return 'even' return 'odd' df['x_parity'] = df['x'].apply(parity_str) df df['x_parity'] pd.get_dummies(df['x_parity']) print(df['x_parity'].value_counts()) import pandas as pd student_df = pd.DataFrame({'name':['Alice','Bob','Carol','Dan','Eli','Fran'],'gender':['female','male','female','male','male','female'],'class':['FY','SO','SR','SO','JR','SR'],'gpa':[90,93,97,89,95,92],'num_classes':[4,3,4,4,3,2]}) student_df student_df['female_flag'] = student_df['gender'].apply(lambda x:x == 'female') student_df student_df['female_flag'] = student_df['gender'] == 'female' student_df student_df = student_df.drop('gender',axis=1) student_df pd.get_dummies(student_df['class']) student_df = pd.concat([student_df.drop('class',axis=1),pd.get_dummies(student_df['class'])],axis=1) student_df gender_group = student_df.groupby('female_flag') gender_group gender_group['gpa'].mean() gender_group['num_classes'].sum() x = [1,2,3,1.5,2] y = [-1,5,2,3,0] import matplotlib.pyplot as plt plt.scatter(x,y) plt.show() sizes = [10, 40, 60, 80, 100] colors = ['r', 'b', 'y', 'g', 'k'] plt.scatter(x, y, s=sizes, c=colors) plt.show() import numpy as np x = np.linspace(0,10,1000) y = np.sin(x) plt.plot(x,y) plt.show() x y x = np.linspace(1,10,1000) linear_line = x sin_wave = np.sin(x) log_curve = np.log(x) curves = [linear_line, log_curve, sin_wave] colors = ['k','r','b'] styles = ['-','--',':'] for curve,color,style in zip(curves, colors, 
styles): plt.plot(x, curve, c=color, linestyle=style) plt.show() labels = ['Type 1', 'Type 2', 'Type 3'] counts = [2,3,5] plt.bar(labels,counts) plt.show() import numpy as np import scipy.stats as stats import matplotlib.pyplot as plt samples = np.random.normal(0,1,size=1000) x = np.linspace(samples.min(),samples.max(),1000) y = stats.norm.pdf(x) x y plt.hist(samples,alpha=0.2, bins=30, density=True) plt.plot(x,y) plt.show() samples = np.random.beta(2,5,size=1000) x = np.linspace(samples.min(),samples.max(),1000) y = stats.beta.pdf(x,2,5) plt.hist(samples, bins=20, alpha=0.2, density=True) plt.plot(x,y) plt.show() samples = np.random.gamma(1, size=1000) x = np.linspace(samples.min(), samples.max(), 1000) y = stats.gamma.pdf(x, 1) plt.hist(samples, alpha=0.2, bins=20, density=True) plt.plot(x, y) plt.show() x = np.random.normal(0,1,1000) y = np.random.normal(5,2,1000) df = pd.DataFrame({'Column 1':x,'Column 2':y}) df import seaborn as sns sns.jointplot(x='Column 1', y='Column 2', data=df) plt.show() student_df = pd.DataFrame({ 'name': ['Alice', 'Bob', 'Carol', 'Dan', 'Eli', 'Fran', \ 'George', 'Howl', 'Ivan', 'Jack', 'Kate'],\ 'gender': ['female', 'male', 'female', 'male', \ 'male', 'female', 'male', 'male', \ 'male', 'male', 'female'],\ 'class': ['JR', 'SO', 'SO', 'SO', 'JR', 'SR', \ 'FY', 'SO', 'SR', 'JR', 'FY'],\ 'gpa': [90, 93, 97, 89, 95, 92, 90, 87, 95, 100, 95],\ 'num_classes': [4, 3, 4, 4, 3, 2, 2, 3, 3, 4, 2]}) student_df sns.catplot(x='class',y='gpa',hue='gender',kind='bar',data=student_df) plt.show() student_df['gpa'].plot.hist() plt.show() student_df['class'].value_counts().plot.pie() plt.show() df = pd.read_csv('D:\RESEARCH\Dasarnya\MATH\Step_4_Statistics and Calculus Python\My Source Code\Work_2\CommViolPredUnnormalizedData.txt') df for column in df.columns: print(column) print(len(df.columns)) df = df.replace('?',np.nan) df.isnull().sum() print(df.isnull().sum()['NumStreet']) print(df.isnull().sum()['PolicPerPop']) state_count = 
df['state'].value_counts() f, ax = plt.subplots(figsize=(15,10)) state_count.plot.bar() plt.show() f, ax = plt.subplots(figsize=(15,10)) state_count.plot.pie() plt.show() f, ax = plt.subplots(figsize=(15,10)) df['population'].hist(bins=200) plt.show() f, ax = plt.subplots(figsize=(15, 10)) df['householdsize'].hist(bins=200) plt.show() ```
github_jupyter
## ELIZA Copyright (C) 2019 Szymon Jessa ### Kod Elizy Importujemy biblioteki: ``` import doctest import re ``` Tworzymy zmienną globalną, która będzie zapisywała wypowiedzi podczas konwersacji. ``` memstack = [] ``` Funkcja odpowiadająca za przetworzenie wypowiedzi użytkownika i zaproponowanie odpowiedzi. ``` def get_keystack(user_input): # scan all words in the utterance for keywords one-by-one # and put tuples (keyword, rank) in the list # ***CHALLENGE*** modify the code to handle UPPERCASE LETTERS and punctuations keystack = [] for w in user_input.split(): if w in script: keystack.append((w, script[w].get("rank", 0))) # sort keywords by rank, descending keystack = sorted(keystack, key=lambda i: i[1]) # extract keywords from tuples keystack = [w for w, r in keystack] return keystack ``` Funkcja "zapamiętująca" słowa kluczowe pojawiające się we wcześniejszej rozmowie: ``` def memorize_user_input(user_input, user_input_trans): # scan all words for memory keywords memory_keywords = [] for w in user_input.split(): if w in script_memory: memory_keywords.append(w) memory_keywords = list(set(memory_keywords)) for k in memory_keywords: # use first reassembly rule # ***CHALLENGE*** modify the code to use more than one reassembly rule! 
memresp = re.sub(script_memory[k]["decomposition"], script_memory[k]["reassembly"][0], user_input_trans) memstack.append(memresp) ``` Funkcja wybierająca odpowiedź Elizy: ``` def process(user_input): """ (str) -> str Return Eliza's answer for given input Return responses associated with the matched keyword Return responses for "none" keyword or use memory if other keywords not found """ # extract keywords keystack = get_keystack(user_input) # transform user input using substitutions user_input_trans = " ".join(map(lambda w: substitutions.get(w, w), user_input.split())) # find a response based on the keywords resp = "" if keystack: # get response associated with the highest ranked keyword kw = keystack[-1] # get top ranked keyword rule = script[kw] # get script rule (decomposition & reassembly) for this keyword # check if the decomposition rule cane be applied for the utterance if re.search(rule["decomposition"], user_input_trans): # if yes, generate response using the first reassembly rule trans = rule["reassembly"].pop(0) # pop first reassembly from list rule["reassembly"].append(trans) # append the reassembly rule to the end of the list resp = re.sub(rule["decomposition"], trans, user_input_trans) # generate response # if none keywords or rules were found, try to use memory if resp == "": if memstack: resp = memstack.pop(0) # if there are no responses in memory, # use default responses associated with special keyword "none" else: # ***CHALLENGE*** modify the code to support more than one default answer (reassembly rule) # ***CHALLENGE*** ensure the code won't break if "none" key doesn't exist resp = script["none"]["reassembly"][0] # if possible, generate new responses to be stored in memory for later memorize_user_input(user_input, user_input_trans) return str(resp) ``` Funkcja do uruchomiania czatu: ``` def chat(): """ () -> None Allows interaction with Eliza in a loop: - read input from console - get Eliza's response using process function - write Eliza's 
response to console - exit if input string length is 0 """ print("<hit enter with no input to exit>") print("Eliza: How do you do. Please tell me your problem") while True: utt = input("Student: ") if not utt: break resp = process(utt) print("Eliza: %s" % resp) ``` ### Testy Tutaj puszczamy testy dla konkretnych zapytań dla Elizy. Doc testy puszczają dany kod oznaczony ```>>>``` i oczekują odpowiedzi zgodnej z tym, co jest poniżej. Oto przykład: ``` def task0_true(): """ >>> 2+2 4 """ def task0_false(): """ >>> 2+2 5 """ doctest.run_docstring_examples(task0_true, globals(), name="task0", verbose=True) doctest.run_docstring_examples(task0_false, globals(), name="task0", verbose=True) def task1(): """ >>> process("I have no problems") "Are you saying 'no' just to be negative?" >>> process("no") 'You are being a bit negative' >>> process("no") 'Why not' """ pass def task2(): """ >>> process("hmm") 'I am not sure I understand you fully' """ pass def task3(): """ >>> process("no, I am not a negative person") 'Is it because you are not a negative person that you came to me?' >>> process("no") 'You are being a bit negative' """ pass def task4(): """ >>> process("you are repeating yourself") 'What makes you think I am repeating myself?' >>> process("you are kidding me") 'Does it please you to believe I am kidding you?' """ pass def task5(): """ >>> process("my wife said I am optimistic") 'Tell me more about your family' >>> process("now I am sad") 'Is it because you are sad that you came to me?' >>> process("maybe") 'But your wife said you are optimistic?' """ pass ``` ### Skrypt rozmowy Tutaj znajduje się skrypt rozmowy. W słowniku 'decomposition' to szukane wyrażenia regularne, natomiast 'reassembly' to odpowiedź Elizy. ``` script = { "no": {"decomposition": r"^.*$", "reassembly": [ "Are you saying 'no' just to be negative?"]}} ``` Zastępstwa - tutaj możemy zmieniać pewne wyrażenia, żeby płynniej prowadzić rozmowę. 
``` substitutions = {} ``` Script memory - tutaj tworzymy skrypt rozmowy (j.w.) ale wykorzystując zapamiętane wcześniej słowa. ``` script_memory = {} ``` W przypadku niektórych testów poniżej mamy do czynienia z losowością, więc czasem trzeba powtórzyć wykonanie go, aby wynik był właściwy. ``` #doctest.run_docstring_examples(task1, globals(), name="task1", verbose=True) #doctest.run_docstring_examples(task2, globals(), name="task2", verbose=True) #doctest.run_docstring_examples(task3, globals(), name="task3", verbose=True) #doctest.run_docstring_examples(task4, globals(), name="task4", verbose=True) #doctest.run_docstring_examples(task4, globals(), name="task5", verbose=True) ``` Tutaj można odpalić rozmowę z Elizą. ``` chat() ```
github_jupyter
<a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a> <h1 align=center><font size="5"> SVM (Support Vector Machines)</font></h1> In this notebook, you will use SVM (Support Vector Machines) to build and train a model using human cell records, and classify cells to whether the samples are benign or malignant. SVM works by mapping data to a high-dimensional feature space so that data points can be categorized, even when the data are not otherwise linearly separable. A separator between the categories is found, then the data is transformed in such a way that the separator could be drawn as a hyperplane. Following this, characteristics of new data can be used to predict the group to which a new record should belong. <h1>Table of contents</h1> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ol> <li><a href="#load_dataset">Load the Cancer data</a></li> <li><a href="#modeling">Modeling</a></li> <li><a href="#evaluation">Evaluation</a></li> <li><a href="#practice">Practice</a></li> </ol> </div> <br> <hr> ``` import pandas as pd import pylab as pl import numpy as np import scipy.optimize as opt from sklearn import preprocessing from sklearn.model_selection import train_test_split %matplotlib inline import matplotlib.pyplot as plt ``` <h2 id="load_dataset">Load the Cancer data</h2> The example is based on a dataset that is publicly available from the UCI Machine Learning Repository (Asuncion and Newman, 2007)[http://mlearn.ics.uci.edu/MLRepository.html]. The dataset consists of several hundred human cell sample records, each of which contains the values of a set of cell characteristics. 
|ID|Patient identifier|
Let's first look at the columns' data types:
The mathematical function used for the transformation is known as the kernel function, and can be of different types, such as: 1.Linear 2.Polynomial 3.Radial basis function (RBF) 4.Sigmoid Each of these functions has its characteristics, its pros and cons, and its equation, but as there's no easy way of knowing which function performs best with any given dataset, we usually choose different functions in turn and compare the results. Let's just use the default, RBF (Radial Basis Function) for this lab. ``` from sklearn import svm clf = svm.SVC(kernel='rbf') clf.fit(X_train, y_train) ``` After being fitted, the model can then be used to predict new values: ``` yhat = clf.predict(X_test) yhat [0:5] ``` <h2 id="evaluation">Evaluation</h2> ``` from sklearn.metrics import classification_report, confusion_matrix import itertools def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(classes)) plt.xticks(tick_marks, classes, rotation=45) plt.yticks(tick_marks, classes) fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. 
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cnf_matrix = confusion_matrix(y_test, yhat, labels=[2,4]) np.set_printoptions(precision=2) print (classification_report(y_test, yhat)) # Plot non-normalized confusion matrix plt.figure() plot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix') ``` You can also easily use the __f1_score__ from sklearn library: ``` from sklearn.metrics import f1_score f1_score(y_test, yhat, average='weighted') ``` Let's try the Jaccard index for accuracy (note: `jaccard_similarity_score` is deprecated and has been removed from recent scikit-learn releases — use `jaccard_score` in current code): ``` from sklearn.metrics import jaccard_similarity_score jaccard_similarity_score(y_test, yhat) ``` <h2 id="practice">Practice</h2> Can you rebuild the model, but this time with a __linear__ kernel? You can use __kernel='linear'__ option, when you define the svm. How does the accuracy change with the new kernel function? ``` # write your code here ``` Double-click __here__ for the solution. <!-- Your answer is below: clf2 = svm.SVC(kernel='linear') clf2.fit(X_train, y_train) yhat2 = clf2.predict(X_test) print("Avg F1-score: %.4f" % f1_score(y_test, yhat2, average='weighted')) print("Jaccard score: %.4f" % jaccard_similarity_score(y_test, yhat2)) --> <h2>Want to learn more?</h2> IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a> Also, you can use Watson Studio to run these notebooks faster with bigger datasets.
Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a> <h3>Thanks for completing this lesson!</h3> <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4> <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p> <hr> <p>Copyright &copy; 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
github_jupyter
``` %reset ``` # Simulate particles translating through OAM beam Liz Strong 4/17/2020 ``` import sys sys.path.append('../slvel') import pandas as pd import numpy as np import matplotlib.pyplot as plt from calc_intensity import calculate_e_field_intensity from scattering_particle import Particle import scattering_sim as scatsim import random ``` ### make pretty plots ``` %matplotlib notebook ``` ### calculate intensity ``` xval = 986 # grid x size [pixels] yval = 616 # grid y size [pixels] l = 4 # OAM azimuthal mode number w0 = 93.8458 # beam waist [pixels] intensity, intensity_shape = calculate_e_field_intensity(l=l, p=0, w0=w0, x=xval, y=yval, petaledbeam=True) plt.figure() plt.imshow(intensity) plt.colorbar() plt.title('Intensity field') ``` ### make particle to sample the intensity field ``` r = 20 # particle radius [pixels] p1 = Particle(intensity_shape, particle_radius=r, orbit_radius=75, orbit_offset_x=75, orbit_offset_y=0, v=5000, sample_rate=100000, direction=-1) ``` ### presum intensities particle will experience This is slow, so calculate the file once and then save it to load the file later. ``` #presummed_intensity = p1.calculate_sampled_intensities_throughout(Intensity_normalized) #np.save('psi_11_24_19_w0_93.8458.npy', presummed_intensity) psi = np.load('psi_11_24_19_w0_93.8458.npy') ``` ### calculate intensities on particle's orbit ### set parameters for simulation Total number of simulations: num_theta * num_radii * num_d * num_angvels ``` offset_x = [355, 425] # x coordinate of D [pixels] offset_y = [0,0] # y coordinate of D [pixels] orbit_radius = {} # orbit radius [pixels] D = [np.sqrt(x**2+y**2) for x,y in zip(offset_x, offset_y)] # D [pixels] radius = [490, 325] # orbit radius [pixels] for dist, rad in zip(D, radius): orbit_radius[dist] = [rad] orbit_radius = [490, 325] angle = np.arctan2(offset_y, offset_x) # phi [rad] angular_velocities = [250, 250] # Omega [rad/s] ``` ### Make time series. 
``` data = scatsim.simulate_time_series(p1, psi, intensity_shape, offset_x, offset_y, D, angle, angular_velocities, orbit_radius, xval, yval) time_keep = data[0] intensities_keep = data[1] positions_keep = data[2] angular_velocities_keep = data[3] R_keep = data[4] d_keep = data[5] offset_x_keep = data[6] offset_y_keep = data[7] theta_keep = data[8] ``` ### plot results ``` plt.figure() plt.imshow(intensity) for selxn in range(2): plt.plot(positions_keep[selxn].T[0],positions_keep[selxn].T[1],'g') plt.plot(positions_keep[selxn].T[0][0],positions_keep[selxn].T[1][0],'ms') plt.colorbar() plt.figure() for selxn in range(2): plt.plot(intensities_keep[selxn],'.',label='R='+str(orbit_radius[selxn])) plt.legend() ``` ### add noise & concatenate into a long time series ``` timeseries_time, timeseries_intensity, intensities_extended = scatsim.concat_timeseries(intensities_keep, time_keep, ext_length=420) plt.figure() plt.plot(timeseries_time, timeseries_intensity) plt.xlabel('Time, t [sec]') plt.ylabel('Signal, y [summed intensity]') ``` ### Save angular velocities with times to correlate them later for ML training purposes ``` # save angular velocities corresponding to each seg vs = scatsim.save_series_info(angular_velocities_keep, intensities_keep, intensities_extended) # save orbit radii corresponding to each seg Rs = scatsim.save_series_info(R_keep, intensities_keep, intensities_extended) # save x offset radii corresponding to each seg delta_xs = scatsim.save_series_info(offset_x_keep, intensities_keep, intensities_extended) # save y offset radii corresponding to each seg delta_ys = scatsim.save_series_info(offset_y_keep, intensities_keep, intensities_extended) ``` ### save data ``` data_to_save = np.array([timeseries_time,timeseries_intensity,angular_velocities_keep,vs,Rs,delta_xs,delta_ys]) np.save('example_simulated_signal.npy',data_to_save) ```
github_jupyter
## Dependencies ``` from openvaccine_scripts import * import warnings, json from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras import optimizers, losses, Model from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau SEED = 0 seed_everything(SEED) warnings.filterwarnings('ignore') ``` # Model parameters ``` config = { "BATCH_SIZE": 32, "EPOCHS": 70, "LEARNING_RATE": 1e-3, "ES_PATIENCE": 10, "N_FOLDS": 5, "N_USED_FOLDS": 5, "PB_SEQ_LEN": 107, "PV_SEQ_LEN": 130, } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config ``` # Load data ``` database_base_path = '/kaggle/input/stanford-covid-vaccine/' train = pd.read_json(database_base_path + 'train.json', lines=True) test = pd.read_json(database_base_path + 'test.json', lines=True) print('Train samples: %d' % len(train)) display(train.head()) print(f'Test samples: {len(test)}') display(test.head()) ``` ## Data augmentation ``` def aug_data(df): target_df = df.copy() new_df = aug_df[aug_df['id'].isin(target_df['id'])] del target_df['structure'] del target_df['predicted_loop_type'] new_df = new_df.merge(target_df, on=['id','sequence'], how='left') df['cnt'] = df['id'].map(new_df[['id','cnt']].set_index('id').to_dict()['cnt']) df['log_gamma'] = 100 df['score'] = 1.0 new_df['augmented'] = True df['augmented'] = False df = df.append(new_df[df.columns]) return df # Augmented data aug_df = pd.read_csv('/kaggle/input/augmented-data-for-stanford-covid-vaccine/48k_augment.csv') print(f'Augmented samples: {len(aug_df)}') display(aug_df.head()) print(f"Samples in train before augmentation: {len(train)}") print(f"Samples in test before augmentation: {len(test)}") train = aug_data(train) train.drop('index', axis=1, inplace=True) train = train.reset_index() test = aug_data(test) test.drop('index', axis=1, inplace=True) test = test.reset_index() 
print(f"Samples in train after augmentation: {len(train)}") print(f"Samples in test after augmentation: {len(test)}") print(f"Unique id in train: {len(train['id'].unique())}") print(f"Unique sequences in train: {len(train['sequence'].unique())}") print(f"Unique structure in train: {len(train['structure'].unique())}") print(f"Unique predicted_loop_type in train: {len(train['predicted_loop_type'].unique())}") print(f"Unique id in test: {len(test['id'].unique())}") print(f"Unique sequences in test: {len(test['sequence'].unique())}") print(f"Unique structure in test: {len(test['structure'].unique())}") print(f"Unique predicted_loop_type in test: {len(test['predicted_loop_type'].unique())}") ``` ## Auxiliary functions ``` def get_dataset(x, y=None, sample_weights=None, labeled=True, shuffled=True, repeated=False, batch_size=32, buffer_size=-1, seed=0): input_map = {'inputs_seq': x['sequence'], 'inputs_struct': x['structure'], 'inputs_loop': x['predicted_loop_type'], 'inputs_bpps_max': x['bpps_max'], 'inputs_bpps_sum': x['bpps_sum'], 'inputs_bpps_scaled': x['bpps_scaled']} if labeled: output_map = {'output_react': y['reactivity'], 'output_mg_ph': y['deg_Mg_pH10'], 'output_ph': y['deg_pH10'], 'output_mg_c': y['deg_Mg_50C'], 'output_c': y['deg_50C']} if sample_weights is not None: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map, sample_weights)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map)) if repeated: dataset = dataset.repeat() if shuffled: dataset = dataset.shuffle(2048, seed=seed) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(buffer_size) return dataset def get_dataset_sampling(x, y=None, sample_weights=None, labeled=True, shuffled=True, repeated=False, batch_size=32, buffer_size=-1, seed=0): input_map = {'inputs_seq': x['sequence'], 'inputs_struct': x['structure'], 'inputs_loop': x['predicted_loop_type'], 'inputs_bpps_max': x['bpps_max'], 
'inputs_bpps_sum': x['bpps_sum'], 'inputs_bpps_scaled': x['bpps_scaled']} if labeled: output_map = {'output_react': y['reactivity'], 'output_mg_ph': y['deg_Mg_pH10'], 'output_ph': y['deg_pH10'], 'output_mg_c': y['deg_Mg_50C'], 'output_c': y['deg_50C']} if sample_weights is not None: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map, sample_weights)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map, output_map)) else: dataset = tf.data.Dataset.from_tensor_slices((input_map)) if repeated: dataset = dataset.repeat() if shuffled: dataset = dataset.shuffle(2048, seed=seed) return dataset ``` # Pre-process ``` # Add bpps as features train = add_bpps_features(train, database_base_path) test = add_bpps_features(test, database_base_path) feature_cols = ['sequence', 'structure', 'predicted_loop_type', 'bpps_max', 'bpps_sum', 'bpps_scaled'] pred_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C'] encoder_list = [token2int_seq, token2int_struct, token2int_loop, None, None, None] public_test = test.query("seq_length == 107").copy() private_test = test.query("seq_length == 130").copy() x_test_public = get_features_dict(public_test, feature_cols, encoder_list, public_test.index) x_test_private = get_features_dict(private_test, feature_cols, encoder_list, private_test.index) # To use as stratified col train['signal_to_noise_int'] = train['signal_to_noise'].astype(int) ``` # Model ``` def model_fn(hidden_dim=384, dropout=.5, pred_len=68, n_outputs=5): inputs_seq = L.Input(shape=(None, 1), name='inputs_seq') inputs_struct = L.Input(shape=(None, 1), name='inputs_struct') inputs_loop = L.Input(shape=(None, 1), name='inputs_loop') inputs_bpps_max = L.Input(shape=(None, 1), name='inputs_bpps_max') inputs_bpps_sum = L.Input(shape=(None, 1), name='inputs_bpps_sum') inputs_bpps_scaled = L.Input(shape=(None, 1), name='inputs_bpps_scaled') def _one_hot(x, num_classes): return K.squeeze(K.one_hot(K.cast(x, 'uint8'), 
num_classes=num_classes), axis=2) ohe_seq = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_seq)}, input_shape=(None, 1))(inputs_seq) ohe_struct = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_struct)}, input_shape=(None, 1))(inputs_struct) ohe_loop = L.Lambda(_one_hot, arguments={'num_classes': len(token2int_loop)}, input_shape=(None, 1))(inputs_loop) ### Encoder block # Conv block conv_seq = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_seq) conv_struct = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_struct) conv_loop = L.Conv1D(filters=64, kernel_size=3, padding='same')(ohe_loop) conv_bpps_max = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_max) conv_bpps_sum = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_sum) conv_bpps_scaled = L.Conv1D(filters=64, kernel_size=3, padding='same')(inputs_bpps_scaled) x_concat = L.concatenate([conv_seq, conv_struct, conv_loop, conv_bpps_max, conv_bpps_sum, conv_bpps_scaled], axis=-1, name='conv_concatenate') # Recurrent block encoder, encoder_state_f, encoder_state_b = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, return_state=True, kernel_initializer='orthogonal'), name='Encoder_RNN')(x_concat) ### Decoder block decoder = L.Bidirectional(L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal'), name='Decoder')(encoder, initial_state=[encoder_state_f, encoder_state_b]) # Since we are only making predictions on the first part of each sequence, we have to truncate it decoder_truncated = decoder[:, :pred_len] output_react = L.Dense(1, name='output_react')(decoder_truncated) output_mg_ph = L.Dense(1, name='output_mg_ph')(decoder_truncated) output_ph = L.Dense(1, name='output_ph')(decoder_truncated) output_mg_c = L.Dense(1, name='output_mg_c')(decoder_truncated) output_c = L.Dense(1, name='output_c')(decoder_truncated) model = Model(inputs=[inputs_seq, inputs_struct, inputs_loop, 
inputs_bpps_max, inputs_bpps_sum, inputs_bpps_scaled], outputs=[output_react, output_mg_ph, output_ph, output_mg_c, output_c]) opt = optimizers.Adam(learning_rate=config['LEARNING_RATE']) model.compile(optimizer=opt, loss={'output_react': MCRMSE, 'output_mg_ph': MCRMSE, 'output_ph': MCRMSE, 'output_mg_c': MCRMSE, 'output_c': MCRMSE}, loss_weights={'output_react': 5., 'output_mg_ph': 5., 'output_ph': 1., 'output_mg_c': 5., 'output_c': 1.}) return model model = model_fn() model.summary() ``` # Training ``` AUTO = tf.data.experimental.AUTOTUNE skf = GroupKFold(n_splits=config['N_FOLDS']) history_list = [] oof = train[['id', 'SN_filter', 'signal_to_noise'] + pred_cols].copy() oof_preds = np.zeros((len(train), 68, len(pred_cols))) test_public_preds = np.zeros((len(public_test), config['PB_SEQ_LEN'], len(pred_cols))) test_private_preds = np.zeros((len(private_test), config['PV_SEQ_LEN'], len(pred_cols))) for fold,(train_idx, valid_idx) in enumerate(skf.split(train, train['signal_to_noise_int'], train['id'])): if fold >= config['N_USED_FOLDS']: break print(f'\nFOLD: {fold+1}') # Create clean and noisy datasets valid_clean_idxs = np.intersect1d(train[(train['SN_filter'] == 1) & (train['augmented'] == False)].index, valid_idx) ### Create datasets # x_train = get_features_dict(train, feature_cols, encoder_list, train_idx) # y_train = get_targets_dict(train, pred_cols, train_idx) # w_train = np.log(train.iloc[train_idx]['signal_to_noise'].values+1.2)+1 x_valid = get_features_dict(train, feature_cols, encoder_list, valid_clean_idxs) y_valid = get_targets_dict(train, pred_cols, valid_clean_idxs) w_valid = np.log(train.iloc[valid_clean_idxs]['signal_to_noise'].values+1.2)+1 # train_ds = get_dataset(x_train, y_train, w_train, labeled=True, shuffled=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) valid_ds = get_dataset(x_valid, y_valid, w_valid, labeled=True, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) oof_ds = 
get_dataset(get_features_dict(train, feature_cols, encoder_list, valid_idx), labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) test_public_ds = get_dataset(x_test_public, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) test_private_ds = get_dataset(x_test_private, labeled=False, shuffled=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) # Create clean and noisy datasets normal_idxs = np.intersect1d(train[train['augmented'] == False].index, train_idx) x_train_normal = get_features_dict(train, feature_cols, encoder_list, normal_idxs) y_train_normal = get_targets_dict(train, pred_cols, normal_idxs) w_train_normal = np.log(train.iloc[normal_idxs]['signal_to_noise'].values+1.2)+1 normal_ds = get_dataset_sampling(x_train_normal, y_train_normal, w_train_normal, labeled=True, shuffled=True, repeated=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) augmented_idxs = np.intersect1d(train[train['augmented'] == True].index, train_idx) x_train_augmented = get_features_dict(train, feature_cols, encoder_list, augmented_idxs) y_train_augmented = get_targets_dict(train, pred_cols, augmented_idxs) w_train_augmented = np.log(train.iloc[augmented_idxs]['signal_to_noise'].values+1.2)+1 augmented_ds = get_dataset_sampling(x_train_augmented, y_train_augmented, w_train_augmented, labeled=True, shuffled=True, repeated=True, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, seed=SEED) # Resampled TF Dataset resampled_ds = tf.data.experimental.sample_from_datasets([normal_ds, augmented_ds], weights=[.5, .5]) resampled_ds = resampled_ds.batch(config['BATCH_SIZE']).prefetch(AUTO) ### Model K.clear_session() model = model_fn() model_path = f'model_{fold}.h5' es = EarlyStopping(monitor='val_loss', mode='min', patience=config['ES_PATIENCE'], restore_best_weights=True, verbose=1) rlrp = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.1, patience=5, verbose=1) ### 
Train history = model.fit(resampled_ds, validation_data=valid_ds, callbacks=[es, rlrp], epochs=config['EPOCHS'], batch_size=config['BATCH_SIZE'], steps_per_epoch=int(len(normal_idxs)//(config['BATCH_SIZE']* .5)), verbose=2).history history_list.append(history) # Save last model weights model.save_weights(model_path) ### Inference oof_ds_preds = np.array(model.predict(oof_ds)).reshape((len(pred_cols), len(valid_idx), 68)).transpose((1, 2, 0)) oof_preds[valid_idx] = oof_ds_preds # Short sequence (public test) model = model_fn(pred_len=config['PB_SEQ_LEN']) model.load_weights(model_path) test_public_ds_preds = np.array(model.predict(test_public_ds)).reshape((len(pred_cols), len(public_test), config['PB_SEQ_LEN'])).transpose((1, 2, 0)) test_public_preds += test_public_ds_preds * (1 / config['N_USED_FOLDS']) # Long sequence (private test) model = model_fn(pred_len=config['PV_SEQ_LEN']) model.load_weights(model_path) test_private_ds_preds = np.array(model.predict(test_private_ds)).reshape((len(pred_cols), len(private_test), config['PV_SEQ_LEN'])).transpose((1, 2, 0)) test_private_preds += test_private_ds_preds * (1 / config['N_USED_FOLDS']) ``` ## Model loss graph ``` for fold, history in enumerate(history_list): print(f'\nFOLD: {fold+1}') min_valid_idx = np.array(history['val_loss']).argmin() print(f"Train {np.array(history['loss'])[min_valid_idx]:.5f} Validation {np.array(history['val_loss'])[min_valid_idx]:.5f}") plot_metrics_agg(history_list) ``` # Post-processing ``` # Assign preds to OOF set for idx, col in enumerate(pred_cols): val = oof_preds[:, :, idx] oof = oof.assign(**{f'{col}_pred': list(val)}) oof.to_csv('oof.csv', index=False) oof_preds_dict = {} for col in pred_cols: oof_preds_dict[col] = oof_preds[:, :, idx] # Assign values to test set preds_ls = [] for df, preds in [(public_test, test_public_preds), (private_test, test_private_preds)]: for i, uid in enumerate(df.id): single_pred = preds[i] single_df = pd.DataFrame(single_pred, columns=pred_cols) 
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])] preds_ls.append(single_df) preds_df = pd.concat(preds_ls) # Averaging over augmented predictions preds_df = pd.concat(preds_ls).groupby('id_seqpos').mean().reset_index() ``` # Model evaluation ``` y_true_dict = get_targets_dict(train, pred_cols, train.index) y_true = np.array([y_true_dict[col] for col in pred_cols]).transpose((1, 2, 0, 3)).reshape(oof_preds.shape) display(evaluate_model(train, y_true, oof_preds, pred_cols)) display(evaluate_model(train, y_true, oof_preds, pred_cols, use_cols=['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C'])) ``` # Visualize test predictions ``` submission = pd.read_csv(database_base_path + 'sample_submission.csv') submission = submission[['id_seqpos']].merge(preds_df, on=['id_seqpos']) ``` # Test set predictions ``` display(submission.head(10)) display(submission.describe()) submission.to_csv('submission.csv', index=False) ```
github_jupyter
# 02. Custom Dataset 만들어보기 - Dataset Generation! - 폴더별로 사진들이 모여있다면, 그 dataset을 우리가 원하는 형태로 바꿔봅시다! ``` import numpy as np import os from scipy.misc import imread, imresize import matplotlib.pyplot as plt %matplotlib inline print ("Package loaded") cwd = os.getcwd() print ("Current folder is %s" % (cwd) ) # 학습할 폴더 경로 설정 paths = {"../../img_dataset/celebs/Arnold_Schwarzenegger" , "../../img_dataset/celebs/Junichiro_Koizumi" , "../../img_dataset/celebs/Vladimir_Putin" , "../../img_dataset/celebs/George_W_Bush"} categories = ["Arnold","Koizumi","Putin","Bush"] # The reshape size imgsize = [64, 64] # Grayscale use_gray = 1 # Save name data_name = "custom_data" print ("Your images should be at") for i, path in enumerate(paths): print (" [%d/%d] %s/%s" % (i, len(paths), cwd, path)) print ("Data will be saved to %s" % (cwd + '/data/' + data_name + '.npz')) ``` # RGB 2 GRAY ``` def rgb2gray(rgb): if len(rgb.shape) is 3: return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) else: # print ("Current Image if GRAY!") return rgb ``` # LOAD Image ``` nclass = len(paths) valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"] imgcnt = 0 for i, relpath in zip(range(nclass), paths): path = cwd + "/" + relpath flist = os.listdir(path) for f in flist: if os.path.splitext(f)[1].lower() not in valid_exts: continue fullpath = os.path.join(path, f) currimg = imread(fullpath) # Convert to grayscale if use_gray: grayimg = rgb2gray(currimg) else: grayimg = currimg # Reshape graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255. grayvec = np.reshape(graysmall, (1, -1)) # Save curr_label = np.eye(nclass, nclass)[i:i+1, :] # np.eye : 단위행렬을 구함 -> One Hot Vector를 만듬 if imgcnt is 0: totalimg = grayvec totallabel = curr_label else: totalimg = np.concatenate((totalimg, grayvec), axis=0) totallabel = np.concatenate((totallabel, curr_label), axis=0) imgcnt = imgcnt + 1 print ("Total %d images loaded." 
% (imgcnt)) def print_shape(string, x): print ("Shape of '%s' is %s" % (string, x.shape,)) randidx = np.random.randint(imgcnt, size=imgcnt) trainidx = randidx[0:int(3*imgcnt/5)] testidx = randidx[int(3*imgcnt/5):imgcnt] trainimg = totalimg[trainidx, :] trainlabel = totallabel[trainidx, :] testimg = totalimg[testidx, :] testlabel = totallabel[testidx, :] print_shape("trainimg", trainimg) print_shape("trainlabel", trainlabel) print_shape("testimg", testimg) print_shape("testlabel", testlabel) ``` # Save ``` savepath = cwd + "/data/" + data_name + ".npz" np.savez(savepath, trainimg=trainimg, trainlabel=trainlabel , testimg=testimg, testlabel=testlabel, imgsize=imgsize, use_gray=use_gray) print ("Saved to %s" % (savepath)) ``` # Load to Check ``` # Load them! cwd = os.getcwd() loadpath = cwd + "/data/" + data_name + ".npz" l = np.load(loadpath) # See what's in here l.files # Parse data trainimg_loaded = l['trainimg'] trainlabel_loaded = l['trainlabel'] testimg_loaded = l['testimg'] testlabel_loaded = l['testlabel'] print ("%d train images loaded" % (trainimg_loaded.shape[0])) print ("%d test images loaded" % (testimg_loaded.shape[0])) print ("Loaded from to %s" % (savepath)) ``` # Plot randomly train images ``` # Load them! 
cwd = os.getcwd() loadpath = cwd + "/data/" + data_name + ".npz" l = np.load(loadpath) # See what's in here l.files # Parse data trainimg_loaded = l['trainimg'] trainlabel_loaded = l['trainlabel'] testimg_loaded = l['testimg'] testlabel_loaded = l['testlabel'] print ("%d train images loaded" % (trainimg_loaded.shape[0])) print ("%d test images loaded" % (testimg_loaded.shape[0])) print ("Loaded from to %s" % (savepath)) ``` # Plot randomly test images ``` # Do batch stuff using loaded data ntest_loaded = testimg_loaded.shape[0] batch_size = 3; randidx = np.random.randint(ntest_loaded, size=batch_size) for i in randidx: currimg = np.reshape(testimg_loaded[i, :], (imgsize[0], -1)) currlabel_onehot = testlabel_loaded[i, :] currlabel = np.argmax(currlabel_onehot) if use_gray: currimg = np.reshape(testimg[i, :], (imgsize[0], -1)) plt.matshow(currimg, cmap=plt.get_cmap('gray')) plt.colorbar() else: currimg = np.reshape(testimg[i, :], (imgsize[0], imgsize[1], 3)) plt.imshow(currimg) title_string = "[%d] %d-class" % (i, currlabel) plt.title(title_string) plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/tiffanysn/general_learning/blob/dev/Quantium/Quantium_task_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/drive') ``` ## Load required libraries and datasets ``` ! cp drive/My\ Drive/QVI_data.csv . import pandas as pd import plotly.express as px import numpy as np df=pd.read_csv('QVI_data.csv') df.shape df.info() df.describe(include= 'all') df.info() ``` # Trial store 77 ## Select control store #### Add Month column ``` import datetime df['year'] = pd.DatetimeIndex(df['DATE']).year df['month']=pd.DatetimeIndex(df['DATE']).month df['year_month']=pd.to_datetime(df['DATE']).dt.floor('d') - pd.offsets.MonthBegin(1) df ``` #### Monthly calculation for each store ``` totSales= df.groupby(['STORE_NBR','year_month'])['TOT_SALES'].sum().reset_index() totSales measureOverTime2 = pd.DataFrame(data=totSales) nTxn= df.groupby(['STORE_NBR','year_month'])['TXN_ID'].count().reset_index(drop=True) nTxn sorted(df['year_month'].unique()) measureOverTime2['nCustomers'] = df.groupby(['STORE_NBR','year_month','LYLTY_CARD_NBR'])['DATE'].count().groupby(['STORE_NBR','year_month']).count().reset_index(drop=True) measureOverTime2.head() measureOverTime2['nTxnPerCust'] = nTxn/measureOverTime2['nCustomers'] measureOverTime2.head() totQty = df.groupby(['STORE_NBR','year_month'])['PROD_QTY'].sum().reset_index(drop=True) totQty measureOverTime2['nChipsPerTxn'] = totQty/nTxn measureOverTime2 measureOverTime2['avgPricePerUnit'] = totSales['TOT_SALES']/totQty measureOverTime2 ``` #### Filter pre-trial & stores with full obs ``` measureOverTime2.set_index('year_month', inplace=True) preTrialMeasures = measureOverTime2.loc['2018-06-01':'2019-01-01'].reset_index() preTrialMeasures ``` #### Owen's *Solution* ``` measureOverTime = df.groupby(['STORE_NBR','year_month','LYLTY_CARD_NBR']).\ agg( totSalesPerCust=('TOT_SALES', 
sum), nTxn=('TXN_ID', "count"), nChips=('PROD_QTY', sum) ).\ groupby(['STORE_NBR','year_month']).\ agg( totSales=("totSalesPerCust", sum), nCustomers=("nTxn", "count"), nTxnPerCust=("nTxn", lambda x: x.sum()/x.count()), totChips=("nChips", sum), totTxn=("nTxn", sum)).\ reset_index() measureOverTime['nChipsPerTxn'] = measureOverTime['totChips']/measureOverTime['totTxn'] measureOverTime['avgPricePerUnit'] = measureOverTime['totSales']/measureOverTime['totChips'] measureOverTime.drop(['totChips', 'totTxn'], axis=1, inplace=True) ``` #### Calculate correlation ``` preTrialMeasures # Input inputTable = preTrialMeasures metricCol = 'TOT_SALES' storeComparison = 77 x = 1 corr = preTrialMeasures.\ loc[preTrialMeasures['STORE_NBR'].\ isin([x,storeComparison])].\ loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).\ corr().\ iloc[0, 1] preTrialMeasures.loc[preTrialMeasures['STORE_NBR'].isin([x,storeComparison])].loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).corr() df = pd.DataFrame(columns=['Store1', 'Store2', 'corr_measure']) df.append({'Store1':x, 'Store2':storeComparison, 'corr_measure':corr}, ignore_index=True) def calculateCorrelation(inputTable, metricCol, storeComparison): df = pd.DataFrame(columns=['Store1', 'Store2', 'corr_measure']) for x in inputTable.STORE_NBR.unique(): if x in [77, 86, 88]: pass else: corr = inputTable.\ loc[inputTable['STORE_NBR'].\ isin([x,storeComparison])].\ loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).\ corr().\ iloc[0, 1] df = df.append({'Store1':storeComparison, 'Store2':x, 'corr_measure':corr}, ignore_index=True) return(df) calcCorrTable = calculateCorrelation(inputTable=preTrialMeasures, metricCol='nCustomers', storeComparison=77) calcCorrTable ``` #### Calculate magnitude distance ``` inputTable = preTrialMeasures metricCol = 'TOT_SALES' 
storeComparison = '77' x='2' mag = preTrialMeasures.\ loc[preTrialMeasures['STORE_NBR'].isin([x, storeComparison])].\ loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).\ reset_index().rename_axis(None, axis=1) mag mag.columns = mag.columns.map(str) mag mag['measures'] = mag.apply(lambda row: row[x]-row[storeComparison], axis=1).abs() mag mag['Store1'] = x mag['Store2'] = storeComparison df_temp = mag.loc[:, ['Store1', 'Store2', 'year_month','measures']] df_temp df = pd.DataFrame(columns=['Store1', 'Store2', 'year_month','measures']) df inputTable = preTrialMeasures metricCol = 'TOT_SALES' storeComparison = '77' df = pd.DataFrame(columns=['Store1', 'Store2', 'year_month','measures']) for x in inputTable.STORE_NBR.unique(): if x in [77, 86, 88]: pass else: mag = preTrialMeasures.\ loc[preTrialMeasures['STORE_NBR'].\ isin([x, storeComparison])].\ loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).\ reset_index().rename_axis(None, axis=1) mag.columns = ['year_month', 'Store1', 'Store2'] mag['measures'] = mag.apply(lambda row: row['Store1']-row['Store2'], axis=1).abs() mag['Store1'] = x mag['Store2'] = storeComparison df_temp = mag.loc[:, ['Store1', 'Store2', 'year_month','measures']] df = pd.concat([df, df_temp]) df def calculateMagnitudeDistance(inputTable, metricCol, storeComparison): df = pd.DataFrame(columns=['Store1', 'Store2', 'year_month','measures']) for x in inputTable.STORE_NBR.unique(): if x in [77, 86, 88]: pass else: mag = preTrialMeasures.\ loc[preTrialMeasures['STORE_NBR'].\ isin([x, storeComparison])].\ loc[:, ['year_month', 'STORE_NBR', metricCol]].\ pivot(index='year_month', columns='STORE_NBR', values=metricCol).\ reset_index().rename_axis(None, axis=1) mag.columns = ['year_month', 'Store1', 'Store2'] mag['measures'] = mag.apply(lambda row: row['Store1']-row['Store2'], axis=1).abs() mag['Store1'] = storeComparison 
mag['Store2'] = x df_temp = mag.loc[:, ['Store1', 'Store2', 'year_month','measures']] df = pd.concat([df, df_temp]) return df def finalDistTable(inputTable, metricCol, storeComparison): calcDistTable = calculateMagnitudeDistance(inputTable, metricCol, storeComparison) minMaxDist = calcDistTable.groupby(['Store1','year_month'])['measures'].agg(['max','min']).reset_index() distTable = calcDistTable.merge(minMaxDist, on=['year_month', 'Store1']) distTable['magnitudeMeasure']= distTable.apply(lambda row: 1- (row['measures']-row['min'])/(row['max']-row['min']),axis=1) finalDistTable = distTable.groupby(['Store1','Store2'])['magnitudeMeasure'].mean().reset_index() finalDistTable.columns = ['Store1','Store2','mag_measure'] return finalDistTable calcDistTable = calculateMagnitudeDistance(inputTable=preTrialMeasures, metricCol='nCustomers', storeComparison='77') calcDistTable ``` #### Standardise the magnitude distance ``` #calcDistTable.groupby(['Store1','year_month'])['measures'].apply(lambda g: g.max() - g.min()).reset_index() minMaxDist = calcDistTable.groupby(['Store1','year_month'])['measures'].agg(['max','min']).reset_index() minMaxDist calcDistTable.merge(minMaxDist, on=['year_month', 'Store1']) distTable = calcDistTable.merge(minMaxDist, on=['year_month', 'Store1']) distTable distTable['magnitudeMeasure']= distTable.apply(lambda row: 1- (row['measures']-row['min'])/(row['max']-row['min']),axis=1) distTable ``` #### Merge nTotSals & nCustomers ``` corr_nSales = calculateCorrelation(inputTable=preTrialMeasures, metricCol='TOT_SALES',storeComparison='77') corr_nSales corr_nCustomers = calculateCorrelation(inputTable=preTrialMeasures, metricCol='nCustomers',storeComparison='77') corr_nCustomers magnitude_nSales = finalDistTable(inputTable=preTrialMeasures, metricCol='TOT_SALES',storeComparison='77') magnitude_nSales magnitude_nCustomers = finalDistTable(inputTable=preTrialMeasures, metricCol='nCustomers',storeComparison='77') magnitude_nCustomers ``` #### Get control 
store

```
# Composite score = 50/50 blend of correlation and standardised magnitude
# distance, first per metric, then across the two metrics.
score_nSales = corr_nSales.merge(magnitude_nSales, on=['Store1','Store2'])
score_nSales['scoreNSales'] = score_nSales.apply(lambda row: row['corr_measure']*0.5 + row['mag_measure']*0.5, axis=1)
score_nSales = score_nSales.loc[:,['Store1','Store2', 'scoreNSales']]
score_nSales

score_nCustomers = corr_nCustomers.merge(magnitude_nCustomers, on=['Store1','Store2'])
score_nCustomers['scoreNCust'] = score_nCustomers.apply(lambda row: row['corr_measure']*0.5 + row['mag_measure']*0.5, axis=1)
score_nCustomers = score_nCustomers.loc[:,['Store1','Store2','scoreNCust']]
score_nCustomers

score_Control = score_nSales.merge(score_nCustomers, on=['Store1','Store2'])
score_Control

score_Control['finalControlScore'] = score_Control.apply(lambda row: row['scoreNSales']*0.5 + row['scoreNCust']*0.5, axis=1)
score_Control

# NOTE(review): `final_control_store` holds the best *score*, not a store
# number; the row selected below carries the actual control store id.
final_control_store = score_Control['finalControlScore'].max()
score_Control[score_Control['finalControlScore']==final_control_store]
```

#### Visualization the control store

```
# Tag each store for plotting. NOTE(review): 'Trail' is a typo for 'Trial',
# but it is a runtime label that later cells filter on — fix all occurrences
# together or not at all.
measureOverTime['Store_type'] = measureOverTime.apply(lambda row: 'Trail' if row['STORE_NBR']==77 else ('Control' if row['STORE_NBR']==233 else 'Other stores'), axis=1)
measureOverTime

measureOverTime['Store_type'].unique()

# Average monthly sales per store type (trial and control are single stores,
# 'Other stores' is the mean across the rest).
measureOverTimeSales = measureOverTime.groupby(['year_month','Store_type'])['totSales'].mean().reset_index()
measureOverTimeSales

measureOverTimeSales.set_index('year_month',inplace=True)
# Pre-trial window only (Jun 2018 – Jan 2019).
pastSales = measureOverTimeSales.loc['2018-06-01':'2019-01-01'].reset_index()
pastSales

px.line(data_frame=pastSales, x='year_month', y='totSales', color='Store_type', title='Total sales by month',labels={'year_month':'Month of operation','totSales':'Total sales'})

# Same view for customer counts.
measureOverTimeCusts = measureOverTime.groupby(['year_month','Store_type'])['nCustomers'].mean().reset_index()
measureOverTimeCusts

measureOverTimeCusts.set_index('year_month',inplace=True)
pastCustomers = measureOverTimeCusts.loc['2018-06-01':'2019-01-01'].reset_index()
pastCustomers

px.line(data_frame=pastCustomers,
x='year_month', y='nCustomers', color='Store_type', title='Total customers by month',labels={'year_month':'Month of operation','nCustomers':'Total customers'}) ``` ## Assessment of trial period ### Calculate for totSales #### Scale sales ``` preTrialMeasures preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==77, 'TOT_SALES'].sum() preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==233, 'TOT_SALES'].sum() scalingFactorForControlSales = preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==77, 'TOT_SALES'].sum() / preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==233, 'TOT_SALES'].sum() scalingFactorForControlSales ``` #### Apply the scaling factor ``` scaledControlSales = measureOverTimeSales.loc[measureOverTimeSales['Store_type']=='Control','totSales'].reset_index() scaledControlSales scaledControlSales['scaledControlSales'] = scaledControlSales.apply(lambda row: row['totSales']*scalingFactorForControlSales,axis=1) scaledControlSales TrailStoreSales = measureOverTimeSales.loc[measureOverTimeSales['Store_type']=='Trail',['totSales']] TrailStoreSales TrailStoreSales.columns = ['trailSales'] TrailStoreSales ``` #### %Diff between scaled control and trial for sales ``` percentageDiff = scaledControlSales.merge(TrailStoreSales, on='year_month',) percentageDiff percentageDiff['percentDiff'] = percentageDiff.apply(lambda row: (row['scaledControlSales']-row['trailSales'])/row['scaledControlSales'], axis=1) percentageDiff ``` #### Get standard deviation ``` stdDev = percentageDiff.loc[percentageDiff['year_month']< '2019-02-01', 'percentDiff'].std(ddof=8-1) stdDev ``` #### Calculate the t-values for the trial months ``` from scipy.stats import ttest_ind control = percentageDiff.loc[percentageDiff['year_month']>'2019-01-01',['scaledControlSales']] control trail = percentageDiff.loc[percentageDiff['year_month']>'2019-01-01',['trailSales']] trail ttest_ind(control,trail) ``` The null hypothesis here is "the sales between control and trial stores has **NO** significantly 
difference in trial period." The pvalue is 0.32, which is 32% that they are same in sales,which is much greater than 5%. Fail to reject the null hypothesis. Therefore, we are not confident to say "the trial period impact trial store sales." ``` percentageDiff['t-value'] = percentageDiff.apply(lambda row: (row['percentDiff']- 0) / stdDev,axis=1) percentageDiff ``` We can observe that the t-value is much larger than the 95th percentile value of the t-distribution for March and April. i.e. the increase in sales in the trial store in March and April is statistically greater than in the control store. #### 95th & 5th percentile of control store ``` measureOverTimeSales pastSales_Controls95 = measureOverTimeSales.loc[measureOverTimeSales['Store_type']=='Control'] pastSales_Controls95['totSales'] = pastSales_Controls95.apply(lambda row: row['totSales']*(1+stdDev*2),axis=1) pastSales_Controls95.iloc[0:13,0] = 'Control 95th % confidence interval' pastSales_Controls95.reset_index() pastSales_Controls5 = measureOverTimeSales.loc[measureOverTimeSales['Store_type']=='Control'] pastSales_Controls5['totSales'] = pastSales_Controls95.apply(lambda row: row['totSales']*(1-stdDev*2),axis=1) pastSales_Controls5.iloc[0:13,0] = 'Control 5th % confidence interval' pastSales_Controls5.reset_index() trialAssessment = pd.concat([measureOverTimeSales,pastSales_Controls5,pastSales_Controls95]) trialAssessment = trialAssessment.sort_values(by=['year_month']) trialAssessment = trialAssessment.reset_index() trialAssessment ``` #### Visualization Trial ``` px.line(data_frame=trialAssessment, x='year_month', y='totSales', color='Store_type', title='Total sales by month',labels={'year_month':'Month of operation','totSales':'Total sales'}) ``` ### Calculate for nCustomers #### Scales nCustomers ``` preTrialMeasures preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==77,'nCustomers'].sum() preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==233,'nCustomers'].sum() scalingFactorForControlnCustomers = 
preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==77,'nCustomers'].sum() / preTrialMeasures.loc[preTrialMeasures['STORE_NBR']==233,'nCustomers'].sum()
scalingFactorForControlnCustomers
```

#### Apply the scaling factor

```
measureOverTime

# Scale the control store's customer counts so its pre-trial total matches the
# trial store's (ratio computed above).
scaledControlNcustomers = measureOverTime.loc[measureOverTime['Store_type']=='Control',['year_month','nCustomers']]
scaledControlNcustomers

# NOTE(review): assigning into a .loc slice — raises SettingWithCopyWarning;
# consider .copy() on the line above.
scaledControlNcustomers['scaledControlNcus'] = scaledControlNcustomers.apply(lambda row: row['nCustomers']*scalingFactorForControlnCustomers, axis=1)
scaledControlNcustomers
```

#### %Diff between scaled control & trail for nCustomers

```
measureOverTime.loc[measureOverTime['Store_type']=='Trail',['year_month','nCustomers']]

percentageDiff = scaledControlNcustomers.merge(measureOverTime.loc[measureOverTime['Store_type']=='Trail',['year_month','nCustomers']],on='year_month')
percentageDiff

percentageDiff.columns=['year_month','controlCustomers','scaledControlNcus','trialCustomers']
percentageDiff

# Relative gap between scaled control and trial customer counts per month.
percentageDiff['%Diff'] = percentageDiff.apply(lambda row: (row['scaledControlNcus']-row['trialCustomers'])/row['scaledControlNcus'],axis=1)
percentageDiff
```

#### Get standard deviation

```
# ddof = 8 - 1: seven degrees of freedom for the eight pre-trial months.
stdDev = percentageDiff.loc[percentageDiff['year_month']< '2019-02-01', '%Diff'].std(ddof=8-1)
stdDev
```

#### Calculate the t-values for the trial months

```
# t-value of each month's %Diff against a null mean of 0.
percentageDiff['t-value'] = percentageDiff.apply(lambda row: (row['%Diff']- 0) / stdDev,axis=1)
percentageDiff
```

#### 95th & 5th percentile of control store

```
measureOverTimeCusts

# Approximate 95%/5% bands as control +/- 2 standard deviations.
# NOTE(review): these are .loc slices of measureOverTimeCusts; the assignments
# below trigger SettingWithCopyWarning — consider .copy().
pastNcus_Controls95 = measureOverTimeCusts.loc[measureOverTimeCusts['Store_type']=='Control']
pastNcus_Controls95['nCustomers'] = pastNcus_Controls95.apply(lambda row: row['nCustomers']*(1+stdDev*2),axis=1)
pastNcus_Controls95.iloc[0:13,0] = 'Control 95th % confidence interval'
pastNcus_Controls95.reset_index()

pastNcus_Controls5 = measureOverTimeCusts.loc[measureOverTimeCusts['Store_type']=='Control']
pastNcus_Controls5['nCustomers'] = pastNcus_Controls5.apply(lambda row: row['nCustomers']*(1-stdDev*2),axis=1)
pastNcus_Controls5.iloc[0:13,0] = 'Control 5th % confidence interval'
pastNcus_Controls5.reset_index()

trialAssessment = pd.concat([measureOverTimeCusts,pastNcus_Controls5,pastNcus_Controls95])
trialAssessment = trialAssessment.sort_values(by=['year_month'])
trialAssessment = trialAssessment.reset_index()
trialAssessment
```

#### Visualization Trial

```
px.line(data_frame=trialAssessment, x='year_month', y='nCustomers', color='Store_type', title='Total nCustomers by month',labels={'year_month':'Month of operation','nCustomers':'Total nCustomers'})
```

# Trial store 86

## Select control store

#### corr_nSales

```
measureOverTime
```
github_jupyter
# 501R Lab1 ## Part 1 ## Program to generate random image ``` import cairo import numpy as np # Set the random seed seed = None # Populate for using specific value for consistency random = np.random.RandomState(seed) # Function to draw random integers in a range def randinteger(n, m=1): if m == 1: return random.randint(1, n, m)[0] else: return random.randint(1, n, m) # Generate random colors def randcolor(): r = random.rand() g = random.rand() b = random.rand() a = random.rand() cr.set_source_rgba(r, g, b, a) # Get random line width def linewidth(): cr.set_line_width(randinteger(30)) # Draw random curve def curve(): x, x1, x2, x3 = randinteger(512, 4) y, y1, y2, y3 = randinteger(288, 4) randcolor() linewidth() cr.move_to(x, y) cr.curve_to(x1, y1, x2, y2, x3, y3) cr.set_line_join(cairo.LINE_JOIN_ROUND) cr.stroke() # Draw random line def line(): randcolor() linewidth() x,x1 = randinteger(512, 2) y,y1 = randinteger(288, 2) cr.move_to(x, y) cr.line_to(x1, y1) cr.stroke() # Draw random arc def arc(): randcolor() linewidth() c1 = randinteger(512) c2 = randinteger(288) r = randinteger(30) a1 = np.pi * random.randint(0, 3) * random.rand() a2 = np.pi * random.randint(0, 3) * random.rand() cr.arc(c1, c2, r, a1, a2) cr.fill() cr.stroke() # Draw border def border(): # Setting line width and color randcolor() cr.set_line_width(5.0) cr.rectangle(0, 0, 512, 288) cr.set_line_join(cairo.LINE_JOIN_ROUND) cr.set_source_rgba(0.0, 0.0, 0.0, 0.3) cr.fill() # Filling all the commands cr.stroke() # Draw random rectangle def rectangle(): randcolor() linewidth() x,x1 = randinteger(512, 2) y,y1 = randinteger(288, 2) cr.rectangle(x,y,x1,y1) cr.set_line_join(cairo.LINE_JOIN_ROUND) cr.fill() cr.stroke() # Drawing the objects def draw(): border() for i in range(60): curve() line() arc() #rectangle() def nbimage(data): from IPython.display import display from PIL.Image import fromarray # Creating image data from numpy array image = fromarray(data) # Saving the image to the disk 
image.save('shape.png') # Displaying the image in Notebook display(image) WIDTH = 512 HEIGHT = 288 data = np.zeros((HEIGHT, WIDTH, 4), dtype=np.uint8) # Setting up cairo ims = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32, WIDTH, HEIGHT) cr = cairo.Context(ims) draw() nbimage(data) ``` ## Part 2 ### Tensorplaygound with Spiral Dataset #### Experiment 1 ![Spiral Dataset Experiment 1 ](https://s33.postimg.cc/xufji3cbj/tensor_board.png "Experiment 1") #### Experiment 2 ![Expeirment 2](https://s33.postimg.cc/gtwn9f6zz/tensor_board1.png "Experiment 2") #### Experiment 3 ![Expeirment 3](https://s33.postimg.cc/k0r6t1z5p/tensorboard_3.png "Experiment 3")
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Migrate your TensorFlow 1 code to TensorFlow 2 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/migrate"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/migrate.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/migrate.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This doc for users of low level TensorFlow APIs. If you are using the high level APIs (`tf.keras`) there may be little or no action you need to take to make your code fully TensorFlow 2.0 compatible: * Check your [optimizer's default learning rate](#keras_optimizer_lr). * Note that the "name" that metrics are logged to [may have changed](#keras_metric_names). 
It is still possible to run 1.X code, unmodified ([except for contrib](https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md)), in TensorFlow 2.0: ``` import tensorflow.compat.v1 as tf tf.disable_v2_behavior() ``` However, this does not let you take advantage of many of the improvements made in TensorFlow 2.0. This guide will help you upgrade your code, making it simpler, more performant, and easier to maintain. ## Automatic conversion script The first step, before attempting to implement the changes described in this doc, is to try running the [upgrade script](./upgrade.md). This will do an initial pass at upgrading your code to TensorFlow 2.0. But it can't make your code idiomatic to 2.0. Your code may still make use of `tf.compat.v1` endpoints to access placeholders, sessions, collections, and other 1.x-style functionality. ## Top-level behavioral changes If your code works in TensorFlow 2.0 using `tf.compat.v1.disable_v2_behavior()`, there are still global behavioral changes you may need to address. The major changes are: * *Eager execution, `v1.enable_eager_execution()`* : Any code that implicitly uses a `tf.Graph` will fail. Be sure to wrap this code in a `with tf.Graph().as_default()` context. * *Resource variables, `v1.enable_resource_variables()`*: Some code may depends on non-deterministic behaviors enabled by TF reference variables. Resource variables are locked while being written to, and so provide more intuitive consistency guarantees. * This may change behavior in edge cases. * This may create extra copies and can have higher memory usage. * This can be disabled by passing `use_resource=False` to the `tf.Variable` constructor. * *Tensor shapes, `v1.enable_v2_tensorshape()`*: TF 2.0 simplifies the behavior of tensor shapes. Instead of `t.shape[0].value` you can say `t.shape[0]`. These changes should be small, and it makes sense to fix them right away. See [TensorShape](#tensorshape) for examples. 
* *Control flow, `v1.enable_control_flow_v2()`*: The TF 2.0 control flow implementation has been simplified, and so produces different graph representations. Please [file bugs](https://github.com/tensorflow/tensorflow/issues) for any issues. ## Make the code 2.0-native This guide will walk through several examples of converting TensorFlow 1.x code to TensorFlow 2.0. These changes will let your code take advantage of performance optimizations and simplified API calls. In each case, the pattern is: ### 1. Replace `v1.Session.run` calls Every `v1.Session.run` call should be replaced by a Python function. * The `feed_dict` and `v1.placeholder`s become function arguments. * The `fetches` become the function's return value. * During conversion eager execution allows easy debugging with standard Python tools like `pdb`. After that add a `tf.function` decorator to make it run efficiently in graph. See the [Autograph Guide](function.ipynb) for more on how this works. Note that: * Unlike `v1.Session.run` a `tf.function` has a fixed return signature, and always returns all outputs. If this causes performance problems, create two separate functions. * There is no need for a `tf.control_dependencies` or similar operations: A `tf.function` behaves as if it were run in the order written. `tf.Variable` assignments and `tf.assert`s, for example, are executed automatically. ### 2. Use Python objects to track variables and losses All name-based variable tracking is strongly discouraged in TF 2.0. Use Python objects to to track variables. Use `tf.Variable` instead of `v1.get_variable`. Every `v1.variable_scope` should be converted to a Python object. Typically this will be one of: * `tf.keras.layers.Layer` * `tf.keras.Model` * `tf.Module` If you need to aggregate lists of variables (like `tf.Graph.get_collection(tf.GraphKeys.VARIABLES)`), use the `.variables` and `.trainable_variables` attributes of the `Layer` and `Model` objects. 
These `Layer` and `Model` classes implement several other properties that remove the need for global collections. Their `.losses` property can be a replacement for using the `tf.GraphKeys.LOSSES` collection. See the [keras guides](keras.ipynb) for details. Warning: Many `tf.compat.v1` symbols use the global collections implicitly. ### 3. Upgrade your training loops Use the highest level API that works for your use case. Prefer `tf.keras.Model.fit` over building your own training loops. These high level functions manage a lot of the low-level details that might be easy to miss if you write your own training loop. For example, they automatically collect the regularization losses, and set the `training=True` argument when calling the model. ### 4. Upgrade your data input pipelines Use `tf.data` datasets for data input. These objects are efficient, expressive, and integrate well with tensorflow. They can be passed directly to the `tf.keras.Model.fit` method. ``` model.fit(dataset, epochs=5) ``` They can be iterated over directly standard Python: ``` for example_batch, label_batch in dataset: break ``` #### 5. Migrate off `compat.v1` symbols The `tf.compat.v1` module contains the complete TensorFlow 1.x API, with its original semantics. The [TF2 upgrade script](upgrade.ipynb) will convert symbols to their 2.0 equivalents if such a conversion is safe, i.e., if it can determine that the behavior of the 2.0 version is exactly equivalent (for instance, it will rename `v1.arg_max` to `tf.argmax`, since those are the same function). After the upgrade script is done with a piece of code, it is likely there are many mentions of `compat.v1`. It is worth going through the code and converting these manually to the 2.0 equivalent (it should be mentioned in the log if there is one). 
## Converting models ### Setup ``` import tensorflow as tf import tensorflow_datasets as tfds ``` ### Low-level variables & operator execution Examples of low-level API use include: * using variable scopes to control reuse * creating variables with `v1.get_variable`. * accessing collections explicitly * accessing collections implicitly with methods like : * `v1.global_variables` * `v1.losses.get_regularization_loss` * using `v1.placeholder` to set up graph inputs * executing graphs with `Session.run` * initializing variables manually #### Before converting Here is what these patterns may look like in code using TensorFlow 1.x. ```python in_a = tf.placeholder(dtype=tf.float32, shape=(2)) in_b = tf.placeholder(dtype=tf.float32, shape=(2)) def forward(x): with tf.variable_scope("matmul", reuse=tf.AUTO_REUSE): W = tf.get_variable("W", initializer=tf.ones(shape=(2,2)), regularizer=tf.contrib.layers.l2_regularizer(0.04)) b = tf.get_variable("b", initializer=tf.zeros(shape=(2))) return W * x + b out_a = forward(in_a) out_b = forward(in_b) reg_loss=tf.losses.get_regularization_loss(scope="matmul") with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a, out_b, reg_loss], feed_dict={in_a: [1, 0], in_b: [0, 1]}) ``` #### After converting In the converted code: * The variables are local Python objects. * The `forward` function still defines the calculation. * The `Session.run` call is replaced with a call to `forward` * The optional `tf.function` decorator can be added for performance. * The regularizations are calculated manually, without referring to any global collection. 
* **No sessions or placeholders.** ``` W = tf.Variable(tf.ones(shape=(2,2)), name="W") b = tf.Variable(tf.zeros(shape=(2)), name="b") @tf.function def forward(x): return W * x + b out_a = forward([1,0]) print(out_a) out_b = forward([0,1]) regularizer = tf.keras.regularizers.l2(0.04) reg_loss=regularizer(W) ``` ### Models based on `tf.layers` The `v1.layers` module is used to contain layer-functions that relied on `v1.variable_scope` to define and reuse variables. #### Before converting ```python def model(x, training, scope='model'): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): x = tf.layers.conv2d(x, 32, 3, activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.04)) x = tf.layers.max_pooling2d(x, (2, 2), 1) x = tf.layers.flatten(x) x = tf.layers.dropout(x, 0.1, training=training) x = tf.layers.dense(x, 64, activation=tf.nn.relu) x = tf.layers.batch_normalization(x, training=training) x = tf.layers.dense(x, 10) return x train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` #### After converting * The simple stack of layers fits neatly into `tf.keras.Sequential`. (For more complex models see [custom layers and models](keras/custom_layers_and_models.ipynb), and [the functional API](keras/functional.ipynb).) * The model tracks the variables, and regularization losses. * The conversion was one-to-one because there is a direct mapping from `v1.layers` to `tf.keras.layers`. Most arguments stayed the same. But notice the differences: * The `training` argument is passed to each layer by the model when it runs. * The first argument to the original `model` function (the input `x`) is gone. This is because object layers separate building the model from calling the model. Also note that: * If you were using regularizers of initializers from `tf.contrib`, these have more argument changes than others. 
* The code no longer writes to collections, so functions like `v1.losses.get_regularization_loss` will no longer return these values, potentially breaking your training loops. ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.04), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) train_data = tf.ones(shape=(1, 28, 28, 1)) test_data = tf.ones(shape=(1, 28, 28, 1)) train_out = model(train_data, training=True) print(train_out) test_out = model(test_data, training=False) print(test_out) # Here are all the trainable variables. len(model.trainable_variables) # Here is the regularization loss. model.losses ``` ### Mixed variables & `v1.layers` Existing code often mixes lower-level TF 1.x variables and operations with higher-level `v1.layers`. #### Before converting ```python def model(x, training, scope='model'): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): W = tf.get_variable( "W", dtype=tf.float32, initializer=tf.ones(shape=x.shape), regularizer=tf.contrib.layers.l2_regularizer(0.04), trainable=True) if training: x = x + W else: x = x + W * 0.5 x = tf.layers.conv2d(x, 32, 3, activation=tf.nn.relu) x = tf.layers.max_pooling2d(x, (2, 2), 1) x = tf.layers.flatten(x) return x train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` #### After converting To convert this code, follow the pattern of mapping layers to layers as in the previous example. A `v1.variable_scope` is effectively a layer of its own. So rewrite it as a `tf.keras.layers.Layer`. See [the guide](keras/custom_layers_and_models.ipynb) for details. The general pattern is: * Collect layer parameters in `__init__`. * Build the variables in `build`. * Execute the calculations in `call`, and return the result. 
The `v1.variable_scope` is essentially a layer of its own. So rewrite it as a `tf.keras.layers.Layer`. See [the guide](keras/custom_layers_and_models.ipynb) for details. ``` # Create a custom layer for part of the model class CustomLayer(tf.keras.layers.Layer): def __init__(self, *args, **kwargs): super(CustomLayer, self).__init__(*args, **kwargs) def build(self, input_shape): self.w = self.add_weight( shape=input_shape[1:], dtype=tf.float32, initializer=tf.keras.initializers.ones(), regularizer=tf.keras.regularizers.l2(0.02), trainable=True) # Call method will sometimes get used in graph mode, # training will get turned into a tensor @tf.function def call(self, inputs, training=None): if training: return inputs + self.w else: return inputs + self.w * 0.5 custom_layer = CustomLayer() print(custom_layer([1]).numpy()) print(custom_layer([1], training=True).numpy()) train_data = tf.ones(shape=(1, 28, 28, 1)) test_data = tf.ones(shape=(1, 28, 28, 1)) # Build the model including the custom layer model = tf.keras.Sequential([ CustomLayer(input_shape=(28, 28, 1)), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), ]) train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` Some things to note: * Subclassed Keras models & layers need to run in both v1 graphs (no automatic control dependencies) and in eager mode * Wrap the `call()` in a `tf.function()` to get autograph and automatic control dependencies * Don't forget to accept a `training` argument to `call`. * Sometimes it is a `tf.Tensor` * Sometimes it is a Python boolean. * Create model variables in constructor or `Model.build` using `self.add_weight()`. * In `Model.build` you have access to the input shape, so can create weights with matching shape. * Using `tf.keras.layers.Layer.add_weight` allows Keras to track variables and regularization losses. * Don't keep `tf.Tensors` in your objects. 
* They might get created either in a `tf.function` or in the eager context, and these tensors behave differently. * Use `tf.Variable`s for state, they are always usable from both contexts * `tf.Tensors` are only for intermediate values. ### A note on Slim & contrib.layers A large amount of older TensorFlow 1.x code uses the [Slim](https://ai.googleblog.com/2016/08/tf-slim-high-level-library-to-define.html) library, which was packaged with TensorFlow 1.x as `tf.contrib.layers`. As a `contrib` module, this is no longer available in TensorFlow 2.0, even in `tf.compat.v1`. Converting code using Slim to TF 2.0 is more involved than converting repositories that use `v1.layers`. In fact, it may make sense to convert your Slim code to `v1.layers` first, then convert to Keras. * Remove `arg_scopes`, all args need to be explicit * If you use them, split `normalizer_fn` and `activation_fn` into their own layers * Separable conv layers map to one or more different Keras layers (depthwise, pointwise, and separable Keras layers) * Slim and `v1.layers` have different arg names & default values * Some args have different scales * If you use Slim pre-trained models, try out Keras's pre-traimed models from `tf.keras.applications` or [TF Hub](https://tfhub.dev/s?q=slim%20tf2)'s TF2 SavedModels exported from the original Slim code. Some `tf.contrib` layers might not have been moved to core TensorFlow but have instead been moved to the [TF add-ons package](https://github.com/tensorflow/addons). ## Training There are many ways to feed data to a `tf.keras` model. They will accept Python generators and Numpy arrays as input. The recommended way to feed data to a model is to use the `tf.data` package, which contains a collection of high performance classes for manipulating data. If you are still using `tf.queue`, these are now only supported as data-structures, not as input pipelines. 
### Using Datasets The [TensorFlow Datasets](https://tensorflow.org/datasets) package (`tfds`) contains utilities for loading predefined datasets as `tf.data.Dataset` objects. For this example, load the MNISTdataset, using `tfds`: ``` datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] ``` Then prepare the data for training: * Re-scale each image. * Shuffle the order of the examples. * Collect batches of images and labels. ``` BUFFER_SIZE = 10 # Use a much larger value for real code. BATCH_SIZE = 64 NUM_EPOCHS = 5 def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label ``` To keep the example short, trim the dataset to only return 5 batches: ``` train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) test_data = mnist_test.map(scale).batch(BATCH_SIZE) STEPS_PER_EPOCH = 5 train_data = train_data.take(STEPS_PER_EPOCH) test_data = test_data.take(STEPS_PER_EPOCH) image_batch, label_batch = next(iter(train_data)) ``` ### Use Keras training loops If you don't need low level control of your training process, using Keras's built-in `fit`, `evaluate`, and `predict` methods is recommended. These methods provide a uniform interface to train the model regardless of the implementation (sequential, functional, or sub-classed). The advantages of these methods include: * They accept Numpy arrays, Python generators and, `tf.data.Datasets` * They apply regularization, and activation losses automatically. * They support `tf.distribute` [for multi-device training](distributed_training.ipynb). * They support arbitrary callables as losses and metrics. * They support callbacks like `tf.keras.callbacks.TensorBoard`, and custom callbacks. * They are performant, automatically using TensorFlow graphs. Here is an example of training a model using a `Dataset`. (For details on how this works see [tutorials](../tutorials).) 
``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) # Model is the full model w/o custom layers model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit(train_data, epochs=NUM_EPOCHS) loss, acc = model.evaluate(test_data) print("Loss {}, Accuracy {}".format(loss, acc)) ``` ### Write your own loop If the Keras model's training step works for you, but you need more control outside that step, consider using the `tf.keras.Model.train_on_batch` method, in your own data-iteration loop. Remember: Many things can be implemented as a `tf.keras.callbacks.Callback`. This method has many of the advantages of the methods mentioned in the previous section, but gives the user control of the outer loop. You can also use `tf.keras.Model.test_on_batch` or `tf.keras.Model.evaluate` to check performance during training. Note: `train_on_batch` and `test_on_batch`, by default return the loss and metrics for the single batch. If you pass `reset_metrics=False` they return accumulated metrics and you must remember to appropriately reset the metric accumulators. Also remember that some metrics like `AUC` require `reset_metrics=False` to be calculated correctly. 
To continue training the above model: ``` # Model is the full model w/o custom layers model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) for epoch in range(NUM_EPOCHS): #Reset the metric accumulators model.reset_metrics() for image_batch, label_batch in train_data: result = model.train_on_batch(image_batch, label_batch) metrics_names = model.metrics_names print("train: ", "{}: {:.3f}".format(metrics_names[0], result[0]), "{}: {:.3f}".format(metrics_names[1], result[1])) for image_batch, label_batch in test_data: result = model.test_on_batch(image_batch, label_batch, # return accumulated metrics reset_metrics=False) metrics_names = model.metrics_names print("\neval: ", "{}: {:.3f}".format(metrics_names[0], result[0]), "{}: {:.3f}".format(metrics_names[1], result[1])) ``` <a name="custom_loop"></a> ### Customize the training step If you need more flexibility and control, you can have it by implementing your own training loop. There are three steps: 1. Iterate over a Python generator or `tf.data.Dataset` to get batches of examples. 2. Use `tf.GradientTape` to collect gradients. 3. Use one of the `tf.keras.optimizers` to apply weight updates to the model's variables. Remember: * Always include a `training` argument on the `call` method of subclassed layers and models. * Make sure to call the model with the `training` argument set correctly. * Depending on usage, model variables may not exist until the model is run on a batch of data. * You need to manually handle things like regularization losses for the model. Note the simplifications relative to v1: * There is no need to run variable initializers. Variables are initialized on creation. * There is no need to add manual control dependencies. Even in `tf.function` operations act as in eager mode. 
``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) optimizer = tf.keras.optimizers.Adam(0.001) loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) @tf.function def train_step(inputs, labels): with tf.GradientTape() as tape: predictions = model(inputs, training=True) regularization_loss=tf.math.add_n(model.losses) pred_loss=loss_fn(labels, predictions) total_loss=pred_loss + regularization_loss gradients = tape.gradient(total_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) for epoch in range(NUM_EPOCHS): for inputs, labels in train_data: train_step(inputs, labels) print("Finished epoch", epoch) ``` ### New-style metrics and losses In TensorFlow 2.0, metrics and losses are objects. These work both eagerly and in `tf.function`s. A loss object is callable, and expects the (y_true, y_pred) as arguments: ``` cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True) cce([[1, 0]], [[-1.0,3.0]]).numpy() ``` A metric object has the following methods: * `Metric.update_state()` — add new observations * `Metric.result()` —get the current result of the metric, given the observed values * `Metric.reset_states()` — clear all observations. The object itself is callable. Calling updates the state with new observations, as with `update_state`, and returns the new result of the metric. You don't have to manually initialize a metric's variables, and because TensorFlow 2.0 has automatic control dependencies, you don't need to worry about those either. The code below uses a metric to keep track of the mean loss observed within a custom training loop. 
``` # Create the metrics loss_metric = tf.keras.metrics.Mean(name='train_loss') accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy') @tf.function def train_step(inputs, labels): with tf.GradientTape() as tape: predictions = model(inputs, training=True) regularization_loss=tf.math.add_n(model.losses) pred_loss=loss_fn(labels, predictions) total_loss=pred_loss + regularization_loss gradients = tape.gradient(total_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) # Update the metrics loss_metric.update_state(total_loss) accuracy_metric.update_state(labels, predictions) for epoch in range(NUM_EPOCHS): # Reset the metrics loss_metric.reset_states() accuracy_metric.reset_states() for inputs, labels in train_data: train_step(inputs, labels) # Get the metric results mean_loss=loss_metric.result() mean_accuracy = accuracy_metric.result() print('Epoch: ', epoch) print(' loss: {:.3f}'.format(mean_loss)) print(' accuracy: {:.3f}'.format(mean_accuracy)) ``` <a id="keras_metric_names"></a> ### Keras metric names In TensorFlow 2.0 keras models are more consistent about handling metric names. Now when you pass a string in the list of metrics, that _exact_ string is used as the metric's `name`. These names are visible in the history object returned by `model.fit`, and in the logs passed to `keras.callbacks`. is set to the string you passed in the metric list. 
``` model.compile( optimizer = tf.keras.optimizers.Adam(0.001), loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")]) history = model.fit(train_data) history.history.keys() ``` This differs from previous versions where passing `metrics=["accuracy"]` would result in `dict_keys(['loss', 'acc'])` ### Keras optimizers The optimizers in `v1.train`, like `v1.train.AdamOptimizer` and `v1.train.GradientDescentOptimizer`, have equivalents in `tf.keras.optimizers`. #### Convert `v1.train` to `keras.optimizers` Here are things to keep in mind when converting your optimizers: * Upgrading your optimizers [may make old checkpoints incompatible](#checkpoints). * All epsilons now default to `1e-7` instead of `1e-8` (which is negligible in most use cases). * `v1.train.GradientDescentOptimizer` can be directly replaced by `tf.keras.optimizers.SGD`. * `v1.train.MomentumOptimizer` can be directly replaced by the `SGD` optimizer using the momentum argument: `tf.keras.optimizers.SGD(..., momentum=...)`. * `v1.train.AdamOptimizer` can be converted to use `tf.keras.optimizers.Adam`. The `beta1` and `beta2` arguments have been renamed to `beta_1` and `beta_2`. * `v1.train.RMSPropOptimizer` can be converted to `tf.keras.optimizers.RMSprop`. The `decay` argument has been renamed to `rho`. * `v1.train.AdadeltaOptimizer` can be converted directly to `tf.keras.optimizers.Adadelta`. * `tf.train.AdagradOptimizer` can be converted directly to `tf.keras.optimizers.Adagrad`. * `tf.train.FtrlOptimizer` can be converted directly to `tf.keras.optimizers.Ftrl`. The `accum_name` and `linear_name` arguments have been removed. * The `tf.contrib.AdamaxOptimizer` and `tf.contrib.NadamOptimizer`, can be converted directly to `tf.keras.optimizers.Adamax` and `tf.keras.optimizers.Nadam`. The `beta1`, and `beta2` arguments have been renamed to `beta_1` and `beta_2`. 
#### New defaults for some `tf.keras.optimizers` <a id="keras_optimizer_lr"></a> Warning: If you see a change in convergence behavior for your models, check the default learning rates. There are no changes for `optimizers.SGD`, `optimizers.Adam`, or `optimizers.RMSprop`. The following default learning rates have changed: * `optimizers.Adagrad` from 0.01 to 0.001 * `optimizers.Adadelta` from 1.0 to 0.001 * `optimizers.Adamax` from 0.002 to 0.001 * `optimizers.Nadam` from 0.002 to 0.001 ### TensorBoard TensorFlow 2 includes significant changes to the `tf.summary` API used to write summary data for visualization in TensorBoard. For a general introduction to the new `tf.summary`, there are [several tutorials available](https://www.tensorflow.org/tensorboard/get_started) that use the TF 2 API. This includes a [TensorBoard TF 2 Migration Guide](https://www.tensorflow.org/tensorboard/migrate) ## Saving & Loading <a id="checkpoints"></a> ### Checkpoint compatibility TensorFlow 2.0 uses [object-based checkpoints](checkpoint.ipynb). Old-style name-based checkpoints can still be loaded, if you're careful. The code conversion process may result in variable name changes, but there are workarounds. The simplest approach it to line up the names of the new model with the names in the checkpoint: * Variables still all have a `name` argument you can set. * Keras models also take a `name` argument as which they set as the prefix for their variables. * The `v1.name_scope` function can be used to set variable name prefixes. This is very different from `tf.variable_scope`. It only affects names, and doesn't track variables & reuse. If that does not work for your use-case, try the `v1.train.init_from_checkpoint` function. It takes an `assignment_map` argument, which specifies the mapping from old names to new names. 
Note: Unlike object based checkpoints, which can [defer loading](checkpoint.ipynb#loading_mechanics), name-based checkpoints require that all variables be built when the function is called. Some models defer building variables until you call `build` or run the model on a batch of data. The [TensorFlow Estimator repository](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py) includes a [conversion tool](#checkpoint_converter) to upgrade the checkpoints for premade estimators from TensorFlow 1.X to 2.0. It may serve as an example of how to build a tool fr a similar use-case. ### Saved models compatibility There are no significant compatibility concerns for saved models. * TensorFlow 1.x saved_models work in TensorFlow 2.x. * TensorFlow 2.x saved_models work in TensorFlow 1.x—if all the ops are supported. ### A Graph.pb or Graph.pbtxt There is no straightforward way to upgrade a raw `Graph.pb` file to TensorFlow 2.0. Your best bet is to upgrade the code that generated the file. 
But, if you have a "Frozen graph" (a `tf.Graph` where the variables have been turned into constants), then it is possible to convert this to a [`concrete_function`](https://tensorflow.org/guide/concrete_function) using `v1.wrap_function`: ``` def wrap_frozen_graph(graph_def, inputs, outputs): def _imports_graph_def(): tf.compat.v1.import_graph_def(graph_def, name="") wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, []) import_graph = wrapped_import.graph return wrapped_import.prune( tf.nest.map_structure(import_graph.as_graph_element, inputs), tf.nest.map_structure(import_graph.as_graph_element, outputs)) ``` For example, here is a frozed graph for Inception v1, from 2016: ``` path = tf.keras.utils.get_file( 'inception_v1_2016_08_28_frozen.pb', 'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz', untar=True) ``` Load the `tf.GraphDef`: ``` graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(open(path,'rb').read()) ``` Wrap it into a `concrete_function`: ``` inception_func = wrap_frozen_graph( graph_def, inputs='input:0', outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0') ``` Pass it a tensor as input: ``` input_img = tf.ones([1,224,224,3], dtype=tf.float32) inception_func(input_img).shape ``` ## Estimators ### Training with Estimators Estimators are supported in TensorFlow 2.0. When you use estimators, you can use `input_fn()`, `tf.estimator.TrainSpec`, and `tf.estimator.EvalSpec` from TensorFlow 1.x. Here is an example using `input_fn` with train and evaluate specs. 
#### Creating the input_fn and train/eval specs ``` # Define the estimator's input_fn def input_fn(): datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] BUFFER_SIZE = 10000 BATCH_SIZE = 64 def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label[..., tf.newaxis] train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) return train_data.repeat() # Define train & eval specs train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=STEPS_PER_EPOCH * NUM_EPOCHS) eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=STEPS_PER_EPOCH) ``` ### Using a Keras model definition There are some differences in how to construct your estimators in TensorFlow 2.0. We recommend that you define your model using Keras, then use the `tf.keras.estimator.model_to_estimator` utility to turn your model into an estimator. The code below shows how to use this utility when creating and training an estimator. ``` def make_model(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) model = make_model() model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) estimator = tf.keras.estimator.model_to_estimator( keras_model = model ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` Note: We do not support creating weighted metrics in Keras and converting them to weighted metrics in the Estimator API using `model_to_estimator` You will have to create these metrics directly on the estimator spec using the `add_metrics` function. 
### Using a custom `model_fn` If you have an existing custom estimator `model_fn` that you need to maintain, you can convert your `model_fn` to use a Keras model. However, for compatibility reasons, a custom `model_fn` will still run in 1.x-style graph mode. This means there is no eager execution and no automatic control dependencies. <a name="minimal_changes"></a> #### Custom model_fn with minimal changes To make your custom `model_fn` work in TF 2.0, if you prefer minimal changes to the existing code, `tf.compat.v1` symbols such as `optimizers` and `metrics` can be used. Using a Keras models in a custom `model_fn` is similar to using it in a custom training loop: * Set the `training` phase appropriately, based on the `mode` argument. * Explicitly pass the model's `trainable_variables` to the optimizer. But there are important differences, relative to a [custom loop](#custom_loop): * Instead of using `Model.losses`, extract the losses using `Model.get_losses_for`. * Extract the model's updates using `Model.get_updates_for`. Note: "Updates" are changes that need to be applied to a model after each batch. For example, the moving averages of the mean and variance in a `layers.BatchNormalization` layer. The following code creates an estimator from a custom `model_fn`, illustrating all of these concerns. 
``` def my_model_fn(features, labels, mode): model = make_model() optimizer = tf.compat.v1.train.AdamOptimizer() loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) training = (mode == tf.estimator.ModeKeys.TRAIN) predictions = model(features, training=training) if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) reg_losses = model.get_losses_for(None) + model.get_losses_for(features) total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses) accuracy = tf.compat.v1.metrics.accuracy(labels=labels, predictions=tf.math.argmax(predictions, axis=1), name='acc_op') update_ops = model.get_updates_for(None) + model.get_updates_for(features) minimize_op = optimizer.minimize( total_loss, var_list=model.trainable_variables, global_step=tf.compat.v1.train.get_or_create_global_step()) train_op = tf.group(minimize_op, update_ops) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=total_loss, train_op=train_op, eval_metric_ops={'accuracy': accuracy}) # Create the Estimator & Train estimator = tf.estimator.Estimator(model_fn=my_model_fn) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` #### Custom `model_fn` with TF 2.0 symbols If you want to get rid of all TF 1.x symbols and upgrade your custom `model_fn` to native TF 2.0, you need to update the optimizer and metrics to `tf.keras.optimizers` and `tf.keras.metrics`. In the custom `model_fn`, besides the above [changes](#minimal_changes), more upgrades need to be made: * Use [`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers) instead of `v1.train.Optimizer`. * Explicitly pass the model's `trainable_variables` to the `tf.keras.optimizers`. * To compute the `train_op/minimize_op`, * Use `Optimizer.get_updates()` if the loss is scalar loss `Tensor`(not a callable). The first element in the returned list is the desired `train_op/minimize_op`. 
* If the loss is a callable (such as a function), use `Optimizer.minimize()` to get the `train_op/minimize_op`. * Use [`tf.keras.metrics`](https://www.tensorflow.org/api_docs/python/tf/keras/metrics) instead of `tf.compat.v1.metrics` for evaluation. For the above example of `my_model_fn`, the migrated code with 2.0 symbols is shown as: ``` def my_model_fn(features, labels, mode): model = make_model() training = (mode == tf.estimator.ModeKeys.TRAIN) loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) predictions = model(features, training=training) # Get both the unconditional losses (the None part) # and the input-conditional losses (the features part). reg_losses = model.get_losses_for(None) + model.get_losses_for(features) total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses) # Upgrade to tf.keras.metrics. accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj') accuracy = accuracy_obj.update_state( y_true=labels, y_pred=tf.math.argmax(predictions, axis=1)) train_op = None if training: # Upgrade to tf.keras.optimizers. optimizer = tf.keras.optimizers.Adam() # Manually assign tf.compat.v1.global_step variable to optimizer.iterations # to make tf.compat.v1.train.global_step increased correctly. # This assignment is a must for any `tf.train.SessionRunHook` specified in # estimator, as SessionRunHooks rely on global step. optimizer.iterations = tf.compat.v1.train.get_or_create_global_step() # Get both the unconditional updates (the None part) # and the input-conditional updates (the features part). update_ops = model.get_updates_for(None) + model.get_updates_for(features) # Compute the minimize_op. minimize_op = optimizer.get_updates( total_loss, model.trainable_variables)[0] train_op = tf.group(minimize_op, *update_ops) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=total_loss, train_op=train_op, eval_metric_ops={'Accuracy': accuracy_obj}) # Create the Estimator & Train. 
estimator = tf.estimator.Estimator(model_fn=my_model_fn) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` ### Premade Estimators [Premade Estimators](https://www.tensorflow.org/guide/premade_estimators) in the family of `tf.estimator.DNN*`, `tf.estimator.Linear*` and `tf.estimator.DNNLinearCombined*` are still supported in the TensorFlow 2.0 API, however, some arguments have changed: 1. `input_layer_partitioner`: Removed in 2.0. 2. `loss_reduction`: Updated to `tf.keras.losses.Reduction` instead of `tf.compat.v1.losses.Reduction`. Its default value is also changed to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` from `tf.compat.v1.losses.Reduction.SUM`. 3. `optimizer`, `dnn_optimizer` and `linear_optimizer`: this arg has been updated to `tf.keras.optimizers` instead of the `tf.compat.v1.train.Optimizer`. To migrate the above changes: 1. No migration is needed for `input_layer_partitioner` since [`Distribution Strategy`](https://www.tensorflow.org/guide/distributed_training) will handle it automatically in TF 2.0. 2. For `loss_reduction`, check [`tf.keras.losses.Reduction`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/losses/Reduction) for the supported options. 3. For `optimizer` args, if you do not pass in an `optimizer`, `dnn_optimizer` or `linear_optimizer` arg, or if you specify the `optimizer` arg as a `string` in your code, you don't need to change anything. `tf.keras.optimizers` is used by default. Otherwise, you need to update it from `tf.compat.v1.train.Optimizer` to its corresponding [`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers) #### Checkpoint Converter <a id="checkpoint_converter"></a> The migration to `keras.optimizers` will break checkpoints saved using TF 1.x, as `tf.keras.optimizers` generates a different set of variables to be saved in checkpoints. 
To make old checkpoint reusable after your migration to TF 2.0, try the [checkpoint converter tool](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py). ``` ! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py ``` The tool has builtin help: ``` ! python checkpoint_converter.py -h ``` <a id="tensorshape"></a> ## TensorShape This class was simplified to hold `int`s, instead of `tf.compat.v1.Dimension` objects. So there is no need to call `.value()` to get an `int`. Individual `tf.compat.v1.Dimension` objects are still accessible from `tf.TensorShape.dims`. The following demonstrate the differences between TensorFlow 1.x and TensorFlow 2.0. ``` # Create a shape and choose an index i = 0 shape = tf.TensorShape([16, None, 256]) shape ``` If you had this in TF 1.x: ```python value = shape[i].value ``` Then do this in TF 2.0: ``` value = shape[i] value ``` If you had this in TF 1.x: ```python for dim in shape: value = dim.value print(value) ``` Then do this in TF 2.0: ``` for value in shape: print(value) ``` If you had this in TF 1.x (Or used any other dimension method): ```python dim = shape[i] dim.assert_is_compatible_with(other_dim) ``` Then do this in TF 2.0: ``` other_dim = 16 Dimension = tf.compat.v1.Dimension if shape.rank is None: dim = Dimension(None) else: dim = shape.dims[i] dim.is_compatible_with(other_dim) # or any other dimension method shape = tf.TensorShape(None) if shape: dim = shape.dims[i] dim.is_compatible_with(other_dim) # or any other dimension method ``` The boolean value of a `tf.TensorShape` is `True` if the rank is known, `False` otherwise. 
``` print(bool(tf.TensorShape([]))) # Scalar print(bool(tf.TensorShape([0]))) # 0-length vector print(bool(tf.TensorShape([1]))) # 1-length vector print(bool(tf.TensorShape([None]))) # Unknown-length vector print(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor print(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions print() print(bool(tf.TensorShape(None))) # A tensor with unknown rank. ``` ## Other Changes * Remove `tf.colocate_with`: TensorFlow's device placement algorithms have improved significantly. This should no longer be necessary. If removing it causes a performance degredation [please file a bug](https://github.com/tensorflow/tensorflow/issues). * Replace `v1.ConfigProto` usage with the equivalent functions from `tf.config`. ## Conclusions The overall process is: 1. Run the upgrade script. 2. Remove contrib symbols. 3. Switch your models to an object oriented style (Keras). 4. Use `tf.keras` or `tf.estimator` training and evaluation loops where you can. 5. Otherwise, use custom loops, but be sure to avoid sessions & collections. It takes a little work to convert code to idiomatic TensorFlow 2.0, but every change results in: * Fewer lines of code. * Increased clarity and simplicity. * Easier debugging.
github_jupyter
# Introduction In a sport like Football, each player contributes to the team's success. It's important to understand the player's overall performance. In this report we will look into various factors that impact the player's overall performance. ``` import pandas as pd import numpy as np #to replace values in columns import re # To build and evaluate model from sklearn.linear_model import LinearRegression from sklearn.neighbors import KNeighborsRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error,mean_absolute_error,r2_score from sklearn.model_selection import train_test_split # to make plots import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline ``` ## 1. Business Understanding 1. Best players in various aspects? 2. Most preferred foot? 3. Effect of Football Foot on Player's Potential? 4. Does Age have an Impact on Potential? 5. Predicting Overall player's performance ## 2. Data Understanding In this section, we load the data, check the data attributes for analysis ``` # Reading FIFA 2019 complete player dataset data = pd.read_csv('/Users/prof.lock/Desktop/Data Science/data.csv') # Sample Data data.head() data.info() data.describe() ``` ## 3. Data Preparation Clean Converting the data types into suitable types. Since Wage and Value cannot be str type. so we convert them to float. Droping columns We drop all the columns which we do not need for any manupulations i.e from which data we cannot make out any thing. 
``` data[data.columns[data.isna().any()]].isna().sum() # Gets the columns with na values and its count # Majority of columns have 48 na values, check if they have common indices print("The columns with 48 na values have same indices: {}" .format(data[data['Stamina'].isna()].index.tolist()==data[data['GKKicking'].isna()].index.tolist())) data.drop(data[data['Stamina'].isna()].index.tolist(),inplace = True) # remove the indices that have na values # drop the columns which we do not need columns = ['Unnamed: 0','Photo','Flag','Club Logo','Release Clause','Nationality','ID','Club'] try: data.drop(data.columns[18:54],axis=1,inplace=True) data.drop(columns,axis=1,inplace=True) except Exception as e: print(e) replace = lambda x: re.sub("[€MK]","",x) # Wage and values in the columns are string like €10M,€1000K, use re # convert the Wage and Value columns to float data['Wage'] = data['Wage'].apply(replace).astype("float") data['Value'] = data['Value'].apply(replace).astype("float") ``` ### Best Player in Various Aspects? ``` # best players stores the players name, with their score best_players = pd.DataFrame() columns = data.columns.tolist() # Preferred Foot, Name is dropped as iti cannot be considered as best player attribute columns.remove("Preferred Foot") columns.remove("Name") for column in columns: try: best_players= best_players.append(pd.DataFrame({"Name":data.loc[data[column].idxmax()]['Name'],"Score":data[column].max()},index=[column])) except Exception as e: print(e) # Keeping only performnace indicators score of best players and dropping other columns best_players.drop(['Special','Weak Foot','International Reputation','Age','Wage','Value'],axis=0) ``` ### Most Preferred Foot? ``` # plot counts, number of lefty and rigthy sns.countplot(data['Preferred Foot']) plt.title('Most Preferred Foot of the Players') ``` ### Effect of Football Foot on Player's Potential? 
``` # plot to see the efefct on player potential based on lefty or rigthy ax = sns.catplot(x="Preferred Foot",y="Potential",data=data) plt.title("Relation between Preferred Foot and Potential") # potential is hardly matters whetehr a player is lefty or righty ``` ### Does Age have an Impact on Potential? ``` # bar-plot for Age and Potential plt.bar(data['Age'],data['Potential'],color='red') plt.xlabel('Age') plt.ylabel('Potential') plt.title("Age vs Potential") # Potential falls with increase in age ``` ## 4. Modeling Analyse the data, we build Linear regression and sees performing well. Predict the Player's Overall Performance ``` data.drop(['Name'],axis=1,inplace=True) # Name column is not required for Overall prediction # Overall performance correlation with performance indicators sns.heatmap(data.corr().loc[['Overall'],:]).set_title("Overall correlation to Performance indicators") data = pd.get_dummies(data) # Preferred foot is a categorical value with Left/Right # split the target and other columns and split them to train and test sets X = data.drop(['Overall'],axis=1) y = data[['Overall']] X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state = 42) # Linear regression model lm = LinearRegression(normalize=True) lm.fit(X_train,y_train) # Predict the values for test X sample y_pred = lm.predict(X_test) # R squared error score = r2_score(y_test,y_pred) print(score) ``` ## 5. 
Evaluation Linear regression model performs well, we further analyse how RandomForestRegressor, DecisionTreeRegressor, Linear Regression and KNearestNeighbors perform on the data ``` def evaluation(clf,X_train=X_train,y_train=y_train,X_test=X_test,y_test=y_test): """ Parameters: clf (Linear Regression,Random Forest,Decision Tree, KNearest Neighbours) : A machine learning model X_train: train sample y_train: target train sample X_test: test sample y_test: target test sample Fits data into the model and predicts the target value on test sample, Evaluates the model using mean absolute error,mean squared error, r2 score """ print('\nModel : {}'.format(clf)) clf.fit(X_train,y_train.values.ravel()) y_pred =clf.predict(X_test) try: print('MSE : {}'.format(mean_squared_error(y_test,y_pred))) print('MAE : {}'.format(mean_absolute_error(y_test,y_pred))) print('R2 : {}'.format(r2_score(y_test,y_pred))) except Exception as e: print(e) # knn,dt,rf,lr models knn = KNeighborsRegressor(n_neighbors=7) dt = DecisionTreeRegressor(max_depth=7) rf = RandomForestRegressor(max_depth=7) lr = LinearRegression(normalize=True) # Iterating and calling evaluate function on models models = [knn,dt,rf,lr] for i in models: evaluation(i) ``` ## 6. Conclusion 1. We saw various visualizatons, How age has an impact on player's potential?, Which player is best at what aspect? How overall is related to performance indicators? 2. Random Forest peforms better on the data, then decision tree followed by K nearest neighbors and Linear regression
github_jupyter
**Principal Component Analysis (PCA)** is widely used in Machine Learning pipelines as a means to compress data or help visualization. This notebook aims to walk through the basic idea of the PCA and build the algorithm from scratch in Python. Before diving directly into the PCA, let's first talk about several import concepts - the **"eigenvectors & eigenvalues"** and **"Singular Value Decomposition (SVD)"**. An **eigenvector** of a square matrix is a column vector that satisfies: $$Av=\lambda v$$ Where A is a $[n\times n]$ square matrix, v is a $[n\times 1]$ **eigenvector**, and $\lambda$ is a scalar value which is also known as the **eigenvalue**. If A is both a square and symmetric matrix (like a typical variance-covariance matrix), then we can write A as: $$A=U\Sigma U^T$$ Here columns of matrix U are eigenvectors of matrix A; and $\Sigma$ is a diaonal matrix containing the corresponding eigenvalues. This is also a special case of the well-known theorem **"Singular Value Decomposition" (SVD)**, where a rectangular matrix M can be expressed as: $$M=U\Sigma V^T$$ ####With SVD, we can calcuate the eigenvectors and eigenvalues of a square & symmetric matrix. This will be the key to solve the PCA. The goal of the PCA is to find a lower dimension surface to maxmize total variance of the projection, or in other means, to minimize the projection error. The entire algorithm can be summarized as the following: 1) Given a data matrix **$X$** with **$m$** rows (number of records) and **$n$** columns (number of dimensions), we should first substract the column mean for each dimension. 
2) Then we can calculate the variance-covariance matrix using the equation (X here already has zero mean for each column from step 1): $$cov=\frac{1}{m}X^TX$$ 3) We can then use SVD to compute the eigenvectors and corresponding eigenvalues of the above covariance matrix "$cov$": $$cov=U\Sigma U^T$$ 4) If our target dimension is $p$ ($p<n$), then we will select the first $p$ columns of the $U$ matrix and get matrix $U_{reduce}$. 5) To get the compressed data set, we can do the transformation as below: $$X_{reduce}=XU_{reduce}$$ 6) To approximate the original data set given the compressed data, we can use: $$X=X_{reduce}U_{reduce}^T$$ Note this is true because $U_{reduce}^{-1}=U_{reduce}^T$ (in this case, all the eigenvectors are unit vectors). #### In practice, it is also important to choose the proper number of principal components. For data compression, we want to retain as much variation in the original data as possible while reducing the dimension. Luckily, with SVD, we can get an estimate of the retained variation by: $$\%\ of\ variance\ retained = \frac{\sum_{i=1}^{p}S_{ii}}{\sum_{i=1}^{n}S_{ii}}$$ Where $S_{ii}$ is the $i$-th diagonal element of the $\Sigma$ matrix, $p$ is the number of reduced dimensions, and $n$ is the dimension of the original data. #### For data visualization purposes, we usually choose 2 or 3 dimensions to plot the compressed data. #### The following class PCA() implements the idea of principal component analysis. 
``` import numpy as np class PCA(): def __init__(self, num_components): self.num_components = num_components self.U = None self.S = None def fit(self, X): # perform pca m = X.shape[0] X_mean = np.mean(X, axis=0) X -= X_mean cov = X.T.dot(X) * 1.0 / m self.U, self.S, _ = np.linalg.svd(cov) return self def project(self, X): # project data based on reduced dimension U_reduce = self.U[:, :self.num_components] X_reduce = X.dot(U_reduce) return X_reduce def inverse(self, X_reduce): # recover the original data based on the reduced form U_reduce = self.U[:, :self.num_components] X = X_reduce.dot(U_reduce.T) return X def explained_variance(self): # print the ratio of explained variance with the pca explained = np.sum(self.S[:self.num_components]) total = np.sum(self.S) return explained * 1.0 / total ``` ####Now we can use a demo data set to show dimensionality reduction and data visualization. We will use the Iris Data set as always. ``` from sklearn.datasets import load_iris iris = load_iris() X = iris['data'] y = iris['target'] print X.shape ``` We can find that the dimension of the original $X$ matrix is 4. We can then compress it to 2 using PCA technique with the **PCA()** class that we defined above. ``` pca = PCA(num_components=2) pca.fit(X) X_reduce = pca.project(X) print X_reduce.shape ``` Now that the data has been compressed, we can check the ratianed variance. ``` print "{:.2%}".format(pca.explained_variance()) ``` We have 97.76% of variance retained. This is okay for data visulization purposes. But if we used PCA in supervised learning pipelines, we might want to add more dimension to keep more than 99% of the variation from the original data. Finally, with the compressed dimension, we can plot to see the distribution of iris dataset. 
``` %pylab inline pylab.rcParams['figure.figsize'] = (10, 6) from matplotlib import pyplot as plt for c, marker, class_num in zip(['green', 'r', 'cyan'], ['o', '^', 's'], np.unique(y)): plt.scatter(x=X_reduce[:, 0][y == class_num], y=X_reduce[:, 1][y == class_num], c=c, marker=marker, label="Class {}".format(class_num), alpha=0.7, s=30) plt.xlabel("Component 1") plt.ylabel("Component 2") plt.legend() plt.show() ``` From the above example, we can see that PCA can help us visualize data with more than 3 feature dimensions. The general use of PCA is for dimensionality reductions in Machine Learning Pipelines. It can speed up the learning process and save memory when running supervised and unsupervised algorithms on large dataset. However, it also throws away some information when reducing the feature dimension. Thus it is always beneficial to test whether using PCA on top of something else since it's pretty easy to set up.
github_jupyter